aboutsummaryrefslogtreecommitdiff
path: root/src/Panda/Panda.C
blob: 6b933e6f36f55fd09e55a32df3e07e6c79847012 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
#include "definitions.h"
#include "Panda.h"


/* Currently the sequential case is unsupported */

/* Shared MPIFS instance; defined in another translation unit. */
extern MPIFS *MPIFS_global_obj;
/* File-system type selected by the most recent MPI-based Panda
 * constructor (set to MPI_SYSTEM there); the default constructor
 * leaves it untouched. */
int  global_system_type_;

/* Default constructor: selects the plain UNIX file system and creates
 * no underlying file-system object.
 * NOTE(review): unlike the MPI constructors below, this one does not
 * set global_system_type_ -- confirm that is intentional. */
Panda::Panda()
{
  file_system_ = NULL;
  file_system_type_ = UNIX_SYSTEM;
}

/* Interface for regular Panda (i.e. no part-time I/O nodes): records
 * the MPI system type both locally and globally, then builds the MPIFS
 * backend for this node. */
Panda::Panda(int node_type, int app_num , int relative_rank,int app_size,
		int* world_ranks)
{
  file_system_type_ = MPI_SYSTEM;
  global_system_type_ = MPI_SYSTEM;
  file_system_ = new MPIFS(node_type, app_num, relative_rank, app_size,
			world_ranks);
}

/* Interface for regular Panda (i.e. no part-time I/O nodes), with an
 * additional shared_flag forwarded to the MPIFS backend. */
Panda::Panda(int node_type, int app_num , int relative_rank,int app_size,
		int* world_ranks, Boolean shared_flag)
{
  file_system_type_ = MPI_SYSTEM;
  global_system_type_ = MPI_SYSTEM;
  file_system_ = new MPIFS(node_type, app_num, relative_rank, app_size,
			world_ranks, shared_flag);
}

/* Interface for part-time I/O nodes: the node belongs to both a compute
 * group and an I/O group, each described by its own rank/size/world-rank
 * triple. Both triples are forwarded to the MPIFS backend. */
Panda::Panda(int node_type, int comp_rank, int comp_size, int *comp_world_ranks,
	int io_rank, int io_size, int *io_world_ranks)
{
  file_system_type_ = MPI_SYSTEM;
  global_system_type_ = MPI_SYSTEM;
  file_system_ = new MPIFS(node_type, comp_rank, comp_size, comp_world_ranks,
		io_rank, io_size, io_world_ranks);
}

/* This is the simplest interface for initializing the I/O and compute nodes.
 * It can be used only for regular and shared I/O (i.e. it cannot be used for
 * part-time I/O nodes). The constructor assumes that the MPIRUN library has
 * been installed and you have distinct applications at the mpirun level.
 */

/*
Panda::Panda(int node_type)
{
  int app_size, app_rank, *world_ranks, leader;

  file_system_type_ = MPI_SYSTEM;
  if ((node_type == COMPUTE_NODE) || (node_type == IO_NODE)){
    MPI_Comm_size(MPIRUN_APP_COMM, &app_size);
    MPI_Comm_rank(MPIRUN_APP_COMM, &app_rank);
    leader = MPIRUN_APP_LEADERS[MPIRUN_APP_ID];
    world_ranks = (int *) malloc(sizeof(int)*app_size);
    for(int i=0; i<app_size;i++)
      world_ranks[i] = leader + i;
    file_system_ = new MPIFS(node_type, MPIRUN_APP_ID, app_rank, app_size,
			     world_ranks);
    free(world_ranks);
    world_ranks = NULL;
  } else {
    printf("Error: Invalid constructor for this node_type %d\n", node_type);
    exit(1);
  }
}
*/

/* This is the simplest interface for the part-time i/o nodes. Here the number
 * of i/o nodes is specified. The first <num_io_nodes> are designated as part
 * time i/o nodes and the remaining as part-time compute. This requires mpirun
 * library to be initialized and there should be only one mpirun application.
 */
/*
Panda::Panda(int node_type, int num_io_nodes)
{
  int app_rank, app_size, *io_ranks, *world_ranks;

  file_system_type_ = MPI_SYSTEM;
  if ((node_type == COMPUTE_NODE) || (node_type == IO_NODE)){
    printf("Error: Invalid constructor\n");
    exit(1);
  } else if (MPIRUN_NUM_APPS == 1){
    MPI_Comm_size(MPIRUN_APP_COMM, &app_size);
    MPI_Comm_rank(MPIRUN_APP_COMM, &app_rank);
    world_ranks = (int *)malloc(sizeof(int)*app_size);
    io_ranks = (int *) malloc(sizeof(int)*num_io_nodes);
    for(int i=0;i<app_size; i++) world_ranks[i] = i;
    for(i=0;i<num_io_nodes;i++) io_ranks[i] =i;
    if (app_rank < num_io_nodes){
      file_system_ = new MPIFS(PART_TIME_IO, app_rank, app_size, world_ranks,
			       app_rank, num_io_nodes, io_ranks);
    } else {
      file_system_ = new MPIFS(PART_TIME_COMPUTE, app_rank, app_size, world_ranks,
			       -1, num_io_nodes, io_ranks);
    }
  } else {
    printf("Error: Part-time I/O nodes - More than one mpirun app running\n");
    exit(1);
  }
}
*/
  
/* Destructor: releases the file-system backend, if any. */
Panda::~Panda()
{
  /* delete on a null pointer is a well-defined no-op, so no guard
   * is needed here. */
  delete file_system_;
  file_system_ = NULL;
}

/* Issue a GLOBAL_BARRIER command to the MPI file system. No-op for any
 * other file-system type. */
void Panda::global_barrier()
{
  if (file_system_type_ != MPI_SYSTEM)
    return;
  ((MPIFS *) file_system_)->user_commands(GLOBAL_BARRIER);
}

/* Issue an APP_BARRIER command to the MPI file system. No-op for any
 * other file-system type. */
void Panda::app_barrier()
{
  if (file_system_type_ != MPI_SYSTEM)
    return;
  ((MPIFS *) file_system_)->user_commands(APP_BARRIER);
}

/* Issue a FLUSHFILES command to the MPI file system. No-op for any
 * other file-system type. */
void Panda::flushfiles()
{
  if (file_system_type_ != MPI_SYSTEM)
    return;
  ((MPIFS *) file_system_)->user_commands(FLUSHFILES);
}

/* Issue a CLEANFILES command to the MPI file system. No-op for any
 * other file-system type. */
void Panda::cleanfiles()
{
  if (file_system_type_ != MPI_SYSTEM)
    return;
  ((MPIFS *) file_system_)->user_commands(CLEANFILES);
}

/* Issue a CREATEFILES command to the MPI file system. No-op for any
 * other file-system type. */
void Panda::createfiles()
{
  if (file_system_type_ != MPI_SYSTEM)
    return;
  ((MPIFS *) file_system_)->user_commands(CREATEFILES);
}