Diffstat (limited to 'src/MPIutils.hh')
-rw-r--r--  src/MPIutils.hh  389
1 files changed, 389 insertions, 0 deletions
diff --git a/src/MPIutils.hh b/src/MPIutils.hh
new file mode 100644
index 0000000..e4da9d4
--- /dev/null
+++ b/src/MPIutils.hh
@@ -0,0 +1,389 @@
+#ifndef MPIUTILS_HH_
+#define MPIUTILS_HH_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+struct HpfBlockType { // or just HPFblock
+ enum Type {Block,None};
+};
+
+/*
+struct MPItype {
+ enum Type {
+};*/
+
+class MPIbuffer {
+ char *mybuf;
+ int bufsize;
+ void attach(int size);
+ void detach();
+public:
+ MPIbuffer(int size);
+ ~MPIbuffer();
+ void resize(int size);
+ void grow(int size);
+ void check();
+};
+/*
+class MPIthread{
+public:
+ MPI_Comm createCommunicator();
+ int getRank();
+ int growBufferTo(int size);
+ int snapBufferTo(int size);
+ int getTypeSize(MPI_Datatype type);
+};
+*/
+/* Everything is inlined... this is a wrapper after all */
+
+class MPIcomm {
+ int mypid;
+ int nprocs;
+ MPI_Comm comm;
+ int default_tag;
+ static MPIbuffer *buffer;
+public:
+ static MPI_Status defstat;
+ MPIcomm(MPI_Comm commworld = MPI_COMM_WORLD):
+ comm(commworld),default_tag(0){
+ // since these don't change, optimize by assigning
+ // to class variables to reduce subroutine call overhead
+ MPI_Comm_rank(comm,&mypid);
+ MPI_Comm_size(comm,&nprocs);
+ }
+ double time(){return MPI_Wtime();}
+ int numProcs() { return nprocs; }
+ int rank() { return mypid; }
+ void setDefaultTag(int d){ default_tag=d; }
+ inline int send(int dest,int tag,MPI_Datatype type,int nelem,void *data){
+ return MPI_Send(data,nelem,type,dest,tag,comm);
+ }
+ inline int send(int dest,MPI_Datatype type,int nelem,void *data){
+ return send(dest,default_tag,type,nelem,data);
+ }
+ inline int send(int dest,int nelem,float *data){
+ return send(dest,default_tag,MPI_FLOAT,nelem,data);
+ }
+ inline int send(int dest,int nelem,double *data){
+ return send(dest,default_tag,MPI_DOUBLE,nelem,data);
+ }
+ inline int send(int dest,int nelem,int *data){
+ return send(dest,default_tag,MPI_INT,nelem,data);
+ }
+ inline int send(int dest,int nelem,char *data){
+ return send(dest,default_tag,MPI_CHAR,nelem,data);
+ }
+ inline int recv(int src,int tag,MPI_Datatype type,int nelem,void *data,MPI_Status &s=MPIcomm::defstat){
+ return MPI_Recv(data,nelem,type,src,tag,comm,&s);
+ }
+ inline int recv(MPI_Datatype type,int nelem,void *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(MPI_ANY_SOURCE,MPI_ANY_TAG,type,nelem,data,s);
+ }
+
+ inline int recv(int src,int nelem,float *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(src,MPI_ANY_TAG,MPI_FLOAT,nelem,data,s);
+ }
+ inline int recv(int src,int nelem,double *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(src,MPI_ANY_TAG,MPI_DOUBLE,nelem,data,s);
+ }
+ inline int recv(int src,int nelem,int *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(src,MPI_ANY_TAG,MPI_INT,nelem,data,s);
+ }
+ inline int recv(int src,int nelem,char *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(src,MPI_ANY_TAG,MPI_CHAR,nelem,data,s);
+ }
+
+ inline int recv(int nelem,float *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_FLOAT,nelem,data,s);
+ }
+ inline int recv(int nelem,double *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_DOUBLE,nelem,data,s);
+ }
+ inline int recv(int nelem,int *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_INT,nelem,data,s);
+ }
+ inline int recv(int nelem,char *data,MPI_Status &s=MPIcomm::defstat){
+ return recv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_CHAR,nelem,data,s);
+ }
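+
+ /* Example (illustrative sketch, not part of the original header): a
+    minimal blocking exchange using the typed send/recv overloads above.
+    The names "comm" and "payload" are placeholders; at least two ranks
+    are assumed in the communicator.
+
+      double payload[4] = {0.0, 1.0, 2.0, 3.0};
+      if(comm.rank() == 0)
+        comm.send(1, 4, payload);   // dest, nelem, double*
+      else if(comm.rank() == 1)
+        comm.recv(0, 4, payload);   // src, nelem, double* (status defaults to defstat)
+ */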
+ // Need support for
+ // S, B, R sends
+ // Buffered: Need to preallocate or auto-alloc back buffers.
+ // Synchronous: Blocks until matching receive posted
+ // Ready: Error unless matching receive has already been posted.
+ void setBufferSize(int nbytes){
+ if(buffer) buffer->resize(nbytes);
+ else buffer = new MPIbuffer(nbytes);
+ }
+ void setBufferSize(MPI_Datatype type,int nelem){
+ int elemsize;
+ MPI_Type_size(type,&elemsize); // query the actual datatype size rather than assuming 8 bytes
+ if(buffer) buffer->resize(nelem*elemsize);
+ else buffer = new MPIbuffer(nelem*elemsize);
+ }
+ void growBufferSize(int nbytes){
+ if(buffer) buffer->grow(nbytes);
+ else buffer = new MPIbuffer(nbytes);
+ }
+ void growBufferSize(MPI_Datatype type,int nelem){
+ int elemsize;
+ MPI_Type_size(type,&elemsize); // query the actual datatype size rather than assuming 8 bytes
+ if(buffer) buffer->grow(nelem*elemsize);
+ else buffer = new MPIbuffer(nelem*elemsize);
+ }
+ inline int bSend(int dest,int tag,MPI_Datatype type,int nelem,void *data){
+ // make sure a back buffer exists, sized from the actual datatype
+ if(!buffer) growBufferSize(type,nelem);
+ return MPI_Bsend(data,nelem,type,dest,tag,comm);
+ }
+ inline int bSend(int dest,MPI_Datatype type,int nelem,void *data){
+ return bSend(dest,default_tag,type,nelem,data);
+ }
+
+ inline int bSend(int dest,int nelem,float *data){
+ return bSend(dest,default_tag,MPI_FLOAT,nelem,data);
+ }
+ inline int bSend(int dest,int nelem,double *data){
+ return bSend(dest,default_tag,MPI_DOUBLE,nelem,data);
+ }
+ inline int bSend(int dest,int nelem,int *data){
+ return bSend(dest,default_tag,MPI_INT,nelem,data);
+ }
+ inline int bSend(int dest,int nelem,char *data){
+ return bSend(dest,default_tag,MPI_CHAR,nelem,data);
+ }
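+
+ /* Buffered-send sketch (illustrative, not part of the original header;
+    "comm" is a placeholder MPIcomm): size the attached buffer first, then
+    use the buffered overloads so the send can complete locally before the
+    matching receive is posted. A production buffer should also reserve
+    MPI_BSEND_OVERHEAD per pending message.
+
+      double block[1024];
+      comm.setBufferSize(MPI_DOUBLE, 1024);  // room for 1024 doubles
+      comm.bSend(1, 1024, block);            // dest, nelem, double*
+ */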
+ inline int iSend(int dest,int tag,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ // need to get MPI size for the datatype
+ return MPI_Isend(data,nelem,type,dest,tag,comm,&req);
+ }
+ inline int iSend(int dest,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ return iSend(dest,default_tag,type,nelem,data,req);
+ }
+
+ inline int iSend(int dest,int tag,int nelem,float *data,MPI_Request &req){
+ return iSend(dest,tag,MPI_FLOAT,nelem,data,req);
+ }
+ inline int iSend(int dest,int tag,int nelem,double *data,MPI_Request &req){
+ return iSend(dest,tag,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int iSend(int dest,int tag,int nelem,int *data,MPI_Request &req){
+ return iSend(dest,tag,MPI_INT,nelem,data,req);
+ }
+ inline int iSend(int dest,int tag,int nelem,char *data,MPI_Request &req){
+ return iSend(dest,tag,MPI_CHAR,nelem,data,req);
+ }
+
+ inline int iSend(int dest,int nelem,float *data,MPI_Request &req){
+ return iSend(dest,default_tag,MPI_FLOAT,nelem,data,req);
+ }
+ inline int iSend(int dest,int nelem,double *data,MPI_Request &req){
+ return iSend(dest,default_tag,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int iSend(int dest,int nelem,int *data,MPI_Request &req){
+ return iSend(dest,default_tag,MPI_INT,nelem,data,req);
+ }
+ inline int iSend(int dest,int nelem,char *data,MPI_Request &req){
+ return iSend(dest,default_tag,MPI_CHAR,nelem,data,req);
+ }
+
+ inline int isSend(int dest,int tag,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ // need to get MPI size for the datatype
+ return MPI_Issend(data,nelem,type,dest,tag,comm,&req);
+ }
+ inline int isSend(int dest,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ return isSend(dest,default_tag,type,nelem,data,req);
+ }
+
+ inline int isSend(int dest,int tag,int nelem,float *data,MPI_Request &req){
+ return isSend(dest,tag,MPI_FLOAT,nelem,data,req);
+ }
+ inline int isSend(int dest,int tag,int nelem,double *data,MPI_Request &req){
+ return isSend(dest,tag,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int isSend(int dest,int tag,int nelem,int *data,MPI_Request &req){
+ return isSend(dest,tag,MPI_INT,nelem,data,req);
+ }
+ inline int isSend(int dest,int tag,int nelem,char *data,MPI_Request &req){
+ return isSend(dest,tag,MPI_CHAR,nelem,data,req);
+ }
+
+ inline int isSend(int dest,int nelem,float *data,MPI_Request &req){
+ return isSend(dest,default_tag,MPI_FLOAT,nelem,data,req);
+ }
+ inline int isSend(int dest,int nelem,double *data,MPI_Request &req){
+ return isSend(dest,default_tag,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int isSend(int dest,int nelem,int *data,MPI_Request &req){
+ return isSend(dest,default_tag,MPI_INT,nelem,data,req);
+ }
+ inline int isSend(int dest,int nelem,char *data,MPI_Request &req){
+ return isSend(dest,default_tag,MPI_CHAR,nelem,data,req);
+ }
+
+ inline int ibSend(int dest,int tag,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ // make sure a back buffer exists, sized from the actual datatype
+ if(!buffer) growBufferSize(type,nelem);
+ return MPI_Ibsend(data,nelem,type,dest,tag,comm,&req);
+ }
+ inline int ibSend(int dest,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ return ibSend(dest,default_tag,type,nelem,data,req);
+ }
+ inline int iRecv(int src,int tag,MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ return MPI_Irecv(data,nelem,type,src,tag,comm,&req);
+ }
+ inline int iRecv(MPI_Datatype type,int nelem,void *data,MPI_Request &req){
+ return iRecv(MPI_ANY_SOURCE,MPI_ANY_TAG,type,nelem,data,req);
+ }
+
+ inline int iRecv(int src, int tag, int nelem,float *data,MPI_Request &req){
+ return iRecv(src,tag,MPI_FLOAT,nelem,data,req);
+ }
+ inline int iRecv(int src, int tag, int nelem,double *data,MPI_Request &req){
+ return iRecv(src,tag,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int iRecv(int src, int tag, int nelem,int *data,MPI_Request &req){
+ return iRecv(src,tag,MPI_INT,nelem,data,req);
+ }
+ inline int iRecv(int src, int tag, int nelem,char *data,MPI_Request &req){
+ return iRecv(src,tag,MPI_CHAR,nelem,data,req);
+ }
+
+ inline int iRecv(int nelem,float *data,MPI_Request &req){
+ return iRecv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_FLOAT,nelem,data,req);
+ }
+ inline int iRecv(int nelem,double *data,MPI_Request &req){
+ return iRecv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_DOUBLE,nelem,data,req);
+ }
+ inline int iRecv(int nelem,int *data,MPI_Request &req){
+ return iRecv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_INT,nelem,data,req);
+ }
+ inline int iRecv(int nelem,char *data,MPI_Request &req){
+ return iRecv(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_CHAR,nelem,data,req);
+ }
+ // waiting routines
+ int wait(MPI_Request &req,MPI_Status &stat){
+ return MPI_Wait(&req,&stat);
+ }
+ int test(MPI_Request &req,MPI_Status &stat,int &flag){
+ return MPI_Test(&req,&flag,&stat);
+ }
+ int requestFree(MPI_Request &req){
+ return MPI_Request_free(&req);
+ }
+ int waitAny(int nrequests,MPI_Request *requestarray,int &completed,MPI_Status &stat){
+ return MPI_Waitany(nrequests,requestarray,&completed,&stat);
+ }
+ int waitAll(int nreq, MPI_Request *reqarray,MPI_Status *statarray){
+ return MPI_Waitall(nreq,reqarray,statarray);
+ }
+ int probe(int source,int tag,MPI_Status &stat){
+ return MPI_Probe(source,tag,comm,&stat);
+ }
+ int probe(MPI_Status &stat){ // blocking probe of any source/tag; use iProbe for a flag-based test
+ return MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,comm,&stat);
+ }
+ int iProbe(int source,int tag,int &flag,MPI_Status &stat){
+ return MPI_Iprobe(source,tag,comm,&flag,&stat);
+ }
+ int iProbe(int &flag,MPI_Status &stat){
+ return MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG,comm,&flag,&stat);
+ }
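+
+ /* Non-blocking sketch (illustrative; "comm", "peer" and the arrays are
+    placeholders, and exactly two ranks are assumed): post the receive,
+    start the send, overlap work, then wait on both requests.
+
+      MPI_Request rreq, sreq;
+      MPI_Status  rstat, sstat;
+      int inbox[8], outbox[8] = {0};
+      int peer = (comm.rank() + 1) % 2;
+      comm.iRecv(peer, 0, 8, inbox, rreq);   // src, tag, nelem, int*, request
+      comm.iSend(peer, 0, 8, outbox, sreq);  // dest, tag, nelem, int*, request
+      // ... overlap computation here ...
+      comm.wait(rreq, rstat);
+      comm.wait(sreq, sstat);
+ */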
+ //-collective comm--
+ int gather(int root,MPI_Datatype type,int localnelem,void *senddata,void *recvdata){
+ return MPI_Gather(senddata,localnelem,type,recvdata,localnelem,type,root,comm);
+ }
+
+ int gather(int root,int localnelem,float *senddata,float *recvdata){
+ return MPI_Gather(senddata,localnelem,MPI_FLOAT,recvdata,localnelem,MPI_FLOAT,root,comm);
+ }
+ int gather(int root,int localnelem,double *senddata,double *recvdata){
+ return MPI_Gather(senddata,localnelem,MPI_DOUBLE,recvdata,localnelem,MPI_DOUBLE,root,comm);
+ }
+ int gather(int root,int localnelem,int *senddata,int *recvdata){
+ return MPI_Gather(senddata,localnelem,MPI_INT,recvdata,localnelem,MPI_INT,root,comm);
+ }
+ int gather(int root,int localnelem,char *senddata,char *recvdata){
+ return MPI_Gather(senddata,localnelem,MPI_CHAR,recvdata,localnelem,MPI_CHAR,root,comm);
+ }
+
+ int gatherv(int root,MPI_Datatype type,
+ int localnelem,int *globalnelem,int *displacements,
+ void *senddata,void *recvdata){
+ return MPI_Gatherv(senddata,localnelem,type,
+ recvdata,globalnelem,displacements,type,root,comm);
+ }
+ int allgather(MPI_Datatype type,int nelem,void *senddata,void *recvdata){
+ return MPI_Allgather(senddata,nelem,type,recvdata,nelem,type,comm);
+ }
+ int alltoall(MPI_Datatype type,int nelem,void *senddata,void *recvdata){
+ return MPI_Alltoall(senddata,nelem,type,recvdata,nelem,type,comm);
+ }
+ int scatter(int root,MPI_Datatype type,int localnelem,void *senddata,void *recvdata){
+ return MPI_Scatter(senddata,localnelem,type,recvdata,localnelem,type,root,comm);
+ }
+ int scatter(int root,int localnelem,float *senddata,float *recvdata){
+ return MPI_Scatter(senddata,localnelem,MPI_FLOAT,recvdata,localnelem,MPI_FLOAT,root,comm);
+ }
+ int scatter(int root,int localnelem,double *senddata,double *recvdata){
+ return MPI_Scatter(senddata,localnelem,MPI_DOUBLE,recvdata,localnelem,MPI_DOUBLE,root,comm);
+ }
+ int scatter(int root,int localnelem,int *senddata,int *recvdata){
+ return MPI_Scatter(senddata,localnelem,MPI_INT,recvdata,localnelem,MPI_INT,root,comm);
+ }
+ int scatter(int root,int localnelem,char *senddata,char *recvdata){
+ return MPI_Scatter(senddata,localnelem,MPI_CHAR,recvdata,localnelem,MPI_CHAR,root,comm);
+ }
+ int scatterv(int root,MPI_Datatype type,
+ int localnelem,int *globalnelem,int *displacements,
+ void *senddata,void *recvdata){
+ return MPI_Scatterv(senddata,globalnelem,displacements,type,
+ recvdata,localnelem,type,root,comm);
+ }
+ void print(const char *s){
+ for(int i=0,last=numProcs();i<last;i++){
+ barrier();
+ if(rank()==i)
+ fprintf(stderr,"PE(%d): %s\n",i,s);
+ }
+ }
+ int bcast(int root,MPI_Datatype type,int nelements,void *data){
+ return MPI_Bcast(data,nelements,type,root,comm);
+ }
+ int bcast(int root,int nelements,float *data){
+ return bcast(root,MPI_FLOAT,nelements,data);
+ }
+ int bcast(int root,int nelements,double *data){
+ return bcast(root,MPI_DOUBLE,nelements,data);
+ }
+ int bcast(int root,int nelements,int *data){
+ // fprintf(stderr,"\tBCAST: PE=%u Nelem=%u\n",rank(),nelements);
+ return bcast(root,MPI_INT,nelements,data);
+ }
+ int bcast(int root,int &data){
+ return bcast(root,1,&data);
+ }
+ int barrier(){
+ return MPI_Barrier(comm);
+ }
+ //double time(){
+ // return MPI_Wtime();
+ //}
+};
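+
+/* Collective-call sketch (illustrative, not part of the original header;
+   "comm" is a placeholder MPIcomm): rank 0 broadcasts a parameter, every
+   rank contributes one value, and rank 0 gathers the results.
+
+     int nsteps = 100;
+     comm.bcast(0, nsteps);              // root, int& (all ranks receive root's value)
+     float local = (float)comm.rank();
+     float *all = new float[comm.numProcs()];
+     comm.gather(0, 1, &local, all);     // root, localnelem, senddata, recvdata
+     comm.barrier();
+     delete [] all;                      // recvdata is only meaningful on the root
+*/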
+
+/* grouping mechanisms must be here in the MPIenv */
+class MPIenv {
+ MPIcomm *defaultcomm;
+public:
+ MPIenv(int &argc,char **&argv){
+ MPI_Init(&argc,&argv);
+ defaultcomm = new MPIcomm(MPI_COMM_WORLD);
+ }
+ ~MPIenv(){ delete defaultcomm; MPI_Finalize();}
+ MPIcomm *getComm() { return defaultcomm; }
+ MPIcomm *getComm(MPI_Comm custom_communicator){ // MPI_Comm is opaque, not necessarily an int
+ return new MPIcomm(custom_communicator);
+ }
+};
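+
+/* Typical entry point (illustrative sketch; main() is not part of this
+   header). Note that the static members MPIcomm::buffer and
+   MPIcomm::defstat still need a definition in one source file.
+
+     #include "MPIutils.hh"
+     int main(int argc, char **argv){
+       MPIenv env(argc, argv);          // MPI_Init here, MPI_Finalize in the destructor
+       MPIcomm *comm = env.getComm();   // wrapper around MPI_COMM_WORLD
+       comm->print("hello");            // rank-ordered diagnostic output
+       return 0;
+     }
+*/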
+
+#endif // MPIUTILS_HH_