aboutsummaryrefslogtreecommitdiff
path: root/src/MPIO.old.hh
diff options
context:
space:
mode:
Diffstat (limited to 'src/MPIO.old.hh')
-rw-r--r--src/MPIO.old.hh80
1 files changed, 80 insertions, 0 deletions
diff --git a/src/MPIO.old.hh b/src/MPIO.old.hh
new file mode 100644
index 0000000..8b90946
--- /dev/null
+++ b/src/MPIO.old.hh
@@ -0,0 +1,80 @@
+#ifndef __MPIO_HH_
+#define __MPIO_HH_
+#include "IO.hh"
+#include "MPIutils.hh"
+#include "Timer.hh"
+
+/* Simplifies assumptions about domain decomposition */
+/*
+ Module: MPIO
+
+ Description:
+ Implements slice-based Parallel IO for IEEEIO libraries.
+ Uses iSend/iRecv of XY slices to hide message passing latency.
+ This is a simplified version of PIO which makes fewer assumptions
+ about the layout of the data.
+
+ Will eventually be subclassed from IObase so that you can
+ drop-in replace a single-threaded IO system with this
+ multithreaded one. writeChunk() will be used to announce
+ the local chunks and perform the write in parallel rather
+ than collecting serially.
+
+ Needs to have extensions to automatically remove ghost zones
+ setLocalDims(origin,dims,minindex,maxindex)
+
+ Limitations:
+ * currently only works for 3D grids
+ * only understands Float32 and Float64 datatypes
+   * Assumes HPF layout (e.g., a canonical processor layout where
+     all PEs are aligned to the grid... no partial overlaps)
+ * Uses XY slices (assumes fortran ordering for dimensions)
+ * Collects only to a single file (cannot do a multi-gather).
+ */
+class MPIO /* : public IObase */ {
+ int *gdims,*gorigins,grank;
+ MPIcomm &comm;
+ int globaldims[3]; // for master
+ int localorigin[3],localdims[3];
+ int root,myid;
+ IObase *file;
+ Timer slicewait,slicewrite,slicecollect,sliceselect;
+ Timer sliceallwait;
+ // MPI_Request *slice_send_requests;
+ int ghostmin[3],ghostmax[3];
+ MPI_Request *sendreq;
+ int syncsend;
+protected:
+ void sendSlice(int z,float *data,MPI_Request *req);
+ void requestSlice(int z,float *slicebuffer,MPI_Request *req);
+ void waitForSlice(int z,float *slicebuffer,float *destbuffer,MPI_Request *req);
+ void sendSlice(int z,double *data,MPI_Request *req);
+ void requestSlice(int z,double *slicebuffer,MPI_Request *req);
+ void waitForSlice(int z,double *slicebuffer,double *destbuffer,MPI_Request *req);
+public:
+ //MPIO(IObase *io);
+ MPIO(IObase *io,MPIcomm &c=*(new MPIcomm()));
+ ~MPIO();
+ inline int isRoot(){ return ((myid == root)?1:0);}
+ inline void useSyncrhonousSend(int f=1){syncsend=f;}
+ inline IObase *getFileHandle() { return (isRoot())?file:(IObase*)0; }
+ void setLocalDims(int rank,int *origin, int *dims);
+ void setLocalDims(int rank,int *origin, int *dims,
+ int *nghostmin,int *nghostmax);
+ virtual int write(IObase::DataType type,int rank,int *dims,void *data);
+ virtual int write(IObase::DataType type,void *data);
+ void write(float *data){
+ asyncWrite(data);
+ asyncFinalize();
+ }
+ void write(double *data){
+ asyncWrite(data);
+ asyncFinalize();
+ }
+ void asyncWrite(IObase::DataType type, void *data);
+ void asyncWrite(float *data);
+ void asyncWrite(double *data);
+ void asyncFinalize();
+};
+
+#endif // __MPIO_HH_