From f7002523f8962935769110933611b271516c6b0e Mon Sep 17 00:00:00 2001
From: tradke
Date: Tue, 28 Oct 2008 17:53:31 +0000
Subject: use collective transfers when writing datasets using parallel HDF5

git-svn-id: http://svn.cactuscode.org/arrangements/CactusPUGHIO/IOHDF5Util/trunk@138 7842ec3a-9562-4be5-9c5b-06ba18f2b668
---
 src/DumpVar.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/DumpVar.c b/src/DumpVar.c
index ab5d6b1..c657ac5 100644
--- a/src/DumpVar.c
+++ b/src/DumpVar.c
@@ -769,13 +769,11 @@ static void WriteDataCollective (const cGH *GH, const ioRequest *request,
   }
   HDF5_ERROR (dataset = H5Dcreate (file, name, hdf5type, filespace, plist));
   HDF5_ERROR (H5Pclose (plist));
-  if (CCTK_MyProc (GH) == 0)
-  {
-    IOHDF5Util_DumpCommonAttributes (GH, request, dataset);
-  }
+  IOHDF5Util_DumpCommonAttributes (GH, request, dataset);
 
   /* increase the buffer size if the default isn't sufficient */
   HDF5_ERROR (plist = H5Pcreate (H5P_DATASET_XFER));
+  HDF5_ERROR (H5Pset_dxpl_mpio (plist, H5FD_MPIO_COLLECTIVE));
   buffersize = H5Dget_storage_size (dataset);
   if (buffersize > H5Pget_buffer (plist, NULL, NULL))
   {
--
cgit v1.2.3
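
Background note (not part of the original patch): in parallel HDF5, operations that modify file
metadata, such as creating datasets and writing attributes, must be performed collectively by all
processes, which is presumably why the rank-0 guard around IOHDF5Util_DumpCommonAttributes is
dropped above. The added H5Pset_dxpl_mpio call then requests collective rather than independent
MPI-IO transfers for the raw dataset data. The following self-contained sketch illustrates the
same pattern; it uses the HDF5 1.6-era H5Dcreate signature to match the code above, the file and
dataset names are illustrative, and error checking is omitted for brevity.

/* Minimal sketch of a collective parallel-HDF5 dataset write.
 * Build against a parallel HDF5 (e.g. mpicc example.c -lhdf5) and
 * run with several MPI processes. */
#include <mpi.h>
#include <hdf5.h>

int main (int argc, char **argv)
{
  int rank, nprocs;
  hsize_t dims[1], count[1], start[1];
  hid_t fapl, file, filespace, memspace, dataset, dxpl;
  double value;

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);

  /* open the file with the MPI-IO file driver */
  fapl = H5Pcreate (H5P_FILE_ACCESS);
  H5Pset_fapl_mpio (fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
  file = H5Fcreate ("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
  H5Pclose (fapl);

  /* one dataset element per process; dataset creation is a metadata
   * operation and must be called by every process */
  dims[0] = (hsize_t) nprocs;
  filespace = H5Screate_simple (1, dims, NULL);
  dataset = H5Dcreate (file, "data", H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT);

  /* each rank selects its own element of the file dataspace */
  start[0] = (hsize_t) rank;
  count[0] = 1;
  H5Sselect_hyperslab (filespace, H5S_SELECT_SET, start, NULL, count, NULL);
  memspace = H5Screate_simple (1, count, NULL);

  /* the line the patch adds: request collective MPI-IO transfers
   * on the dataset transfer property list */
  dxpl = H5Pcreate (H5P_DATASET_XFER);
  H5Pset_dxpl_mpio (dxpl, H5FD_MPIO_COLLECTIVE);

  /* all ranks participate in one collective write */
  value = (double) rank;
  H5Dwrite (dataset, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl, &value);

  H5Pclose (dxpl);
  H5Sclose (memspace);
  H5Sclose (filespace);
  H5Dclose (dataset);
  H5Fclose (file);
  MPI_Finalize ();
  return 0;
}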