author     tradke <tradke@7842ec3a-9562-4be5-9c5b-06ba18f2b668>    2003-07-14 08:27:18 +0000
committer  tradke <tradke@7842ec3a-9562-4be5-9c5b-06ba18f2b668>    2003-07-14 08:27:18 +0000
commit     df0db0f516dc47dc68dd9420219f7891f5dd5169 (patch)
tree       87b2bf8ba2d71f553a70c0a1ae66ccbf33798da6 /src
parent     438d5b8eef1217272d7f6328582b2331a4ea65bd (diff)
Better wording of the warning message that is issued if the reduction operation
fails while dumping the value of a grid scalar during a multiprocessor run. This closes BetaThorns/1564.

git-svn-id: http://svn.cactuscode.org/arrangements/CactusPUGHIO/IOHDF5Util/trunk@92 7842ec3a-9562-4be5-9c5b-06ba18f2b668
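For context, the main hunk below changes WriteGS() so that the cross-processor consistency check is only attempted when more than one processor is active, and so that a failed reduction handle or reduction call produces the clearer warning instead of a generic failure message. The following standalone sketch mirrors the memcmp-based consistency check in plain C; it uses no Cactus or MPI calls, gather_scalar() and the fixed processor count are illustrative stand-ins (not part of the thorn's API), and the gathered buffer is filled with sample data instead of a real "sum" reduction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the buffer that WriteGS assembles via
   CCTK_ReduceArray: one scalar value per processor, stored back to back.
   Here it is filled with sample data instead of doing a real reduction. */
static void gather_scalar (double *buffer, int nprocs, double local_value)
{
  for (int proc = 0; proc < nprocs; proc++)
  {
    buffer[proc] = local_value;          /* pretend every processor agrees */
  }
  /* buffer[1] = local_value + 1.0; */   /* uncomment to trigger the warning */
}

int main (void)
{
  const int nprocs = 4;                  /* illustrative processor count */
  const size_t hdatatypesize = sizeof (double);
  double local_value = 3.14;
  char *buffer = calloc (nprocs, hdatatypesize);

  if (! buffer)
  {
    return EXIT_FAILURE;
  }

  gather_scalar ((double *) buffer, nprocs, local_value);

  if (nprocs > 1)                        /* single-processor runs skip the check */
  {
    int differs = 0;
    for (int proc = 1; proc < nprocs; proc++)
    {
      /* same byte-wise comparison as in WriteGS: slot 0 versus slot 'proc' */
      differs |= memcmp (buffer, buffer + proc * hdatatypesize, hdatatypesize);
    }
    if (differs)
    {
      fprintf (stderr, "value differs between processors, "
                       "only value from processor 0 will be written\n");
    }
  }

  /* only the value in slot 0 would be handed to HDF5 */
  printf ("writing %g\n", *(double *) buffer);
  free (buffer);

  return EXIT_SUCCESS;
}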
Diffstat (limited to 'src')
-rw-r--r--  src/DumpVar.c | 67
1 file changed, 39 insertions(+), 28 deletions(-)
diff --git a/src/DumpVar.c b/src/DumpVar.c
index 4da072e..6ae7532 100644
--- a/src/DumpVar.c
+++ b/src/DumpVar.c
@@ -91,7 +91,7 @@ int IOHDF5Util_DumpVar (const cGH *GH, const ioRequest *request, hid_t file)
   /* build the unique name for the file object to write */
   fullname = CCTK_FullName (request->vindex);
-  objectname = (char *) malloc (strlen (fullname) + 80);
+  objectname = malloc (strlen (fullname) + 80);
   sprintf (objectname, "%s timelevel %d at iteration %d",
            fullname, request->timelevel, GH->cctk_iteration);
@@ -181,37 +181,48 @@ static int WriteGS (const cGH *GH, const ioRequest *request, const char *name,
   hid_t dataset, hdf5type;
-  ioUtilGH = (const ioGH *) CCTK_GHExtension (GH, "IO");
+  ioUtilGH = CCTK_GHExtension (GH, "IO");
   myproc = CCTK_MyProc (GH);
   nprocs = CCTK_nProcs (GH);
   fullname = CCTK_FullName (request->vindex);
   hdatatypesize = CCTK_VarTypeSize (request->hdatatype);
-  buffer = (char *) calloc (nprocs, hdatatypesize);
+  buffer = calloc (nprocs, hdatatypesize);
   memcpy (buffer + myproc*hdatatypesize,
           CCTK_VarDataPtrI (GH, request->timelevel, request->vindex),
           hdatatypesize);
-  i = CCTK_ReductionHandle ("sum");
-  if (CCTK_ReduceArray (GH, -1, i, nprocs, request->hdatatype,
-                        buffer, 1, 1, request->hdatatype, nprocs, buffer))
+  if (nprocs > 1)
   {
-    CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
-                "WriteGS: Failed to reduce grid scalar '%s'", fullname);
-  }
-  else
-  {
-    retval = 0;
-    for (i = 1; i < nprocs; i++)
+    i = CCTK_ReductionHandle ("sum");
+    if (i >= 0)
     {
-      retval |= memcmp (buffer, buffer + i*hdatatypesize, hdatatypesize);
+      i = CCTK_ReduceArray (GH, -1, i, nprocs, request->hdatatype,
+                            buffer, 1, 1, request->hdatatype, nprocs, buffer);
     }
-    if (retval)
+    if (i < 0)
     {
       CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
-                  "WriteGS: value of grid scalar variable '%s' (timelevel %d) "
-                  "differs between processors, only value from processor 0 "
-                  "will be written", fullname, request->timelevel);
+                  "WriteGS: Cannot check whether values on different "
+                  "processors are the same for grid scalar '%s'", fullname);
+
+      /* copy this processor's value to the start of buffer */
+      memcpy (buffer, buffer + myproc*hdatatypesize, hdatatypesize);
+    }
+    else
+    {
+      retval = 0;
+      for (i = 1; i < nprocs; i++)
+      {
+        retval |= memcmp (buffer, buffer + i*hdatatypesize, hdatatypesize);
+      }
+      if (retval)
+      {
+        CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
+                    "WriteGS: value of grid scalar variable '%s' (timelevel %d)"
+                    " differs between processors, only value from processor 0 "
+                    "will be written", fullname, request->timelevel);
+      }
     }
   }
@@ -226,7 +237,7 @@ static int WriteGS (const cGH *GH, const ioRequest *request, const char *name,
   }
   else
   {
-    myGH = (const ioHDF5UtilGH *) CCTK_GHExtension (GH, "IOHDF5Util");
+    myGH = CCTK_GHExtension (GH, "IOHDF5Util");
     hdf5type = IOHDF5Util_DataType (myGH, request->hdatatype);
     HDF5_ERROR (dataset = H5Dcreate (file, name, hdf5type,
                                      myGH->scalar_dataspace, H5P_DEFAULT));
@@ -358,7 +369,7 @@ static int WriteGA (const cGH *GH, const ioRequest *request, const char *name,
     return (-2);
   }
-  ioUtilGH = (const ioGH *) CCTK_GHExtension (GH, "IO");
+  ioUtilGH = CCTK_GHExtension (GH, "IO");
 #ifdef CCTK_MPI
 #ifdef H5_HAVE_PARALLEL
@@ -513,13 +524,13 @@ static void WriteData (const cGH *GH, const ioRequest *request,const char *name,
     return;
   }
-  ioUtilGH = (ioGH *) CCTK_GHExtension (GH, "IO");
-  myGH = (ioHDF5UtilGH *) CCTK_GHExtension (GH, "IOHDF5Util");
+  ioUtilGH = CCTK_GHExtension (GH, "IO");
+  myGH = CCTK_GHExtension (GH, "IOHDF5Util");
   /* copy the size arrays from CCTK_INT to appropriate types
      note that HDF5 wants elements in reverse order */
-  chunk_origin = (hssize_t *) malloc (request->hdim * sizeof (hssize_t));
-  chunk_dims = (hsize_t *) malloc (2*request->hdim * sizeof (hsize_t));
+  chunk_origin = malloc (request->hdim * sizeof (hssize_t));
+  chunk_dims = malloc (2*request->hdim * sizeof (hsize_t));
   file_dims = chunk_dims + request->hdim;
   for (i = 0; i < request->hdim; i++)
   {
@@ -599,7 +610,7 @@ static void WriteData (const cGH *GH, const ioRequest *request,const char *name,
   if (memspace >= 0)
   {
     /* now the chunk dataset for each processor is created within the group */
-    chunkname = (char *) malloc (strlen (name) + 20);
+    chunkname = malloc (strlen (name) + 20);
     sprintf (chunkname, "%s/chunk%d", name, proc - myproc);
     HDF5_ERROR (plist = H5Pcreate (H5P_DATASET_CREATE));
     /* enable compression for chunked dataset if compression was requested */
@@ -696,8 +707,8 @@ static void WriteDataCollective (const cGH *GH, const ioRequest *request,
   /* copy the size arrays from CCTK_INT to appropriate types
      note that HDF5 wants elements in reverse order */
-  chunk_origin = (hssize_t *) malloc (request->hdim * sizeof (hssize_t));
-  chunk_dims = (hsize_t *) malloc (2*request->hdim * sizeof (hsize_t));
+  chunk_origin = malloc (request->hdim * sizeof (hssize_t));
+  chunk_dims = malloc (2*request->hdim * sizeof (hsize_t));
   file_dims = chunk_dims + request->hdim;
   for (i = 0; i < request->hdim; i++)
   {
@@ -716,7 +727,7 @@ static void WriteDataCollective (const cGH *GH, const ioRequest *request,
   /* the I/O processor creates the dataset and adds the common attributes
      when writing its own data, otherwise the dataset is reopened */
-  myGH = (const ioHDF5UtilGH *) CCTK_GHExtension (GH, "IOHDF5Util");
+  myGH = CCTK_GHExtension (GH, "IOHDF5Util");
   hdf5type = IOHDF5Util_DataType (myGH, request->hdatatype);
   /* enable compression for chunked dataset if compression was requested */
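Aside from the reworded warning, the hunks above also drop the explicit casts in front of the malloc(), calloc() and CCTK_GHExtension() results. A minimal reminder of why this is safe, assuming (as the surrounding code suggests) that CCTK_GHExtension() returns void * and that <stdlib.h> is included so malloc has a proper prototype:

#include <stdlib.h>
#include <string.h>

int main (void)
{
  /* void * converts implicitly to any object pointer type in C, so no cast
     is needed; on old compilers a cast can even hide a missing prototype */
  char *objectname = malloc (64);

  if (objectname)
  {
    strcpy (objectname, "grid scalar");
    free (objectname);
  }
  return 0;
}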