Diffstat (limited to 'Carpet/CarpetIOHDF5/src')
-rw-r--r--                Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc                         |   80
-rw-r--r--                Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh                         |  109
-rw-r--r--                Carpet/CarpetIOHDF5/src/Input.cc                                |  131
-rw-r--r--                Carpet/CarpetIOHDF5/src/Output.cc                               |   99
-rw-r--r--                Carpet/CarpetIOHDF5/src/OutputSlice.cc                          | 1458
-rw-r--r--                Carpet/CarpetIOHDF5/src/make.code.defn                          |    2
-rw-r--r--                Carpet/CarpetIOHDF5/src/make.configuration.defn                 |    4
-rw-r--r--                Carpet/CarpetIOHDF5/src/make.configuration.deps                 |    2
-rw-r--r-- [-rwxr-xr-x]   Carpet/CarpetIOHDF5/src/util/SubstituteDeprecatedParameters.pl |    0
-rw-r--r--                Carpet/CarpetIOHDF5/src/util/hdf5_recombiner.cc                |  253
-rw-r--r--                Carpet/CarpetIOHDF5/src/util/hdf5toascii_slicer.cc             |    7
-rw-r--r--                Carpet/CarpetIOHDF5/src/util/hdf5tobinary_slicer.cc            |  814
12 files changed, 2859 insertions, 100 deletions
diff --git a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
index b9bf36307..e34204c3c 100644
--- a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
+++ b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
@@ -57,9 +57,6 @@ static void GetVarIndex (int vindex, const char* optstring, void* arg);
static void CheckSteerableParameters (const cGH *const cctkGH,
CarpetIOHDF5GH *myGH);
-static int WriteMetadata (const cGH *cctkGH, int nioprocs,
- int firstvar, int numvars,
- bool called_from_checkpoint, hid_t file);
static int WriteAttribute (hid_t const group,
char const * const name,
@@ -98,6 +95,10 @@ int CarpetIOHDF5_Startup (void)
const int GHExtension = CCTK_RegisterGHExtension (CCTK_THORNSTRING);
CCTK_RegisterGHExtensionSetupGH (GHExtension, SetupGH);
+ IOHDF5<0>::Startup();
+ IOHDF5<1>::Startup();
+ IOHDF5<2>::Startup();
+
return (0);
}
@@ -110,6 +111,12 @@ void CarpetIOHDF5_Init (CCTK_ARGUMENTS)
*next_output_iteration = 0;
*next_output_time = cctk_time;
+ for (int d=0; d<3; ++d) {
+ this_iteration_slice[d] = 0;
+ last_output_iteration_slice[d] = 0;
+ last_output_time_slice[d] = cctk_time;
+ }
+
last_checkpoint_iteration = cctk_iteration;
last_checkpoint_walltime = CCTK_RunTime() / 3600.0;
}
@@ -271,6 +278,29 @@ hid_t CCTKtoHDF5_Datatype (const cGH* const cctkGH,
}
+// add attributes to an HDF5 slice dataset
+int AddSliceAttributes(const cGH* const cctkGH,
+ const char* const fullname,
+ const int refinementlevel,
+ const vector<double>& origin,
+ const vector<double>& delta,
+ const vector<int>& iorigin,
+ hid_t& dataset)
+{
+ int error_count = 0;
+
+ error_count += WriteAttribute(dataset, "time", cctkGH->cctk_time);
+ error_count += WriteAttribute(dataset, "timestep", cctkGH->cctk_iteration);
+ error_count += WriteAttribute(dataset, "name", fullname);
+ error_count += WriteAttribute(dataset, "level", refinementlevel);
+ error_count += WriteAttribute(dataset, "origin", &origin[0], origin.size());
+ error_count += WriteAttribute(dataset, "delta", &delta[0], delta.size());
+ error_count += WriteAttribute(dataset, "iorigin", &iorigin[0], iorigin.size());
+
+ return error_count;
+}
+
+
//////////////////////////////////////////////////////////////////////////////
// private routines
//////////////////////////////////////////////////////////////////////////////
@@ -422,20 +452,18 @@ static void CheckSteerableParameters (const cGH *const cctkGH,
// notify the user about the new setting
if (not CCTK_Equals (verbose, "none")) {
int count = 0;
- string msg ("Periodic HDF5 output requested for '");
- for (int i = CCTK_NumVars () - 1; i >= 0; i--) {
- if (myGH->requests[i]) {
- if (count++) {
- msg += "', '";
- }
- char *fullname = CCTK_FullName (i);
- msg += fullname;
+ ostringstream msg;
+ msg << "Periodic scalar output requested for:";
+ for (int vi=0; vi<CCTK_NumVars(); ++vi) {
+ if (myGH->requests[vi]) {
+ ++count;
+ char* const fullname = CCTK_FullName(vi);
+ msg << eol << " " << fullname;
free (fullname);
}
}
- if (count) {
- msg += "'";
- CCTK_INFO (msg.c_str());
+ if (count > 0) {
+ CCTK_INFO (msg.str().c_str());
}
}
@@ -756,7 +784,7 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
CCTK_REAL local[2], global[2];
local[0] = io_files;
local[1] = io_bytes;
- MPI_Allreduce (local, global, 2, dist::datatype (local[0]), MPI_SUM, dist::comm());
+ MPI_Allreduce (local, global, 2, dist::mpi_datatype (local[0]), MPI_SUM, dist::comm());
io_files = global[0];
io_bytes = global[1];
}
@@ -910,7 +938,7 @@ static void Checkpoint (const cGH* const cctkGH, int called_from)
CCTK_REAL local[2], global[2];
local[0] = io_files;
local[1] = io_bytes;
- MPI_Allreduce (local, global, 2, dist::datatype (local[0]), MPI_SUM, dist::comm());
+ MPI_Allreduce (local, global, 2, dist::mpi_datatype (local[0]), MPI_SUM, dist::comm());
io_files = global[0];
io_bytes = global[1];
}
@@ -1001,9 +1029,9 @@ static void Checkpoint (const cGH* const cctkGH, int called_from)
} // Checkpoint
-static int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
- int const firstvar, int const numvars,
- bool const called_from_checkpoint, hid_t const file)
+int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
+ int const firstvar, int const numvars,
+ bool const called_from_checkpoint, hid_t const file)
{
DECLARE_CCTK_PARAMETERS;
hid_t group;
@@ -1048,12 +1076,6 @@ static int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
(group, "config id", static_cast<char const *> (UniqueConfigID (cctkGH)));
}
- // Unique source tree identifier
- if (CCTK_IsFunctionAliased ("UniqueSourceID")) {
- error_count += WriteAttribute
- (group, "source id", static_cast<char const *> (UniqueSourceID (cctkGH)));
- }
-
// unique build identifier
if (CCTK_IsFunctionAliased ("UniqueBuildID")) {
error_count += WriteAttribute
@@ -1142,9 +1164,11 @@ static int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
// Save grid structure
if (called_from_checkpoint or not CCTK_Equals (out_save_parameters, "no")) {
+ vector <vector <vector <region_t> > > grid_superstructure (maps);
vector <vector <vector <region_t> > > grid_structure (maps);
vector <vector <vector <CCTK_REAL> > > grid_times (maps);
for (int m = 0; m < maps; ++ m) {
+ grid_superstructure.at(m) = vhh.at(m)->superregions;
grid_structure.at(m) = vhh.at(m)->regions.at(0);
grid_times.at(m).resize(mglevels);
for (int ml = 0; ml < mglevels; ++ ml) {
@@ -1155,6 +1179,12 @@ static int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
}
}
ostringstream gs_buf;
+ // We could write this information only into one of the checkpoint
+ // files (to save space), or write it into a separate metadata
+ // file
+ gs_buf << grid_superstructure;
+ // We could omit the grid structure (to save space), or write it
+ // only into one of the checkpoint files
gs_buf << grid_structure;
gs_buf << grid_times;
gs_buf << leveltimes;
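
The new AddSliceAttributes routine above attaches per-dataset metadata (time, timestep, name, level, origin, delta, iorigin) to every slice dataset. As a minimal sketch, not part of the patch, this is how such attributes could be read back with the plain HDF5 C API; the file name and dataset name below are hypothetical examples, not names produced by this commit.

// Sketch only: read the slice attributes written by AddSliceAttributes.
#define H5_USE_16_API 1
#include <hdf5.h>
#include <cstdio>
#include <vector>

int main ()
{
  const hid_t file    = H5Fopen ("phi.xy.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
  const hid_t dataset = H5Dopen (file, "WAVETOY::phi it=0 tl=0 rl=0 c=0");

  double time;
  int timestep, level;
  hid_t attr;
  attr = H5Aopen_name (dataset, "time");
  H5Aread (attr, H5T_NATIVE_DOUBLE, &time);    H5Aclose (attr);
  attr = H5Aopen_name (dataset, "timestep");
  H5Aread (attr, H5T_NATIVE_INT, &timestep);   H5Aclose (attr);
  attr = H5Aopen_name (dataset, "level");
  H5Aread (attr, H5T_NATIVE_INT, &level);      H5Aclose (attr);

  // "origin" is a 1D array attribute; query its length before reading
  attr = H5Aopen_name (dataset, "origin");
  const hid_t space = H5Aget_space (attr);
  std::vector<double> origin (H5Sget_simple_extent_npoints (space));
  H5Aread (attr, H5T_NATIVE_DOUBLE, &origin[0]);
  H5Sclose (space);  H5Aclose (attr);

  std::printf ("t=%g it=%d rl=%d origin[0]=%g\n",
               time, timestep, level, origin[0]);

  H5Dclose (dataset);
  H5Fclose (file);
  return 0;
}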
diff --git a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
index 3d03939c0..f31be614c 100644
--- a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
+++ b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
@@ -4,6 +4,8 @@
#define H5_USE_16_API 1
#include <hdf5.h>
+#include <vector>
+
#include "cctk_Arguments.h"
#include "CactusBase/IOUtil/src/ioutil_Utils.h"
#include "carpet.hh"
@@ -15,7 +17,7 @@
// some macros for HDF5 group names
#define METADATA_GROUP "Parameters and Global Attributes"
#define ALL_PARAMETERS "All Parameters"
-#define GRID_STRUCTURE "Grid Structure"
+#define GRID_STRUCTURE "Grid Structure v2"
// atomic HDF5 datatypes for the generic CCTK datatypes
// (the one for CCTK_COMPLEX is created at startup as a compound HDF5 datatype)
@@ -56,6 +58,16 @@
} while (0)
+// datatype of the start[] parameter in a call to H5Sselect_hyperslab()
+// (the HDF5 API has changed in this respect after version 1.6.3)
+#if (H5_VERS_MAJOR == 1 && \
+ (H5_VERS_MINOR < 6 || (H5_VERS_MINOR == 6 && H5_VERS_RELEASE < 4)))
+#define slice_start_size_t hssize_t
+#else
+#define slice_start_size_t hsize_t
+#endif
+
+
// CarpetIOHDF5 GH extension structure
typedef struct
{
@@ -114,11 +126,106 @@ namespace CarpetIOHDF5
const ioRequest* const request,
bool called_from_checkpoint);
+ int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
+ int const firstvar, int const numvars,
+ bool const called_from_checkpoint, hid_t const file);
+
+ int AddSliceAttributes(const cGH* const cctkGH,
+ const char* const fullname,
+ const int refinementlevel,
+ const vector<double>& origin,
+ const vector<double>& delta,
+ const vector<int>& iorigin,
+ hid_t& dataset);
+
// returns an HDF5 datatype corresponding to the given CCTK datatype
hid_t CCTKtoHDF5_Datatype (const cGH* const cctkGH,
int cctk_type,
bool single_precision);
+
+ // Everything is a class template, so that it can easily be
+ // instantiated for all output dimensions
+
+ template<int outdim>
+ struct IOHDF5 {
+
+ // name of the output directory
+ static char* my_out_slice_dir;
+
+ // list of variables to output
+ static char* my_out_slice_vars;
+
+ // I/O request description list (for all variables)
+ static vector<ioRequest*> slice_requests;
+
+
+
+ // Scheduled functions
+ static int Startup();
+
+ // Registered functions
+ static void* SetupGH (tFleshConfig* fc, int convLevel, cGH* cctkGH);
+
+ static int OutputGH (const cGH* cctkGH);
+ static int OutputVarAs (const cGH* cctkGH,
+ const char* varname, const char* alias);
+ static int TimeToOutput (const cGH* cctkGH, int vindex);
+ static int TriggerOutput (const cGH* cctkGH, int vindex);
+
+ // Other functions
+ static void CheckSteerableParameters (const cGH* cctkGH);
+
+ static bool DidOutput (const cGH* cctkGH,
+ int vindex,
+ string basefilename,
+ bool& is_new_file, bool& truncate_file);
+
+ static bool DirectionIsRequested (const vect<int,outdim>& dirs);
+
+ static void OutputDirection (const cGH* cctkGH,
+ int vindex,
+ string alias,
+ string basefilename,
+ const vect<int,outdim>& dirs,
+ bool is_new_file,
+ bool truncate_file);
+
+ static int OpenFile (const cGH* cctkGH,
+ int m,
+ int vindex,
+ int numvars,
+ string alias,
+ string basefilename,
+ const vect<int,outdim>& dirs,
+ bool is_new_file,
+ bool truncate_file,
+ hid_t& file);
+
+ static int WriteHDF5 (const cGH* cctkGH,
+ hid_t& file,
+ vector<gdata*> const gfdatas,
+ const bbox<int,dim>& gfext,
+ const int vi,
+ const vect<int,dim>& org,
+ const vect<int,outdim>& dirs,
+ const int rl,
+ const int ml,
+ const int m,
+ const int c,
+ const int tl,
+ const CCTK_REAL coord_time,
+ const vect<CCTK_REAL,dim>& coord_lower,
+ const vect<CCTK_REAL,dim>& coord_upper);
+
+ static int CloseFile (const cGH* cctkGH,
+ hid_t& file);
+
+ static ivect GetOutputOffset (const cGH* cctkGH, int m,
+ const vect<int,outdim>& dirs);
+
+ }; // struct IOHDF5
+
// scheduled routines (must be declared as C according to schedule.ccl)
extern "C" {
diff --git a/Carpet/CarpetIOHDF5/src/Input.cc b/Carpet/CarpetIOHDF5/src/Input.cc
index 738e78ad0..c89cb652e 100644
--- a/Carpet/CarpetIOHDF5/src/Input.cc
+++ b/Carpet/CarpetIOHDF5/src/Input.cc
@@ -60,6 +60,7 @@ typedef struct {
double delta_time;
vector<double> mgleveltimes; // [num_mglevels*num_reflevels]
+ vector<vector<vector<region_t> > > grid_superstructure; // [map][reflevel][component]
vector<vector<vector<region_t> > > grid_structure; // [map][reflevel][component]
vector<vector<vector<CCTK_REAL> > > grid_times; // [map][mglevel][reflevel]
vector<vector<CCTK_REAL> > leveltimes; // [mglevel][reflevel]
@@ -100,10 +101,15 @@ int CarpetIOHDF5_RecoverParameters ()
//////////////////////////////////////////////////////////////////////////////
void CarpetIOHDF5_RecoverGridStructure (CCTK_ARGUMENTS)
{
+ DECLARE_CCTK_PARAMETERS;
DECLARE_CCTK_ARGUMENTS;
fileset_t & fileset = * filesets.begin();
+ if (not CCTK_Equals (verbose, "none")) {
+ CCTK_INFO ("recovering grid structure");
+ }
+
// Abort with an error if there is no grid structure in the
// checkpoint file, or if the number of maps is wrong
if (fileset.grid_structure.empty()) {
@@ -115,7 +121,7 @@ void CarpetIOHDF5_RecoverGridStructure (CCTK_ARGUMENTS)
int(fileset.grid_structure.size()), maps);
}
- vector<vector<vector<region_t> > > superregsss = fileset.grid_structure;
+ vector<vector<vector<region_t> > > superregsss = fileset.grid_superstructure;
vector<vector<vector<vector<region_t> > > > regssss (maps);
int type;
@@ -184,7 +190,7 @@ void CarpetIOHDF5_RecoverGridStructure (CCTK_ARGUMENTS)
for (int m = 0; m < maps; ++ m) {
// Regrid
- RegridMap (cctkGH, m, superregsss.at(m), regssss.at(m));
+ RegridMap (cctkGH, m, superregsss.at(m), regssss.at(m), false);
// Set time hierarchy correctly after RegridMap created it
for (int ml = 0; ml < mglevels; ++ ml) {
@@ -199,10 +205,12 @@ void CarpetIOHDF5_RecoverGridStructure (CCTK_ARGUMENTS)
// Set level times correctly after PostRegrid created them
leveltimes = fileset.leveltimes;
-
+
for (int rl = 0; rl < reflevels; ++ rl) {
Recompose (cctkGH, rl, false);
}
+
+ RegridFree (cctkGH, false);
}
//////////////////////////////////////////////////////////////////////////////
@@ -384,16 +392,26 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
for (unsigned int gindex = 0; gindex < group_bboxes.size(); gindex++) {
group_bboxes[gindex].resize (maps);
const int grouptype = CCTK_GroupTypeI (gindex);
- BEGIN_MAP_LOOP (cctkGH, grouptype) {
- struct arrdesc& data = arrdata.at(gindex).at(Carpet::map);
-
- BEGIN_LOCAL_COMPONENT_LOOP (cctkGH, grouptype) {
- if (grouptype == CCTK_GF or (mglevel == 0 and reflevel == 0)) {
- group_bboxes[gindex][Carpet::map] |=
- data.dd->boxes.at(mglevel).at(reflevel).at(component).exterior;
+ if (grouptype == CCTK_GF) {
+ for (int m=0; m<maps; ++m) {
+ struct arrdesc& data = arrdata.at(gindex).at(m);
+ for (int lc=0; lc<data.hh->local_components(reflevel); ++lc) {
+ int const c = data.hh->get_component(reflevel,lc);
+ group_bboxes[gindex][m] |=
+ data.dd->boxes.at(mglevel).at(reflevel).at(c).exterior;
}
- } END_LOCAL_COMPONENT_LOOP;
- } END_MAP_LOOP;
+ group_bboxes[gindex][m].normalize();
+ }
+ } else {
+ if (mglevel==0 and reflevel==0) {
+ int const m=0;
+ struct arrdesc& data = arrdata.at(gindex).at(m);
+ int const c=dist::rank();
+ group_bboxes[gindex][m] |=
+ data.dd->boxes.at(mglevel).at(reflevel).at(c).exterior;
+ group_bboxes[gindex][m].normalize();
+ }
+ }
}
// mark variables in groups with no grid points (size 0) as already done
@@ -513,9 +531,10 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
// actually read the patch
if (not read_completely.at(patch->vindex).at(patch->timelevel)) {
- ReadVar (cctkGH, file.file, io_bytes, patch,
- bboxes_read.at(patch->vindex).at(patch->timelevel),
- in_recovery);
+ error_count +=
+ ReadVar (cctkGH, file.file, io_bytes, patch,
+ bboxes_read.at(patch->vindex).at(patch->timelevel),
+ in_recovery);
// update the read_completely entry
const int gindex = CCTK_GroupIndexFromVarI (patch->vindex);
@@ -563,6 +582,10 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
(ioUtilGH->do_inVars and ioUtilGH->do_inVars[vindex])) {
continue;
}
+ if (called_from == FILEREADER_DATA and tl > 0) {
+ // file reader reads only timelevel 0
+ continue;
+ }
if (not read_completely[vindex][tl]) {
// check if the variable has been read partially
@@ -578,14 +601,28 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
"(variable has option tag \"CHECKPOINT = 'no'\")",
fullname, tl);
} else {
- CCTK_VWarn (2, __LINE__, __FILE__, CCTK_THORNSTRING,
- "variable '%s' timelevel %d has not been read",
- fullname, tl);
+ // TODO: With the file reader, the files are read
+ // sequentially, and each file contains only some of the
+ // variables. The warning below is emitted for all files,
+ // even those which don't contain the variable. This
+ // makes this warning useless, so we omit it when called
+ // for the file reader.
+ if (called_from != FILEREADER_DATA) {
+ CCTK_VWarn (2, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "variable '%s' timelevel %d has not been read",
+ fullname, tl);
+ }
}
} else {
CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
"variable '%s' timelevel %d has been read only partially",
fullname, tl);
+ // for (int m = 0; m < maps; m++) {
+ // bboxes_read[vindex][tl][m].normalize();
+ // const int gindex = CCTK_GroupIndexFromVarI (vindex);
+ // cout << "Need: " << group_bboxes[gindex][m] << endl;
+ // cout << "Read: " << bboxes_read[vindex][tl][m] << endl;
+ // }
num_incomplete++;
}
free (fullname);
@@ -593,6 +630,8 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
}
}
if (num_incomplete) {
+ // cout.flush();
+ // cerr.flush();
CCTK_VWarn (0, __LINE__, __FILE__, CCTK_THORNSTRING,
"%d variables on mglevel %d reflevel %d have been read "
"only partially", num_incomplete, mglevel, reflevel);
@@ -602,7 +641,7 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
CCTK_REAL local[2], global[2];
local[0] = io_files;
local[1] = io_bytes;
- MPI_Allreduce (local, global, 2, dist::datatype (local[0]), MPI_SUM, dist::comm());
+ MPI_Allreduce (local, global, 2, dist::mpi_datatype (local[0]), MPI_SUM, dist::comm());
io_files = global[0];
io_bytes = global[1];
}
@@ -645,6 +684,9 @@ int Recover (cGH* cctkGH, const char *basefilename, int called_from)
}
}
}
+ if (error_count and abort_on_io_errors) {
+ CCTK_WARN(0, "Found errors while trying to restart from checkpoint, aborting.");
+ }
if (in_recovery and not CCTK_Equals (verbose, "none")) {
CCTK_VInfo (CCTK_THORNSTRING,
@@ -831,6 +873,7 @@ static void ReadMetadata (fileset_t& fileset, hid_t file)
H5P_DEFAULT, &gs_cstr.front()));
HDF5_ERROR (H5Dclose (dataset));
istringstream gs_buf (&gs_cstr.front());
+ gs_buf >> fileset.grid_superstructure;
gs_buf >> fileset.grid_structure;
gs_buf >> fileset.grid_times;
gs_buf >> fileset.leveltimes;
@@ -975,7 +1018,7 @@ static int ReadVar (const cGH* const cctkGH,
CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
"Cannot input variable '%s' (no storage)", fullname);
free (fullname);
- return 0;
+ return 1;
}
// filereader reads the current timelevel
@@ -999,7 +1042,10 @@ static int ReadVar (const cGH* const cctkGH,
// Traverse all local components on all maps
hid_t filespace = -1, dataset = -1;
- BEGIN_MAP_LOOP (cctkGH, group.grouptype) {
+ hid_t xfer = H5P_DEFAULT;
+ H5Z_EDC_t checksums = H5Z_NO_EDC;
+
+ BEGIN_LOCAL_MAP_LOOP (cctkGH, group.grouptype) {
// skip this dataset if it belongs to another map
if (group.grouptype == CCTK_GF and patch->map != Carpet::map) {
@@ -1023,8 +1069,10 @@ static int ReadVar (const cGH* const cctkGH,
upper[patch->rank-1] = newupper;
}
const ibbox filebox(lower, upper, stride);
+ // cout << "Found in file: " << filebox << endl;
- ibbox& interior_membox =
+#if 0
+ const ibbox& interior_membox =
data.dd->boxes.at(mglevel).at(reflevel).at(component).interior;
// skip this dataset if it doesn't overlap with this component's interior
@@ -1032,6 +1080,19 @@ static int ReadVar (const cGH* const cctkGH,
if (interior_overlap.empty()) {
continue;
}
+#endif
+ const ibbox& exterior_membox =
+ data.dd->boxes.at(mglevel).at(reflevel).at(component).exterior;
+
+ // skip this dataset if it doesn't overlap with this component's
+ // exterior, or if it only reads what has already been read
+ const ibbox exterior_overlap = exterior_membox & filebox;
+ const ibset exterior_overlaps =
+ exterior_overlap - bboxes_read.at(Carpet::map);
+ if (exterior_overlaps.empty()) {
+ // cout << "Skipping this bbox" << endl;
+ continue;
+ }
if (CCTK_Equals (verbose, "full")) {
char *fullname = CCTK_FullName (patch->vindex);
@@ -1041,21 +1102,19 @@ static int ReadVar (const cGH* const cctkGH,
}
// get the overlap with this component's exterior
- ibbox& membox =
+ const ibbox& membox =
data.dd->boxes.at(mglevel).at(reflevel).at(component).exterior;
const ibbox overlap = membox & filebox;
+ // cout << "Overlap: " << overlap << endl;
bboxes_read.at(Carpet::map) |= overlap;
+ bboxes_read.at(Carpet::map).normalize();
+ // cout << "New read: " << bboxes_read.at(Carpet::map) << endl;
// calculate hyperslab selection parameters
// before HDF5-1.6.4 the H5Sselect_hyperslab() function expected
// the 'start' argument to be of type 'hssize_t'
-#if (H5_VERS_MAJOR == 1 && \
- (H5_VERS_MINOR < 6 || (H5_VERS_MINOR == 6 && H5_VERS_RELEASE < 4)))
- hssize_t memorigin[dim], fileorigin[dim];
-#else
- hsize_t memorigin[dim], fileorigin[dim];
-#endif
+ slice_start_size_t memorigin[dim], fileorigin[dim];
hsize_t memdims[dim], count[dim];
for (int i = 0; i < patch->rank; i++) {
memdims[patch->rank-i-1] =
@@ -1073,6 +1132,14 @@ static int ReadVar (const cGH* const cctkGH,
if (dataset < 0) {
HDF5_ERROR (dataset = H5Dopen (file, patch->patchname.c_str()));
HDF5_ERROR (filespace = H5Dget_space (dataset));
+ xfer = H5Pcreate (H5P_DATASET_XFER);
+ checksums = H5Pget_edc_check (xfer);
+ if (use_checksums && (checksums == H5Z_DISABLE_EDC))
+ CCTK_WARN(1, "Checksums not enabled in HDF5 library, but "
+ "requested in parameter, reading data without "
+ "tests on checksums.");
+ if (!use_checksums && (checksums == H5Z_ENABLE_EDC))
+ H5Pset_edc_check(xfer, H5Z_DISABLE_EDC);
}
// read the hyperslab
@@ -1082,7 +1149,7 @@ static int ReadVar (const cGH* const cctkGH,
NULL, count, NULL));
HDF5_ERROR (H5Sselect_hyperslab (memspace, H5S_SELECT_SET, memorigin,
NULL, count, NULL));
- HDF5_ERROR (H5Dread (dataset, datatype, memspace, filespace, H5P_DEFAULT,
+ HDF5_ERROR (H5Dread (dataset, datatype, memspace, filespace, xfer,
cctkGH->data[patch->vindex][timelevel]));
hid_t datatype;
HDF5_ERROR (datatype = H5Dget_type (dataset));
@@ -1098,14 +1165,14 @@ static int ReadVar (const cGH* const cctkGH,
/ (delta_time * mglevelfact)) );
}
- } END_MAP_LOOP;
+ } END_LOCAL_MAP_LOOP;
if (dataset >= 0) {
HDF5_ERROR (H5Dclose (dataset));
HDF5_ERROR (H5Sclose (filespace));
}
- return 1;
+ return error_count;
}
} // namespace CarpetIOHDF5
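
The Input.cc hunks above create a dataset transfer property list and warn if checksum verification was requested but is unavailable, or disable HDF5's error-detecting code (EDC) check if it was not requested. A minimal sketch of that property-list logic in isolation, not part of the patch; use_checksums stands in for the thorn's parameter of the same name.

// Sketch only: build an H5Dread transfer property list honouring a
// use_checksums switch, mirroring the logic added in ReadVar above.
#define H5_USE_16_API 1
#include <hdf5.h>

hid_t make_xfer_plist (bool use_checksums)
{
  const hid_t xfer = H5Pcreate (H5P_DATASET_XFER);
  const H5Z_EDC_t checks = H5Pget_edc_check (xfer);

  if (use_checksums and checks == H5Z_DISABLE_EDC) {
    // library cannot verify checksums; data will be read without EDC tests
  }
  if (not use_checksums and checks == H5Z_ENABLE_EDC) {
    H5Pset_edc_check (xfer, H5Z_DISABLE_EDC);
  }
  return xfer;  // pass as the transfer property list argument of H5Dread
}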
diff --git a/Carpet/CarpetIOHDF5/src/Output.cc b/Carpet/CarpetIOHDF5/src/Output.cc
index 675c51bc6..62f9d9778 100644
--- a/Carpet/CarpetIOHDF5/src/Output.cc
+++ b/Carpet/CarpetIOHDF5/src/Output.cc
@@ -72,18 +72,25 @@ int WriteVarUnchunked (const cGH* const cctkGH,
BEGIN_MAP_LOOP (cctkGH, group.grouptype) {
// Collect the set of all components' bboxes
ibset bboxes;
- BEGIN_COMPONENT_LOOP (cctkGH, group.grouptype) {
+ for (int c=0; c<arrdata.at(gindex).at(Carpet::map).hh->components(refinementlevel); ++c) {
// Using "interior" removes ghost zones and refinement boundaries.
+#if 0
bboxes += arrdata.at(gindex).at(Carpet::map).dd->
- boxes.at(mglevel).at(refinementlevel).at(component).interior;
- } END_COMPONENT_LOOP;
+ boxes.at(mglevel).at(refinementlevel).at(c).interior;
+#endif
+ bboxes += arrdata.at(gindex).at(Carpet::map).dd->
+ boxes.at(mglevel).at(refinementlevel).at(c).exterior;
+ }
// Normalise the set, i.e., try to represent the set with fewer bboxes.
- //
+ bboxes.normalize();
+
+#if 0
// According to Cactus conventions, DISTRIB=CONSTANT arrays
// (including grid scalars) are assumed to be the same on all
// processors and are therefore stored only by processor 0.
- if (group.disttype != CCTK_DISTRIB_CONSTANT) bboxes.normalize();
+ if (group.disttype != CCTK_DISTRIB_CONSTANT);
+#endif
// Loop over all components in the bbox set
int bbox_id = 0;
@@ -150,6 +157,11 @@ int WriteVarUnchunked (const cGH* const cctkGH,
HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
HDF5_ERROR (H5Pset_deflate (plist, compression_lvl));
}
+ // enable checksums if requested
+ if (use_checksums) {
+ HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
+ HDF5_ERROR (H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL));
+ }
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
HDF5_ERROR (H5Pclose (plist));
@@ -160,24 +172,33 @@ int WriteVarUnchunked (const cGH* const cctkGH,
BEGIN_COMPONENT_LOOP (cctkGH, group.grouptype) {
// Get the intersection of the current component with this combination
// (use either the interior or exterior here, as we did above)
+ gh const * const hh = arrdata.at(gindex).at(Carpet::map).hh;
+ dh const * const dd = arrdata.at(gindex).at(Carpet::map).dd;
+#if 0
ibbox const overlap = *bbox &
- arrdata.at(gindex).at(Carpet::map).dd->
- boxes.at(mglevel).at(refinementlevel).at(component).interior;
+ dd->boxes.at(mglevel).at(refinementlevel).at(component).interior;
+#endif
+ ibbox const overlap = *bbox &
+ dd->boxes.at(mglevel).at(refinementlevel).at(component).exterior;
// Continue if this component is not part of this combination
if (overlap.empty()) continue;
// Copy the overlap to the local processor
const ggf* ff = arrdata.at(gindex).at(Carpet::map).data.at(var);
- const gdata* const data = (*ff) (request->timelevel,
- refinementlevel,
- component, mglevel);
+ const gdata* const data =
+ local_component >= 0
+ ? (*ff) (request->timelevel, refinementlevel,
+ local_component, mglevel)
+ : NULL;
+ // TODO: This does not work; data may be NULL
gdata* const processor_component =
data->make_typed (request->vindex, error_centered, op_sync);
processor_component->allocate (overlap, 0);
for (comm_state state; not state.done(); state.step()) {
- processor_component->copy_from (state, data, overlap);
+ int const p = hh->processor(refinementlevel,component);
+ processor_component->copy_from (state, data, overlap, 0, p);
}
// Write data
@@ -207,12 +228,7 @@ int WriteVarUnchunked (const cGH* const cctkGH,
// before HDF5-1.6.4 the H5Sselect_hyperslab() function expected
// the 'start' argument to be of type 'hssize_t'
-#if (H5_VERS_MAJOR == 1 && \
- (H5_VERS_MINOR < 6 || (H5_VERS_MINOR == 6 && H5_VERS_RELEASE < 4)))
- hssize_t overlaporigin[dim];
-#else
- hsize_t overlaporigin[dim];
-#endif
+ slice_start_size_t overlaporigin[dim];
for (int d = 0; d < group.dim; ++d) {
assert (group.dim-1-d>=0 and group.dim-1-d<dim);
overlaporigin[group.dim-1-d] =
@@ -321,8 +337,10 @@ int WriteVarChunkedSequential (const cGH* const cctkGH,
BEGIN_MAP_LOOP (cctkGH, group.grouptype) {
BEGIN_COMPONENT_LOOP (cctkGH, group.grouptype) {
// Using "exterior" includes ghost zones and refinement boundaries.
- ibbox& bbox = arrdata.at(gindex).at(Carpet::map).dd->
- boxes.at(mglevel).at(refinementlevel).at(component).exterior;
+ gh const * const hh = arrdata.at(gindex).at(Carpet::map).hh;
+ dh const * const dd = arrdata.at(gindex).at(Carpet::map).dd;
+ ibbox const& bbox =
+ dd->boxes.at(mglevel).at(refinementlevel).at(component).exterior;
// Get the shape of the HDF5 dataset (in Fortran index order)
hsize_t shape[dim];
@@ -338,14 +356,19 @@ int WriteVarChunkedSequential (const cGH* const cctkGH,
// Copy the overlap to the local processor
const ggf* ff = arrdata.at(gindex).at(Carpet::map).data.at(var);
- const gdata* const data = (*ff) (request->timelevel, refinementlevel,
- component, mglevel);
+ const gdata* const data =
+ local_component >= 0
+ ? (*ff) (request->timelevel, refinementlevel,
+ local_component, mglevel)
+ : NULL;
+ // TODO: This does not work; data may be NULL
gdata* const processor_component =
- data->make_typed (request->vindex,error_centered, op_sync);
+ data->make_typed (request->vindex, error_centered, op_sync);
processor_component->allocate (bbox, 0);
for (comm_state state; not state.done(); state.step()) {
- processor_component->copy_from (state, data, bbox);
+ int const p = hh->processor(refinementlevel,component);
+ processor_component->copy_from (state, data, bbox, 0, p);
}
// Write data on I/O processor 0
@@ -412,6 +435,11 @@ int WriteVarChunkedSequential (const cGH* const cctkGH,
HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
HDF5_ERROR (H5Pset_deflate (plist, compression_lvl));
}
+ // enable checksums if requested
+ if (use_checksums) {
+ HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
+ HDF5_ERROR (H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL));
+ }
HDF5_ERROR (dataspace = H5Screate_simple (group.dim, shape, NULL));
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
@@ -479,13 +507,17 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
out_single_precision and not called_from_checkpoint));
// Traverse all maps
- BEGIN_MAP_LOOP (cctkGH, group.grouptype) {
+ BEGIN_LOCAL_MAP_LOOP (cctkGH, group.grouptype) {
BEGIN_LOCAL_COMPONENT_LOOP (cctkGH, group.grouptype) {
- const ggf* ff = arrdata.at(gindex).at(Carpet::map).data.at(var);
- const ibbox& bbox = (*ff) (request->timelevel, refinementlevel,
- group.disttype == CCTK_DISTRIB_CONSTANT ?
- 0 : component, mglevel)->extent();
+ // const ggf* ff = arrdata.at(gindex).at(Carpet::map).data.at(var);
+ // const ibbox& bbox = (*ff) (request->timelevel, refinementlevel,
+ // group.disttype == CCTK_DISTRIB_CONSTANT ?
+ // 0 : component, mglevel)->extent();
+ const dh* dd = arrdata.at(gindex).at(Carpet::map).dd;
+ const ibbox& bbox =
+ dd->boxes.AT(mglevel).AT(refinementlevel)
+ .AT(group.disttype == CCTK_DISTRIB_CONSTANT ? 0 : component).exterior;
// Don't create zero-sized components
if (bbox.empty()) continue;
@@ -499,8 +531,8 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
MPI_Datatype datatype;
switch (group.vartype) {
-#define TYPECASE(N,T) \
- case N: { T dummy; datatype = dist::datatype(dummy); } break;
+#define TYPECASE(N,T) \
+ case N: { T dummy; datatype = dist::mpi_datatype(dummy); } break;
#include "carpet_typecase.hh"
#undef TYPECASE
default: assert (0 and "invalid datatype");
@@ -565,6 +597,11 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
HDF5_ERROR (H5Pset_deflate (plist, compression_lvl));
}
+ // enable checksums if requested
+ if (use_checksums) {
+ HDF5_ERROR (H5Pset_chunk (plist, group.dim, shape));
+ HDF5_ERROR (H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL));
+ }
HDF5_ERROR (dataspace = H5Screate_simple (group.dim, shape, NULL));
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
@@ -581,7 +618,7 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
if (data != mydata) free (data);
} END_LOCAL_COMPONENT_LOOP;
- } END_MAP_LOOP;
+ } END_LOCAL_MAP_LOOP;
free (fullname);
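
The Output.cc hunks above add a Fletcher32 checksum filter to the dataset-creation property list when use_checksums is set; since HDF5 filters require a chunked layout, H5Pset_chunk is called alongside the filter. A minimal sketch of such a property list, not part of the patch; the parameter names are illustrative only.

// Sketch only: dataset-creation property list with optional compression
// and Fletcher32 checksums, mirroring the hunks above.
#define H5_USE_16_API 1
#include <hdf5.h>

hid_t make_create_plist (int rank, const hsize_t* shape,
                         int compression_lvl, bool use_checksums)
{
  const hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
  if (compression_lvl > 0) {
    H5Pset_chunk (plist, rank, shape);       // filters require chunking
    H5Pset_deflate (plist, compression_lvl);
  }
  if (use_checksums) {
    H5Pset_chunk (plist, rank, shape);
    H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL);
  }
  return plist;  // pass to H5Dcreate, then H5Pclose it
}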
diff --git a/Carpet/CarpetIOHDF5/src/OutputSlice.cc b/Carpet/CarpetIOHDF5/src/OutputSlice.cc
new file mode 100644
index 000000000..156332acb
--- /dev/null
+++ b/Carpet/CarpetIOHDF5/src/OutputSlice.cc
@@ -0,0 +1,1458 @@
+#include <cassert>
+#include <cctype>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+
+#include <map>
+#include <string>
+
+#include "cctk.h"
+#include "util_Table.h"
+
+#include "CactusBase/IOUtil/src/ioGH.h"
+
+#include "CarpetTimers.hh"
+
+#include "CarpetIOHDF5.hh"
+
+
+
+// That's a hack
+namespace Carpet {
+ void UnsupportedVarType (const int vindex);
+}
+
+
+#define GetParameter(parameter) \
+ outdim == 0 ? out0D_##parameter : \
+ outdim == 1 ? out1D_##parameter : out2D_##parameter
+
+namespace CarpetIOHDF5 {
+
+ using namespace std;
+ using namespace Carpet;
+
+
+
+ // routines which are independent of the output dimension
+ static ibbox GetOutputBBox (const cGH* cctkGH,
+ int group,
+ int m, int c,
+ const ibbox& ext);
+
+ static void GetCoordinates (const cGH* cctkGH, int m,
+ const cGroup& groupdata,
+ const ibbox& ext,
+ CCTK_REAL& coord_time,
+ rvect& coord_lower, rvect& coord_upper);
+
+ static int GetGridOffset (const cGH* cctkGH, int m, int dir,
+ const char* itempl, const char* iglobal,
+ const char* ctempl, const char* cglobal,
+ CCTK_REAL cfallback);
+ static int CoordToOffset (const cGH* cctkGH, int m, int dir,
+ CCTK_REAL coord, int ifallback);
+
+
+
+ // IO processor
+ const int ioproc = 0;
+ const int nioprocs = 1;
+
+
+
+ // Global configuration parameters
+ bool stop_on_parse_errors = false;
+
+
+
+ // Definition of static members
+ template<int outdim> char* IOHDF5<outdim>::my_out_slice_dir;
+ template<int outdim> char* IOHDF5<outdim>::my_out_slice_vars;
+ template<int outdim> vector<ioRequest*> IOHDF5<outdim>::slice_requests;
+
+
+
+ template<int outdim>
+ int IOHDF5<outdim>::Startup()
+ {
+ ostringstream msg;
+ msg << "AMR " << outdim << "D HDF5 I/O provided by CarpetIOHDF5";
+ CCTK_RegisterBanner (msg.str().c_str());
+
+ ostringstream extension_name;
+ extension_name << "CarpetIOHDF5_" << outdim << "D";
+ const int GHExtension =
+ CCTK_RegisterGHExtension(extension_name.str().c_str());
+ CCTK_RegisterGHExtensionSetupGH (GHExtension, SetupGH);
+
+ ostringstream method_name;
+ method_name << "IOHDF5_" << outdim << "D";
+ const int IOMethod = CCTK_RegisterIOMethod (method_name.str().c_str());
+ CCTK_RegisterIOMethodOutputGH (IOMethod, OutputGH);
+ CCTK_RegisterIOMethodOutputVarAs (IOMethod, OutputVarAs);
+ CCTK_RegisterIOMethodTimeToOutput (IOMethod, TimeToOutput);
+ CCTK_RegisterIOMethodTriggerOutput (IOMethod, TriggerOutput);
+
+ return 0;
+ }
+
+
+
+ template<int outdim>
+ void* IOHDF5<outdim>::SetupGH (tFleshConfig* const fc,
+ const int convLevel, cGH* const cctkGH)
+ {
+ DECLARE_CCTK_PARAMETERS;
+ const void *dummy;
+
+ dummy = &fc;
+ dummy = &convLevel;
+ dummy = &cctkGH;
+ dummy = &dummy;
+
+ if (not CCTK_Equals (verbose, "none")) {
+ CCTK_VInfo (CCTK_THORNSTRING, "I/O Method 'IOHDF5_%dD' registered: "
+ "%dD AMR output of grid variables to HDF5 files",
+ outdim, outdim);
+ }
+
+ const int numvars = CCTK_NumVars();
+ slice_requests.resize (numvars);
+
+ // initial I/O parameter check
+ my_out_slice_dir = 0;
+ my_out_slice_vars = strdup ("");
+ stop_on_parse_errors = strict_io_parameter_check != 0;
+ CheckSteerableParameters (cctkGH);
+ stop_on_parse_errors = false;
+
+ // We register only once, ergo we get only one handle. We store
+ // that statically, so there is no need to pass anything to
+ // Cactus.
+ return NULL;
+ }
+
+
+
+ template<int outdim>
+ void IOHDF5<outdim>::CheckSteerableParameters (const cGH* const cctkGH)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ // re-parse the 'IOHDF5::out%dD_dir' parameter if it has changed
+ const char* the_out_dir = GetParameter(dir);
+ if (CCTK_EQUALS (the_out_dir, "")) {
+ the_out_dir = io_out_dir;
+ }
+
+ if (not my_out_slice_dir or strcmp (the_out_dir, my_out_slice_dir)) {
+ free (my_out_slice_dir);
+ my_out_slice_dir = strdup (the_out_dir);
+
+ // create the output directory
+ const int result = IOUtil_CreateDirectory (cctkGH, my_out_slice_dir, 0, 0);
+ if (result < 0) {
+ CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "Problem creating %dD-output directory '%s'",
+ outdim, my_out_slice_dir);
+ } else if (result > 0 and CCTK_Equals (verbose, "full")) {
+ CCTK_VInfo (CCTK_THORNSTRING,
+ "%dD-output directory '%s' already exists",
+ outdim, my_out_slice_dir);
+ }
+ }
+
+ // re-parse the 'IOHDF5::out%d_vars' parameter if it has changed
+ const char* const out_slice_vars = GetParameter(vars);
+ if (strcmp (out_slice_vars, my_out_slice_vars)) {
+ ostringstream parameter_name;
+ parameter_name << "IOHDF5::out" << outdim << "D_vars";
+ IOUtil_ParseVarsForOutput (cctkGH, CCTK_THORNSTRING,
+ parameter_name.str().c_str(),
+ stop_on_parse_errors, out_slice_vars,
+ -1, -1.0, &slice_requests[0]);
+
+ // notify the user about the new setting
+ if (not CCTK_Equals (verbose, "none")) {
+ int count = 0;
+ ostringstream msg;
+ msg << "Periodic " << outdim << "D AMR output requested for:";
+ for (int vi=0; vi< CCTK_NumVars(); ++vi) {
+ if (slice_requests.at(vi)) {
+ ++count;
+ char* const fullname = CCTK_FullName(vi);
+ msg << "\n" << " " << fullname;
+ free (fullname);
+ }
+ }
+ if (count > 0) {
+ CCTK_INFO (msg.str().c_str());
+ }
+ }
+
+ // save the last setting of 'IOHDF5::out%d_vars' parameter
+ free (my_out_slice_vars);
+ my_out_slice_vars = strdup (out_slice_vars);
+ }
+ }
+
+
+
+ template<int outdim>
+ int IOHDF5<outdim>::OutputGH (const cGH* const cctkGH)
+ {
+ static Carpet::Timer * timer = NULL;
+ if (not timer) {
+ ostringstream timer_name;
+ timer_name << "CarpetIOHDF5<" << outdim << ">::OutputGH";
+ timer = new Carpet::Timer (timer_name.str().c_str());
+ }
+
+ timer->start();
+ for (int vi=0; vi<CCTK_NumVars(); ++vi) {
+ if (TimeToOutput(cctkGH, vi)) {
+ TriggerOutput(cctkGH, vi);
+ }
+ }
+ timer->stop();
+
+ return 0;
+ }
+
+
+
+ template<int outdim>
+ int IOHDF5<outdim>::TimeToOutput (const cGH* const cctkGH, const int vindex)
+ {
+ DECLARE_CCTK_ARGUMENTS;
+ DECLARE_CCTK_PARAMETERS;
+
+ assert (vindex >= 0 and vindex < CCTK_NumVars());
+
+ if (CCTK_GroupTypeFromVarI(vindex) != CCTK_GF and not do_global_mode) {
+ return 0;
+ }
+
+ CheckSteerableParameters (cctkGH);
+
+ // check if output for this variable was requested
+ if (not slice_requests.at(vindex)) {
+ return 0;
+ }
+
+ // check whether this refinement level should be output
+ if (not (slice_requests.at(vindex)->refinement_levels & (1 << reflevel))) {
+ return 0;
+ }
+
+ // check if output for this variable was requested individually by
+ // a "<varname>{ out_every = <number> }" option string
+ // this will overwrite the output criterion setting
+ const char* myoutcriterion = GetParameter(criterion);
+ if (CCTK_EQUALS(myoutcriterion, "default")) {
+ myoutcriterion = io_out_criterion;
+ }
+ if (slice_requests.at(vindex)->out_every >= 0) {
+ myoutcriterion = "divisor";
+ }
+
+ if (CCTK_EQUALS (myoutcriterion, "never")) {
+ return 0;
+ }
+
+ // check whether to output at this iteration
+ bool output_this_iteration = false;
+
+ if (CCTK_EQUALS (myoutcriterion, "iteration")) {
+ int myoutevery = GetParameter(every);
+ if (myoutevery == -2) {
+ myoutevery = io_out_every;
+ }
+ if (myoutevery > 0) {
+ if (cctk_iteration == this_iteration_slice[outdim]) {
+ // we already decided to output this iteration
+ output_this_iteration = true;
+ } else if (cctk_iteration >=
+ last_output_iteration_slice[outdim] + myoutevery) {
+ // it is time for the next output
+ output_this_iteration = true;
+ last_output_iteration_slice[outdim] = cctk_iteration;
+ this_iteration_slice[outdim] = cctk_iteration;
+ }
+ }
+ } else if (CCTK_EQUALS (myoutcriterion, "divisor")) {
+ int myoutevery = GetParameter(every);
+ if (myoutevery == -2) {
+ myoutevery = io_out_every;
+ }
+ if (slice_requests[vindex]->out_every >= 0) {
+ myoutevery = slice_requests[vindex]->out_every;
+ }
+ if (myoutevery > 0 and (cctk_iteration % myoutevery) == 0) {
+ // we already decided to output this iteration
+ output_this_iteration = true;
+ }
+ } else if (CCTK_EQUALS (myoutcriterion, "time")) {
+ CCTK_REAL myoutdt = GetParameter(dt);
+ if (myoutdt == -2) {
+ myoutdt = io_out_dt;
+ }
+ if (myoutdt == 0 or cctk_iteration == this_iteration_slice[outdim]) {
+ output_this_iteration = true;
+ } else if (myoutdt > 0) {
+ int do_output =
+ cctk_time / cctk_delta_time >=
+ (last_output_time_slice[outdim] + myoutdt) / cctk_delta_time - 1.0e-12;
+ MPI_Bcast (&do_output, 1, MPI_INT, 0, dist::comm());
+ if (do_output) {
+ // it is time for the next output
+ output_this_iteration = true;
+ last_output_time_slice[outdim] = cctk_time;
+ this_iteration_slice[outdim] = cctk_iteration;
+ }
+ }
+ } // select output criterion
+
+ return output_this_iteration ? 1 : 0;
+ }
+
+
+
+ template<int outdim>
+ int IOHDF5<outdim>::TriggerOutput (const cGH* const cctkGH, const int vindex)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ assert (vindex >= 0 and vindex < CCTK_NumVars());
+
+ char* const fullname = CCTK_FullName(vindex);
+
+ int retval;
+ if (one_file_per_group) {
+ char* const alias = CCTK_GroupNameFromVarI (vindex);
+ for (char* p = alias; *p; ++p) *p = (char) tolower (*p);
+ retval = OutputVarAs (cctkGH, fullname, alias);
+ free (alias);
+ } else {
+ const char* const alias = CCTK_VarName (vindex);
+ retval = OutputVarAs (cctkGH, fullname, alias);
+ }
+
+ free (fullname);
+
+ return retval;
+ }
+
+
+
+ static void GetVarIndex (const int vindex, const char* const optstring,
+ void* const arg)
+ {
+ if (optstring) {
+ char* const fullname = CCTK_FullName(vindex);
+ CCTK_VWarn (2, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "Option string '%s' will be ignored for HDF5 output of "
+ "variable '%s'", optstring, fullname);
+ free (fullname);
+ }
+
+ *static_cast<int*>(arg) = vindex;
+ }
+
+ template<int outdim>
+ int IOHDF5<outdim>::OutputVarAs (const cGH* const cctkGH,
+ const char* const varname,
+ const char* const alias)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ int vindex = -1;
+
+ if (CCTK_TraverseString (varname, GetVarIndex, &vindex, CCTK_VAR) < 0) {
+ CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "error while parsing variable name '%s' (alias name '%s')",
+ varname, alias);
+ return -1;
+ }
+
+ if (vindex < 0) {
+ return -1;
+ }
+
+ if (not (is_level_mode() or
+ (is_singlemap_mode() and maps == 1) or
+ (is_local_mode() and maps == 1 and
+ vhh.at(Carpet::map)->local_components(reflevel) == 1)))
+ {
+ CCTK_WARN (1, "OutputVarAs must be called in level mode");
+ return -1;
+ }
+
+ BEGIN_LEVEL_MODE (cctkGH) {
+
+ // Get information
+ const int group = CCTK_GroupIndexFromVarI (vindex);
+ assert (group >= 0);
+ cGroup groupdata;
+ {
+ int const ierr = CCTK_GroupData (group, & groupdata);
+ assert (not ierr);
+ }
+
+ // Check information
+ if (groupdata.grouptype != CCTK_GF) {
+ assert (do_global_mode);
+ }
+
+ if (outdim >= groupdata.dim) {
+ CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "Cannot produce %dD slice HDF5 output file '%s' for variable '%s' "
+ "because it has only %d dimensions",
+ outdim, alias, varname, groupdata.dim);
+ return -1;
+ }
+
+ // Check for storage
+ if (not CCTK_QueryGroupStorageI (cctkGH, group)) {
+ // This may be okay if storage is e.g. scheduled only in the
+ // analysis bin
+ CCTK_VWarn (4, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "Cannot output variable '%s' because it has no storage",
+ varname);
+ return 0;
+ }
+
+ ostringstream basefilenamebuf;
+ basefilenamebuf << my_out_slice_dir << "/" << alias;
+ const string basefilename = basefilenamebuf.str();
+
+ // Check if the file has been created already
+ bool is_new_file, truncate_file;
+ const bool did_output =
+ DidOutput (cctkGH, vindex, basefilename, is_new_file, truncate_file);
+ if (did_output) {
+ return 0;
+ }
+
+ // Loop over all direction combinations
+ vect<int,outdim> dirs (0);
+ bool done;
+ do {
+
+ // Output each combination only once
+ bool ascending = true;
+ for (int d1=0; d1<outdim; ++d1) {
+ for (int d2=d1+1; d2<outdim; ++d2) {
+ ascending = ascending and dirs[d1] < dirs[d2];
+ }
+ }
+
+ // Skip output if the dimensions are not ascending
+ if (ascending) {
+
+ // Skip output if not requested
+ if (DirectionIsRequested(dirs)) {
+ OutputDirection (cctkGH, vindex, alias, basefilename, dirs,
+ is_new_file, truncate_file);
+ }
+
+ } // if ascending
+
+ // Next direction combination
+ done = true;
+ for (int d=0; d<outdim; ++d) {
+ ++dirs[d];
+ if (dirs[d]<groupdata.dim + (outdim == 1 ? 1 : 0)) {
+ done = false;
+ break;
+ }
+ dirs[d] = 0;
+ }
+
+ } while (not done); // output all directions
+
+ } END_LEVEL_MODE;
+
+ return 0;
+ }
+
+
+
+ // Traverse all maps and components on this refinement and multigrid
+ // level
+ template<int outdim>
+ void IOHDF5<outdim>::OutputDirection (const cGH* const cctkGH,
+ const int vindex,
+ const string alias,
+ const string basefilename,
+ const vect<int,outdim>& dirs,
+ const bool is_new_file,
+ const bool truncate_file)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ // Get information
+ const int group = CCTK_GroupIndexFromVarI (vindex);
+ assert (group >= 0);
+ const int vindex0 = CCTK_FirstVarIndexI (group);
+ assert (vindex0 >= 0 and vindex >= vindex0);
+ const int var = vindex - vindex0;
+ cGroup groupdata;
+ {
+ int const ierr = CCTK_GroupData (group, & groupdata);
+ assert (not ierr);
+ }
+
+ const int ml = groupdata.grouptype == CCTK_GF ? mglevel : 0;
+ const int rl = groupdata.grouptype == CCTK_GF ? reflevel : 0;
+
+ const int num_tl = CCTK_NumTimeLevelsFromVarI (vindex);
+ assert (num_tl >= 1);
+
+ const int numvars = CCTK_NumVarsInGroupI(group);
+
+
+
+ // Loop over all maps
+ const int m_min = 0;
+ const int m_max = groupdata.grouptype == CCTK_GF ? Carpet::maps : 1;
+ for (int m = m_min; m < m_max; ++ m) {
+
+ hid_t file = -1;
+ int error_count = 0;
+ error_count += OpenFile (cctkGH, m, vindex, numvars, alias, basefilename,
+ dirs, is_new_file, truncate_file, file);
+
+ // Find the output offset
+ const ivect offset =
+ groupdata.grouptype == CCTK_GF ? GetOutputOffset (cctkGH, m, dirs) : 0;
+
+ const gh* const hh = arrdata.at(group).at(m).hh;
+ const dh* const dd = arrdata.at(group).at(m).dd;
+
+ // Traverse all components on this multigrid level, refinement
+ // level, and map
+ const int c_min = 0;
+ const int c_max =
+ groupdata.grouptype == CCTK_GF ?
+ vhh.at(m)->components(reflevel) :
+ groupdata.disttype != CCTK_DISTRIB_CONSTANT ?
+ CCTK_nProcs(cctkGH) :
+ 1;
+ for (int c = c_min; c < c_max; ++ c) {
+ int const lc = hh->get_local_component(rl,c);
+ int const proc = hh->processor(rl,c);
+ if (dist::rank() == proc or dist::rank() == ioproc) {
+
+ const ibbox& data_ext = dd->boxes.at(ml).at(rl).at(c).exterior;
+ const ibbox ext = GetOutputBBox (cctkGH, group, m, c, data_ext);
+
+ CCTK_REAL coord_time;
+ rvect coord_lower, coord_upper;
+ GetCoordinates (cctkGH, m, groupdata, ext,
+ coord_time, coord_lower, coord_upper);
+
+ // Apply offset
+ ivect offset1;
+ if (groupdata.grouptype == CCTK_GF) {
+ const ibbox& baseext = hh->baseextents.at(ml).at(rl);
+ offset1 = baseext.lower() + offset * ext.stride();
+ } else {
+ offset1 = offset * ext.stride();
+ }
+ for (int d=0; d<outdim; ++d) {
+ if (dirs[d] < 3) {
+ offset1[dirs[d]] = ext.lower()[dirs[d]];
+ }
+ }
+
+ const int tl_min = 0;
+ const int tl_max = output_all_timelevels ? num_tl : 1;
+ for (int tl = tl_min; tl < tl_max; ++tl) {
+
+ mempool pool;
+
+ const int n_min = one_file_per_group ? 0 : var;
+ const int n_max =
+ one_file_per_group ? CCTK_NumVarsInGroupI(group) : var+1;
+ vector<const gdata*> datas(n_max - n_min);
+ for (size_t n = 0; n < datas.size(); ++ n) {
+ if (dist::rank() == proc) {
+ const ggf* const ff =
+ arrdata.at(group).at(m).data.at(n + n_min);
+ datas.at(n) = (*ff) (tl, rl, lc, ml);
+ } else {
+ datas.at(n) = NULL;
+ }
+ }
+
+ vector<gdata*> tmpdatas(datas.size());
+
+ if (proc != ioproc) {
+
+ for (size_t n = 0; n < datas.size(); ++ n) {
+ const ggf* const ff =
+ arrdata.at(group).at(m).data.at(n + n_min);
+ tmpdatas.at(n) = ff->typed_data (tl, rl, c, ml);
+ size_t const memsize =
+ tmpdatas.at(n)->allocsize (data_ext, ioproc);
+ void * const memptr = pool.alloc (memsize);
+ tmpdatas.at(n)->allocate(data_ext, ioproc, memptr, memsize);
+ } // for n
+
+ for (comm_state state; not state.done(); state.step()) {
+ for (size_t n=0; n<datas.size(); ++n) {
+ tmpdatas.at(n)->copy_from
+ (state, datas.at(n), data_ext, ioproc, proc);
+ }
+ }
+
+ } else {
+
+ for (size_t n=0; n<datas.size(); ++n) {
+ tmpdatas.at(n) = const_cast<gdata*> (datas.at(n));
+ }
+
+ }
+
+ if (dist::rank() == ioproc) {
+ error_count +=
+ WriteHDF5 (cctkGH, file, tmpdatas, ext, vindex,
+ offset1, dirs,
+ rl, ml, m, c, tl,
+ coord_time, coord_lower, coord_upper);
+ }
+
+ if (proc != ioproc) {
+ for (size_t n=0; n<tmpdatas.size(); ++n) {
+ delete tmpdatas.at(n);
+ }
+ }
+
+ } // for tl
+
+ }
+ } // for c
+
+ error_count += CloseFile (cctkGH, file);
+ if (error_count > 0 and abort_on_io_errors) {
+ CCTK_WARN (0, "Aborting simulation due to previous I/O errors");
+ }
+
+ } // for m
+ }
+
+
+
+ template<int outdim>
+ bool IOHDF5<outdim>::DidOutput (const cGH* const cctkGH,
+ const int vindex,
+ const string basefilename,
+ bool& is_new_file, bool& truncate_file)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ typedef std::map<string, vector<vector<vector<int> > > > filelist;
+ static filelist created_files;
+
+ filelist::iterator thisfile = created_files.find (basefilename);
+ is_new_file = thisfile == created_files.end();
+ truncate_file = is_new_file and IO_TruncateOutputFiles (cctkGH);
+
+ if (is_new_file) {
+ const int numelems =
+ one_file_per_group ? CCTK_NumGroups() : CCTK_NumVars();
+ vector<vector<vector<int> > > last_outputs; // [ml][rl][var]
+ last_outputs.resize(mglevels);
+ for (int ml=0; ml<mglevels; ++ml) {
+ last_outputs.at(ml).resize (maxreflevels);
+ for (int rl=0; rl<maxreflevels; ++rl) {
+ last_outputs.at(ml).at(rl).resize
+ (numelems, cctkGH->cctk_iteration - 1);
+ }
+ }
+ // TODO: this makes a copy of last_outputs, which is expensive;
+ // change this to use a reference instead
+ thisfile = created_files.insert
+ (thisfile, filelist::value_type (basefilename, last_outputs));
+ assert (thisfile != created_files.end());
+ }
+
+ // Check if this variable has been output already during this
+ // iteration
+ const int elem =
+ one_file_per_group ? CCTK_GroupIndexFromVarI(vindex) : vindex;
+ int& last_output = thisfile->second.at(mglevel).at(reflevel).at(elem);
+ if (last_output == cctkGH->cctk_iteration) {
+ // has already been output during this iteration
+ char* const fullname = CCTK_FullName (vindex);
+ CCTK_VWarn (5, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "Skipping output for variable '%s', because this variable "
+ "has already been output during the current iteration -- "
+ "probably via a trigger during the analysis stage",
+ fullname);
+ free (fullname);
+ return true;
+ }
+ assert (last_output < cctkGH->cctk_iteration);
+ last_output = cctkGH->cctk_iteration;
+
+ return false;
+ }
+
+
+
+ CCTK_REAL io_files;
+ CCTK_REAL io_bytes;
+
+ template<int outdim>
+ int IOHDF5<outdim>::OpenFile (const cGH* const cctkGH,
+ const int m,
+ const int vindex,
+ const int numvars,
+ const string alias,
+ const string basefilename,
+ const vect<int,outdim>& dirs,
+ const bool is_new_file,
+ const bool truncate_file,
+ hid_t& file)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ int error_count = 0;
+
+ BeginTimingIO (cctkGH);
+ io_files = 0;
+ io_bytes = 0;
+
+ if (dist::rank() == ioproc) {
+
+ const int grouptype = CCTK_GroupTypeFromVarI(vindex);
+ assert (grouptype >= 0);
+
+ // Invent a file name
+ ostringstream filenamebuf;
+ filenamebuf << basefilename;
+ if (maps > 1 and grouptype == CCTK_GF) {
+ filenamebuf << "." << m;
+ }
+ filenamebuf << ".";
+ for (int d=0; d<outdim; ++d) {
+ const char* const coords = "xyzd";
+ filenamebuf << coords[dirs[d]];
+ }
+ filenamebuf << ".h5";
+ // we need a persistent temporary here
+ const string filenamestr = filenamebuf.str();
+ const char* const filename = filenamestr.c_str();
+
+ // Open the file
+ bool file_exists = false;
+ if (not truncate_file) {
+ H5E_BEGIN_TRY {
+ file_exists = H5Fis_hdf5(filename) > 0;
+ } H5E_END_TRY;
+ }
+
+ if (truncate_file or not file_exists) {
+ HDF5_ERROR(file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT,
+ H5P_DEFAULT));
+ // write metadata information
+ error_count +=
+ WriteMetadata(cctkGH, nioprocs, vindex, numvars, false, file);
+ } else {
+ HDF5_ERROR (file = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT));
+ }
+ io_files += 1;
+
+ } // if on the I/O processor
+
+ return error_count;
+ }
+
+ template<int outdim>
+ int IOHDF5<outdim>::CloseFile (const cGH* const cctkGH,
+ hid_t& file)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ int error_count = 0;
+
+ if (dist::rank() == ioproc) {
+ if (file >= 0) {
+ HDF5_ERROR(H5Fclose(file));
+ }
+ }
+ if (nioprocs > 1) {
+ CCTK_REAL local[2], global[2];
+ local[0] = io_files;
+ local[1] = io_bytes;
+ MPI_Allreduce (local, global, 2, dist::mpi_datatype (local[0]), MPI_SUM, dist::comm());
+ io_files = global[0];
+ io_bytes = global[1];
+ }
+
+ EndTimingIO (cctkGH, io_files, io_bytes, true);
+
+ return error_count;
+ }
+
+
+
+ // Check whether this output direction has been requested
+ template<int outdim>
+ bool IOHDF5<outdim>::DirectionIsRequested (const vect<int,outdim>& dirs)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ switch (outdim) {
+
+ case 0:
+ // Output is always requested (if switched on)
+ return true;
+
+ case 1:
+ switch (dirs[0]) {
+ case 0: return out1D_x;
+ case 1: return out1D_y;
+ case 2: return out1D_z;
+ case 3: return out1D_d;
+ default: assert (0);
+ }
+
+ case 2:
+ if (dirs[0]==0 and dirs[1]==1) return out2D_xy;
+ if (dirs[0]==0 and dirs[1]==2) return out2D_xz;
+ if (dirs[0]==1 and dirs[1]==2) return out2D_yz;
+ assert (0);
+
+// case 3:
+// // Output is always requested (if switched on)
+// return true;
+
+ default:
+ assert (0);
+ // Prevent compiler warning about missing return statement
+ return false;
+ }
+ }
+
+
+
+ // Get the region that should be output, in terms of grid points;
+ // this is the offset perpendicular to the output hyperslab
+ template<int outdim>
+ ivect IOHDF5<outdim>::GetOutputOffset (const cGH* const cctkGH, const int m,
+ const vect<int,outdim>& dirs)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ // Default is zero
+ ivect offset (0);
+
+ switch (outdim) {
+
+ case 0:
+ // 0D output
+ offset[0] = GetGridOffset (cctkGH, m, 1,
+ "out0D_point_xi", /*"out_point_xi"*/ NULL,
+ "out0D_point_x", /*"out_point_x"*/ NULL,
+ /*out_point_x*/ 0.0);
+ offset[1] = GetGridOffset (cctkGH, m, 2,
+ "out0D_point_yi", /*"out_point_yi"*/ NULL,
+ "out0D_point_y", /*"out_point_y"*/ NULL,
+ /*out_point_y*/ 0.0);
+ offset[2] = GetGridOffset (cctkGH, m, 3,
+ "out0D_point_zi", /*"out_point_zi"*/ NULL,
+ "out0D_point_z", /*"out_point_z"*/ NULL,
+ /*out_point_z*/ 0.0);
+ break;
+
+ case 1:
+ // 1D output
+ switch (dirs[0]) {
+ case 0:
+ offset[1] = GetGridOffset (cctkGH, m, 2,
+ "out1D_xline_yi", "out_xline_yi",
+ "out1D_xline_y", "out_xline_y",
+ out_xline_y);
+ offset[2] = GetGridOffset (cctkGH, m, 3,
+ "out1D_xline_zi", "out_xline_zi",
+ "out1D_xline_z", "out_xline_z",
+ out_xline_z);
+ break;
+ case 1:
+ offset[0] = GetGridOffset (cctkGH, m, 1,
+ "out1D_yline_xi", "out_yline_xi",
+ "out1D_yline_x", "out_yline_x",
+ out_yline_x);
+ offset[2] = GetGridOffset (cctkGH, m, 3,
+ "out1D_yline_zi", "out_yline_zi",
+ "out1D_yline_z", "out_yline_z",
+ out_yline_z);
+ break;
+ case 2:
+ offset[0] = GetGridOffset (cctkGH, m, 1,
+ "out1D_zline_xi", "out_zline_xi",
+ "out1D_zline_x", "out_zline_x",
+ out_zline_x);
+ offset[1] = GetGridOffset (cctkGH, m, 2,
+ "out1D_zline_yi", "out_zline_yi",
+ "out1D_zline_y", "out_zline_y",
+ out_zline_y);
+ break;
+ case 3:
+ // the diagonal: we don't care about the offset
+ break;
+ default:
+ assert (0);
+ }
+ break;
+
+ case 2:
+ // 2D output
+ if (dirs[0]==0 and dirs[1]==1) {
+ offset[2] = GetGridOffset (cctkGH, m, 3,
+ "out2D_xyplane_zi", "out_xyplane_zi",
+ "out2D_xyplane_z", "out_xyplane_z",
+ out_xyplane_z);
+ } else if (dirs[0]==0 and dirs[1]==2) {
+ offset[1] = GetGridOffset (cctkGH, m, 2,
+ "out2D_xzplane_yi", "out_xzplane_yi",
+ "out2D_xzplane_y", "out_xzplane_y",
+ out_xzplane_y);
+ } else if (dirs[0]==1 and dirs[1]==2) {
+ offset[0] = GetGridOffset (cctkGH, m, 1,
+ "out2D_yzplane_xi", "out_yzplane_xi",
+ "out2D_yzplane_x", "out_yzplane_x",
+ out_yzplane_x);
+ } else {
+ assert (0);
+ }
+ break;
+
+// case 3:
+// // 3D output: the offset does not matter
+// break;
+
+ default:
+ assert (0);
+ }
+
+ return offset;
+ }
+
+
+
+ // Omit symmetry and ghost zones if requested
+ ibbox GetOutputBBox (const cGH* const cctkGH,
+ const int group,
+ const int m, const int c,
+ const ibbox& ext)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ const int groupdim = CCTK_GroupDimI(group);
+ assert (groupdim >= 0);
+ const int grouptype = CCTK_GroupTypeI(group);
+ assert (grouptype >= 0);
+
+ // TODO: This is a bit ad hoc
+ CCTK_INT symtable;
+ if (grouptype == CCTK_GF and groupdim == cctkGH->cctk_dim) {
+ symtable = SymmetryTableHandleForGrid (cctkGH);
+ if (symtable < 0) CCTK_WARN (0, "internal error");
+ } else {
+ symtable = SymmetryTableHandleForGI (cctkGH, group);
+ if (symtable < 0) CCTK_WARN (0, "internal error");
+ }
+
+ CCTK_INT symbnd[2*dim];
+ int const ierr = Util_TableGetIntArray
+ (symtable, 2*groupdim, symbnd, "symmetry_handle");
+ if (ierr != 2*groupdim) CCTK_WARN (0, "internal error");
+
+ bool is_symbnd[2*dim];
+ for (int d=0; d<2*groupdim; ++d) {
+ is_symbnd[d] = symbnd[d] >= 0;
+ }
+
+ ivect lo = ext.lower();
+ ivect hi = ext.upper();
+ const ivect str = ext.stride();
+
+ const b2vect obnds = vhh.at(m)->outer_boundaries(reflevel,c);
+ const i2vect ghost_width = arrdata.at(group).at(m).dd->ghost_width;
+
+ for (int d=0; d<groupdim; ++d) {
+ bool const output_lower_ghosts =
+ obnds[0][d]
+ ? (is_symbnd[2*d]
+ ? output_symmetry_points
+ : out3D_outer_ghosts)
+ : out3D_ghosts;
+ bool const output_upper_ghosts =
+ obnds[1][d]
+ ? (is_symbnd[2*d+1]
+ ? output_symmetry_points
+ : out3D_outer_ghosts)
+ : out3D_ghosts;
+
+ if (not output_lower_ghosts) {
+ lo[d] += ghost_width[0][d] * str[d];
+ }
+ if (not output_upper_ghosts) {
+ hi[d] -= ghost_width[1][d] * str[d];
+ }
+ }
+
+ return ibbox(lo,hi,str);
+ }
+
+
+
+ // Determine coordinates
+ void GetCoordinates (const cGH* const cctkGH, const int m,
+ const cGroup& groupdata,
+ const ibbox& ext,
+ CCTK_REAL& coord_time,
+ rvect& coord_lower, rvect& coord_upper)
+ {
+ coord_time = cctkGH->cctk_time;
+
+ rvect global_lower;
+ rvect coord_delta;
+ if (groupdata.grouptype == CCTK_GF) {
+ rvect const cctk_origin_space = origin_space.at(m).at(mglevel);
+ rvect const cctk_delta_space = delta_space.at(m) * rvect(mglevelfact);
+ for (int d=0; d<dim; ++d) {
+ // lower boundary of Carpet's integer indexing
+ global_lower[d] = cctk_origin_space[d];
+ // grid spacing of Carpet's integer indexing
+ coord_delta[d] = (cctk_delta_space[d] /
+ vhh.at(m)->baseextents.at(0).at(0).stride()[d]);
+ }
+ } else {
+ for (int d=0; d<dim; ++d) {
+ global_lower[d] = 0.0;
+ coord_delta[d] = 1.0;
+ }
+ }
+
+ coord_lower = global_lower + coord_delta * rvect(ext.lower());
+ coord_upper = global_lower + coord_delta * rvect(ext.upper());
+ }
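+  // For grid functions this yields the physical coordinates of the bbox
+  // corners, origin + spacing * index; for grid arrays and scalars there
+  // is no coordinate system, so the integer indices themselves are used
+  // (origin 0, spacing 1).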
+
+
+
+ int GetGridOffset (const cGH* const cctkGH, const int m, const int dir,
+ const char* const iparam, const char* const iglobal,
+ const char* const cparam, const char* const cglobal,
+ const CCTK_REAL cfallback)
+ {
+ // First choice: explicit coordinate
+ const int ncparam = CCTK_ParameterQueryTimesSet (cparam, CCTK_THORNSTRING);
+ assert (ncparam >= 0);
+ if (ncparam > 0) {
+ int ptype;
+ const CCTK_REAL* const pcoord
+ = ((const CCTK_REAL*)CCTK_ParameterGet
+ (cparam, CCTK_THORNSTRING, &ptype));
+ assert (pcoord);
+ const CCTK_REAL coord = *pcoord;
+ assert (ptype == PARAMETER_REAL);
+ return CoordToOffset (cctkGH, m, dir, coord, 0);
+ }
+
+ // Second choice: explicit index
+ const int niparam = CCTK_ParameterQueryTimesSet (iparam, CCTK_THORNSTRING);
+ assert (niparam >= 0);
+ if (niparam > 0) {
+ int ptype;
+ const int* const pindex
+ = (const int*)CCTK_ParameterGet (iparam, CCTK_THORNSTRING, &ptype);
+ assert (pindex);
+ const int index = *pindex;
+ assert (ptype == PARAMETER_INT);
+ return index;
+ }
+
+ // Third choice: explicit global coordinate
+ const char* iothorn = CCTK_ImplementationThorn ("IO");
+ assert (iothorn);
+ if (cglobal) {
+ const int ncglobal = CCTK_ParameterQueryTimesSet (cglobal, iothorn);
+ assert (ncglobal >= 0);
+ if (ncglobal > 0) {
+ int ptype;
+ const CCTK_REAL* const pcoord
+ = (const CCTK_REAL*)CCTK_ParameterGet (cglobal, iothorn, &ptype);
+ assert (pcoord);
+ const CCTK_REAL coord = *pcoord;
+ assert (ptype == PARAMETER_REAL);
+ return CoordToOffset (cctkGH, m, dir, coord, 0);
+ }
+ }
+
+ // Fourth choice: explicit global index
+ if (iglobal) {
+ const int niglobal = CCTK_ParameterQueryTimesSet (iglobal, iothorn);
+ assert (niglobal >= 0);
+ if (niglobal > 0) {
+ int ptype;
+ const int* const pindex
+ = (const int*)CCTK_ParameterGet (iglobal, iothorn, &ptype);
+ assert (pindex);
+ const int index = *pindex;
+ assert (ptype == PARAMETER_INT);
+ return index;
+ }
+ }
+
+ // Fifth choice: default coordinate
+ return CoordToOffset (cctkGH, m, dir, cfallback, 0);
+ }
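+  // A slice position is thus resolved in this order: this thorn's own
+  // coordinate parameter, its index parameter, then the coordinate and
+  // index parameters of the thorn providing the "IO" implementation, and
+  // finally the built-in default coordinate.  Coordinates are converted
+  // to grid indices via CoordToOffset.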
+
+
+
+ int CoordToOffset (const cGH* cctkGH, const int m, const int dir,
+ const CCTK_REAL coord, const int ifallback)
+ {
+ assert (m>=0 and m<Carpet::maps and dir>=1 and dir<=dim);
+
+ assert (mglevel!=-1 and reflevel!=-1 and Carpet::map==-1);
+
+ rvect const cctk_origin_space = origin_space.at(m).at(mglevel);
+ rvect const cctk_delta_space = delta_space.at(m) * rvect (mglevelfact);
+ ivect const cctk_levfac = spacereffacts.at (reflevel);
+ ibbox const & coarseext = vhh.at(m)->baseextents.at(mglevel).at(0 );
+ ibbox const & baseext = vhh.at(m)->baseextents.at(mglevel).at(reflevel);
+ ivect const cctk_levoff = baseext.lower() - coarseext.lower();
+ ivect const cctk_levoffdenom = baseext.stride();
+
+ const CCTK_REAL delta = cctk_delta_space[dir-1] / cctk_levfac[dir-1];
+ const CCTK_REAL lower = cctk_origin_space[dir-1] + cctk_delta_space[dir-1] / cctk_levfac[dir-1] * cctk_levoff[dir-1] / cctk_levoffdenom[dir-1];
+
+ const CCTK_REAL rindex = (coord - lower) / delta;
+ int cindex = (int)floor(rindex + 0.75);
+
+ return cindex;
+ }
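+  // Example: with lower = 0.0 and delta = 0.5, the coordinate 1.2 gives
+  // rindex = 2.4 and cindex = floor(3.15) = 3; the 0.75 bias rounds a
+  // coordinate up to the next grid point as soon as it lies a quarter of
+  // a grid spacing or more above the previous one.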
+
+
+
+ // Output
+ template<int outdim>
+ int IOHDF5<outdim>::WriteHDF5 (const cGH* cctkGH,
+ hid_t& file,
+ vector<gdata*> const gfdatas,
+ const bbox<int,dim>& gfext,
+ const int vi,
+ const vect<int,dim>& org,
+ const vect<int,outdim>& dirs,
+ const int rl,
+ const int ml,
+ const int m,
+ const int c,
+ const int tl,
+ const CCTK_REAL coord_time,
+ const vect<CCTK_REAL,dim>& coord_lower,
+ const vect<CCTK_REAL,dim>& coord_upper)
+ {
+ DECLARE_CCTK_PARAMETERS;
+
+ assert (outdim<=dim);
+
+ cGroup groupdata;
+ {
+ int const gi = CCTK_GroupIndexFromVarI (vi);
+ assert (gi >= 0);
+ int const ierr = CCTK_GroupData (gi, & groupdata);
+ assert (not ierr);
+ }
+
+    // flag that says whether we are doing 1D diagonal output
+ // This is not beautiful, but works for the moment
+ bool const diagonal_output = outdim == 1 and dirs[0] == 3;
+
+ // Check whether the output bbox overlaps
+ // with the extent of the data to be output
+// FIXME: move this check up in the call stack
+ bool output_bbox_overlaps_data_extent;
+ if (not diagonal_output) {
+
+ const vect<int,outdim> lo = gfext.lower()[dirs];
+ const vect<int,outdim> up = gfext.upper()[dirs];
+ const vect<int,outdim> str = gfext.stride()[dirs];
+ const bbox<int,outdim> ext(lo,up,str);
+
+ // Check whether the output origin is contained in the extent
+ // of the data that should be output
+ ivect org1(org);
+ for (int d=0; d<outdim; ++d) org1[dirs[d]] = ext.lower()[d];
+ output_bbox_overlaps_data_extent = gfext.contains(org1);
+
+ } else {
+
+ gh const & hh = *vhh.at(m);
+ ibbox const & base = hh.baseextents.at(mglevel).at(reflevel);
+
+ assert (base.stride()[0] == base.stride()[1] and
+ base.stride()[0] == base.stride()[2]);
+
+ // Check if any point on the diagonal is in our gf's extent
+ output_bbox_overlaps_data_extent = false;
+ for (int i=maxval(base.lower());
+ i<=minval(base.upper()); i+=base.stride()[0]) {
+
+ ivect const pos = ivect(i,i,i);
+ output_bbox_overlaps_data_extent |= gfext.contains(pos);
+ }
+ }
+ // Shortcut if there is nothing to output
+ if (not output_bbox_overlaps_data_extent) {
+ return 0;
+ }
+
+ int error_count = 0;
+
+ ostringstream datasetname_suffix;
+ datasetname_suffix << " it=" << cctkGH->cctk_iteration << " tl=" << tl;
+ if (mglevels > 1) datasetname_suffix << " ml=" << ml;
+ if (groupdata.grouptype == CCTK_GF) {
+ if (maps > 1) datasetname_suffix << " m=" << m;
+ datasetname_suffix << " rl=" << rl;
+ }
+ if (groupdata.grouptype == CCTK_GF or
+ groupdata.disttype != CCTK_DISTRIB_CONSTANT) {
+ datasetname_suffix << " c=" << c;
+ }
+
+ // enable compression and checksums if requested
+ hid_t plist;
+ HDF5_ERROR(plist = H5Pcreate(H5P_DATASET_CREATE));
+ if (compression_level) {
+ HDF5_ERROR(H5Pset_deflate(plist, compression_level));
+ }
+ if (use_checksums) {
+ HDF5_ERROR(H5Pset_filter(plist, H5Z_FILTER_FLETCHER32, 0, 0, 0));
+ }
+
+ // enable datatype conversion if requested
+ const hid_t mem_type =
+ CCTKtoHDF5_Datatype(cctkGH, groupdata.vartype, false);
+ const hid_t slice_type =
+ CCTKtoHDF5_Datatype(cctkGH, groupdata.vartype, out_single_precision);
+
+ if (not diagonal_output) { // not outputting the diagonal
+
+ const vect<int,outdim> lo = gfext.lower()[dirs];
+ const vect<int,outdim> up = gfext.upper()[dirs];
+ const vect<int,outdim> str = gfext.stride()[dirs];
+ const bbox<int,outdim> ext(lo,up,str);
+
+ // Check whether the output origin is contained in the extent of
+ // the data that should be output
+ ivect org1(org);
+ for (int d=0; d<outdim; ++d) org1[dirs[d]] = ext.lower()[d];
+ assert (gfext.contains(org1));
+
+ // HDF5 wants ranks to be >= 1
+ const int rank = outdim > 0 ? outdim : 1;
+ vector<hsize_t> mem_shape(dim);
+ vector<hsize_t> slice_shape(rank, 1);
+ for (int d = 0; d < dim; d++) {
+ mem_shape[dim-1-d] = gfext.shape()[d] / gfext.stride()[d];
+ if (d < outdim) {
+ slice_shape[outdim-1-d] = ext.shape()[d] / ext.stride()[d];
+ }
+ }
+
+ ivect slice_lower(org - gfext.lower());
+ for (int d = 0; d < outdim; d++) {
+ slice_lower[dirs[d]] = 0;
+ }
+ ivect slice_upper(slice_lower);
+ for (int d = 0; d < outdim; d++) {
+ slice_upper[dirs[d]] = ext.upper()[d] - ext.lower()[d];
+ }
+ slice_lower /= gfext.stride();
+ slice_upper /= gfext.stride();
+
+ slice_start_size_t slice_start[dim];
+ hsize_t slice_count[dim];
+ for (int d = 0; d < dim; d++) {
+ slice_start[dim-1-d] = slice_lower[d];
+ slice_count[dim-1-d] = slice_upper[d] - slice_lower[d] + 1;
+ }
+ if (compression_level or use_checksums) {
+ HDF5_ERROR(H5Pset_chunk(plist, slice_shape.size(), &slice_shape[0]));
+ }
+ hid_t slice_space, mem_space;
+ HDF5_ERROR(slice_space =
+ H5Screate_simple (slice_shape.size(), &slice_shape[0], NULL));
+ HDF5_ERROR(mem_space =
+ H5Screate_simple (mem_shape.size(), &mem_shape[0], NULL));
+ HDF5_ERROR(H5Sselect_hyperslab (mem_space, H5S_SELECT_SET,
+ slice_start, NULL, slice_count, NULL));
+
+ vector<int> iorigin(rank, 0);
+ vector<double> delta(rank, 0), origin(rank, 0);
+ for (int d = 0; d < outdim; d++) {
+ assert(gfext.upper()[dirs[d]] - gfext.lower()[dirs[d]] >= 0);
+ iorigin[d] = ext.lower()[d];
+ delta[d] = 0;
+ origin[d] = coord_lower[dirs[d]];
+ if (gfext.upper()[dirs[d]] - gfext.lower()[dirs[d]] > 0) {
+ delta[d] = (coord_upper[dirs[d]] - coord_lower[dirs[d]]) /
+ (gfext.upper()[dirs[d]] - gfext.lower()[dirs[d]]) * gfext.stride()[dirs[d]];
+ origin[d] += (org1[dirs[d]] - gfext.lower()[dirs[d]]) * delta[d];
+ }
+ }
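+      // These origin/delta/iorigin values are attached to the dataset
+      // below via AddSliceAttributes, so a reader can reconstruct the
+      // physical position of slice point i along output direction d as
+      // origin[d] + i * delta[d].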
+
+ // now loop over all variables
+ for (size_t n = 0; n < gfdatas.size(); n++) {
+
+ // create a unique name for this variable's dataset
+ char *fullname = CCTK_FullName(vi + n);
+ string datasetname (fullname);
+ datasetname.append (datasetname_suffix.str());
+
+ // remove an already existing dataset of the same name
+ if (slice_requests.at(vi + n)->check_exist) {
+ H5E_BEGIN_TRY {
+ H5Gunlink(file, datasetname.c_str());
+ } H5E_END_TRY;
+ }
+
+ // write the dataset
+ hid_t dataset;
+ HDF5_ERROR(dataset =
+ H5Dcreate (file, datasetname.c_str(),
+ slice_type, slice_space, plist));
+ HDF5_ERROR(H5Dwrite (dataset, mem_type, mem_space, H5S_ALL,
+ H5P_DEFAULT, gfdatas[n]->storage()));
+ error_count +=
+ AddSliceAttributes (cctkGH, fullname, rl, origin, delta, iorigin,
+ dataset);
+ HDF5_ERROR(H5Dclose (dataset));
+ free (fullname);
+
+ io_bytes +=
+ H5Sget_simple_extent_npoints(slice_space) * H5Tget_size(slice_type);
+ } // for n
+
+ HDF5_ERROR(H5Sclose (mem_space));
+ HDF5_ERROR(H5Sclose (slice_space));
+
+ } else { // taking care of the diagonal
+
+ const ivect lo = gfext.lower();
+ const ivect up = gfext.upper();
+ const ivect str = gfext.stride();
+ const ibbox ext(lo,up,str);
+
+ gh const & hh = *vhh.at(m);
+ ibbox const & base = hh.baseextents.at(mglevel).at(reflevel);
+
+ assert (base.stride()[0] == base.stride()[1] and
+ base.stride()[0] == base.stride()[2]);
+
+ // count the number of points on the diagonal
+ hsize_t npoints = 0;
+ for (int i = maxval(base.lower());
+ i <= minval(base.upper());
+ i += base.stride()[0])
+ {
+ if (gfext.contains(i)) {
+ ++ npoints;
+ }
+ }
+ assert(npoints > 0);
+
+ // allocate a contiguous buffer for the diagonal points
+ vector<char> buffer (CCTK_VarTypeSize(groupdata.vartype) *
+ npoints * gfdatas.size());
+
+ // copy diagonal points into contiguous buffer
+ hsize_t offset = 0;
+ for (int i = maxval(base.lower());
+ i <= minval(base.upper());
+ i += base.stride()[0])
+ {
+ ivect const pos = ivect(i,i,i);
+ if(gfext.contains(pos)) {
+ for (size_t n = 0; n < gfdatas.size(); n++) {
+ switch (groupdata.vartype) {
+#define TYPECASE(N,T) \
+ case N: { T* typed_buffer = (T*) &buffer.front(); \
+ typed_buffer[offset + n*npoints] = \
+ (*(const data<T>*)gfdatas.at(n))[pos]; \
+ break; \
+ }
+#include "carpet_typecase.hh"
+#undef TYPECASE
+ }
+ }
+ ++ offset;
+ }
+ }
+ assert(offset == npoints);
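+      // buffer layout: variable n occupies the typed slots
+      // [n*npoints, (n+1)*npoints), i.e. it starts n*npoints*vartypesize
+      // bytes into the buffer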
+
+ if (compression_level or use_checksums) {
+ HDF5_ERROR(H5Pset_chunk(plist, 1, &npoints));
+ }
+ hid_t slice_space;
+ HDF5_ERROR(slice_space = H5Screate_simple(1, &npoints, NULL));
+
+ // loop over all variables and write out diagonals
+ for (size_t n = 0; n < gfdatas.size(); n++) {
+
+ // create a unique name for this variable's dataset
+ char *fullname = CCTK_FullName(vi + n);
+ string datasetname (fullname);
+ free (fullname);
+ datasetname.append (datasetname_suffix.str());
+
+ // remove an already existing dataset of the same name
+ if (slice_requests[vi + n]->check_exist) {
+ H5E_BEGIN_TRY {
+ H5Gunlink(file, datasetname.c_str());
+ } H5E_END_TRY;
+ }
+
+ // write the dataset
+ hid_t dataset;
+ HDF5_ERROR(dataset =
+ H5Dcreate(file, datasetname.c_str(),
+ slice_type, slice_space, plist));
+ HDF5_ERROR(H5Dwrite (dataset, mem_type,
+ H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                              &buffer.front() + n*npoints*CCTK_VarTypeSize(groupdata.vartype)));
+ HDF5_ERROR(H5Dclose (dataset));
+
+ io_bytes +=
+ H5Sget_simple_extent_npoints(slice_space) * H5Tget_size(slice_type);
+ }
+
+ HDF5_ERROR(H5Sclose(slice_space));
+
+ } // if(not diagonal_output)
+
+ HDF5_ERROR(H5Pclose(plist));
+
+ return error_count;
+ }
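+  // A minimal sketch of reading such a slice back with the HDF5 C API
+  // (the dataset name and the slice rank are only examples):
+  //
+  //   hid_t ds = H5Dopen (file, "WAVETOY::phi it=0 tl=0 rl=0 c=0");
+  //   double origin[2], delta[2];
+  //   hid_t attr = H5Aopen_name (ds, "origin");
+  //   H5Aread (attr, H5T_NATIVE_DOUBLE, origin);  H5Aclose (attr);
+  //   attr = H5Aopen_name (ds, "delta");
+  //   H5Aread (attr, H5T_NATIVE_DOUBLE, delta);   H5Aclose (attr);
+  //   // slice point (i,j) lies at (origin[0]+i*delta[0], origin[1]+j*delta[1])
+  //   H5Dclose (ds);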
+
+
+
+ // Explicit instantiation for all slice output dimensions
+ template class IOHDF5<0>;
+ template class IOHDF5<1>;
+ template class IOHDF5<2>;
+// template class IOHDF5<3>;
+
+} // namespace CarpetIOHDF5
diff --git a/Carpet/CarpetIOHDF5/src/make.code.defn b/Carpet/CarpetIOHDF5/src/make.code.defn
index 930eab3af..223c95158 100644
--- a/Carpet/CarpetIOHDF5/src/make.code.defn
+++ b/Carpet/CarpetIOHDF5/src/make.code.defn
@@ -1,7 +1,7 @@
# Main make.code.defn file for thorn CarpetIOHDF5
# Source files in this directory
-SRCS = CarpetIOHDF5.cc Input.cc Output.cc
+SRCS = CarpetIOHDF5.cc Input.cc Output.cc OutputSlice.cc
# Extend CXXFLAGS if HDF5 library was built with LFS support
ifneq ($(strip $(HDF5_LFS_FLAGS)),)
diff --git a/Carpet/CarpetIOHDF5/src/make.configuration.defn b/Carpet/CarpetIOHDF5/src/make.configuration.defn
index b2c159945..ca3b64978 100644
--- a/Carpet/CarpetIOHDF5/src/make.configuration.defn
+++ b/Carpet/CarpetIOHDF5/src/make.configuration.defn
@@ -1,2 +1,2 @@
-# add the Carpet HDF5-to-ASCII converter/slicer
-ALL_UTILS += hdf5toascii_slicer hdf5_slicer
+# add the Carpet HDF5-to-ASCII converter/slicer/recombiner
+ALL_UTILS += hdf5toascii_slicer hdf5tobinary_slicer hdf5_slicer hdf5_recombiner
diff --git a/Carpet/CarpetIOHDF5/src/make.configuration.deps b/Carpet/CarpetIOHDF5/src/make.configuration.deps
index 61b8e22e0..475206db9 100644
--- a/Carpet/CarpetIOHDF5/src/make.configuration.deps
+++ b/Carpet/CarpetIOHDF5/src/make.configuration.deps
@@ -2,7 +2,7 @@ CARPETIOHDF5_BUILD_DIR = $(BUILD_DIR)/CarpetIOHDF5
CARPETIOHDF5_SRC_DIR = $(PACKAGE_DIR)/Carpet/CarpetIOHDF5/src/util
CARPETIOHDF5_CFLAGS = -DCCODE $(CFLAGS)
-CARPETIOHDF5_LDFLAGS = $(DEBUG_LD) $(LDFLAGS) $(EXTRAFLAGS) $(HDF5_LIB_DIRS:%=-L%) $(HDF5_LIBS:%=-l%)
+CARPETIOHDF5_LDFLAGS = $(DEBUG_LD) $(LDFLAGS) $(EXTRAFLAGS) $(GENERAL_LIBRARIES)
# Extend CFLAGS if HDF5 library was built with LFS support
ifneq ($(strip $(HDF5_LFS_FLAGS)),)
diff --git a/Carpet/CarpetIOHDF5/src/util/SubstituteDeprecatedParameters.pl b/Carpet/CarpetIOHDF5/src/util/SubstituteDeprecatedParameters.pl
index bfd1b17b0..bfd1b17b0 100755..100644
--- a/Carpet/CarpetIOHDF5/src/util/SubstituteDeprecatedParameters.pl
+++ b/Carpet/CarpetIOHDF5/src/util/SubstituteDeprecatedParameters.pl
diff --git a/Carpet/CarpetIOHDF5/src/util/hdf5_recombiner.cc b/Carpet/CarpetIOHDF5/src/util/hdf5_recombiner.cc
new file mode 100644
index 000000000..ecee483d3
--- /dev/null
+++ b/Carpet/CarpetIOHDF5/src/util/hdf5_recombiner.cc
@@ -0,0 +1,253 @@
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <iostream>
+#include <list>
+#include <string>
+#include <vector>
+
+#include <cctk_Config.h>
+
+#include <hdf5.h>
+
+using namespace std;
+
+
+
+// Global constants
+int const dim = 3;
+
+// Files and datasets
+int const num_input_files = 0;
+char const *const input_file_pattern = "interptoarray::parrays3d.file_%d.h5";
+int const iteration_divisor = 1024;
+char const *const output_file_name = "parrays3d-float.h5";
+char const *const output_dataset_name = "phi";
+hsize_t const output_dims[] = { 0, 1024, 1024, 1024 }; // [t,z,y,x]
+hsize_t const output_maxdims[] = { H5S_UNLIMITED, 1024, 1024, 1024 };
+typedef float output_t;
+hid_t const output_type = H5T_NATIVE_FLOAT;
+
+// Technical details
+hsize_t const chunk_size[] = { 1, 128, 128, 128 }; // 8 MB for float
+int const compression_level = 1;
+bool const write_checksum = true;
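+
+// Input datasets are expected to be named "<variable> it=<i> tl=<t> c=<c>";
+// the output time index is it/iteration_divisor (e.g. it=2048 with a
+// divisor of 1024 becomes index 2), and each patch is placed at its
+// "iorigin" within the 1024^3 output cube, extending the dataset in time
+// as needed.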
+
+
+
+// Check a return value
+#define check(_expr) do { bool const _val = (_expr); assert(_val); } while(0)
+
+
+
+static herr_t add_name (hid_t group, const char *name,
+ H5L_info_t const *info, void *op_data);
+
+
+
+int main (int argc, char **argv)
+{
+ cout << "hdf5_recombiner" << endl
+ << "Copyright 2009 Erik Schnetter <schnetter@cct.lsu.edu>" << endl
+ << endl;
+
+
+
+ cout << "Opening output file \"" << output_file_name << "\"" << endl;
+
+ htri_t is_hdf5;
+ H5E_BEGIN_TRY {
+ is_hdf5 = H5Fis_hdf5 (output_file_name);
+ } H5E_END_TRY;
+ bool const file_exists = is_hdf5 > 0;
+ hid_t const output_file =
+ file_exists ?
+ H5Fopen (output_file_name, H5F_ACC_RDWR, H5P_DEFAULT) :
+ H5Fcreate (output_file_name, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+
+
+
+ cout << "Opening output dataset \"" << output_dataset_name << "\""<< endl;
+
+ hid_t const output_datatype = output_type;
+
+ hid_t const output_dataspace =
+ H5Screate_simple (dim+1, output_dims, output_maxdims);
+ assert (output_dataspace >= 0);
+
+ hid_t const output_properties = H5Pcreate (H5P_DATASET_CREATE);
+ assert (output_properties >= 0);
+ check (not H5Pset_chunk (output_properties, dim+1, chunk_size));
+ if (compression_level > 0) {
+ check (not H5Pset_deflate (output_properties, compression_level));
+ }
+ if (write_checksum) {
+ check (not H5Pset_fletcher32 (output_properties));
+ }
+
+ hid_t const output_dataset =
+ file_exists ?
+ H5Dopen (output_file, output_dataset_name, H5P_DEFAULT) :
+ H5Dcreate (output_file, output_dataset_name,
+ output_datatype, output_dataspace,
+ H5P_DEFAULT, output_properties, H5P_DEFAULT);
+ assert (output_dataset >= 0);
+
+
+
+ list<string> input_file_names;
+ for (int input_file_num=0; input_file_num<num_input_files; ++input_file_num) {
+ char input_file_name[10000];
+ snprintf (input_file_name, sizeof input_file_name,
+ input_file_pattern, input_file_num);
+ input_file_names.push_back (input_file_name);
+ }
+ for (int n=1; n<argc; ++n) {
+ input_file_names.push_back (argv[n]);
+ }
+
+ for (list<string>::const_iterator iinput_file_name = input_file_names.begin();
+ iinput_file_name != input_file_names.end();
+ ++ iinput_file_name)
+ {
+ string const& input_file_name = *iinput_file_name;
+ cout << "Opening input file \"" << input_file_name << "\"" << endl;
+ hid_t const input_file =
+ H5Fopen (input_file_name.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
+ assert (input_file >= 0);
+
+ typedef list<string> names_t;
+ names_t names;
+ hsize_t idx = 0;
+ check (not H5Literate (input_file,
+ H5_INDEX_NAME, H5_ITER_NATIVE,
+ &idx, add_name, &names));
+ for (names_t::const_iterator iname =
+ names.begin(); iname!=names.end(); ++iname)
+ {
+ char const *const input_dataset_name = iname->c_str();
+ cout << "Reading input dataset \"" << input_dataset_name << "\"" << endl;
+
+ char varname[10000];
+ int it, tl, c;
+ int const icnt = sscanf (input_dataset_name,
+ "%s it=%d tl=%d c=%d", varname, &it, &tl, &c);
+ assert (icnt == 4);
+ assert (it % iteration_divisor == 0);
+ int const iteration = it / iteration_divisor;
+
+ hid_t const input_dataset =
+ H5Dopen (input_file, input_dataset_name, H5P_DEFAULT);
+ assert (input_dataset >= 0);
+
+ int iorigin[dim];
+ hid_t const iorigin_attr =
+ H5Aopen (input_dataset, "iorigin", H5P_DEFAULT);
+ assert (iorigin_attr >= 0);
+ check (not H5Aread (iorigin_attr, H5T_NATIVE_INT, iorigin));
+ check (not H5Aclose (iorigin_attr));
+
+ hid_t const input_dataspace = H5Dget_space (input_dataset);
+ assert (input_dataspace >= 0);
+ assert (H5Sis_simple (input_dataspace) > 0);
+ assert (H5Sget_simple_extent_ndims (input_dataspace) == dim);
+ hsize_t input_dims[dim];
+ check (H5Sget_simple_extent_dims (input_dataspace, input_dims, NULL) ==
+ dim);
+
+ hid_t const input_memory_dataspace =
+ H5Screate_simple (dim, input_dims, NULL);
+    assert (input_memory_dataspace >= 0);
+ hssize_t const input_memory_npoints =
+ H5Sget_simple_extent_npoints (input_memory_dataspace);
+ vector<output_t> data (input_memory_npoints);
+
+ check (not H5Dread (input_dataset,
+ output_type, input_memory_dataspace,
+ input_dataspace,
+ H5P_DEFAULT, &data.front()));
+
+ check (not H5Sclose (input_memory_dataspace));
+ check (not H5Sclose (input_dataspace));
+ check (not H5Dclose (input_dataset));
+
+
+
+ cout << "Writing output dataset" << endl;
+
+ hsize_t output_extent[dim+1];
+ output_extent[0] = iteration + 1;
+ for (int d=0; d<dim; ++d) {
+ output_extent[d+1] = output_dims[d+1];
+ }
+ check (not H5Dextend (output_dataset, output_extent));
+
+ hid_t const output_dataspace =
+ H5Screate_simple (dim+1, output_extent, output_maxdims);
+ assert (output_dataspace >= 0);
+
+ hsize_t output_offset[dim+1];
+ hsize_t output_dims[dim+1];
+ output_offset[0] = iteration;
+ output_dims [0] = 1;
+ for (int d=0; d<dim; ++d) {
+ output_offset[d+1] = iorigin[dim-1-d];
+ output_dims [d+1] = input_dims[d];
+ }
+ check (not H5Sselect_hyperslab (output_dataspace, H5S_SELECT_SET,
+ output_offset, NULL, output_dims, NULL));
+ cout << " extent ["
+ << output_offset[3] << ","
+ << output_offset[2] << ","
+ << output_offset[1] << ","
+ << output_offset[0] << "] - ["
+ << output_offset[3] + output_dims[3] << ","
+ << output_offset[2] + output_dims[2] << ","
+ << output_offset[1] + output_dims[1] << ","
+ << output_offset[0] + output_dims[0] << "] "
+ << "(" << data.size() << " points)" << endl;
+
+ hid_t const output_memory_dataspace =
+ H5Screate_simple (dim+1, output_dims, NULL);
+    assert (output_memory_dataspace >= 0);
+ hssize_t const output_memory_npoints =
+ H5Sget_simple_extent_npoints (output_memory_dataspace);
+ assert (output_memory_npoints == input_memory_npoints);
+
+ check (not H5Dwrite (output_dataset,
+ output_type, output_memory_dataspace,
+ output_dataspace,
+ H5P_DEFAULT, &data.front()));
+
+ H5Sclose (output_memory_dataspace);
+ H5Sclose (output_dataspace);
+
+ } // for names
+
+ check (not H5Fclose (input_file));
+
+ } // for input_file_num
+
+
+
+ check (not H5Dclose (output_dataset));
+ check (not H5Pclose (output_properties));
+ check (not H5Sclose (output_dataspace));
+ check (not H5Fclose (output_file));
+
+ cout << "Done." << endl;
+ return 0;
+}
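+
+// Example invocation (the file names are just an illustration):
+//
+//   ./hdf5_recombiner interptoarray::parrays3d.file_*.h5
+//
+// Input files can also be hard-wired via num_input_files and
+// input_file_pattern above.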
+
+
+
+static herr_t add_name (hid_t const group, const char *const name,
+ H5L_info_t const *const info, void *const op_data)
+{
+ typedef list<string> names_t;
+ names_t& names = * static_cast<names_t*> (op_data);
+ if (strcmp (name, "Parameters and Global Attributes") != 0) {
+ names.push_back (name);
+ }
+ return 0;
+}
diff --git a/Carpet/CarpetIOHDF5/src/util/hdf5toascii_slicer.cc b/Carpet/CarpetIOHDF5/src/util/hdf5toascii_slicer.cc
index 9472f41b0..0c9d32885 100644
--- a/Carpet/CarpetIOHDF5/src/util/hdf5toascii_slicer.cc
+++ b/Carpet/CarpetIOHDF5/src/util/hdf5toascii_slicer.cc
@@ -18,16 +18,9 @@
#include <cstring>
#include <regex.h>
-// some macros to fix compatibility issues as long
-// as 1.8.0 is in beta phase
#define H5_USE_16_API 1
-
#include <hdf5.h>
-#if (H5_VERS_MAJOR == 1 && (H5_VERS_MINOR == 8) && (H5_VERS_RELEASE == 0))
-#warning "Hacking HDF5 1.8.0 compatiblity with 1.6.x; fix once 1.8.0 stable"
-#endif
-
using namespace std;
/*****************************************************************************
diff --git a/Carpet/CarpetIOHDF5/src/util/hdf5tobinary_slicer.cc b/Carpet/CarpetIOHDF5/src/util/hdf5tobinary_slicer.cc
new file mode 100644
index 000000000..d1d75c169
--- /dev/null
+++ b/Carpet/CarpetIOHDF5/src/util/hdf5tobinary_slicer.cc
@@ -0,0 +1,814 @@
+ /*@@
+ @file hdf5tobinary_slicer.cc
+ @date April 23, 2009
+ @author Erik Schnetter, based on hdf5toascii_slicer.cc by Thomas Radke
+ @desc
+ Utility program to extract a 1D line, a 2D slice, or a full 3D
+ cube from 3D datasets in datafiles generated by CarpetIOHDF5.
+ The extracted slab is written to a raw binary or HDF5 file.
+ @enddesc
+ @@*/
+
+#include <algorithm>
+#include <iostream>
+#include <iomanip>
+#include <limits>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <regex.h>
+
+#define H5_USE_16_API 1
+#include <hdf5.h>
+
+using namespace std;
+
+/*****************************************************************************
+ ************************* Macro Definitions ***************************
+ *****************************************************************************/
+
+// uncomment the following line to get some debug output
+// #define DEBUG 1
+
+// value for an unset parameter
+#define PARAMETER_UNSET -424242.0
+
+// fuzzy factor for comparing dataset timesteps with user-specified value
+#define FUZZY_FACTOR 1e-6
+
+// the datatype for the 'start' argument in H5Sselect_hyperslab()
+#if (H5_VERS_MAJOR == 1 && \
+ (H5_VERS_MINOR < 6 || (H5_VERS_MINOR == 6 && H5_VERS_RELEASE < 4)))
+#define h5size_t hssize_t
+#else
+#define h5size_t hsize_t
+#endif
+
+// macro to check the return code of calls to the HDF5 library
+#define CHECK_HDF5(fn_call) { \
+ const int _retval = fn_call; \
+ if (_retval < 0) { \
+ cerr << "HDF5 call " << #fn_call \
+ << " in file " << __FILE__ << " line " << __LINE__ \
+ << " returned error code " << _retval << endl; \
+ } \
+ }
+
+
+/*****************************************************************************
+ ************************** Type Definitions ***************************
+ *****************************************************************************/
+
+typedef struct {
+ hid_t file;
+ string name;
+ bool is_real;
+ hsize_t dims[3]; // C order
+
+ int map, mglevel, component, iteration, timelevel, rflevel;
+ double time;
+ int iorigin[3]; // Fortran order
+ double origin[3], delta[3]; // Fortran order
+} patch_t;
+
+
+/*****************************************************************************
+ ************************* Global Data *************************
+ *****************************************************************************/
+
+// the slab coordinates as selected by the user (Fortran order)
+static double slab_coord[3] = {PARAMETER_UNSET, PARAMETER_UNSET, PARAMETER_UNSET};
+
+// flag for outputting data in full 3D
+static bool out3D = false;
+
+// the specific timestep selected by the user
+static double timestep = PARAMETER_UNSET;
+
+// the regular expression to match against dataset names
+static const char* regex = NULL;
+static regex_t preg;
+
+// the dataset name, if given explicitly
+static const char* datasetname = NULL;
+
+// the list of all patches
+static vector<patch_t> patchlist;
+
+// maximum refinement level across all patches
+static int max_rflevel = -1;
+
+// whether to print omitted patches
+static bool verbose = false;
+
+// pointer to array that is filled with the data (Fortran order)
+static int datadims[3] = {-1,-1,-1};
+static float* dataptr = NULL;
+static unsigned char* bdataptr = NULL;
+
+// output file name
+static char* basename1 = NULL;
+
+// output format, either raw binary or HDF5
+static bool out_hdf5 = false;
+static int out_hdf5_4d_index = -1;
+
+// output type (float or byte)
+static bool out_float = false;
+static float data_min = -1.0;
+static float data_max = +1.0;
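+
+// Output goes to <basename>.h5 with --out-hdf5, or to a raw <basename>.bin
+// otherwise.  With --out-hdf5-4d-index <t> the cube is appended as time
+// slice <t> of an extendible 4D dataset "data", so successive runs with
+// increasing indices build up a time series.  Without --out-float the data
+// are rescaled from [data_min,data_max] to bytes 0..255.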
+
+/*****************************************************************************
+ ************************* Function Prototypes *************************
+ *****************************************************************************/
+static herr_t FindPatches (hid_t group, const char *name, void *_file);
+static void ReadPatch (const patch_t& patch, int last_iteration);
+static bool ComparePatches (const patch_t& a, const patch_t& b);
+
+
+ /*@@
+ @routine main
+ @date Sun 30 July 2006
+ @author Thomas Radke
+ @desc
+ Evaluates command line options and opens the input files.
+ @enddesc
+
+ @var argc
+ @vdesc number of command line parameters
+ @vtype int
+ @vio in
+ @endvar
+ @var argv
+ @vdesc array of command line parameters
+ @vtype char* const []
+ @vio in
+ @endvar
+ @@*/
+int main (int argc, char *const argv[])
+{
+ int i;
+ bool help = false;
+#if 0
+ int iorigin[3] = {-1, -1, -1};
+#endif
+ int num_slab_options = 0;
+
+
+ // evaluate command line parameters
+ for (i = 1; i < argc; i++) {
+ if (strcmp (argv[i], "--help") == 0) {
+ help = true; break;
+ } else if (strcmp (argv[i], "--verbose") == 0) {
+ verbose = true;
+ } else if (strcmp (argv[i], "--out3d-cube") == 0) {
+ out3D = true;
+ } else if (strcmp (argv[i], "--timestep") == 0 and i+1 < argc) {
+ timestep = atof (argv[++i]);
+ } else if (strcmp (argv[i], "--match") == 0 and i+1 < argc) {
+ regex = argv[++i];
+ if (regcomp (&preg, regex, REG_EXTENDED)) {
+ cerr << "Error: invalid regular expression '" << regex << "' given"
+ << endl << endl;
+ return (-1);
+ }
+ } else if (strcmp (argv[i], "--dataset") == 0 and i+1 < argc) {
+ datasetname = argv[++i];
+ } else if (strcmp (argv[i], "--out-precision") == 0 and i+1 < argc) {
+ cout << setprecision (atoi (argv[++i]));
+ } else if (strcmp (argv[i], "--out0d-point") == 0 and i+3 < argc) {
+ slab_coord[0] = atof (argv[++i]);
+ slab_coord[1] = atof (argv[++i]);
+ slab_coord[2] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out1d-xline-yz") == 0 and i+2 < argc) {
+ slab_coord[1] = atof (argv[++i]);
+ slab_coord[2] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out1d-yline-xz") == 0 and i+2 < argc) {
+ slab_coord[0] = atof (argv[++i]);
+ slab_coord[2] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out1d-zline-xy") == 0 and i+2 < argc) {
+ slab_coord[0] = atof (argv[++i]);
+ slab_coord[1] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out2d-yzplane-x") == 0 and i+1 < argc) {
+ slab_coord[0] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out2d-xzplane-y") == 0 and i+1 < argc) {
+ slab_coord[1] = atof (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out2d-xyplane-z") == 0 and i+1 < argc) {
+ slab_coord[2] = atof (argv[++i]); num_slab_options++;
+#if 0
+ } else if (strcmp (argv[i], "--out2d-yzplane-xi") == 0 and i+1 < argc) {
+ iorigin[0] = atoi (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out2d-xzplane-yi") == 0 and i+1 < argc) {
+ iorigin[1] = atoi (argv[++i]); num_slab_options++;
+ } else if (strcmp (argv[i], "--out2d-xyplane-zi") == 0 and i+1 < argc) {
+ iorigin[2] = atoi (argv[++i]); num_slab_options++;
+#endif
+ } else if (strcmp (argv[i], "--size") == 0 and i+3 < argc) {
+      datadims[0] = atoi (argv[++i]);
+      datadims[1] = atoi (argv[++i]);
+      datadims[2] = atoi (argv[++i]);
+ } else if (strcmp (argv[i], "--basename") == 0 and i+1 < argc) {
+ basename1 = argv[++i];
+ } else if (strcmp (argv[i], "--out-hdf5") == 0) {
+ out_hdf5 = true;
+ } else if (strcmp (argv[i], "--out-float") == 0) {
+ out_float = true;
+ } else if (strcmp (argv[i], "--data-min") == 0 and i+1 < argc) {
+ data_min = atof (argv[++i]);
+ } else if (strcmp (argv[i], "--data-max") == 0 and i+1 < argc) {
+ data_max = atof (argv[++i]);
+ } else if (strcmp (argv[i], "--out-hdf5-4d-index") == 0 and i+1 < argc) {
+      out_hdf5_4d_index = atoi (argv[++i]);
+ } else {
+ break;
+ }
+ }
+
+ /* give some help if called with incorrect number of parameters */
+ if (help or i >= argc or num_slab_options != (out3D ? 0 : 1)) {
+ const string indent (strlen (argv[0]) + 1, ' ');
+    cerr << endl << " ----------------------------"
+         << endl << " Carpet HDF5 to Binary Slicer"
+         << endl << " ----------------------------" << endl
+ << endl
+ << "Usage: " << endl
+ << argv[0] << " [--help]" << endl
+ << indent << "[--match <regex string>]" << endl
+ << indent << "[--dataset <string (can contain %p)>]" << endl
+ << indent << "[--out-precision <digits>]" << endl
+ << indent << "[--timestep <cctk_time value>]" << endl
+ << indent << "[--verbose]" << endl
+         << indent << "<--out0d-point value value value> | <--out1d-line value value> | <--out2d-plane value> | <--out3d-cube>" << endl
+ << indent << "--size <ni> <nj> <nk>" << endl
+ << indent << "--basename <filename>" << endl
+ << indent << "[--out-hdf5]" << endl
+ << indent << "[--out-float]" << endl
+ << indent << "[--out-hdf5-4d-index <t index>]" << endl
+ << indent << "<hdf5_infiles>" << endl << endl
+ << " where" << endl
+ << " [--help] prints this help" << endl
+ << " [--match <regex string>] selects HDF5 datasets by their names" << endl
+ << " matching a regex string using POSIX" << endl
+ << " Extended Regular Expression syntax" << endl
+ << " [--out-precision <digits>] sets the output precision" << endl
+ << " for floating-point numbers" << endl
+ << " [--timestep <cctk_time value>] selects all HDF5 datasets which" << endl
+ << " (fuzzily) match the specified time" << endl
+ << " [--verbose] lists skipped HDF5 datasets on stderr" << endl
+ << endl
+         << " and either <--out0d-point value value value> or <--out1d-line value value> or <--out2d-plane value> or <--out3d-cube>" << endl
+ << " must be specified as in the following:" << endl
+ << " --out0d-point <origin_x> <origin_y> <origin_z>" << endl
+ << endl
+ << " --out1d-xline-yz <origin_y> <origin_z>" << endl
+ << " --out1d-yline-xz <origin_x> <origin_z>" << endl
+ << " --out1d-zline-xy <origin_x> <origin_y>" << endl
+ << endl
+ << " --out2d-yzplane-x <origin_x>" << endl
+ << " --out2d-xzplane-y <origin_y>" << endl
+ << " --out2d-xyplane-z <origin_z>" << endl
+ << endl
+ << " --out3d-cube" << endl
+#if 0
+ << " --out2d-yzplane-xi <origin_xi>" << endl
+ << " --out2d-xzplane-yi <origin_yi>" << endl
+ << " --out2d-xyplane-zi <origin_zi>" << endl
+#endif
+ << endl
+ << " eg, to extract a 2D xy-plane at z = 0:" << endl
+ << " " << argv[0] << " --out2d-xyplane-z 0 alp.file_*.h5" << endl
+ << endl
+ << " or the same plane but only for datasets of refinement level 0:" << endl
+ << " " << argv[0] << " --match 'ADMBASE::alp it=[0-9]+ tl=0 rl=0' --out2d-xyplane-z 0 alp.file_*.h5" << endl;
+ return (0);
+ }
+
+ if (not basename1) {
+ cerr << "Parameter --basename is not set" << endl;
+ return 1;
+ }
+
+ if (regex and datasetname) {
+ cerr << "Parameters --match and --dataset cannot be used together" << endl;
+ return 1;
+ }
+
+ {
+ if (datadims[0]<0 || datadims[1]<0 || datadims[2]<0) {
+ cerr << "Parameter --size is not set" << endl;
+ return 1;
+ }
+ size_t const npoints = (size_t)datadims[0]*datadims[1]*datadims[2];
+ dataptr = new float[npoints];
+ assert (dataptr);
+ bdataptr = new unsigned char[npoints];
+ assert (bdataptr);
+ }
+
+ vector<hid_t> filelist;
+ for (; i < argc; i++) {
+ hid_t file;
+
+ H5E_BEGIN_TRY {
+ file = H5Fopen (argv[i], H5F_ACC_RDONLY, H5P_DEFAULT);
+ } H5E_END_TRY;
+
+ if (file < 0) {
+ cerr << "Could not open Carpet HDF5 input file '" << argv[i] << "'"
+ << endl << endl;
+ continue;
+ }
+
+ if (not datasetname) {
+ cout << "File " << argv[i] << ":" << endl;
+ size_t const before = patchlist.size();
+ CHECK_HDF5 (H5Giterate (file, "/", NULL, FindPatches, &file));
+ size_t const after = patchlist.size();
+ size_t const count = after - before;
+ cout << " will read " << count << " "
+ << (count==1 ? "patch" : "patches") << endl;
+ } else {
+ const string ppmarker ("%p");
+ const string filemarker (".file_");
+ string name (datasetname);
+ const size_t pppos = name.find(ppmarker);
+ if (pppos != string::npos) {
+ const string filename (argv[i]);
+ const size_t procpos = filename.find(filemarker);
+ assert (procpos != string::npos);
+ const int proc = atoi(filename.c_str() + procpos + filemarker.length());
+ ostringstream namebuf;
+ namebuf << name.substr(0,pppos) << proc
+ << name.substr(pppos + ppmarker.length());
+ name = namebuf.str();
+ }
+ cout << " Examining dataset " << name << endl;
+ hid_t group;
+ CHECK_HDF5 (group = H5Dopen (file, name.c_str()));
+ FindPatches (group, name.c_str(), &file);
+ CHECK_HDF5 (H5Dclose (group));
+ }
+
+ filelist.push_back (file);
+ }
+
+ if (! patchlist.empty()) {
+    /* now sort the patches by iteration, refinement level, map, and component */
+ sort (patchlist.begin(), patchlist.end(), ComparePatches);
+
+    cout << "# output created by";
+ for (int j = 0; j < argc; j++) cout << " " << argv[j];
+ cout << endl << "#" << endl;
+
+ int last_iteration = patchlist[0].iteration - 1;
+ for (size_t j = 0; j < patchlist.size(); j++) {
+ ReadPatch (patchlist[j], last_iteration);
+ last_iteration = patchlist[j].iteration;
+ }
+ } else {
+ cerr << "No valid datasets found" << endl << endl;
+ }
+
+ // find minimum and maximum
+ {
+ float const float_max = numeric_limits<float>::max();
+ float data_min1 = +float_max;
+ float data_max1 = -float_max;
+ double data_sum = 0.0;
+ double data_sum2 = 0.0;
+ double data_count = 0.0;
+ size_t const npoints = (size_t)datadims[0]*datadims[1]*datadims[2];
+ for (size_t n=0; n<npoints; ++n) {
+ float const data = dataptr[n];
+ if (data > -sqrt(float_max) and data < sqrt(float_max)) {
+ data_min1 = min(data_min1, data);
+ data_max1 = max(data_max1, data);
+ data_sum += (double)data;
+ data_sum2 += pow((double)data, 2.0);
+ ++ data_count;
+ }
+ }
+ double const data_avg = data_sum / data_count;
+ double const data_stddev =
+ sqrt(max(0.0, data_sum2/data_count - pow(data_avg, 2)));
+
+ cerr << "Minimum: " << data_min1 << endl
+ << "Maximum: " << data_max1 << endl
+ << "Average: " << data_avg << endl
+ << "Standard deviation: " << data_stddev << endl
+ << "Valid points: " << data_count << endl;
+ }
+
+ // convert from float to byte
+ if (not out_float) {
+ unsigned char const byte_min = 0;
+ unsigned char const byte_max = 255;
+ float const scale_factor =
+ ((float)byte_max - (float)byte_min + 1) / (data_max - data_min);
+ size_t const npoints = (size_t)datadims[0]*datadims[1]*datadims[2];
+ for (size_t n=0; n<npoints; ++n) {
+ float const data = dataptr[n];
+ float const data_scaled =
+ (float)byte_min + floor((data - data_min) * scale_factor);
+ float const data_clamped =
+ max((float)byte_min, min((float)byte_max, data_scaled));
+ unsigned char const bdata =
+ (unsigned char)data_clamped;
+ bdataptr[n] = bdata;
+ }
+ }
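+  // Example: with the default data_min = -1 and data_max = +1 the scale
+  // factor is 256/2 = 128, so a value of 0.0 maps to byte 128, -1.0 to
+  // byte 0, and anything outside [-1,+1] is clamped to 0 or 255.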
+
+ hid_t const out_type = out_float ? H5T_NATIVE_FLOAT : H5T_NATIVE_UCHAR;
+ size_t const out_size = out_float ? sizeof *dataptr : sizeof *bdataptr;
+ void const * const out_data =
+ out_float ?
+ static_cast<void const*>(dataptr) :
+ static_cast<void const*>(bdataptr);
+
+ // write output files
+ if (out_hdf5) {
+ // HDF5 output
+ if (out_hdf5_4d_index == -1) {
+ // 3D output
+ char filename[1000];
+ snprintf (filename, sizeof filename, "%s.h5", basename1);
+ hid_t const outfile = H5Fcreate (filename, H5F_ACC_TRUNC,
+ H5P_DEFAULT, H5P_DEFAULT);
+ assert (outfile >= 0);
+ hsize_t const dims[3] = { datadims[2], datadims[1], datadims[0] };
+ hid_t const dataspace = H5Screate_simple (3, dims, NULL);
+ assert (dataspace >= 0);
+ hid_t const dataset_properties = H5Pcreate (H5P_DATASET_CREATE);
+ assert (dataset_properties >= 0);
+ hid_t const dataset = H5Dcreate (outfile, "data",
+ out_type, dataspace,
+ dataset_properties);
+ assert (dataset >= 0);
+ hid_t const transfer_properties = H5Pcreate (H5P_DATASET_XFER);
+ assert (transfer_properties >= 0);
+ {
+ herr_t const herr = H5Dwrite (dataset, out_type,
+ H5S_ALL, H5S_ALL,
+ transfer_properties, out_data);
+ assert (herr >= 0);
+ }
+ CHECK_HDF5 (H5Dclose (dataset));
+ CHECK_HDF5 (H5Sclose (dataspace));
+ CHECK_HDF5 (H5Fclose (outfile));
+ } else {
+ // 4d output
+ char filename[1000];
+ snprintf (filename, sizeof filename, "%s.h5", basename1);
+ htri_t is_hdf5;
+ H5E_BEGIN_TRY {
+ is_hdf5 = H5Fis_hdf5 (filename);
+ } H5E_END_TRY;
+ bool const file_is_new = not (is_hdf5 > 0);
+ hid_t const outfile =
+ file_is_new ?
+ H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT) :
+ H5Fopen (filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ assert (outfile >= 0);
+
+ hsize_t const dims[4] =
+ { 0, datadims[2], datadims[1], datadims[0] };
+ hsize_t const maxdims[4] =
+ { H5S_UNLIMITED, datadims[2], datadims[1], datadims[0] };
+ hid_t const dataspace = H5Screate_simple (4, dims, maxdims);
+ assert (dataspace >= 0);
+ hid_t const dataset_properties = H5Pcreate (H5P_DATASET_CREATE);
+ assert (dataset_properties >= 0);
+ // hsize_t const chunk_dims[4] = {1, 32, 32, 32 };
+ // Chunks must be < 4 GByte
+ // hsize_t const chunk_dims[4] =
+ // {1, datadims[2], datadims[1], datadims[0] };
+ hsize_t const chunk_dims[4] =
+ {1, min(128,datadims[2]), min(128,datadims[1]), min(128,datadims[0]) };
+ CHECK_HDF5 (H5Pset_chunk (dataset_properties, 4, chunk_dims));
+ int const compression_level = 1;
+ CHECK_HDF5 (H5Pset_deflate (dataset_properties, compression_level));
+ hid_t const dataset =
+ file_is_new ?
+ H5Dcreate (outfile, "data",
+ out_type, dataspace, dataset_properties) :
+ H5Dopen (outfile, "data");
+ assert (dataset >= 0);
+ // TODO: assert that the dataset's dataspace has the correct
+ // dimensions
+
+ hsize_t const extended_dims[4] =
+ { out_hdf5_4d_index+1, datadims[2], datadims[1], datadims[0] };
+ CHECK_HDF5 (H5Dextend (dataset, extended_dims));
+
+ // for debugging only
+ // CHECK_HDF5 (H5Fflush (outfile, H5F_SCOPE_GLOBAL));
+
+ hsize_t const memory_dims[4] =
+ { 1, datadims[2], datadims[1], datadims[0] };
+ hid_t const memory_dataspace = H5Screate_simple (4, memory_dims, NULL);
+ assert (memory_dataspace >= 0);
+
+ hid_t const file_dataspace = H5Screate_simple (4, extended_dims, NULL);
+ assert (file_dataspace >= 0);
+ // hssize_t const file_offset[4] =
+ // { out_hdf5_4d_index, 0, 0, 0 };
+ // CHECK_HDF5 (H5Soffset_simple (file_dataspace, file_offset));
+ hsize_t const file_offset[4] = { out_hdf5_4d_index, 0, 0, 0 };
+ CHECK_HDF5 (H5Sselect_hyperslab (file_dataspace, H5S_SELECT_SET,
+ file_offset, NULL, memory_dims, NULL));
+
+ hid_t const transfer_properties = H5Pcreate (H5P_DATASET_XFER);
+ assert (transfer_properties >= 0);
+ CHECK_HDF5 (H5Dwrite (dataset,
+ out_type, memory_dataspace, file_dataspace,
+ transfer_properties, out_data));
+
+ CHECK_HDF5 (H5Sclose (file_dataspace));
+ CHECK_HDF5 (H5Sclose (memory_dataspace));
+ CHECK_HDF5 (H5Dclose (dataset));
+ CHECK_HDF5 (H5Sclose (dataspace));
+ CHECK_HDF5 (H5Fclose (outfile));
+ }
+ } else {
+ // raw binary output
+ {
+ char filename[1000];
+ snprintf (filename, sizeof filename, "%s.bin", basename1);
+ FILE* const outfile = fopen(filename, "w");
+ assert (outfile);
+ size_t const npoints = (size_t)datadims[0]*datadims[1]*datadims[2];
+ size_t const icnt = fwrite (out_data, out_size, npoints, outfile);
+ assert (icnt == npoints);
+ int const ierr = fclose (outfile);
+ assert (not ierr);
+ }
+ }
+
+ {
+ delete[] dataptr;
+ dataptr = NULL;
+ delete[] bdataptr;
+ bdataptr = NULL;
+ }
+
+ // close all input files
+ for (size_t j = 0; j < filelist.size(); j++) {
+ if (filelist[j] >= 0) {
+ CHECK_HDF5 (H5Fclose (filelist[j]));
+ }
+ }
+
+ return (0);
+}
+
+
+ /*@@
+ @routine FindPatches
+ @date Tue 8 August 2006
+ @author Thomas Radke
+ @desc
+ The worker routine which is called by H5Giterate().
+ It checks whether the current HDF5 object is a patch matching
+ the user's slab criteria, and adds it to the patches list.
+ @enddesc
+
+ @var group
+ @vdesc HDF5 object to start the iteration
+ @vtype hid_t
+ @vio in
+ @endvar
+ @var name
+ @vdesc name of the object at the current iteration
+ @vtype const char *
+ @vio in
+ @endvar
+ @var _file
+ @vdesc pointer to the descriptor of the currently opened file
+ @vtype void *
+ @vio in
+ @endvar
+@@*/
+static herr_t FindPatches (hid_t group, const char *name, void *_file)
+{
+ int ndims;
+ hid_t dataset, datatype, attr;
+ H5G_stat_t object_info;
+ H5T_class_t typeclass;
+ patch_t patch;
+
+
+ /* we are interested in datasets only - skip anything else */
+ CHECK_HDF5 (H5Gget_objinfo (group, name, 0, &object_info));
+ if (object_info.type != H5G_DATASET) {
+ return (0);
+ }
+
+ /* check the dataset's datatype - make sure it is either integer or real */
+ CHECK_HDF5 (dataset = H5Dopen (group, name));
+ patch.name = name;
+
+ CHECK_HDF5 (datatype = H5Dget_type (dataset));
+ CHECK_HDF5 (typeclass = H5Tget_class (datatype));
+ CHECK_HDF5 (H5Tclose (datatype));
+
+ // read the timestep
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "time"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_DOUBLE, &patch.time));
+ CHECK_HDF5 (H5Aclose (attr));
+
+ // read the dimensions
+ hid_t dataspace = -1;
+ CHECK_HDF5 (dataspace = H5Dget_space (dataset));
+ ndims = H5Sget_simple_extent_ndims (dataspace);
+
+ bool is_okay = false;
+ if (typeclass != H5T_FLOAT and typeclass != H5T_INTEGER) {
+ if (verbose) {
+ cerr << "skipping dataset '" << name << "':" << endl
+ << " is not of integer or floating-point datatype" << endl;
+ }
+ } else if (ndims != 3) {
+ if (verbose) {
+ cerr << "skipping dataset '" << name << "':" << endl
+ << " dataset has " << ndims << " dimensions" << endl;
+ }
+ } else if (regex && regexec (&preg, name, 0, NULL, 0)) {
+ if (verbose) {
+ cerr << "skipping dataset '" << name << "':" << endl
+ << " name doesn't match regex" << endl;
+ }
+ } else if (timestep != PARAMETER_UNSET and
+ fabs (timestep - patch.time) > FUZZY_FACTOR) {
+ if (verbose) {
+ cerr << "skipping dataset '" << name << "':" << endl
+ << " timestep (" << patch.time << ") doesn't match" << endl;
+ }
+ } else {
+ if (not out3D) {
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "origin"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_DOUBLE, patch.origin));
+ CHECK_HDF5 (H5Aclose (attr));
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "delta"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_DOUBLE, patch.delta));
+ CHECK_HDF5 (H5Aclose (attr));
+ }
+ CHECK_HDF5 (H5Sget_simple_extent_dims (dataspace, patch.dims, NULL));
+
+ int i;
+ is_okay = out3D;
+ if (not out3D) {
+ for (i = 0; i < 3; i++) {
+ if (slab_coord[i] != PARAMETER_UNSET) {
+ is_okay = slab_coord[i] >= patch.origin[i] and
+ slab_coord[i] <= patch.origin[i] +
+ (patch.dims[2-i]-1)*patch.delta[i];
+ if (not is_okay) break;
+ }
+ }
+ }
+ if (not is_okay) {
+ if (verbose) {
+ cerr << "skipping dataset '" << name << "':" << endl
+ << " slab " << slab_coord[i] << " is out of dataset range ["
+ << patch.origin[i] << ", "
+ << patch.origin[i] + (patch.dims[2-i]-1)*patch.delta[i] << "]"
+ << endl;
+ }
+ }
+ }
+ CHECK_HDF5 (H5Sclose (dataspace));
+
+ if (not is_okay) {
+ CHECK_HDF5 (H5Dclose (dataset));
+ return (0);
+ }
+
+ patch.file = *(hid_t *) _file;
+ patch.is_real = typeclass == H5T_FLOAT;
+
+ /* read attributes */
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "timestep"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_INT, &patch.iteration));
+ CHECK_HDF5 (H5Aclose (attr));
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "level"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_INT, &patch.rflevel));
+ CHECK_HDF5 (H5Aclose (attr));
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "group_timelevel"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_INT, &patch.timelevel));
+ CHECK_HDF5 (H5Aclose (attr));
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "carpet_mglevel"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_INT, &patch.mglevel));
+ CHECK_HDF5 (H5Aclose (attr));
+ CHECK_HDF5 (attr = H5Aopen_name (dataset, "iorigin"));
+ CHECK_HDF5 (H5Aread (attr, H5T_NATIVE_INT, patch.iorigin));
+ CHECK_HDF5 (H5Aclose (attr));
+
+ CHECK_HDF5 (H5Dclose (dataset));
+
+ // the component and map info are not contained in the HDF5 dataset
+ // and thus have to be parsed from the dataset's name
+ patch.map = patch.component = 0;
+ string::size_type loc = patch.name.find (" m=", 0);
+ if (loc != string::npos) patch.map = atoi (patch.name.c_str() + loc + 3);
+ loc = patch.name.find (" c=", 0);
+ if (loc != string::npos) patch.component = atoi(patch.name.c_str() + loc + 3);
+
+ if (max_rflevel < patch.rflevel) max_rflevel = patch.rflevel;
+
+ patchlist.push_back (patch);
+
+ return (0);
+}
+
+
+ /*@@
+ @routine ReadPatch
+ @date Tue 8 August 2006
+ @author Thomas Radke
+ @desc
+ Reads a slab from the given patch
+ and outputs it in CarpetIOASCII format.
+ @enddesc
+
+ @var patch
+ @vdesc patch to output
+ @vtype const patch_t&
+ @vio in
+ @endvar
+ @var last_iteration
+ @vdesc iteration number of the last patch that was output
+ @vtype int
+ @vio in
+ @endvar
+@@*/
+static void ReadPatch (const patch_t& patch, int last_iteration)
+{
+ if (patch.iteration != last_iteration) cout << endl;
+
+ cout << "# iteration " << patch.iteration << endl
+ << "# refinement level " << patch.rflevel
+ << " multigrid level " << patch.mglevel
+ << " map " << patch.map
+ << " component " << patch.component
+ << " time level " << patch.timelevel
+ << endl;
+
+ // C order
+ h5size_t slabstart[3] =
+ {patch.iorigin[2], patch.iorigin[1], patch.iorigin[0]};
+ hsize_t slabcount[3] = {patch.dims[0], patch.dims[1], patch.dims[2]};
+ hsize_t slabsize[3] = {datadims[2], datadims[1], datadims[0]};
+ for (int d=0; d<3; ++d) {
+ if (slab_coord[d] != PARAMETER_UNSET) {
+ slabstart[2-d] = (h5size_t) ((slab_coord[d] - patch.origin[d]) /
+ patch.delta[d] + 0.5);
+ slabcount[2-d] = 1;
+ }
+ }
+ for (int d=0; d<3; ++d) {
+ cout << "slab[" << d << "]: lbnd=" << slabstart[2-d] << " lsh=" << slabcount[2-d] << " gsh=" << slabsize[2-d] << endl;
+ }
+ for (int d=0; d<3; ++d) {
+ assert (slabstart[2-d] >= 0);
+ assert (slabcount[2-d] >= 0);
+ assert (slabstart[2-d] + slabcount[2-d] <= slabsize[2-d]);
+ }
+
+ hid_t dataset, filespace, slabspace;
+ CHECK_HDF5 (dataset = H5Dopen (patch.file, patch.name.c_str()));
+ CHECK_HDF5 (filespace = H5Dget_space (dataset));
+#if 0
+ CHECK_HDF5 (slabspace = H5Screate_simple (3, slabcount, slabsize));
+ CHECK_HDF5 (H5Soffset_simple (slabspace, (hssize_t*)slabstart));
+// CHECK_HDF5 (H5Sselect_hyperslab (filespace, H5S_SELECT_SET,
+// slabstart, NULL, slabcount, NULL));
+#endif
+ CHECK_HDF5 (slabspace = H5Screate_simple (3, slabsize, NULL));
+ CHECK_HDF5 (H5Sselect_hyperslab (slabspace, H5S_SELECT_SET,
+ slabstart, NULL, slabcount, NULL));
+ assert (patch.is_real);
+ CHECK_HDF5 (H5Dread (dataset,
+ H5T_NATIVE_FLOAT,
+ slabspace, filespace, H5P_DEFAULT,
+ dataptr));
+ CHECK_HDF5 (H5Sclose (slabspace));
+ CHECK_HDF5 (H5Sclose (filespace));
+ CHECK_HDF5 (H5Dclose (dataset));
+}
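+
+// The hyperslab selection above places a patch into the global output
+// array at its iorigin; a sliced direction is collapsed to a single layer,
+// chosen as the patch plane closest to the requested slab_coord.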
+
+
+// comparison function to sort patches by iteration number, refinement level,
+// multigrid level, map, and component
+static bool ComparePatches (const patch_t& a, const patch_t& b)
+{
+ if (a.iteration != b.iteration) return (a.iteration < b.iteration);
+ if (a.rflevel != b.rflevel) return (a.rflevel < b.rflevel);
+ if (a.mglevel != b.mglevel) return (a.mglevel < b.mglevel);
+ if (a.map != b.map) return (a.map < b.map);
+ if (a.component != b.component) return (a.component < b.component);
+ return (a.timelevel < b.timelevel);
+}