author     Ian Hinder <ian.hinder@aei.mpg.de>        2010-05-17 15:01:39 +0200
committer  Barry Wardell <barry.wardell@gmail.com>   2011-12-14 18:21:10 +0000
commit     d3d28d5ce4b2896d09b92253a23fda46ee3bdd73 (patch)
tree       03eacc08cf9cc0d4c283db58560017d60a62330e /Carpet/CarpetIOHDF5
parent     4b184603a63657af2a63c9c0e1f72ea4a9d998c0 (diff)
CarpetIOHDF5: Index file support
Scanning the attributes of a large CarpetIOHDF5 output file, as is necessary in the visitCarpetHDF5 plugin, can be very time-consuming. This commit adds support for writing an "index" HDF5 file at the same time as the data file, controlled by the new parameter CarpetIOHDF5::output_index. The index file is identical to the data file except that its datasets are null (size 1 in each dimension), so it is very small; the attributes can then be read from the index file instead of the data file, greatly improving performance. Because the datasets have size 1 in the index file, an additional attribute (h5shape) is added to each dataset to record the true dataset dimensions.
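For reference, enabling the feature from a Cactus parameter file would look roughly like the sketch below. The output frequency and variable selection are arbitrary examples, and the parameter is shown under the implementation name IOHDF5, which is how CarpetIOHDF5 parameters are usually set in parameter files:

    ActiveThorns = "CarpetIOHDF5"       # plus the usual Carpet and IOUtil thorns
    IOHDF5::out_every    = 256          # hypothetical output frequency
    IOHDF5::out_vars     = "ADMBase::lapse"   # hypothetical variable selection
    IOHDF5::output_index = "yes"        # also write a small .idx index file per data file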
Diffstat (limited to 'Carpet/CarpetIOHDF5')
-rw-r--r--  Carpet/CarpetIOHDF5/param.ccl            |  4
-rw-r--r--  Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc  | 28
-rw-r--r--  Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh  |  3
-rw-r--r--  Carpet/CarpetIOHDF5/src/Output.cc        | 41
4 files changed, 70 insertions(+), 6 deletions(-)
diff --git a/Carpet/CarpetIOHDF5/param.ccl b/Carpet/CarpetIOHDF5/param.ccl
index c8d7afa30..2054d6918 100644
--- a/Carpet/CarpetIOHDF5/param.ccl
+++ b/Carpet/CarpetIOHDF5/param.ccl
@@ -424,3 +424,7 @@ INT compression_level "Compression level to use for writing HDF5 data" STEERABLE
BOOLEAN use_checksums "Use checksums for the HDF5 data" STEERABLE = ALWAYS
{
} "no"
+
+BOOLEAN output_index "Output an index file for each output file" STEERABLE = ALWAYS
+{
+} "no"
diff --git a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
index 452323ba2..cee765395 100644
--- a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
+++ b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.cc
@@ -371,6 +371,12 @@ static void* SetupGH (tFleshConfig* const fleshconfig,
"'proc'. Ignoring setting for IO::out_unchunked...");
}
+ if (output_index && CCTK_EQUALS (out_mode, "onefile"))
+ CCTK_VWarn (CCTK_WARN_COMPLAIN, __LINE__, __FILE__, CCTK_THORNSTRING,
+ "CarpetIOHDF5::output_index with IO::out_mode = '%s' is not implemented in %s. "
+ "Index will not be output",
+ out_mode, CCTK_THORNSTRING);
+
// Now set hdf5 complex datatypes
HDF5_ERROR (myGH->HDF5_COMPLEX =
H5Tcreate (H5T_COMPOUND, sizeof (CCTK_COMPLEX)));
@@ -694,7 +700,12 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
snprintf (buffer, sizeof (buffer), ".file_%d", ioproc);
filename.append (buffer);
}
+
+ string base_filename(filename);
filename.append (out_extension);
+
+ string index_filename(base_filename + ".idx" + out_extension);
+
const char* const c_filename = filename.c_str();
// check if the file has been created already
@@ -751,6 +762,7 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
// Open the output file if this is a designated I/O processor
hid_t file = -1;
+ hid_t index_file = -1;
CCTK_REAL io_files = 0;
CCTK_REAL io_bytes = 0;
BeginTimingIO (cctkGH);
@@ -765,11 +777,23 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
if (is_new_file) {
HDF5_ERROR (file = H5Fcreate (c_filename, H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT));
+ if (output_index) {
+ HDF5_ERROR (index_file = H5Fcreate (index_filename.c_str(),
+ H5F_ACC_TRUNC, H5P_DEFAULT,
+ H5P_DEFAULT));
+ }
// write metadata information
error_count +=
WriteMetadata (cctkGH, nioprocs, firstvar, numvars, false, file);
+
+ if (output_index) {
+ error_count +=
+ WriteMetadata (cctkGH, nioprocs, firstvar, numvars, false, index_file);
+ }
} else {
HDF5_ERROR (file = H5Fopen (c_filename, H5F_ACC_RDWR, H5P_DEFAULT));
+ if (output_index)
+ HDF5_ERROR (index_file = H5Fopen (index_filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT));
}
io_files += 1;
}
@@ -795,7 +819,7 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
} else if (CCTK_EQUALS (out_mode, "onefile")) {
error_count += WriteVarChunkedSequential (cctkGH, file, io_bytes, r, false);
} else {
- error_count += WriteVarChunkedParallel (cctkGH, file, io_bytes, r, false);
+ error_count += WriteVarChunkedParallel (cctkGH, file, io_bytes, r, false, index_file);
}
if (r != myGH->requests[var]) IOUtil_FreeIORequest (&r);
@@ -811,6 +835,8 @@ static int OutputVarAs (const cGH* const cctkGH, const char* const fullname,
// Close the file
if (file >= 0) {
HDF5_ERROR (H5Fclose (file));
+ if (output_index)
+ HDF5_ERROR (H5Fclose (index_file));
}
{
CCTK_REAL local[2], global[2];
diff --git a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
index 3a30ba48f..dfe3e01cc 100644
--- a/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
+++ b/Carpet/CarpetIOHDF5/src/CarpetIOHDF5.hh
@@ -124,7 +124,8 @@ namespace CarpetIOHDF5
hid_t file,
CCTK_REAL & io_bytes,
const ioRequest* const request,
- bool called_from_checkpoint);
+ bool called_from_checkpoint,
+ hid_t index = -1);
int WriteMetadata (const cGH * const cctkGH, int const nioprocs,
int const firstvar, int const numvars,
diff --git a/Carpet/CarpetIOHDF5/src/Output.cc b/Carpet/CarpetIOHDF5/src/Output.cc
index 1a77b2a39..4967785d6 100644
--- a/Carpet/CarpetIOHDF5/src/Output.cc
+++ b/Carpet/CarpetIOHDF5/src/Output.cc
@@ -25,7 +25,7 @@ using namespace Carpet;
static int AddAttributes (const cGH *const cctkGH, const char *fullname,
int vdim, int refinementlevel,
const ioRequest* const request,
- const ibbox& bbox, hid_t dataset);
+ const ibbox& bbox, hid_t dataset, bool is_index = false);
int WriteVarUnchunked (const cGH* const cctkGH,
@@ -486,7 +486,8 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
hid_t outfile,
CCTK_REAL & io_bytes,
const ioRequest* const request,
- bool called_from_checkpoint)
+ bool called_from_checkpoint,
+ hid_t indexfile)
{
DECLARE_CCTK_PARAMETERS;
@@ -586,20 +587,24 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
if (request->check_exist) {
H5E_BEGIN_TRY {
H5Gunlink (outfile, datasetname.str().c_str());
+ if (indexfile != -1)
+ H5Gunlink (indexfile, datasetname.str().c_str());
} H5E_END_TRY;
}
// Get the shape of the HDF5 dataset (in Fortran index order)
hsize_t shape[dim];
+ hsize_t index_shape[dim];
hssize_t origin[dim];
for (int d = 0; d < group.dim; ++d) {
assert (group.dim-1-d>=0 and group.dim-1-d<dim);
origin[group.dim-1-d] = (bbox.lower() / bbox.stride())[d];
shape[group.dim-1-d] = (bbox.shape() / bbox.stride())[d];
+ index_shape[group.dim-1-d] = 1;
}
// Write the component as an individual dataset
- hid_t plist, dataspace, dataset;
+ hid_t plist, dataspace, dataset, index_dataspace, index_dataset;
HDF5_ERROR (plist = H5Pcreate (H5P_DATASET_CREATE));
// enable compression if requested
const int compression_lvl = request->compression_level >= 0 ?
@@ -617,6 +622,14 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
HDF5_ERROR (dataspace = H5Screate_simple (group.dim, shape, NULL));
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
+
+ if (indexfile != -1) {
+ HDF5_ERROR (index_dataspace = H5Screate_simple (group.dim,
+ index_shape, NULL));
+ HDF5_ERROR (index_dataset = H5Dcreate (indexfile, datasetname.str().c_str(),
+ filedatatype, index_dataspace, H5P_DEFAULT));
+ }
+
io_bytes +=
H5Sget_simple_extent_npoints (dataspace) * H5Tget_size (filedatatype);
HDF5_ERROR (H5Pclose (plist));
@@ -627,6 +640,13 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
request, bbox, dataset);
HDF5_ERROR (H5Dclose (dataset));
+ if (indexfile != -1) {
+ HDF5_ERROR (H5Sclose (index_dataspace));
+ error_count += AddAttributes (cctkGH, fullname, group.dim,refinementlevel,
+ request, bbox, index_dataset, true);
+ HDF5_ERROR (H5Dclose (index_dataset));
+ }
+
if (data != mydata) free (data);
} END_LOCAL_COMPONENT_LOOP;
@@ -643,7 +663,7 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
static int AddAttributes (const cGH *const cctkGH, const char *fullname,
int vdim, int refinementlevel,
const ioRequest* request,
- const ibbox& bbox, hid_t dataset)
+ const ibbox& bbox, hid_t dataset, bool is_index)
{
assert (vdim>=0 and vdim<=dim);
int error_count = 0;
@@ -790,6 +810,19 @@ static int AddAttributes (const cGH *const cctkGH, const char *fullname,
HDF5_ERROR (H5Awrite (attr, H5T_NATIVE_INT, &iorigin[0]));
HDF5_ERROR (H5Aclose (attr));
+ hsize_t shape[vdim];
+ for (int d = 0; d < vdim; ++d) {
+ assert (vdim-1-d>=0 and vdim-1-d<vdim);
+ shape[vdim-1-d] = (bbox.shape() / bbox.stride())[d];
+ }
+
+ if (is_index) {
+ HDF5_ERROR (attr = H5Acreate (dataset, "h5shape", H5T_NATIVE_HSIZE,
+ dataspace, H5P_DEFAULT));
+ HDF5_ERROR (H5Awrite (attr, H5T_NATIVE_HSIZE, &shape[0]));
+ HDF5_ERROR (H5Aclose (attr));
+ }
+
HDF5_ERROR (H5Sclose (dataspace));
// return the number of errors that occured during this output
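For context (not part of this commit), a reader such as the visitCarpetHDF5 plugin could recover the true dataset extent from an index file roughly as follows. This is a minimal sketch using the same HDF5 1.6-style C API as the code above; the file name and dataset name are hypothetical examples, a 3D grid function is assumed, and error checking is omitted:

    #include <hdf5.h>
    #include <stdio.h>

    int main (void)
    {
      /* open the small index file instead of the full data file */
      hid_t file = H5Fopen ("alp.idx.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset = H5Dopen (file, "ADMBASE::alp it=0 tl=0 rl=0 c=0");

      /* the dataset itself has extent 1 in every dimension; the true
         extent (in HDF5/C index order) is stored in the "h5shape"
         attribute written by AddAttributes() above */
      hsize_t shape[3];
      hid_t attr = H5Aopen_name (dataset, "h5shape");
      H5Aread (attr, H5T_NATIVE_HSIZE, shape);
      printf ("true shape: %d x %d x %d\n",
              (int) shape[0], (int) shape[1], (int) shape[2]);

      H5Aclose (attr);
      H5Dclose (dataset);
      H5Fclose (file);
      return 0;
    }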