author     Erik Schnetter <schnetter@gmail.com>   2012-11-16 18:55:47 -0500
committer  Erik Schnetter <schnetter@gmail.com>   2012-11-22 09:59:16 -0500
commit     53a1c146bc09c67ea709c14dab4c73ebebed86dc (patch)
tree       b5bd034cf3c0e6de4cb422ceffe52e01de06f505 /Carpet/CarpetIOHDF5
parent     df843816d07d18e2c0407915d1b8113bfe7ab720 (diff)
Allow padding in transport operators
Rewrite padding infrastructure. Add padded array extents to transport operator APIs.
Diffstat (limited to 'Carpet/CarpetIOHDF5')
-rw-r--r--  Carpet/CarpetIOHDF5/src/Input.cc    12
-rw-r--r--  Carpet/CarpetIOHDF5/src/Output.cc   40
2 files changed, 45 insertions, 7 deletions
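
Aside (not part of the commit): the Output.cc hunks below all follow one pattern. Because the in-memory processor component may now carry padding, a dataspace is created with the padded_shape() extent and a hyperslab with start 0 and count shape() selects just the unpadded data inside it. A minimal standalone sketch of that idea, applied to the memory dataspace of an H5Dwrite against the plain HDF5 1.8 C API (file name, dataset name and extents are made up; the patch itself uses the 1.6-style H5Dcreate and Carpet's HDF5_ERROR wrapper):

#include <hdf5.h>
#include <vector>

int main()
{
  // Logical extent of the data versus the padded extent of the buffer
  // (made-up numbers; Carpet takes them from shape()/padded_shape()).
  const hsize_t shape[2]        = {10, 10};
  const hsize_t padded_shape[2] = {10, 16};
  std::vector<double> buf(padded_shape[0] * padded_shape[1], 0.0);

  hid_t file = H5Fcreate("padded.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

  // File dataspace and dataset: only the logical shape, no padding on disk.
  hid_t filespace = H5Screate_simple(2, shape, NULL);
  hid_t dataset = H5Dcreate(file, "data", H5T_NATIVE_DOUBLE, filespace,
                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

  // Memory dataspace: the padded buffer extent, with a hyperslab of
  // start 0 and count equal to the logical shape selecting the real data;
  // this mirrors the hyperslab_start/hyperslab_count arrays in the patch.
  hid_t memspace = H5Screate_simple(2, padded_shape, NULL);
  const hsize_t start[2] = {0, 0};
  H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, NULL, shape, NULL);

  H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT,
           buf.data());

  H5Dclose(dataset);
  H5Sclose(memspace);
  H5Sclose(filespace);
  H5Fclose(file);
  return 0;
}

The reading path in Input.cc is the mirror image: memdims now carries padded_shape() while count still carries the true extent, so the memory hyperslab skips the padding when the buffer is filled.
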
diff --git a/Carpet/CarpetIOHDF5/src/Input.cc b/Carpet/CarpetIOHDF5/src/Input.cc
index 637bb7032..4f932adee 100644
--- a/Carpet/CarpetIOHDF5/src/Input.cc
+++ b/Carpet/CarpetIOHDF5/src/Input.cc
@@ -1391,6 +1391,13 @@ static int ReadVar (const cGH* const cctkGH,
#endif
const ibbox& exterior_membox =
data.dd->light_boxes.at(mglevel).at(reflevel).at(component).exterior;
+ int const var0 = CCTK_FirstVarIndexI(gindex);
+ assert (var0>=0 and var0<CCTK_NumVars());
+ int const var = patch->vindex - var0;
+ assert (var>=0 and var<CCTK_NumVarsInGroupI(gindex));
+ const ggf* const ff = data.data.AT(var);
+ const gdata* const processor_component = ff->data_pointer
+ (timelevel, reflevel, local_component, mglevel);
// skip this dataset if it doesn't overlap with this component's
// exterior, or if it only reads what has already been read
@@ -1424,8 +1431,9 @@ static int ReadVar (const cGH* const cctkGH,
slice_start_size_t memorigin[dim], fileorigin[dim];
hsize_t memdims[dim], count[dim];
for (int i = 0; i < patch->rank; i++) {
- memdims[patch->rank-i-1] =
- (membox.upper() - membox.lower())[i] / stride[i] + 1;
+ assert (processor_component->shape()[i] ==
+ (membox.upper() - membox.lower())[i] / stride[i] + 1);
+ memdims[patch->rank-i-1] = processor_component->padded_shape()[i];
count[patch->rank-i-1] =
(overlap.upper() - overlap.lower())[i] / stride[i] + 1;
memorigin[patch->rank-i-1] =
diff --git a/Carpet/CarpetIOHDF5/src/Output.cc b/Carpet/CarpetIOHDF5/src/Output.cc
index 7288c9bc2..a6ac668cf 100644
--- a/Carpet/CarpetIOHDF5/src/Output.cc
+++ b/Carpet/CarpetIOHDF5/src/Output.cc
@@ -237,12 +237,17 @@ int WriteVarUnchunked (const cGH* const cctkGH,
// before HDF5-1.6.4 the H5Sselect_hyperslab() function expected
// the 'start' argument to be of type 'hssize_t'
slice_start_size_t overlaporigin[dim];
+ hsize_t hyperslab_start[dim], hyperslab_count[dim];
for (int d = 0; d < group.dim; ++d) {
assert (group.dim-1-d>=0 and group.dim-1-d<dim);
overlaporigin[group.dim-1-d] =
((overlap.lower() - bbox->lower()) / overlap.stride())[d];
overlapshape[group.dim-1-d] =
- (overlap.shape() / overlap.stride())[d];
+ processor_component->padded_shape()[d];
+ assert (all (processor_component->shape() ==
+ overlap.shape() / overlap.stride()));
+ hyperslab_start[group.dim-1-d] = 0;
+ hyperslab_count[group.dim-1-d] = processor_component->shape()[d];
}
// Write the processor component as a hyperslab into the recombined
@@ -250,6 +255,9 @@ int WriteVarUnchunked (const cGH* const cctkGH,
hid_t overlap_dataspace;
HDF5_ERROR (overlap_dataspace =
H5Screate_simple (group.dim, overlapshape, NULL));
+ HDF5_ERROR (H5Sselect_hyperslab (overlap_dataspace, H5S_SELECT_SET,
+ hyperslab_start, NULL,
+ hyperslab_count, NULL));
HDF5_ERROR (H5Sselect_hyperslab (dataspace, H5S_SELECT_SET,
overlaporigin, NULL,
overlapshape, NULL));
@@ -436,10 +444,15 @@ int WriteVarChunkedSequential (const cGH* const cctkGH,
hsize_t shape[dim];
hssize_t origin[dim];
+ hsize_t hyperslab_start[dim], hyperslab_count[dim];
for (int d = 0; d < group.dim; ++d) {
assert (group.dim-1-d>=0 and group.dim-1-d<dim);
origin[group.dim-1-d] = (bbox.lower() / bbox.stride())[d];
- shape[group.dim-1-d] = (bbox.shape() / bbox.stride())[d];
+ shape[group.dim-1-d] = processor_component->padded_shape()[d];
+ assert (all (processor_component->shape() ==
+ bbox.shape() / bbox.stride()));
+ hyperslab_start[group.dim-1-d] = 0;
+ hyperslab_count[group.dim-1-d] = processor_component->shape()[d];
}
// Write the component as an individual dataset
@@ -459,6 +472,9 @@ int WriteVarChunkedSequential (const cGH* const cctkGH,
HDF5_ERROR (H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL));
}
HDF5_ERROR (dataspace = H5Screate_simple (group.dim, shape, NULL));
+ HDF5_ERROR (H5Sselect_hyperslab (dataspace, H5S_SELECT_SET,
+ hyperslab_start, NULL,
+ hyperslab_count, NULL));
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
@@ -545,10 +561,15 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
// const ibbox& bbox = (*ff) (request->timelevel, refinementlevel,
// group.disttype == CCTK_DISTRIB_CONSTANT ?
// 0 : component, mglevel)->extent();
- const dh* dd = arrdata.at(gindex).at(Carpet::map).dd;
+ const dh* const dd = arrdata.AT(gindex).AT(Carpet::map).dd;
const ibbox& bbox =
dd->light_boxes.AT(mglevel).AT(refinementlevel)
.AT(group.disttype == CCTK_DISTRIB_CONSTANT ? 0 : component).exterior;
+ const ggf* const ff = arrdata.AT(gindex).AT(Carpet::map).data.AT(var);
+ const gdata* const processor_component = ff->data_pointer
+ (request->timelevel, refinementlevel,
+ group.disttype == CCTK_DISTRIB_CONSTANT ? 0 : local_component,
+ mglevel);
// Don't create zero-sized components
if (bbox.empty()) continue;
@@ -618,10 +639,15 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
// Get the shape of the HDF5 dataset (in Fortran index order)
hsize_t shape[dim];
hssize_t origin[dim];
+ hsize_t hyperslab_start[dim], hyperslab_count[dim];
for (int d = 0; d < group.dim; ++d) {
assert (group.dim-1-d>=0 and group.dim-1-d<dim);
origin[group.dim-1-d] = (bbox.lower() / bbox.stride())[d];
- shape[group.dim-1-d] = (bbox.shape() / bbox.stride())[d];
+ shape[group.dim-1-d] = processor_component->padded_shape()[d];
+ assert (all (processor_component->shape() ==
+ bbox.shape() / bbox.stride()));
+ hyperslab_start[group.dim-1-d] = 0;
+ hyperslab_count[group.dim-1-d] = processor_component->shape()[d];
}
// Write the component as an individual dataset
@@ -641,6 +667,9 @@ int WriteVarChunkedParallel (const cGH* const cctkGH,
HDF5_ERROR (H5Pset_filter (plist, H5Z_FILTER_FLETCHER32, 0, 0, NULL));
}
HDF5_ERROR (dataspace = H5Screate_simple (group.dim, shape, NULL));
+ HDF5_ERROR (H5Sselect_hyperslab (dataspace, H5S_SELECT_SET,
+ hyperslab_start, NULL,
+ hyperslab_count, NULL));
HDF5_ERROR (dataset = H5Dcreate (outfile, datasetname.str().c_str(),
filedatatype, dataspace, plist));
@@ -827,7 +856,8 @@ static int AddAttributes (const cGH *const cctkGH, const char *fullname,
dataspace, H5P_DEFAULT));
HDF5_ERROR (H5Awrite (attr, H5T_NATIVE_INT, &ioffsetdenom[0]));
HDF5_ERROR (H5Aclose (attr));
- ivect const ioffset = ((bbox.lower() % bbox.stride()) + bbox.stride()) % bbox.stride();
+ ivect const ioffset =
+ ((bbox.lower() % bbox.stride()) + bbox.stride()) % bbox.stride();
HDF5_ERROR (attr = H5Acreate (dataset, "ioffset", H5T_NATIVE_INT,
dataspace, H5P_DEFAULT));
HDF5_ERROR (H5Awrite (attr, H5T_NATIVE_INT, &ioffset[0]));
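
Aside (not part of the commit): the reindented ioffset expression at the end of Output.cc is the usual double-modulo idiom for a remainder that stays nonnegative even when bbox.lower() is negative, since C++'s % takes the sign of the dividend. A toy illustration with plain ints instead of Carpet's ivect:

// Illustration only: ((a % b) + b) % b yields a value in [0, b)
// for positive b, even when a is negative.
#include <cassert>

int nonneg_mod(int lower, int stride) {
  return ((lower % stride) + stride) % stride;
}

int main() {
  assert(-7 % 4 == -3);            // plain remainder can be negative
  assert(nonneg_mod(-7, 4) == 1);  // double-modulo result is in [0, 4)
  assert(nonneg_mod(6, 4) == 2);
  return 0;
}
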