From 729a50f636ece42cc16c2db840e6dacb98180fdf Mon Sep 17 00:00:00 2001
From: Anton Khirnov <anton@khirnov.net>
Date: Mon, 14 Apr 2014 16:51:38 +0200
Subject: tmp

---
 cactus_utils.py | 119 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 62 insertions(+), 57 deletions(-)

diff --git a/cactus_utils.py b/cactus_utils.py
index 867b175..8fce228 100644
--- a/cactus_utils.py
+++ b/cactus_utils.py
@@ -1001,6 +1001,14 @@ class HDFObjWrapper:
     def value(self):
         return self._wrap(self._obj.value)
 
+    @property
+    def dtype(self):
+        return self._wrap(self._obj.dtype)
+
+    @property
+    def shape(self):
+        return self._wrap(self._obj.shape)
+
 
 class HDFSource(dict):
     """
@@ -1121,6 +1129,38 @@ class RefinementLevel:
         self._sd = sd
         self.n   = n
 
+        itertimes = {}
+
+        for gf in sd.gf.values():
+            if n >= len(gf.reflevels):
+                continue
+            rl = gf.reflevels[n]
+
+            if self.dt == 0.0:
+                df = gf.df
+                dt = _get_timestep(df)
+                self.dt = dt / (1 << n)
+
+            for it, time in rl:
+                itertimes[it] = time
+
+                if self.dx is None:
+                    data = gf.data(n, it)
+
+                    try:
+                        self.dx     = data.attrs['delta']
+                        self.ghosts = data.attrs['cctk_nghostzones']
+                    except KeyError:
+                        pass
+
+        if len(itertimes) == 0:
+            raise ValueError("No data on refinement level %d" % n)
+
+        self.itertimes = zip(sorted(itertimes.keys()), sorted(itertimes.values()))
+
+    def __repr__(self):
+        return '<RefinementLevel %d of %s>' % (self.n, self._sd)
+
     def slice(self, iteration = -1, time = -1):
         return RefinedSlice(self._sd, self, time, iteration)
 
@@ -1140,10 +1180,10 @@ class GridFunction:
     # order.
     reflevels = None
 
-    ### private ###
     # the data file this function is located in
-    _df = None
+    df = None
 
+    ### private ###
     # Keys into the data file used for extracting the data.
     # The value is a list running over reflevels, each list entry is a dict with
     # keys being iterations (as integers) and values are the strings for
@@ -1151,7 +1191,7 @@ class GridFunction:
     _keys = None
 
     def __init__(self, datafile, fileobj, name):
-        self._df = datafile
+        self.df = datafile
 
         self.thorn, self.name = name.split('::')
 
@@ -1213,7 +1253,7 @@ class GridFunction:
             iteration = rl[0][0]
 
         key = self._keys[reflevel][iteration]
-        return self._df[key]
+        return self.df[key]
 
 
 class GridFunctionDict(dict):
@@ -1257,19 +1297,12 @@ class DataFile:
 
         self.gridfunctions = GridFunctionDict()
 
-        with h5py.File(path, 'r') as f:
-            for gf in f[self._PARPREFIX]['Datasets'].value.split():
-                self.gridfunctions[gf] = GridFunction(self, f, gf)
+        f = HDFSource(path)
+        for gf in f[self._PARPREFIX]['Datasets'].value.split():
+            self.gridfunctions[gf] = GridFunction(self, f, gf)
 
     def __getitem__(self, key):
-        f = None
-        try:
-            f = h5py.File(self._path, 'r')
-            return Dataset(f[key], f)
-        except:
-            if f is not None:
-                f.close()
-            raise
+        return HDFSource(self._path)[key]
 
     def __repr__(self):
         return '%s.DataFile(\'%s\')' % (self.__module__, self._path)
@@ -1279,11 +1312,11 @@ class SimulationData:
     # directory name
     dirname = None
 
-    # datafiles
+    # a dictionary of DataFile objects, indexed by the file name
    df = None
 
-    # a dictionary of all the { gridfunction : datafile it is located in }
-    # pairs for this set of data
+    # A GridFunctionDict containing all the GridFunction objects
+    # contained in this data, indexed by the function name
    gf = None
 
     # courant factor of the time integration (dx / dt)
@@ -1295,61 +1328,33 @@ class SimulationData:
     def __init__(self, dirname):
         self.dirname = os.path.abspath(dirname)
         self.df = {}
-        self.gf = {}
+        self.gf = GridFunctionDict()
 
         # open all the hdf5 files in the dir
         for f in os.listdir(dirname):
             if not f.endswith('.h5') or f.startswith('checkpoint'):
                 continue
-            self.df[f[:-3]] = h5py.File('%s/%s' % (dirname, f), 'r')
+            self.df[f[:-3]] = DataFile('%s/%s' % (dirname, f))
 
         if len(self.df) == 0:
             raise ValueError('No HDF5 data files in the directory.')
 
+        nb_reflevels = 0
+
         # add all the gridfunctions to the dict
         for df in self.df.values():
-            funcs = df['Parameters and Global Attributes']['Datasets']
-
-            if funcs.dtype.type != np.string_:
-                funcs_str = ''.join(map(chr, funcs)).strip('\x00')
-            else:
-                funcs_str = funcs.value
-
-            for ds in funcs_str.strip().split():
-                if ds in self.gf:
-                    raise ValueError('Gridfunction %s present in more than one datafile: %s and %s' % (ds, self.gf[ds].filename, df.filename))
-                self.gf[ds] = df
-
-        # pick a representative datafile and get the grid properties from it
-        if 'H' in self.df:
-            df = self.df['H']
-        else:
-            df = self.df.values()[0]
-
-        gf = get_default_gridfunc(df)
+            for gf in df.gridfunctions:
+                if gf in self.gf:
+                    raise ValueError('Gridfunction %s present in more than one datafile' % gf)
+                self.gf[gf] = df.gridfunctions[gf]
+                nb_reflevels = max(nb_reflevels, len(df.gridfunctions[gf].reflevels))
 
         # get the refinement levels, iterations and times
         self.rl = []
-        while True:
-            cur_rl = len(self.rl)
-            try:
-                ds = df['%s it=0 tl=0 rl=%d' % (gf, cur_rl)]
-            except KeyError:
-                break
-
-            self.rl.append(RefinementLevel(self, len(self.rl)))
-            rl = self.rl[-1]
-
-            rl.itertimes = gf_itertime(df, cur_rl)
-            rl.dx = ds.attrs['delta']
-            rl.ghosts = ds.attrs["cctk_nghostzones"]
-            rl.dt = _get_timestep(df) / (1 << (len(self.rl) - 1))
+        for i in xrange(nb_reflevels):
+            self.rl.append(RefinementLevel(self, i))
 
         self.courant = self.rl[0].dx[0] / self.rl[0].dt
 
-    def __del__(self):
-        if self.df:
-            map(h5py.File.close, self.df.values())
-
     def calc_velocities(self, get_data, rl = 0, t_start = 0, t_end = float('inf'),
                         offsets = None):
         rl = self.rl[rl]
         dt = rl.itertimes[1][1] - rl.itertimes[0][1]
-- 
cgit v1.2.3