author     Anton Khirnov <anton@khirnov.net>  2023-06-27 16:29:43 +0200
committer  Anton Khirnov <anton@khirnov.net>  2023-06-27 16:29:43 +0200
commit     450ca92a2e67f4d81e7ba012e300f750802a02cd (patch)
tree       49207e66c4fbc9fbb5a88f855a13dc7f73bad78b
parent     d10560b81394d8bfa71c96f5622e4c0fb11d8a7f (diff)
Remove obsolete cactus_utils.py
-rw-r--r--  cactus_utils.py  1189
1 file changed, 0 insertions, 1189 deletions
diff --git a/cactus_utils.py b/cactus_utils.py
deleted file mode 100644
index af5959e..0000000
--- a/cactus_utils.py
+++ /dev/null
@@ -1,1189 +0,0 @@
-import h5py
-import numpy as np
-import pylab
-import scipy.ndimage.interpolation
-import scipy.interpolate
-import scipy.integrate
-import scipy.optimize
-
-import collections
-import os
-import os.path
-import re
-
-axis_prop = { 'x' : 0, 'y' : 1, 'z' : 2 }
-axis_data = { 'x' : 2, 'y' : 1, 'z' : 0 }
-
-EPSILON = 1e-3
-
-
-def ndinterpolate(in_coords, values, out_coords):
- """
- A wrapper around scipy.ndimage.interpolation.map_coordinates() providing a more convenient API.
-
- Given the values of a function on an N-dimensional regular grid, interpolate
- its value on given coordinates.
-
- @param in_coords An N+1-dimensional array of the coordinates of the input values.
- The first axis goes over the dimensions (i.e. in_coords[0] is
- an N-dimensional array that gives the x-coordinate for each value).
- @param values An N-dimensional array of the values (at in_coords) of the function to
- be interpolated
- @param out_coords A 2-dimensional array of the coordinates at which the output is
- desired. The first axis goes over the dimensions (i.e. out_coords[0]
- is a 1-dimensional array of the x-coordinates, one entry per
- desired interpolation point).
- """
- N = len(values.shape)
-
- if len(in_coords.shape) != N + 1 or in_coords.shape[0] != N:
- raise ValueError('Invalid coords/values shapes: ' + str(in_coords.shape) + str(values.shape))
-
- out_coords = np.copy(out_coords)
- # map the physical coordinates onto array indices 0 .. shape[i] - 1
- for i in xrange(N):
- cmin = np.min(in_coords[i])
- cmax = np.max(in_coords[i])
- out_coords[i] -= cmin
- out_coords[i] *= (values.shape[i] - 1) / (cmax - cmin)
-
- return scipy.ndimage.interpolation.map_coordinates(values, out_coords)
-
-
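-# A minimal usage sketch (illustrative names and values only): sample
-# f(x, y) = x**2 + y on a uniform 2-D grid and interpolate at two points.
-def _demo_ndinterpolate():
- x, y = np.meshgrid(np.linspace(0., 1., 11), np.linspace(0., 1., 11), indexing = 'ij')
- in_coords = np.array([x, y]) # shape (2, 11, 11)
- values = x ** 2 + y # shape (11, 11)
- # query points (0.5, 0.5) and (0.25, 0.75); the first axis goes over dimensions
- out_coords = np.array([[0.5, 0.25], [0.5, 0.75]])
- return ndinterpolate(in_coords, values, out_coords) # roughly [0.75, 0.81]
-
-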
-def get_default_gridfunc(datafile):
- """
- Get the name of the default grid function from a given data file
- """
- funcs = datafile['Parameters and Global Attributes']['Datasets']
-
- if funcs.dtype.type != np.string_:
- funcs_str = ''.join(map(chr, funcs))
- else:
- funcs_str = funcs.value
-
- return funcs_str.strip().split('\n')[0]
-
-
-def _get_iteration_from_time(datafile, time):
- """
- Get the simulation iteration with the given time.
- """
- for item in datafile.values():
- try:
- if item.attrs['time'] == time:
- return item.attrs['timestep']
- except KeyError:
- pass
-
- raise ValueError("No timelevel with time %d" % (time))
-
-
-def _get_last_iteration(datafile, name, rl = 0):
- """
- Get the last iteration for which the data for the given gridfunction
- on the given refinement level exists in the data file.
- """
- last_it = -1
-
- for item in datafile.values():
- try:
- if (item.attrs['name'] == name and
- item.attrs['level'] == rl):
- last_it = max(last_it, item.attrs['timestep'])
- except KeyError:
- pass
-
- if last_it < 0:
- raise KeyError("No compatible datafile in the file")
-
- return last_it
-
-
-def _extract_paren(string):
- """
- From a string starting with a parenthesis-like pairwise delimiter --
- '(', '[', '{' or '<' -- extract the delimited group (including the
- delimiters), taking nesting into account.
- """
- if string[0] == '[':
- parens = ('[', ']')
- elif string[0] == '(':
- parens = ('(', ')')
- elif string[0] == '{':
- parens = ('{', '}')
- elif string[0] == '<':
- parens = ('<', '>')
- else:
- raise ValueError("String '%s' does not start with a recognized pairwise delimiter" % string)
-
- cnt = 0
- for i in xrange(len(string)):
- if string[i] == parens[0]:
- cnt += 1
- elif string[i] == parens[1]:
- cnt -= 1
-
- if cnt == 0:
- return string[:i + 1]
-
- raise ValueError("Unbalanced parentheses")
-
-
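-# For illustration (behaviour of the helper above): the returned substring
-# includes the outer delimiters and respects nesting, e.g.
-# _extract_paren('[a,[b,c],d],rest') -> '[a,[b,c],d]'
-# _extract_paren('(x(y)z) tail') -> '(x(y)z)'
-
-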
-def _parse_grid_struct(string):
- """
- Parse the grid structure v5 string into a dict.
- """
- ret = {}
- s = string
- while len(s) > 0:
- try:
- key, tail = s.split(':', 1)
- except ValueError:
- if len(ret) > 0:
- return ret
- else:
- return string
-
- val = _extract_paren(tail)
- ret[key] = val
-
- s = tail[len(val) + 1:]
-
- return ret
-
-
-def _get_timestep(datafile):
- """
- Get the time step on the coarsest grid.
- """
- gridstruct = datafile['Parameters and Global Attributes']['Grid Structure v5']
-
- if gridstruct.dtype.type != np.string_:
- s = ''.join(map(chr, gridstruct))
- else:
- s = gridstruct.value
-
- match = re.search(r'grid_delta_times:\[(.*?)\],', s)
- if match is None:
- raise ValueError("Invalid grid structure entry")
-
- # the matched group is a comma-separated list of floats
- deltas = [float(x) for x in match.group(1).split(',')]
- return deltas[0]
-
-
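-# For illustration (format inferred from the regex above): a fragment like
-# 'grid_delta_times:[0.25,0.125,0.0625],' in the grid-structure string
-# yields a coarse-grid time step of 0.25.
-
-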
-def _get_grid_spacing(datafile):
- """
- Get the numbers of grid points (not counting ghosts) in each direction.
- """
- gridstruct = datafile['Parameters and Global Attributes']['Grid Structure v5']
-
- if gridstruct.dtype.type != np.string_:
- s = ''.join(map(chr, gridstruct))
- else:
- s = gridstruct.value
-
- struct = _parse_grid_struct(s)
- match = re.search(r'\[([0-9]*),([0-9]*),([0-9]*)\]/[0-9]*\)', struct['grid_structure'])
- gridpoints = np.array(map(int, (match.group(1), match.group(2), match.group(3))))
-
- match = re.search(r'\[([0-9]*),([0-9]*),([0-9]*)\]', struct['grid_ghosts'])
- ghosts = np.array(map(int, (match.group(1), match.group(2), match.group(3))))
-
- gridpoints -= 2 * ghosts
-
- return gridpoints
-
-
-def _get_data_axis(dataset, axis, axvalue):
- """
- Get a tuple of (coordinates, data) along the specified axis ('x', 'y', or 'z').
- axvalue gives the coordinate values on the other two axes; it may be a
- single number (the same value for both) or an iterable with two elements
- for distinct values.
- """
- slicelist = [None] * 3
- try:
- if len(axvalue) > 2:
- raise ValueError("Need at most two axis values")
- elif len(axvalue) == 1:
- axvalue = [axvalue[0]] * 2
- else:
- axvalue = list(axvalue)
- except TypeError:
- # a plain number: use the same value for both axes
- axvalue = [axvalue] * 2
-
- # data is stored in [z, y, x] order, properties are in normal [x, y, z] order
- axes = collections.OrderedDict([["x", {"data_idx" : 2, "idx" : 0 }],
- ["y", {"data_idx" : 1, "idx" : 1 }],
- ["z", {"data_idx" : 0, "idx" : 2 }]])
-
- # for the requested axis, just remove the ghost zones
- ax = axes.pop(axis)
- ghosts = dataset.attrs["cctk_nghostzones"][ax["idx"]]
- delta = dataset.attrs["delta"][ax["idx"]]
- origin = dataset.attrs["origin"][ax["idx"]]
- slicelist[ax["data_idx"]] = slice(ghosts, -ghosts)
-
- # for the other two axes, locate the proper values
- while len(axes):
- axname, ax = axes.popitem(last = False)
-
- o = dataset.attrs["origin"][ax["idx"]]
- d = dataset.attrs["delta"][ax["idx"]]
- idx = int((axvalue.pop(0) - o) / d)
-
- if idx >= dataset.shape[ax["data_idx"]]:
- raise ValueError("Axis %s is outside the domain [%f, %f]" % (axname, o, o + d * dataset.shape[ax["data_idx"]]))
-
- slicelist[ax["data_idx"]] = idx
-
- data = dataset[tuple(slicelist)]
-
- # now compute the corresponding distances from coordinate origin
- r = np.array([(origin + ghosts * delta + n * delta) for n in xrange(data.shape[0])])
-
- return r, data
-
-
-def plot(datafile, axis = "x", axisvalue = 0.0, iteration = "last", time = -1, gridfunc = None, rl = 0, c = -1, mod = None, label = None):
- """
- Draw a 1-D line plot of a grid function along the given axis.
- """
- # guess dataset name
- if gridfunc is None:
- gridfunc = get_default_gridfunc(datafile)
-
- if time >= 0:
- iteration = _get_iteration_from_time(datafile, time)
- elif iteration == "last":
- iteration = _get_last_iteration(datafile, gridfunc, rl)
-
- dataset = datafile['%s it=%d tl=0 rl=%d%s' % (gridfunc, iteration, rl, " c=%d" % c if c >= 0 else "")]
- if axis == "x" or axis == "y" or axis == "z":
- r, data = _get_data_axis(dataset, axis, axisvalue)
-# elif len(axis) == 2 and re.match("[xyz]{2}", axis):
-# r, data = get_data_diagonal(dataset, axis)
- else:
- raise ValueError("Invalid axis specifier %s" % axis)
-
- if mod:
- data = mod(data)
-
- if not label:
- label = "%s time=%f level=%d" % (gridfunc, dataset.attrs['time'], rl)
-
- pylab.plot(r, data, label = label)
- pylab.legend()
-
-
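-# A hedged usage sketch; the file name and gridfunction are placeholders:
-# f = h5py.File('phi.h5', 'r')
-# plot(f, axis = 'x', axisvalue = 0.0, iteration = 'last', rl = 1)
-# pylab.show()
-
-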
-def plot_diff(datafile, axis = "x", axisvalue = 0.0, it1 = 0, it2 = "last", time1 = -1, time2 = -1, gridfunc = None, rl = 0, absdiff = 0):
- """
- Plot a difference between two times or iterations.
- """
- # guess dataset name
- if gridfunc is None:
- gridfunc = get_default_gridfunc(datafile)
-
- if time1 >= 0:
- it1 = _get_iteration_from_time(datafile, time1)
- elif it1 == "last":
- it1 = _get_last_iteration(datafile, gridfunc, rl)
-
- if time2 >= 0:
- it2 = _get_iteration_from_time(datafile, time2)
- elif it2 == "last":
- it2 = _get_last_iteration(datafile, gridfunc, rl)
-
- dataset1 = datafile['%s it=%d tl=0 rl=%d' % (gridfunc, it1, rl)]
- dataset2 = datafile['%s it=%d tl=0 rl=%d' % (gridfunc, it2, rl)]
- if axis == "x" or axis == "y" or axis == "z":
- r, data1 = _get_data_axis(dataset1, axis, axisvalue)
- r, data2 = _get_data_axis(dataset2, axis, axisvalue)
-# elif len(axis) == 2 and re.match("[xyz]{2}", axis):
-# r, data = get_data_diagonal(dataset, axis)
- else:
- raise ValueError("Invalid axis specifier %s" % axis)
-
- data = data1 - data2
- if absdiff:
- data = np.abs(data)
-
- pylab.plot(r, data, label = "%s" % (gridfunc))
- pylab.legend()
-
-
-def plot_convergence_time(datafiles, axis, gridfunc, rl):
- pass
-
-
-def plot_convergence_space(datafiles, axis, gridfunc, rl):
- pass
-
-
-def plot_convergence(datafiles, axis = "x", gridfunc = None, rl = 0, time = "last", mode = "selfconsistent"):
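- """
- Plot a convergence test from a list of data files that must differ in
- either the time step or the grid spacing, but not both; the matching
- plot_convergence_time()/plot_convergence_space() helper is chosen
- automatically.
- """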
- if mode == "selfconsistent" and len(datafiles) < 3:
- raise ValueError("At least three data sets needed for self-consistent convergence plot")
- elif len(datafiles) < 2:
- raise ValueError("At least two data sets needed for convergence plot")
-
- if gridfunc is None:
- gridfunc = get_default_gridfunc(datafiles[0])
-
- # check if the datafiles have changing time steps
- prev_timestep = 0
- timestep_changing = 0
- for df in datafiles:
- timestep = _get_timestep(df)
- if prev_timestep == 0:
- prev_timestep = timestep
- elif prev_timestep != timestep:
- timestep_changing = 1
- break
-
- datasets = [f['%s it=0 tl=0 rl=%d' % (gridfunc, rl)] for f in datafiles]
-
- # check if our chosen datasets have different grid spacings
- prev_spacing = None
- spacing_changing = 0
- for ds in datasets:
- spacing = ds.attrs['delta']
- if prev_spacing is None:
- prev_spacing = spacing
- elif not np.all(spacing == prev_spacing):
- spacing_changing = 1
- break
- del datasets
-
- if timestep_changing and spacing_changing:
- raise ValueError("Both the time step and grid spacing change between files.")
- if not timestep_changing and not spacing_changing:
- raise ValueError("Neither the time step nor grid spacing change between files.")
-
- if timestep_changing:
- plot_convergence_time(datafiles, axis, gridfunc, rl)
- else:
- plot_convergence_space(datafiles, axis, gridfunc, rl)
-
-
-def dminus(arr, dir, dx):
- """
- First-order backward difference in the given direction.
- A first-order forward difference is used for the first point along that
- direction, which has no backward neighbour.
- """
- ret = np.empty_like(arr)
- slicelist_ret = [slice(None)] * len(arr.shape)
- slicelist_ret[dir] = slice(1, None)
- slicelist_m1 = [slice(None)] * len(arr.shape)
- slicelist_m1[dir] = slice(0, -1)
- slicelist_0 = [slice(None)] * len(arr.shape)
- slicelist_0[dir] = slice(1, None)
-
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_0)] - arr[tuple(slicelist_m1)]) / dx
-
- slicelist_ret[dir] = slice(0, 1)
- slicelist_m1[dir] = slice(1, 2)
- slicelist_0[dir] = slice(0, 1)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_m1)] - arr[tuple(slicelist_0)]) / dx
-
- return ret
-
-
-def dplus(arr, dir, dx):
- """
- First-order forward difference in the given direction.
- A first-order backward difference is used for the last point along that
- direction, which has no forward neighbour.
- """
- ret = np.empty_like(arr)
- slicelist_ret = [slice(None)] * len(arr.shape)
- slicelist_ret[dir] = slice(0, -1)
- slicelist_p1 = [slice(None)] * len(arr.shape)
- slicelist_p1[dir] = slice(1, None)
- slicelist_0 = [slice(None)] * len(arr.shape)
- slicelist_0[dir] = slice(0, -1)
-
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_p1)] - arr[tuple(slicelist_0)]) / dx
-
- slicelist_ret[dir] = slice(-1, None)
- slicelist_p1[dir] = slice(-2, -1)
- slicelist_0[dir] = slice(-1, None)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_0)] - arr[tuple(slicelist_p1)]) / dx
-
- return ret
-
-
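-# A quick sanity sketch for the two one-sided stencils above: on f(x) = x
-# they are exact everywhere, so for a uniform grid
-# x = np.arange(0., 1., 0.01)
-# both np.allclose(dminus(x, 0, 0.01), 1.0) and
-# np.allclose(dplus(x, 0, 0.01), 1.0) should hold.
-
-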
-def diff4(arr, dir, dx, coeff = None, div = 12.):
- """
- Fourth order centered derivative in the given direction.
- Second order centered derivatives are used for the points next to the
- border ones and first order for the border points.
- """
- ret = np.zeros_like(arr)
- slicelist_ret = [slice(None)] * len(arr.shape)
- slicelist_ret[dir] = slice(2, -2)
- slicelist_0 = [slice(None)] * len(arr.shape)
- slicelist_0[dir] = slice(2, -2)
- slicelist_p1 = [slice(None)] * len(arr.shape)
- slicelist_p1[dir] = slice(3, -1)
- slicelist_p2 = [slice(None)] * len(arr.shape)
- slicelist_p2[dir] = slice(4, None)
- slicelist_m1 = [slice(None)] * len(arr.shape)
- slicelist_m1[dir] = slice(1, -3)
- slicelist_m2 = [slice(None)] * len(arr.shape)
- slicelist_m2[dir] = slice(0, -4)
-
- sl = (slicelist_p2, slicelist_p1, slicelist_0, slicelist_m1, slicelist_m2)
- if coeff is None:
- coeff = (-1, 8, 0, -8, 1)
-
- for s, c in zip(sl, coeff):
- ret[tuple(slicelist_ret)] += c * arr[tuple(s)]
- ret[tuple(slicelist_ret)] /= (div * dx)
-
- slicelist_ret[dir] = slice(0, 1)
- slicelist_p1[dir] = slice(1, 2)
- slicelist_m1[dir] = slice(0, 1)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_p1)] - arr[tuple(slicelist_m1)]) / dx
-
- slicelist_ret[dir] = slice(-1, None)
- slicelist_p1[dir] = slice(-1, None)
- slicelist_m1[dir] = slice(-2, -1)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_p1)] - arr[tuple(slicelist_m1)]) / dx
-
- slicelist_ret[dir] = slice(1, 2)
- slicelist_p1[dir] = slice(2, 3)
- slicelist_m1[dir] = slice(0, 1)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_p1)] - arr[tuple(slicelist_m1)]) / (2 * dx)
-
- slicelist_ret[dir] = slice(-2, -1)
- slicelist_p1[dir] = slice(-1, None)
- slicelist_m1[dir] = slice(-3, -2)
- ret[tuple(slicelist_ret)] = (arr[tuple(slicelist_p1)] - arr[tuple(slicelist_m1)]) / (2 * dx)
- return ret
-
-
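-# A minimal self-check sketch (grid chosen for illustration only): applied
-# to sin(x), the interior fourth-order stencil should reproduce cos(x) with
-# an error of order dx**4.
-def _demo_diff4():
- dx = 0.01
- x = np.arange(0., 1., dx)
- err = diff4(np.sin(x), 0, dx) - np.cos(x)
- return np.max(np.abs(err[2:-2])) # interior points only; expect below ~1e-8
-
-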
-def get_meshgrid(datafile, gridfunc = None, rl = 0, c = -1, axis = "z"):
- """
- Get a tuple of coordinate mesh grids for the given data file.
- """
- if not gridfunc:
- gridfunc = get_default_gridfunc(datafile)
- dataset = datafile['%s it=0 tl=0 rl=%d%s' % (gridfunc, rl, " c=%d" % c if c >= 0 else "")]
- o = dataset.attrs['origin']
- d = dataset.attrs['delta']
- if axis == 'z':
- idx = (0, 1)
- elif axis == 'y':
- idx = (0, 2)
- elif axis == 'x':
- idx = (1, 2)
- else:
- raise ValueError('Invalid axis %s' % axis)
-
- shape = (dataset.shape[2], dataset.shape[1], dataset.shape[0])
- return np.meshgrid(np.array([o[idx[0]] + n * d[idx[0]] for n in xrange(shape[idx[0]])]), np.array([o[idx[1]] + n * d[idx[1]] for n in xrange(shape[idx[1]])]))
-
-
-def _get_extrema(ar, rad = 3):
- """
- Find local extrema of a 1-D array.
- Returns a tuple (maxima, minima) of lists of (coordinate, value) pairs.
- rad determines the number of points in each direction over which the
- data must be monotonic for the center to be classified as an extremum.
- """
- maxima = []
- minima = []
- dar = diff4(ar, 0, 1)
- spl = scipy.interpolate.UnivariateSpline(range(ar.shape[0]), ar)
-
- for i in xrange(rad, len(ar) - rad):
- maximum = minimum = True
- for j in xrange(1, rad):
- for dir in (1, -1):
- idx = i + j * dir
- if ar[idx] > ar[idx - dir]:
- maximum = False
- elif ar[idx] < ar[idx - dir]:
- minimum = False
-
- if maximum or minimum:
- x = np.array(range(2 * rad + 1))
- y = dar[i - rad : i + rad + 1]
- roots = scipy.interpolate.UnivariateSpline(x, y).roots()
- if len(roots) > 0:
- root = roots[0] + i - rad
- (maxima if maximum else minima).append((root, spl(root)))
-
- return maxima, minima
-
-
-def _get_movement(coord_old, val_old, ex_new):
- """
- Return the number of points the extremum at (coord_old, val_old) moves
- to reach its counterpart in the extrema list ex_new, or None if no
- matching extremum could be found.
- """
- dist = None
- for coord_new, val_new in ex_new:
- ratio = val_new / val_old
- dist_new = coord_new - coord_old
- if ratio < 1.1 and ratio > 0.9 and (dist is None or abs(dist_new) < abs(dist)):
- dist = dist_new
- return dist
-
-
-def calc_movement(ar1, ar2):
- """
- Find all local extrema in ar1 and calculate how much they move in ar2.
- Return the list of matched movements, in points.
- """
- maxima1, minima1 = _get_extrema(ar1)
- maxima2, minima2 = _get_extrema(ar2)
-
- ret = []
- for coord, val in maxima1:
- diff = _get_movement(coord, val, maxima2)
- if diff is not None:
- ret.append(diff)
- for coord, val in minima1:
- diff = _get_movement(coord, val, minima2)
- if diff is not None:
- ret.append(diff)
-
- return ret
-
-
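-# A hedged demonstration: a Gaussian pulse shifted by 5 grid points between
-# the two input arrays should be reported as a movement of roughly 5 points.
-def _demo_calc_movement():
- x = np.arange(200.)
- pulse1 = 10. * np.exp(-((x - 80.) ** 2) / 50.)
- pulse2 = 10. * np.exp(-((x - 85.) ** 2) / 50.)
- return calc_movement(pulse1, pulse2) # expect values close to 5.0
-
-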
-def gf_itertime(datafile, rl = 0):
- """
- Find all iterations that are stored in the given datafile.
- Return an ordered list of (iteration, time) tuples.
- """
- ret = set()
-
- rl = ' rl=%d' % rl
- for key in datafile.keys():
- if rl not in key:
- continue
-
- match = re.search('it=([0-9]*)', key)
- if match:
- ret.add((int(match.group(1)), datafile[key].attrs['time']))
-
- return sorted(ret)
-
-
-def gf_iter(datafile, rl = 0):
- """
- Find all iterations that are stored in the given datafile.
- Return an ordered list of iterations.
- """
- return zip(*gf_itertime(datafile, rl))[0]
-
-
-def gf_time(datafile, rl = 0):
- """
- Find all times that are stored in the given datafile.
- Return an ordered list of times.
- """
- return zip(*gf_itertime(datafile, rl))[1]
-
-
-def trumpet_r(R, M = 1.0):
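- """
- Isotropic-type radial coordinate r as a function of the areal radius R
- for the maximally sliced trumpet geometry (closed-form expression with
- the throat at R = 3M/2).
- """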
- fac1 = ((2 * R + M + np.sqrt(4 * (R ** 2) + 4 * M * R + 3 * (M ** 2))) / 4)
- fac2 = ((4 + 3 * np.sqrt(2)) * (2 * R - 3 * M)) / (8 * R + 6 * M + 3 * np.sqrt(8 * (R ** 2) + 8 * M * R + 6 * (M ** 2)))
- return fac1 * (fac2 ** (1. / np.sqrt(2)))
-
-
-def trumpet_log1_C2(n):
- s = np.sqrt(4 + 9 * (n ** 2))
- a_c = np.sqrt((s - 3 * n) / (s + 3 * n))
- C2 = ((3 * n + s) ** 3) / (128 * (n ** 3)) * np.exp(-2 * a_c / n)
- return C2
-
-
-def _trumpet_log1_alphabar(alpha, R, M, n):
- num = 3 * M - 2 * R + 2 * R * (alpha ** 2)
- den = (R - 2 * M + n * R * alpha - R * (alpha ** 2))
- ret = - n * num / (R * den)
- return ret
-
-
-def _trumpet_log1_alpha_implicit(alpha, R, M, n):
- C2 = trumpet_log1_C2(n)
- return alpha ** 2 - 1 + 2 * M / R - C2 * np.exp(2 * alpha / n) / (R ** 4)
-
-
-def trumpet_log1_lapse(R, M = 1.0, n = 2.0):
- R_c = (3 * (n ** 2) * M + np.sqrt((2 * n * M) ** 2 + (3 * (n ** 2) * M) ** 2)) / (4 * (n ** 2))
- s = np.sqrt(4 + 9 * (n ** 2))
- a_c = np.sqrt((s - 3 * n) / (s + 3 * n))
-
- for i in xrange(R.shape[0]):
- if R[i] > R_c:
- break
-
- tmp = scipy.integrate.odeint(_trumpet_log1_alphabar, a_c, [R_c, R[i - 1]], args = (M, n))[1]
- ret1 = scipy.integrate.odeint(_trumpet_log1_alphabar, tmp, R[:i][::-1], args = (M, n))[::-1]
- ret1 = ret1.reshape(ret1.shape[0])
-
- tmp = scipy.integrate.odeint(_trumpet_log1_alphabar, a_c, [R_c, R[i]], args = (M, n))[1]
- ret2 = scipy.integrate.odeint(_trumpet_log1_alphabar, tmp, R[i:], args = (M, n))
- ret2 = ret2.reshape(ret2.shape[0])
-
- return np.concatenate((ret1, ret2))
-
-
-def trumpet_log1_trk(R, M = 1.0, n = 2.0):
- a = trumpet_log1_lapse(R, M, n)
- b = np.sqrt((a ** 2) - (1 - 2 * M / R))
- bbar = diff4(b, 0, R[1] - R[0])
-
- K = 2 * b / R + bbar
- return K
-
-
-def trumpet_lapse(R, M = 1.0):
- C2 = 27. * (M ** 4) / 16
- return np.sqrt(1 - 2 * M / R + C2 / (R ** 4))
-
-
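-# A hedged usage sketch: the maximal-trumpet lapse above is real only for
-# R >= 3M/2, where the argument of the square root is non-negative, e.g.
-# R = np.linspace(1.6, 20., 500)
-# alpha = trumpet_lapse(R) # M = 1 by default
-
-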
-def trumpet_log1_r(R, M = 1.0, n = 2.0):
- R_aux = np.linspace(1.32, 100, 10000)
- alpha_aux = trumpet_log1_lapse(R_aux)
-
- R_alpha_aux = scipy.interpolate.UnivariateSpline(alpha_aux, R_aux, s = 0)
- alpha_s = alpha_aux[20]
- coeff = R_alpha_aux(alpha_s) ** (1. / alpha_s)
-
- def integrand_aux(a, R_alpha = R_alpha_aux, alpha = alpha_aux):
- if a > alpha[-1]:
- RR = 2 / (1 - a ** 2)
- else:
- RR = R_alpha(a)
- ret = np.log(RR) / (a ** 2)
- return ret
- C0, err = scipy.integrate.quad(integrand_aux, alpha_s, 1)
-
- alpha = trumpet_log1_lapse(R, M, n)
- R_alpha = scipy.interpolate.UnivariateSpline(alpha, R, s = 0)
- Rbar_alpha = scipy.interpolate.UnivariateSpline(alpha[1:-1], (R[2:] - R[:-2]) / (2 * (R[1] - R[0])), s = 0)
-
- def integrand(a, R_alpha = R_alpha, alpha = alpha):
- if a > alpha[-1]:
- RR = 2 / (1 - a ** 2)
- else:
- RR = R_alpha(a)
- ret = np.log(RR) / (a ** 2)
- return ret
-
- def integrand2(a, R_alpha = R_alpha, alpha = alpha):
- if a > alpha[-1]:
- RR = 2 / (1 - a)
- Rbar = - 4 * a / ((1 - a ** 2) ** 2)
- else:
- RR = R_alpha(a)
- Rbar = Rbar_alpha(a)
- return Rbar / (RR * a)
-
- r1 = []
- r2 = []
-
- if alpha[0] >= alpha_s:
- i = 0
- else:
- for i in xrange(alpha.shape[0]):
- if alpha[i] >= alpha_s:
- break
-
- for j in xrange(i):
- a = alpha[j]
- integral, err = scipy.integrate.quad(integrand2, a, alpha_s)
- r1.append(np.exp(-integral - C0) * coeff)
-
- int_prev = 0
- alpha_prev = alpha_s
- for j in xrange(i, alpha.shape[0]):
- a = alpha[j]
- integral, err = scipy.integrate.quad(integrand, alpha_prev, a)
- integral += int_prev
- alpha_prev = a
- int_prev = integral
-
- r2.append(np.exp(integral - C0) * R_alpha(a) ** (1. / a))
-
- return np.array(r1 + r2)
-
-
-def calc_kretschmann(s, axis = 2, xoffset = 0):
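- """
- Compute the Kretschmann scalar R_abcd R^abcd from the BSSN variables in
- the data s, along a 1-D pencil in the given data direction. A 7-point
- slab is read in the other two directions to feed the finite-difference
- stencils; only its central point is kept.
- """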
- rl = s.rl
-
- sliceidx = [slice(0, 7), slice(0, 7), slice(xoffset, xoffset + 7)]
- sliceidx[axis] = slice(None)
- sliceidx = tuple(sliceidx)
-
- offsetidx = [3] * 3
- offsetidx[axis] = slice(None)
- offsetidx = tuple(offsetidx)
-
- #sliceidx = [slice(None)]*3
- #sliceidx = tuple(sliceidx)
-
- #offsetidx = [slice(None)]*3
- #offsetidx = tuple(offsetidx)
-
- # load grid functions
- phi = s['phi'][sliceidx]
- trK = s['trK'][sliceidx]
- gt = np.array([[s['gt33'][sliceidx], s['gt23'][sliceidx], s['gt13'][sliceidx]],
- [s['gt23'][sliceidx], s['gt22'][sliceidx], s['gt12'][sliceidx]],
- [s['gt13'][sliceidx], s['gt12'][sliceidx], s['gt11'][sliceidx]]])
- At = np.array([[s['At33'][sliceidx], s['At23'][sliceidx], s['At13'][sliceidx]],
- [s['At23'][sliceidx], s['At22'][sliceidx], s['At12'][sliceidx]],
- [s['At13'][sliceidx], s['At12'][sliceidx], s['At11'][sliceidx]]])
-
- # compute derivatives needed later
- dphi = np.array([diff4(phi, i, rl.dx[0]) for i in (0, 1, 2)])
- d2phi = np.array([diff4(dphi, i, rl.dx[0]) for i in (1, 2, 3)])
- dgt = np.array([diff4(gt, i, rl.dx[0]) for i in (2, 3, 4)])
- d2gt = np.array([diff4(dgt, i, rl.dx[0]) for i in (3, 4, 5)])
- dAt = np.array([diff4(At, i, rl.dx[0]) for i in (2, 3, 4)])
- dtrK = np.array([diff4(trK, i, rl.dx[0]) for i in (0, 1, 2)])
-
- phi = phi[offsetidx]
- trK = trK[offsetidx]
- gt = gt[(slice(None),) * 2 + offsetidx]
- At = At[(slice(None),) * 2 + offsetidx]
-
- dphi = dphi [(slice(None),) + offsetidx]
- d2phi = d2phi[(slice(None),) * 2 + offsetidx]
- dgt = dgt [(slice(None),) * 3 + offsetidx]
- d2gt = d2gt [(slice(None),) * 4 + offsetidx]
- dAt = dAt [(slice(None),) * 3 + offsetidx]
- dtrK = dtrK [(slice(None),) + offsetidx]
-
- e4phi = 1. / phi ** 2
- gtu = np.zeros_like(gt)
- for i in xrange(gt.shape[2]):
- gtu[:, :, i] = np.linalg.inv(gt[:, :, i])
- #for i in xrange(gt.shape[2]):
- # for j in xrange(gt.shape[3]):
- # for k in xrange(gt.shape[4]):
- # gtu[:, :, i, j, k] = np.linalg.inv(gt[:, :, i, j, k])
-
- g = e4phi * gt
- gu = gtu / e4phi
-
- K = e4phi * At + (1. / 3.) * trK * g
-
- Dphi = -0.5 * dphi / phi
- D2phi = - 0.5 * (d2phi - np.einsum('i...,j...->ij...', dphi, dphi) / phi) / phi
-
- Dg = e4phi * (4 * np.einsum('i...,jk...->ijk...', Dphi, gt) + dgt)
- D2g = e4phi * (4 * np.einsum('ij...,kl...->ijkl...', D2phi + 4 * np.einsum('i...,j...->ij...', Dphi, Dphi), gt) + 4 * np.einsum('i...,jkl...->ijkl...', Dphi, dgt) + 4 * np.einsum('i...,jkl...->jikl...', Dphi, dgt) + d2gt)
-
- Gl = 0.5 * (np.einsum('cab...->abc...', Dg) + np.einsum('bac...->abc...', Dg) - Dg)
- G = np.einsum('ij...,jkl...->ikl...', gu, Gl)
-
- R = (0.5 * (np.einsum('adbc...->abcd...', D2g) + np.einsum('bcad...->abcd...', D2g) - np.einsum('bdac...->abcd...', D2g) - np.einsum('acbd...->abcd...', D2g)) +
- np.einsum('ead...,ebc...->abcd...', Gl, G) - np.einsum('eac...,ebd...->abcd...', Gl, G))
-
- A = R + np.einsum('ik...,jl...->ijkl...', K, K) - np.einsum('il...,jk...->ijkl...', K, K)
- trA = np.einsum('kl...,kilj...->ij...', gu, A)
- term1 = (np.einsum('ai...,bj...,ck...,dl...,abcd...,ijkl...', gu, gu, gu, gu, A, A))
- term3 = 4 * np.einsum('ai...,bj...,ab...,ij...', gu, gu, trA, trA)
-
- t1 = e4phi * (4 * np.einsum('i...,jk...->ijk...', Dphi, At) + dAt)
- t2 = (1. / 3.) * (np.einsum('i...,jk...->ijk...', dtrK, g) + trK * Dg)
- t3 = - np.einsum('iab...,ic...->bac...', G, K) - np.einsum('icb...,ia...->bac...', G, K)
- CDK = t1 + t2 + t3
- CDKu = np.einsum('ai...,bj...,ck...,ijk...->abc...', gu, gu, gu, CDK)
-
- term2 = 8 * (np.einsum('abc...,bac...', CDK, CDKu) - np.einsum('abc...,abc...', CDK, CDKu))
-
- return term1 + term2 + term3
-
-
-def calc_kretschmann2(s):
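- """
- Same as calc_kretschmann(), but evaluated on a full 2-D plane: a 7-point
- slab is read along the first data axis only, and its central plane kept.
- """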
- rl = s.rl
-
- sliceidx = [slice(0, 7), slice(None), slice(None)]
- sliceidx = tuple(sliceidx)
-
- offsetidx = [3, slice(None), slice(None)]
- offsetidx = tuple(offsetidx)
-
- #sliceidx = [slice(None)]*3
- #sliceidx = tuple(sliceidx)
-
- #offsetidx = [slice(None)]*3
- #offsetidx = tuple(offsetidx)
-
- # load grid functions
- phi = s['phi'][sliceidx]
- trK = s['trK'][sliceidx]
- gt = np.array([[s['gt33'][sliceidx], s['gt23'][sliceidx], s['gt13'][sliceidx]],
- [s['gt23'][sliceidx], s['gt22'][sliceidx], s['gt12'][sliceidx]],
- [s['gt13'][sliceidx], s['gt12'][sliceidx], s['gt11'][sliceidx]]])
- At = np.array([[s['At33'][sliceidx], s['At23'][sliceidx], s['At13'][sliceidx]],
- [s['At23'][sliceidx], s['At22'][sliceidx], s['At12'][sliceidx]],
- [s['At13'][sliceidx], s['At12'][sliceidx], s['At11'][sliceidx]]])
-
- # compute derivatives needed later
- dphi = np.array([diff4(phi, i, rl.dx[0]) for i in (0, 1, 2)])
- d2phi = np.array([diff4(dphi, i, rl.dx[0]) for i in (1, 2, 3)])
- dgt = np.array([diff4(gt, i, rl.dx[0]) for i in (2, 3, 4)])
- d2gt = np.array([diff4(dgt, i, rl.dx[0]) for i in (3, 4, 5)])
- dAt = np.array([diff4(At, i, rl.dx[0]) for i in (2, 3, 4)])
- dtrK = np.array([diff4(trK, i, rl.dx[0]) for i in (0, 1, 2)])
-
- phi = phi[offsetidx]
- trK = trK[offsetidx]
- gt = gt[(slice(None),) * 2 + offsetidx]
- At = At[(slice(None),) * 2 + offsetidx]
-
- dphi = dphi [(slice(None),) + offsetidx]
- d2phi = d2phi[(slice(None),) * 2 + offsetidx]
- dgt = dgt [(slice(None),) * 3 + offsetidx]
- d2gt = d2gt [(slice(None),) * 4 + offsetidx]
- dAt = dAt [(slice(None),) * 3 + offsetidx]
- dtrK = dtrK [(slice(None),) + offsetidx]
-
- e4phi = 1. / phi ** 2
- gtu = np.zeros_like(gt)
- for i in xrange(gt.shape[2]):
- for j in xrange(gt.shape[3]):
- gtu[:, :, i, j] = np.linalg.inv(gt[:, :, i, j])
- #for i in xrange(gt.shape[2]):
- # for j in xrange(gt.shape[3]):
- # for k in xrange(gt.shape[4]):
- # gtu[:, :, i, j, k] = np.linalg.inv(gt[:, :, i, j, k])
-
- g = e4phi * gt
- gu = gtu / e4phi
-
- K = e4phi * At + (1. / 3.) * trK * g
-
- Dphi = -0.5 * dphi / phi
- D2phi = - 0.5 * (d2phi - np.einsum('i...,j...->ij...', dphi, dphi) / phi) / phi
-
- Dg = e4phi * (4 * np.einsum('i...,jk...->ijk...', Dphi, gt) + dgt)
- D2g = e4phi * (4 * np.einsum('ij...,kl...->ijkl...', D2phi + 4 * np.einsum('i...,j...->ij...', Dphi, Dphi), gt) + 4 * np.einsum('i...,jkl...->ijkl...', Dphi, dgt) + 4 * np.einsum('i...,jkl...->jikl...', Dphi, dgt) + d2gt)
-
- Gl = 0.5 * (np.einsum('cab...->abc...', Dg) + np.einsum('bac...->abc...', Dg) - Dg)
- G = np.einsum('ij...,jkl...->ikl...', gu, Gl)
-
- R = (0.5 * (np.einsum('adbc...->abcd...', D2g) + np.einsum('bcad...->abcd...', D2g) - np.einsum('bdac...->abcd...', D2g) - np.einsum('acbd...->abcd...', D2g)) +
- np.einsum('ead...,ebc...->abcd...', Gl, G) - np.einsum('eac...,ebd...->abcd...', Gl, G))
-
- A = R + np.einsum('ik...,jl...->ijkl...', K, K) - np.einsum('il...,jk...->ijkl...', K, K)
- trA = np.einsum('kl...,kilj...->ij...', gu, A)
- term1 = (np.einsum('ai...,bj...,ck...,dl...,abcd...,ijkl...', gu, gu, gu, gu, A, A))
- term3 = 4 * np.einsum('ai...,bj...,ab...,ij...', gu, gu, trA, trA)
-
- t1 = e4phi * (4 * np.einsum('i...,jk...->ijk...', Dphi, At) + dAt)
- t2 = (1. / 3.) * (np.einsum('i...,jk...->ijk...', dtrK, g) + trK * Dg)
- t3 = - np.einsum('iab...,ic...->bac...', G, K) - np.einsum('icb...,ia...->bac...', G, K)
- CDK = t1 + t2 + t3
- CDKu = np.einsum('ai...,bj...,ck...,ijk...->abc...', gu, gu, gu, CDK)
-
- term2 = 8 * (np.einsum('abc...,bac...', CDK, CDKu) - np.einsum('abc...,abc...', CDK, CDKu))
-
- return term1 + term2 + term3
-
-
-def l2norm(sd, it, gf, maxrl = None, minrl = None):
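- """
- Compute the volume integral of gf**2 at iteration it, summed over
- refinement levels from maxrl (finest) down to minrl, counting regions
- covered by a finer level only once.
- """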
- results = []
-
- if maxrl is None:
- maxrl = len(sd.rl) - 1
- if minrl is None:
- minrl = 0
-
- prev_bounds_low = []
- prev_bounds_high = []
- for rl in sd.rl[maxrl:minrl:-1]:
- s = rl.slice(it)
- trim = tuple([slice(rl.ghosts[i], -rl.ghosts[i]) for i in xrange(3)])
- data = s[gf][trim] ** 2
- x = s.x[trim[0]]
- y = s.y[trim[1]]
- z = s.z[trim[2]]
-
- if not prev_bounds_low:
- while len(data.shape) > 0:
- data = scipy.integrate.simps(data, dx = rl.dx[0])
- results.append(data)
- else:
- res = 0.0
- if prev_bounds_low[0] > x[0]:
- for i, xx in enumerate(x):
- if prev_bounds_low[0] <= xx:
- break
-
- tmp = scipy.integrate.simps(data[:, :, :i], dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
- if prev_bounds_low[1] > y[0]:
- for i, yy in enumerate(y):
- if prev_bounds_low[1] <= yy:
- break
-
- tmp = scipy.integrate.simps(data[:, :i], axis = 1, dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
- if prev_bounds_low[2] > z[0]:
- for i, zz in enumerate(z):
- if prev_bounds_low[2] <= zz:
- break
-
- tmp = scipy.integrate.simps(data[:i], axis = 0, dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
- if prev_bounds_high[0] < x[-1]:
- for i, xx in enumerate(x):
- if prev_bounds_high[0] >= xx:
- i -= 1
- break
-
- tmp = scipy.integrate.simps(data[:, :, i:], dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
- if prev_bounds_high[1] < y[-1]:
- for i, yy in enumerate(y):
- if prev_bounds_high[1] >= yy:
- i -= 1
- break
-
- tmp = scipy.integrate.simps(data[:, i:], axis = 1, dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
- if prev_bounds_high[2] < z[-1]:
- for i, zz in enumerate(z):
- if prev_bounds_high[2] >= zz:
- i -= 1
- break
-
- tmp = scipy.integrate.simps(data[i:], axis = 0, dx = rl.dx[0])
- while len(tmp.shape) > 0:
- tmp = scipy.integrate.simps(tmp, dx = rl.dx[0])
- res += tmp
-
- results.append(res)
-
- prev_bounds_low = [x[0], y[0], z[0]]
- prev_bounds_high = [x[-1], y[-1], z[-1]]
-
- return sum(results)
-
-
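-# A hedged usage sketch (directory and gridfunction names are placeholders;
-# see the SimulationData class below):
-# sd = SimulationData('/path/to/run')
-# norm2 = l2norm(sd, it = 0, gf = 'phi')
-
-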
-class RefinedSlice:
- # simulation time on this slice
- time = -1
- # simulation iteration on this slice
- it = -1
-
- # [x, y, z] coordinates of the origin of the grid on this slice
- o = None
-
- # a list of arrays of coordinate points along each direction [x, y, z]
- c = None
-
- # arrays of coordinate points along each direction
- x = None
- y = None
- z = None
-
- rl = None
-
- # private
- _sd = None
- _rl = None
-
- def __init__(self, sd, rl, time = -1, iteration = -1):
- self._sd = sd
- self._rl = rl
- self.rl = rl
-
- if iteration == 'last':
- iteration = rl.itertimes[-1][0]
-
- if time >= 0:
- for it, t in rl.itertimes:
- if abs(t - time) < EPSILON:
- self.time = time
- self.it = it
- break
- if self.time < 0:
- raise IndexError('No such time: ' + str(time))
- elif iteration >= 0:
- for it, t in rl.itertimes:
- if it == iteration:
- self.time = t
- self.it = it
- break
- if self.it < 0:
- raise IndexError('No such iteration: ' + str(iteration))
- else:
- raise TypeError('Neither time nor iteration provided')
-
- # pick a representative datafile and get the grid properties from it
- if 'H' in self._sd.df:
- df = self._sd.df['H']
- else:
- df = self._sd.df.values()[0]
-
- gf = get_default_gridfunc(df)
- ds = df['%s it=%d tl=0 rl=%d' % (gf, self.it, rl.n)]
-
- self.o = ds.attrs['origin']
-
- self.c = [np.linspace(self.o[axis_prop[i]], self.o[axis_prop[i]] + rl.dx[axis_prop[i]] * ds.shape[axis_data[i]], ds.shape[axis_data[i]], endpoint = False) for i in ('x', 'y', 'z')]
- self.x, self.y, self.z = self.c
-
- def __getitem__(self, key):
- if key in self._sd.df:
- gf = get_default_gridfunc(self._sd.df[key])
- return self._sd.df[key]['%s it=%d tl=0 rl=%d' % (gf, self.it, self._rl.n)]
- elif key in self._sd.gf:
- df = self._sd.gf[key]
- return df['%s it=%d tl=0 rl=%d' % (key, self.it, self._rl.n)]
- raise IndexError
-
-
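-# A hedged usage sketch (names are placeholders; SimulationData is defined
-# below):
-# sd = SimulationData('/path/to/run')
-# s = sd.rl[0].slice(iteration = 'last')
-# phi = s['phi'][:] # 3-D array of gridfunction 'phi' on this slice
-
-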
-class RefinementLevel:
- # refinement level number
- n = -1
-
- # [dx, dy, dz] grid spacings
- dx = None
- # number of ghost gridpoints in [x, y, z] dimensions
- ghosts = None
-
- # the time step
- dt = 0.0
-
- # a sorted tuple of all the (iteration, time) pairs for which the data
- # is present on this refinement level
- itertimes = None
-
- # private
- _sd = None
-
- def __init__(self, sd, n):
- self._sd = sd
- self.n = n
-
- def slice(self, iteration = -1, time = -1):
- return RefinedSlice(self._sd, self, time, iteration)
-
- def __str__(self):
- # the grid extent would require coordinate arrays, which are only
- # available on slices; print the per-level quantities stored here
- return 'Refinement level %d; dx = [%g, %g, %g]; dt = %g' % \
- (self.n, self.dx[0], self.dx[1], self.dx[2], self.dt)
-
-
-class SimulationData:
- # directory name
- dirname = None
-
- # datafiles
- df = None
-
- # a dictionary of all the { gridfunction : datafile it is located in }
- # pairs for this set of data
- gf = None
-
- # Courant factor of the time integration (dx / dt)
- courant = 0
-
- # per-refinement level parameters
- rl = None
-
- def __init__(self, dirname):
- self.dirname = os.path.abspath(dirname)
- self.df = {}
- self.gf = {}
-
- # open all the hdf5 files in the dir
- for f in os.listdir(dirname):
- if not f.endswith('.h5') or f.startswith('checkpoint'):
- continue
-
- self.df[f[:-3]] = h5py.File('%s/%s' % (dirname, f), 'r')
- if len(self.df) == 0:
- raise ValueError('No HDF5 data files in the directory.')
-
- for df in self.df.values():
- funcs = df['Parameters and Global Attributes']['Datasets']
-
- if funcs.dtype.type != np.string_:
- funcs_str = ''.join(map(chr, funcs)).strip('\x00')
- else:
- funcs_str = funcs.value
-
- for ds in funcs_str.strip().split():
- if ds in self.gf:
- raise ValueError('Gridfunction %s present in more than one datafile: %s and %s' % (ds, self.gf[ds].filename, df.filename))
- self.gf[ds] = df
-
- # pick a representative datafile and get the grid properties from it
- if 'H' in self.df:
- df = self.df['H']
- else:
- df = self.df.values()[0]
-
- gf = get_default_gridfunc(df)
-
- # get the refinement levels, iterations and times
- self.rl = []
- while True:
- cur_rl = len(self.rl)
- try:
- ds = df['%s it=0 tl=0 rl=%d' % (gf, cur_rl)]
- except KeyError:
- break
-
- self.rl.append(RefinementLevel(self, len(self.rl)))
- rl = self.rl[-1]
-
- rl.itertimes = gf_itertime(df, cur_rl)
- rl.dx = ds.attrs['delta']
- rl.ghosts = ds.attrs["cctk_nghostzones"]
- rl.dt = _get_timestep(df) / (1 << (len(self.rl) - 1))
-
- self.courant = self.rl[0].dx[0] / self.rl[0].dt
-
- def __del__(self):
- if self.df:
- map(h5py.File.close, self.df.values())
-
- def calc_velocities(self, get_data, rl = 0, t_start = 0, t_end = float('inf'), offsets = None):
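- """
- Estimate propagation velocities by tracking how local extrema move
- between slices separated by the given iteration offsets.
- get_data(sd, iteration) is expected to return a 1-D data array (the
- signature is inferred from its use below).
- Returns a list of velocities in coordinate units.
- """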
- rl = self.rl[rl]
- dt = rl.itertimes[1][1] - rl.itertimes[0][1]
-
- ret = []
- for idx, (iter, t) in enumerate(rl.itertimes):
- if t < t_start:
- continue
- if t > t_end:
- break
-
- data1 = get_data(self, iter)
-
- if offsets is None:
- step = int(self.courant)
- offsets = xrange(step, 10 * step, step)
-
- for offset in offsets:
- diff_t = offset * dt
- try:
- data2 = get_data(self, rl.itertimes[idx + offset][0])
- except IndexError:
- break
- mov = calc_movement(data1, data2)
-
- for it in mov:
- ret.append(it * rl.dx[0] / diff_t)
- return ret
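-
-
-# A hedged end-to-end sketch; the directory name and gridfunction are
-# placeholders, not part of this module:
-# sd = SimulationData('/data/my_run')
-# print sd.courant, len(sd.rl)
-# s = sd.rl[0].slice(iteration = 'last')
-# pylab.plot(s.x, s['phi'][3, 3, :])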