 /*@@
   @file      Operator.c
   @date      Tue Apr 15 18:22:45 1997
   @author    Paul Walker
   @desc
              Definition of interpolation operators for regular uniform grids.
   @enddesc
   @history
   @hdate     Sun Jul 04 1999
   @hauthor   Thomas Radke
   @hdesc     conversion to Cactus 4.0 (copied from pughGetPoints.c)
   @hdate     Wed 31 Jan 2001
   @hauthor   Thomas Radke
   @hdesc     translation of fortran interpolators into C
   @endhistory
   @version   $Id$
 @@*/

#include <stdlib.h>
#include <string.h>
#include <math.h>       /* floor(3) */

#include "cctk.h"
#include "CactusPUGH/PUGH/src/include/pugh.h"
#include "pughInterpGH.h"

/* the rcs ID and its dummy function to use it */
static const char *rcsid = "$Header$";
CCTK_FILEVERSION(CactusPUGH_PUGHInterp_Operator_c)

/* uncomment this to get some debugging output */
/* #define PUGHINTERP_DEBUG 1 */

/* macro to sort interpolation results from a single communication buffer
   into their appropriate output arrays */
#define SORT_TYPED_ARRAY(cctk_type)                                     \
        {                                                               \
          int _i;                                                       \
          cctk_type *_src, *_dst;                                       \
                                                                        \
                                                                        \
          _src = (cctk_type *) this->buf;                               \
          _dst = (cctk_type *) output_arrays[array];                    \
          for (_i = 0; _i < myGH->N_points_from[proc]; _i++)            \
          {                                                             \
            _dst[myGH->indices[_i + offset]] = *_src++;                 \
          }                                                             \
          this->buf = (char *) _src;                                    \
        }


#ifdef CCTK_MPI
/* internal structure describing a handle for a single CCTK data type */
typedef struct
{
  int vtypesize;          /* variable type's size in bytes               */
  MPI_Datatype mpitype;   /* corresponding MPI datatype                  */
  int N_arrays;           /* number of in/out arrays                     */
  void *sendbuf;          /* communication send buffer for this type     */
  void *recvbuf;          /* communication receive buffer for this type  */
  char *buf;              /* work pointer for sendbuf                    */
} type_desc_t;
#endif

/* prototypes of routines defined in this source file */
static int CheckArguments (const cGH *GH,
                           int N_dims,
                           int N_points,
                           int N_input_arrays,
                           int N_output_arrays,
                           const int interp_coord_array_types[]);
static int CheckOutOfBounds (const cGH *GH,
                             const char *coord_system_name,
                             int order,
                             int N_dims,
                             int N_points,
                             const int *dims,
                             const CCTK_REAL *const *coords);
#ifdef CCTK_MPI
static int GetLocalCoords (const cGH *GH,
                           int N_points,
                           const char *coord_system_name,
                           const pGExtras *extras,
                           const CCTK_REAL *const coords[],
                           int *N_local_points,
                           CCTK_REAL **local_coords);
#endif


 /*@@
   @routine    PUGHInterp_InterpGV
   @date       Sun Jul 04 1999
   @author     Thomas Radke
   @desc
               The interpolation operator registered with the CCTK under the
               name "regular uniform cartesian".
               Interpolates a list of CCTK variables (domain-decomposed grid
               functions or arrays) to a list of output arrays (one-to-one)
               at a given number of interpolation points (indicated by their
               coordinates). The points are located on a coordinate system
               which is assumed to be a uniform cartesian one.
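               As an illustration only (a hedged sketch, not code taken from
               this thorn; the grid function name is made up), interpolating
               a single CCTK_REAL grid function of a 3D cartesian coordinate
               system at N points would pass arguments shaped like

                 const void *coords[3]  = {x, y, z};
                 int coord_types[3]     = {CCTK_VARIABLE_REAL,
                                           CCTK_VARIABLE_REAL,
                                           CCTK_VARIABLE_REAL};
                 int in_indices[1]      = {CCTK_VarIndex ("mythorn::phi")};
                 void *out_arrays[1]    = {result};
                 int out_types[1]       = {CCTK_VARIABLE_REAL};

                 PUGHInterp_InterpGV (GH, order, "cart3d", N, 1, 1,
                                      coords, coord_types, in_indices,
                                      out_arrays, out_types);

               In practice this routine is not called directly but through
               the flesh's interpolation API, which dispatches to the
               operator registered under "regular uniform cartesian".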
   @enddesc

   @var        GH
   @vdesc      Pointer to CCTK grid hierarchy
   @vtype      cGH *
   @vio        in
   @endvar
   @var        order
   @vdesc      interpolation order
   @vtype      int
   @vio        in
   @endvar
   @var        coord_system_name
   @vdesc      name of coordinate system to use for interpolation
   @vtype      const char *
   @vio        in
   @endvar
   @var        N_points
   @vdesc      number of points to be interpolated on this processor
   @vtype      int
   @vio        in
   @endvar
   @var        N_input_arrays
   @vdesc      number of input arrays (given by their indices) to
               interpolate from
   @vtype      int
   @vio        in
   @endvar
   @var        N_output_arrays
   @vdesc      number of output arrays to interpolate to
   @vtype      int
   @vio        in
   @endvar
   @var        interp_coord_arrays
   @vdesc      coordinates of points to interpolate at
   @vtype      const void *const [N_dims]
   @vio        in
   @endvar
   @var        interp_coord_array_types
   @vdesc      CCTK data type of coordinate arrays
   @vtype      int [N_dims]
   @vio        in
   @endvar
   @var        input_array_indices
   @vdesc      grid variable indices of the input arrays to interpolate on
   @vtype      const int [N_input_arrays]
   @vio        in
   @endvar
   @var        output_arrays
   @vdesc      list of output arrays to interpolate to
   @vtype      void *const [N_output_arrays]
   @vio        out
   @endvar
   @var        output_array_types
   @vdesc      CCTK data types of output arrays
   @vtype      int [N_output_arrays]
   @vio        in
   @endvar

   @returntype int
   @returndesc
                0 - successful interpolation
               -1 - in case of any errors
   @endreturndesc
 @@*/
int PUGHInterp_InterpGV (cGH *GH,
                         int order,
                         const char *coord_system_name,
                         int N_points,
                         int N_input_arrays,
                         int N_output_arrays,
                         const void *const interp_coord_arrays[],
                         const int interp_coord_array_types[],
                         const int input_array_indices[],
                         void *const output_arrays[],
                         const int output_array_types[])
{
  int i, nprocs, N_dims, point, array, retval;
  CCTK_REAL *interp_local_coords;
  CCTK_REAL *origin, *delta;
  const CCTK_REAL *const *data;
  const void **input_arrays;
  const pGH *pughGH;
  const pGExtras *extras;
  cGroupDynamicData group_data;
#ifdef CCTK_MPI
  int offset, proc, type, maxtype, N_local_points;
  void **local_output_arrays;
  pughInterpGH *myGH;
  type_desc_t *this, *type_desc;
#endif


  /* get dimensionality of the coordinate system */
  N_dims = CCTK_CoordSystemDim (coord_system_name);
  if (N_dims <= 0)
  {
    CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                "Cannot get dimensions of coordinate system '%s'",
                coord_system_name);
    return (-1);
  }

  /* check other arguments */
  retval = CheckArguments (GH, N_dims, N_points, N_input_arrays,
                           N_output_arrays, interp_coord_array_types);
  if (retval <= 0)
  {
    return (retval);
  }

  /* get extension handle for PUGH */
  pughGH = CCTK_GHExtension (GH, "PUGH");

  /* get the extras pointer of the first coordinate
     This is used later on to verify the layout of the input arrays
     as well as for mapping points to processors. */
  i = CCTK_CoordIndex (1, NULL, coord_system_name);
  extras = ((const pGA *) pughGH->variables[i][0])->extras;

  /* get dimensions, origin, and delta of the processor-local grid */
  /* NOTE: getting the dimensions should be a flesh routine as well;
           for now we get the dimensions of every coordinate and take
           the i'th element - this is inconsistent !!
  */
  origin = malloc (2 * N_dims * sizeof (CCTK_REAL));
  delta  = origin + N_dims;
  input_arrays = malloc (N_input_arrays * sizeof (void *));
  for (i = 0; i < N_dims; i++)
  {
    CCTK_CoordLocalRange (GH, &origin[i], &delta[i], i + 1, NULL,
                          coord_system_name);
    delta[i] = (delta[i] - origin[i]) / extras->lnsize[i];
  }

  /* check that the input arrays' dimensions match the coordinate system
     (but their dimensionality can be less) */
  for (array = 0; array < N_input_arrays; array++)
  {
    if (CCTK_GroupDynamicData (GH,
                               CCTK_GroupIndexFromVarI (input_array_indices[array]),
                               &group_data) < 0)
    {
      CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                  "Invalid input array index %d", input_array_indices[array]);
      retval = -1;
      continue;
    }
    if (group_data.dim > N_dims)
    {
      CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                  "Input array variable with index %d has more dimensions "
                  "than coordinate system '%s'",
                  input_array_indices[array], coord_system_name);
      retval = -1;
      continue;
    }
    if (memcmp (group_data.lsh, extras->lnsize, group_data.dim * sizeof (int)))
    {
      CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                  "Dimensions of input array variable with index %d "
                  "don't match coordinate system '%s'",
                  input_array_indices[array], coord_system_name);
      retval = -1;
    }

    /* get the data pointer to the input array (use current timelevel) */
    input_arrays[array] = CCTK_VarDataPtrI (GH, 0, input_array_indices[array]);
  }

  if (retval >= 0)
  {
    /* check for out-of-bounds points
       This check was originally in the local interpolator code but was
       disabled there and moved up here instead.
       Now local arrays can have out-of-bounds points, grid arrays cannot. */
    retval = CheckOutOfBounds (GH, coord_system_name, order, N_dims, N_points,
                               extras->nsize,
                               (const CCTK_REAL *const *) interp_coord_arrays);
  }
  if (retval < 0)
  {
    free (input_arrays);
    free (origin);
    return (retval);
  }

  /* single-processor case is easy: no communication or buffering,
     just direct interpolation of interp_coord_arrays from input_arrays
     into output_arrays */
  nprocs = CCTK_nProcs (GH);
  if (nprocs == 1)
  {
    /* sort the individual interpolation coordinate arrays into a single one */
    interp_local_coords = malloc (N_dims * N_points * sizeof (CCTK_REAL));
    data = (const CCTK_REAL *const *) interp_coord_arrays;
    for (point = 0; point < N_points; point++)
    {
      for (i = 0; i < N_dims; i++)
      {
        *interp_local_coords++ = data[i][point];
      }
    }
    interp_local_coords -= N_dims * N_points;

    /* call the interpolator function */
    retval = PUGHInterp_Interpolate (order, N_points, N_dims, N_output_arrays,
                                     extras->lnsize, interp_local_coords,
                                     origin, delta,
                                     output_array_types, input_arrays,
                                     output_array_types, output_arrays);

    /* free allocated resources */
    free (interp_local_coords);
    free (input_arrays);
    free (origin);

    return (retval);
  }

#ifdef CCTK_MPI
  /*** Here follows the multi-processor case:
       All processors locate their points to interpolate at and exchange the
       coordinates so that every processor gets those points which it can
       process locally. After interpolation the results have to be sent back
       to the requesting processors. For both communications MPI_Alltoallv()
       is used.
       In order to minimize the total number of MPI_Alltoallv() calls (which
       are quite expensive) we collect the interpolation results for all
       output arrays of the same CCTK data type into a single communication
       buffer. That means that after communication the data needs to be
       resorted from the buffer into the output arrays.
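       As an illustration of the buffer layout (schematic only): with two
       output arrays A and B of the same CCTK data type and three processors,
       the send buffer for that type is packed as

         | A,B results for proc 0 | A,B results for proc 1 | A,B results for proc 2 |

       so that a single MPI_Alltoallv() per data type, with counts of
       N_arrays * N_points_to[proc] elements, moves all results for that
       type at once.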
  ***/

  /* first of all, set up a structure with information about the CCTK data
     types we have to deal with */

  /* get the maximum value of the output array CCTK data types
     NOTE: we assume that CCTK data types are defined as consecutive
           positive constants starting from zero */
  for (array = maxtype = 0; array < N_output_arrays; array++)
  {
    if (maxtype < output_array_types[array])
    {
      maxtype = output_array_types[array];
    }
  }

  /* now allocate an array of structures for all potential types */
  type_desc = calloc (maxtype + 1, sizeof (type_desc_t));

  /* count the number of arrays of same type
     (the N_arrays element was already initialized to zero by calloc()) */
  for (array = 0; array < N_output_arrays; array++)
  {
    type_desc[output_array_types[array]].N_arrays++;
  }

  /* fill in the type description information */
  for (type = retval = 0, this = type_desc; type <= maxtype; type++, this++)
  {
    if (this->N_arrays > 0)
    {
      /* get the variable type size in bytes */
      this->vtypesize = CCTK_VarTypeSize (type);
      if (this->vtypesize <= 0)
      {
        CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                    "Invalid variable type %d passed, "
                    "arrays of such type will be skipped during interpolation",
                    type);
        this->N_arrays = 0;
        continue;
      }

      /* get the MPI data type to use for communicating such a CCTK data type */
      this->mpitype = PUGH_MPIDataType (pughGH, type);
      if (this->mpitype == MPI_DATATYPE_NULL)
      {
        CCTK_VWarn (1, __LINE__, __FILE__, CCTK_THORNSTRING,
                    "No MPI data type defined for variable type %d, "
                    "arrays of such type will be skipped during interpolation",
                    type);
        this->N_arrays = 0;
        continue;
      }

      retval++;
    }
  }

  /* check that there's at least one array with a valid CCTK data type */
  if (retval <= 0)
  {
    free (input_arrays);
    free (origin);
    free (type_desc);
    return (-1);
  }

  /* map the requested points to interpolate at onto the processors they
     belong to and gather the coordinates of all the points that this
     processor owns. The number of processor-local points is returned in
     N_local_points, their coordinates in interp_local_coords. */
  retval = GetLocalCoords (GH, N_points, coord_system_name, extras,
                           (const CCTK_REAL *const *) interp_coord_arrays,
                           &N_local_points, &interp_local_coords);
  if (retval)
  {
    free (input_arrays);
    free (origin);
    free (type_desc);
    return (retval);
  }

  /* allocate contiguous communication buffers for each CCTK data type
     holding the local interpolation results from all input arrays of that
     type. If there are no points to process on this processor, set the
     buffer pointer to an invalid but non-NULL value, otherwise we might
     get trouble with NULL pointers in MPI_Alltoallv(). */
  for (type = 0, this = type_desc; type <= maxtype; type++, this++)
  {
    if (this->N_arrays > 0 && N_local_points > 0)
    {
      this->sendbuf = malloc (N_local_points * this->N_arrays *
                              this->vtypesize);
      this->buf = this->sendbuf;
    }
    else
    {
      /* dereferencing such an address should crash the code on most systems */
      this->sendbuf = (void *) this->vtypesize;
    }
  }

  /* get extension handle for interp */
  myGH = CCTK_GHExtension (GH, "PUGHInterp");

  /* allocate new output_arrays array for local interpolation results
     from this processor */
  local_output_arrays = calloc (N_output_arrays, sizeof (void *));

  /* now, in a loop over all processors, do the interpolation and put the
     results in the communication buffer at the proper offset */
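  /* Note that local_output_arrays[] does not point at the caller's output
     arrays here: for each destination processor it is set to point into the
     per-type send buffer, so PUGHInterp_Interpolate() writes its results
     directly into the slice which will later be sent to that processor. */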
  for (proc = 0; proc < nprocs; proc++)
  {
    for (type = 0, this = type_desc; type <= maxtype; type++, this++)
    {
      if (this->N_arrays > 0)
      {
        for (array = 0; array < N_output_arrays; array++)
        {
          if (output_array_types[array] == type)
          {
            local_output_arrays[array] = this->buf;
            this->buf += myGH->N_points_to[proc] * this->vtypesize;
          }
        }
      }
    }

    /* call the interpolation operator to process all points of all output
       arrays for this processor */
    PUGHInterp_Interpolate (order, myGH->N_points_to[proc], N_dims,
                            N_output_arrays, extras->lnsize,
                            interp_local_coords, origin, delta,
                            output_array_types, input_arrays,
                            output_array_types, local_output_arrays);

    /* have to add the offset for this processor to the coordinates array */
    interp_local_coords += myGH->N_points_to[proc] * N_dims;

  } /* end of loop over all processors */

  /* don't need these anymore */
  if (N_local_points > 0)
  {
    interp_local_coords -= N_local_points * N_dims;
    free (interp_local_coords);
  }
  free (local_output_arrays);
  free (input_arrays);
  free (origin);

  /* now send the interpolation results back to the processors they were
     requested from, and also receive my own results that were computed by
     other processors.
     Since all the locally computed results are in a single contiguous
     buffer, we need to call MPI_Alltoallv() only once for each CCTK data
     type. */
  for (type = 0, this = type_desc; type <= maxtype; type++, this++)
  {
    /* skip unused types */
    if (this->N_arrays <= 0)
    {
      continue;
    }

    /* set up the communication (this is type-independent) */
    myGH->sendcnt[0] = this->N_arrays * myGH->N_points_to[0];
    myGH->recvcnt[0] = this->N_arrays * myGH->N_points_from[0];
    myGH->senddispl[0] = myGH->recvdispl[0] = 0;
    for (proc = 1; proc < pughGH->nprocs; proc++)
    {
      myGH->sendcnt[proc] = this->N_arrays * myGH->N_points_to[proc];
      myGH->recvcnt[proc] = this->N_arrays * myGH->N_points_from[proc];
      myGH->senddispl[proc] = myGH->senddispl[proc-1] + myGH->sendcnt[proc-1];
      myGH->recvdispl[proc] = myGH->recvdispl[proc-1] + myGH->recvcnt[proc-1];
    }

    /* allocate buffer for receiving my own requested points */
    /* avoid NULL pointers here because MPI_Alltoallv() doesn't like them */
    if (N_points > 0)
    {
      this->recvbuf = malloc (N_points * this->N_arrays * this->vtypesize);
    }
    else
    {
      /* access to such a fake address should crash the code on most systems */
      this->recvbuf = (void *) this->vtypesize;
    }

    /* now exchange the data for this CCTK data type */
    CACTUS_MPI_ERROR (MPI_Alltoallv (this->sendbuf, myGH->sendcnt,
                                     myGH->senddispl, this->mpitype,
                                     this->recvbuf, myGH->recvcnt,
                                     myGH->recvdispl, this->mpitype,
                                     pughGH->PUGH_COMM_WORLD));

    /* now that the data is sent we don't need the buffer anymore */
    if (N_local_points > 0)
    {
      free (this->sendbuf);
    }

    /* no sort necessary if there are no points */
    if (N_points <= 0)
    {
      continue;
    }

    /* Go back from processor-sorted data to input-ordered data.
       The creation of the indices array above makes this not so bad.
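       For each processor the received values arrive in the same order in
       which that processor's points were packed by GetLocalCoords(); since
       indices[] recorded the original point number of each packed point,
       the sort is a simple scatter of the form

         output_array[indices[i + offset]] = recvbuf[i]

       as done by the SORT_TYPED_ARRAY() macro.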
    */
    this->buf = this->recvbuf;
    for (proc = offset = 0; proc < nprocs; proc++)
    {
      for (array = 0; array < N_output_arrays; array++)
      {
        if (output_array_types[array] != type)
        {
          continue;
        }

        /* now do the sorting according to the CCTK data type */
        if (output_array_types[array] == CCTK_VARIABLE_CHAR)
        {
          SORT_TYPED_ARRAY (CCTK_BYTE);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_INT)
        {
          SORT_TYPED_ARRAY (CCTK_INT);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_REAL)
        {
          SORT_TYPED_ARRAY (CCTK_REAL);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_COMPLEX)
        {
          SORT_TYPED_ARRAY (CCTK_COMPLEX);
        }
#ifdef CCTK_REAL4
        else if (output_array_types[array] == CCTK_VARIABLE_REAL4)
        {
          SORT_TYPED_ARRAY (CCTK_REAL4);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_COMPLEX8)
        {
          SORT_TYPED_ARRAY (CCTK_COMPLEX8);
        }
#endif
#ifdef CCTK_REAL8
        else if (output_array_types[array] == CCTK_VARIABLE_REAL8)
        {
          SORT_TYPED_ARRAY (CCTK_REAL8);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_COMPLEX16)
        {
          SORT_TYPED_ARRAY (CCTK_COMPLEX16);
        }
#endif
#ifdef CCTK_REAL16
        else if (output_array_types[array] == CCTK_VARIABLE_REAL16)
        {
          SORT_TYPED_ARRAY (CCTK_REAL16);
        }
        else if (output_array_types[array] == CCTK_VARIABLE_COMPLEX32)
        {
          SORT_TYPED_ARRAY (CCTK_COMPLEX32);
        }
#endif
        else
        {
          CCTK_WARN (0, "Implementation error");
        }
      } /* end of loop over all output arrays */

      /* advance the offset into the communication receive buffer */
      offset += myGH->N_points_from[proc];

    } /* end of loop over all processors */

    /* this communication receive buffer isn't needed anymore */
    free (this->recvbuf);

  } /* end of loop over all types */

  /* free remaining resources allocated within this run */
  if (myGH->whichproc)
  {
    free (myGH->whichproc);
    myGH->whichproc = NULL;
  }
  free (type_desc);
#endif /* CCTK_MPI */

  return (0);
}


/**************************************************************************/
/*                             local routines                             */
/**************************************************************************/

#ifdef CCTK_MPI
 /*@@
   @routine    GetLocalCoords
   @date       Sun Jul 04 1999
   @author     Thomas Radke
   @desc
               Collect the coordinates of all points to be processed locally
               into an array coords[N_dims][N_local_points].
               For the single-processor case this means sorting
               inCoords1[N_points], inCoords2[N_points], ...,
               inCoordsN_dims[N_points] into coords[N_dims][N_points],
               where N_points == N_local_points.
               In the multiprocessor case all processors map their points'
               coordinates to the processor that owns that point and exchange
               this information via MPI_Alltoall()/MPI_Alltoallv().
               N_local_points is then the number of all processors' points
               to be interpolated locally on this processor.
               This routine returns the number of points to be processed
               locally and a pointer to the allocated array of their
               coordinates.
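               As an illustration (schematic only): with two processors,
               where processor 0 requests 3 points that lie on processor 1
               and processor 1 requests 2 points that lie on processor 0,
               the exchange leaves N_local_points = 2 on processor 0 and
               N_local_points = 3 on processor 1, with local_coords holding
               the received coordinates as N_dims-tuples, sorted by
               requesting processor.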
   @enddesc

   @var        GH
   @vdesc      Pointer to CCTK grid hierarchy
   @vtype      const cGH *
   @vio        in
   @endvar
   @var        N_points
   @vdesc      number of points to be interpolated on this processor
   @vtype      int
   @vio        in
   @endvar
   @var        coord_system_name
   @vdesc      name of the coordinate system the points live on
   @vtype      const char *
   @vio        in
   @endvar
   @var        extras
   @vdesc      pGExtras pointer of the coordinate grid describing the
               processor decomposition
   @vtype      const pGExtras *
   @vio        in
   @endvar
   @var        coords
   @vdesc      coordinates of each point to be interpolated on this processor
   @vtype      const CCTK_REAL *const [N_dims]
   @vio        in
   @endvar
   @var        N_local_points
   @vdesc      number of points to be processed by this processor
   @vtype      int *
   @vio        out
   @endvar
   @var        local_coords
   @vdesc      coordinates of each point to be processed by this processor
   @vtype      pointer to CCTK_REAL array of dims [N_dims][N_local_points]
   @vio        out
   @endvar
 @@*/
static int GetLocalCoords (const cGH *GH,
                           int N_points,
                           const char *coord_system_name,
                           const pGExtras *extras,
                           const CCTK_REAL *const coords[],
                           int *N_local_points,
                           CCTK_REAL **local_coords)
{
  int dim, point;
  int tmp, proc, nprocs;
  pGH *pughGH;
  pughInterpGH *myGH;
  CCTK_REAL *range_min, *range_max;
  CCTK_REAL *origin, *delta;
  CCTK_REAL *proc_coords;
#define FUDGE 0.0


  /* get GH extension handles for PUGHInterp and PUGH */
  myGH = CCTK_GHExtension (GH, "PUGHInterp");
  pughGH = CCTK_GHExtension (GH, "PUGH");

  /* This holds the processor for *each* of N_points points */
  if (N_points > 0)
  {
    myGH->whichproc = malloc (2 * N_points * sizeof (int));
  }
  else
  {
    myGH->whichproc = NULL;
  }
  /* indices[] is used to make the sorting easier when receiving
     the output data */
  myGH->indices = myGH->whichproc + N_points;

  /* initialize whichproc with invalid processor number -1 */
  for (point = 0; point < N_points; point++)
  {
    myGH->whichproc[point] = -1;
  }

  /* initialize N_points_from to 0 for counting it up in the following loop */
  nprocs = CCTK_nProcs (GH);
  memset (myGH->N_points_from, 0, nprocs * sizeof (CCTK_INT));

  /* allocate the ranges for my local coordinates */
  range_min = malloc (4 * extras->dim * sizeof (CCTK_REAL));
  range_max = range_min + 1*extras->dim;
  origin    = range_min + 2*extras->dim;
  delta     = range_min + 3*extras->dim;

  /* get the global origin and delta of the coordinate system */
  for (dim = 0; dim < extras->dim; dim++)
  {
    CCTK_CoordRange (GH, &origin[dim], &delta[dim], dim+1, NULL,
                     coord_system_name);
    delta[dim] = (delta[dim] - origin[dim]) / (extras->nsize[dim]-1);
  }

  /* locate the points to interpolate at */
  for (proc = 0; proc < nprocs; proc++)
  {
    for (dim = 0; dim < extras->dim; dim++)
    {
      /* compute the coordinate ranges */
      /* TODO: use bbox instead -- but the bboxes of other processors
               are not known */
      int const has_lower = extras->lb[proc][dim] == 0;
      int const has_upper = extras->ub[proc][dim] == GH->cctk_gsh[dim]-1;
      range_min[dim] = origin[dim] +
        (extras->lb[proc][dim] +
         (!has_lower) * (extras->nghostzones[dim]-0.5) - FUDGE) * delta[dim];
      range_max[dim] = origin[dim] +
        (extras->ub[proc][dim] -
         (!has_upper) * (extras->nghostzones[dim]-0.5) + FUDGE) * delta[dim];
    }

    /* and now which point will be processed by what processor */
    for (point = 0; point < N_points; point++)
    {
      /* skip points which have already been located */
      if (myGH->whichproc[point] >= 0)
      {
        continue;
      }

      /* check whether the point belongs to this processor
         (must be within min/max in all dimensions) */
      tmp = 0;
      for (dim = 0; dim < extras->dim; dim++)
      {
        if (coords[dim][point] >= range_min[dim] &&
            coords[dim][point] <= range_max[dim])
        {
          tmp++;
        }
      }
      if (tmp == extras->dim)
      {
        myGH->whichproc[point] = proc;
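        /* this point lies on processor proc: its coordinates will be sent
           there and its interpolation result will be received from there */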
        myGH->N_points_from[proc]++;
      }
    }
  }

  /* don't need this anymore */
  free (range_min);

  /* make sure that all points could be mapped onto a processor */
  for (point = tmp = 0; point < N_points; point++)
  {
    if (myGH->whichproc[point] < 0)
    {
      int i;
      char *msg = malloc (80 + extras->dim*20);

      sprintf (msg, "Unable to locate point %d [%f",
               point, (double) coords[0][point]);
      for (i = 1; i < extras->dim; i++)
      {
        sprintf (msg + strlen (msg), " %f", (double) coords[i][point]);
      }
      strcat (msg, "]");
      CCTK_WARN (1, msg);
      free (msg);

      tmp = 1;    /* mark as error */
    }
  }
  if (tmp)
  {
    if (myGH->whichproc)
    {
      free (myGH->whichproc);
      myGH->whichproc = NULL;
    }
    return (-1);
  }

  /* Now we want to resolve the N_points_from[]. Currently this has the form
     (in 2-processor mode)
         P1:  Num from P1   NFP2
         P2:  NFP1          NFP2
     and this needs to become
         P1:  Num to P1     NTP2
         P2:  NTP1          NTP2
     Since NTP1 = NFP2 (and this works with more processors too) this is
     an all-to-all communication. */
  CACTUS_MPI_ERROR (MPI_Alltoall (myGH->N_points_from, 1, PUGH_MPI_INT,
                                  myGH->N_points_to, 1, PUGH_MPI_INT,
                                  pughGH->PUGH_COMM_WORLD));

#ifdef PUGHINTERP_DEBUG
  for (proc = 0; proc < nprocs; proc++)
  {
    printf ("processor %d <-> %d  From: %d  To: %d\n",
            CCTK_MyProc (GH), proc,
            myGH->N_points_from[proc], myGH->N_points_to[proc]);
  }
#endif

  /* Great. Now we know how many to expect from each processor, and how many
     to send to each processor. So first we have to send the locations to the
     processors which hold our data. This means I send coords[dim][point] to
     whichproc[point]. I have N_points_from[proc] to send to each processor. */

  /* This is reversed for the location broadcast: the number of points we
     receive (to process locally) is how many everyone else is sending to us,
     i.e. N_points_to, not how many results we get back from everyone else,
     i.e. N_points_from. The number we are sending, of course, is all of our
     own locations, i.e. N_points. */
  *N_local_points = 0;
  for (proc = 0; proc < nprocs; proc++)
  {
    *N_local_points += myGH->N_points_to[proc];
  }

#ifdef PUGHINTERP_DEBUG
  printf ("processor %d gets %d points in total\n",
          CCTK_MyProc (GH), *N_local_points);
#endif

  /* allocate the local coordinates array (sorted in processor order)
     and the resulting coordinates array that I have to process */
  proc_coords = malloc (extras->dim * N_points * sizeof (CCTK_REAL));
  *local_coords = malloc (extras->dim * *N_local_points * sizeof (CCTK_REAL));

  /* now sort my own coordinates as tuples of [extras->dim] */
  for (proc = tmp = 0; proc < nprocs; proc++)
  {
    for (point = 0; point < N_points; point++)
    {
      if (myGH->whichproc[point] == proc)
      {
        for (dim = 0; dim < extras->dim; dim++)
        {
          *proc_coords++ = coords[dim][point];
        }
        myGH->indices[tmp++] = point;
      }
    }
  }
  proc_coords -= tmp * extras->dim;

  /* So load up the send and recv stuff */
  /* Send extras->dim elements per data point */
  myGH->sendcnt[0] = extras->dim * myGH->N_points_from[0];
  myGH->recvcnt[0] = extras->dim * myGH->N_points_to[0];
  myGH->senddispl[0] = myGH->recvdispl[0] = 0;
  for (proc = 1; proc < nprocs; proc++)
  {
    myGH->sendcnt[proc] = extras->dim * myGH->N_points_from[proc];
    myGH->recvcnt[proc] = extras->dim * myGH->N_points_to[proc];
    myGH->senddispl[proc] = myGH->senddispl[proc-1] + myGH->sendcnt[proc-1];
    myGH->recvdispl[proc] = myGH->recvdispl[proc-1] + myGH->recvcnt[proc-1];
  }

  /* Great, and now exchange the coordinates and collect the ones that
     I have to process in *local_coords[] */
  CACTUS_MPI_ERROR (MPI_Alltoallv (proc_coords, myGH->sendcnt,
                                   myGH->senddispl, PUGH_MPI_REAL,
                                   *local_coords, myGH->recvcnt,
                                   myGH->recvdispl, PUGH_MPI_REAL,
                                   pughGH->PUGH_COMM_WORLD));

  /* don't need this anymore */
  free (proc_coords);

  return (0);
}
#endif /* CCTK_MPI */


 /*@@
   @routine    CheckArguments
   @date       Thu 25 Jan 2001
   @author     Thomas Radke
   @desc
               Checks the interpolation arguments passed in via the flesh's
               general interpolation calling interface.
               This routine also verifies that the parameters meet the
               limitations of PUGHInterp's interpolation operators.
   @enddesc

   @var        GH
   @vdesc      Pointer to CCTK grid hierarchy
   @vtype      const cGH *
   @vio        in
   @endvar
   @var        N_dims
   @vdesc      dimensionality of the underlying grid
   @vtype      int
   @vio        in
   @endvar
   @var        N_points
   @vdesc      number of points to interpolate at
   @vtype      int
   @vio        in
   @endvar
   @var        N_input_arrays
   @vdesc      number of passed input arrays
   @vtype      int
   @vio        in
   @endvar
   @var        N_output_arrays
   @vdesc      number of passed output arrays
   @vtype      int
   @vio        in
   @endvar
   @var        interp_coord_array_types
   @vdesc      types of passed coordinates to interpolate at
   @vtype      int [N_dims]
   @vio        in
   @endvar

   @returntype int
   @returndesc
               +1 for success
                0 for success but nothing to do
               -1 for failure (wrong parameters passed or limitations not met)
   @endreturndesc
 @@*/
static int CheckArguments (const cGH *GH,
                           int N_dims,
                           int N_points,
                           int N_input_arrays,
                           int N_output_arrays,
                           const int interp_coord_array_types[])
{
  int i;


  /* check for invalid arguments */
  if (N_dims < 0 || N_points < 0 || N_input_arrays < 0 || N_output_arrays < 0)
  {
    return (-1);
  }

  /* check if there's anything to do at all */
  /* NOTE: N_points can be 0 in a collective call */
  if (N_dims == 0 || (CCTK_nProcs (GH) == 1 && N_points == 0) ||
      N_input_arrays == 0 || N_output_arrays == 0)
  {
    return (0);
  }

  /* for now we can only deal with coordinates of type CCTK_REAL */
  for (i = 0; i < N_dims; i++)
  {
    if (interp_coord_array_types[i] != CCTK_VARIABLE_REAL)
    {
      CCTK_WARN (1, "Interpolation coordinates must be of type CCTK_REAL");
      return (-1);
    }
  }

  /* PUGHInterp's interpolation operators compute one output array
     per input array */
  if (N_input_arrays != N_output_arrays)
  {
    CCTK_WARN (1, "Number of input arrays must match number of output arrays");
    return (-1);
  }

  return (1);
}


static int CheckOutOfBounds (const cGH *GH,
                             const char *coord_system_name,
                             int order,
                             int N_dims,
                             int N_points,
                             const int *dims,
                             const CCTK_REAL *const *coords)
{
  int i, p, point, out_of_bounds, retval;
  CCTK_REAL *origin, *delta, *delta_inv, *below;
  char *msg;


  msg = malloc (100 + N_dims*(10 + 4*30));
  origin = malloc (4 * N_dims * sizeof (CCTK_REAL));
  delta     = origin + 1*N_dims;
  delta_inv = origin + 2*N_dims;
  below     = origin + 3*N_dims;

  /* get the global origin and delta of the coordinate system */
  for (i = 0; i < N_dims; i++)
  {
    CCTK_CoordRange (GH, &origin[i], &delta[i], i+1, NULL, coord_system_name);
    delta[i] = (delta[i] - origin[i]) / (dims[i]-1);
    /* avoid expensive divisions by delta later on */
    delta_inv[i] = 1.0 / delta[i];
  }

  retval = 0;
  for (p = 0; p < N_points; p++)
  {
    /* reset the out-of-bounds flag */
    out_of_bounds = 0;

    /* loop over all dimensions */
    for (i = 0; i < N_dims; i++)
    {
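      /* The interpolation stencil for a given order spans order+1 grid
         points, starting at floor((x - origin)/delta - 0.5*(order-1)).
         Worked example (illustrative numbers only): for origin = 0,
         delta = 0.5, order = 2 and x = 1.3 this gives floor(2.6 - 0.5) = 2,
         i.e. the stencil uses grid points 2, 3, 4 (coordinates 1.0, 1.5,
         2.0), centered on the grid point nearest to x. */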
[%f / %f", msg, (double) below[0], (double) (below[0] + order*delta[0])); for (i = 1; i < N_dims; i++) { sprintf (msg, "%s, %f / %f", msg, (double) below[i], (double) (below[i] + order*delta[i])); } sprintf (msg, "%s]\ngrid is min/max [%f / %f", msg, (double) origin[0], (double) (origin[0] + (dims[0]-1)*delta[0])); for (i = 1; i < N_dims; i++) { sprintf (msg, "%s, %f / %f", msg, (double)origin[i], (double)(origin[i] + (dims[i]-1)*delta[i])); } sprintf (msg, "%s]", msg); CCTK_WARN (1, msg); retval--; } } /* free allocated resources */ free (origin); free (msg); return (retval); }