author    eschnett <eschnett@105869f7-3296-0410-a4ea-f4349344b45a>  2013-01-16 20:17:39 +0000
committer eschnett <eschnett@105869f7-3296-0410-a4ea-f4349344b45a>  2013-01-16 20:17:39 +0000
commit    f4e59032c42c1df52717d2663760072cd0510f30 (patch)
tree      e6d3021b3fb440b20087655844f013bee9b3b522 /src/macros
parent    33e3cc3261b81b28f83a6e4d41d1ae97944dfa49 (diff)
Major update
Disable AVX emulation
Set default for streaming stores to "no"
Correct QPX vectorisation (IBM Blue Gene/Q)
Add MIC vectorisation (Intel Xeon Phi)
Convert SSE and AVX vectorisation to using inline functions instead of macros for code clarity
Define CCTK_BOOLEAN, CCTK_INTEGER and CCTK_BOOLEAN_VEC, CCTK_INTEGER_VEC to make boolean and integer vectors explicit

git-svn-id: https://svn.cct.lsu.edu/repos/numrel/LSUThorns/Vectors/trunk@77 105869f7-3296-0410-a4ea-f4349344b45a
Diffstat (limited to 'src/macros')
-rw-r--r--  src/macros/vectors-4-SSE.h      457
-rw-r--r--  src/macros/vectors-4-default.h  134
-rw-r--r--  src/macros/vectors-8-AVX.h      325
-rw-r--r--  src/macros/vectors-8-SSE2.h     427
-rw-r--r--  src/macros/vectors-8-default.h  132
5 files changed, 1475 insertions, 0 deletions
diff --git a/src/macros/vectors-4-SSE.h b/src/macros/vectors-4-SSE.h
new file mode 100644
index 0000000..2be477b
--- /dev/null
+++ b/src/macros/vectors-4-SSE.h
@@ -0,0 +1,457 @@
+// Vectorise using Intel's or AMD's SSE
+
+// Use the type __m128 directly, without introducing a wrapper class
+// Use macros instead of inline functions
+
+
+
+#include <assert.h>
+#include <math.h>
+
+#include <xmmintrin.h>
+#ifdef __SSE4_1__
+// Intel's SSE 4.1
+# include <smmintrin.h>
+#endif
+#ifdef __SSE4A__
+// AMD's SSE 4a
+# include <ammintrin.h>
+#endif
+#ifdef __FMA4__
+# include <fma4intrin.h>
+#endif
+
+
+
+#ifdef __SSE4_1__
+# define vec4_architecture_SSE4_1 "+SSE4.1"
+#else
+# define vec4_architecture_SSE4_1 ""
+#endif
+#ifdef __SSE4A__
+# define vec4_architecture_SSE4a "+SSE4A"
+#else
+# define vec4_architecture_SSE4a ""
+#endif
+#ifdef __FMA4__
+# define vec4_architecture_FMA4 "+FMA4"
+#else
+# define vec4_architecture_FMA4 ""
+#endif
+#define vec4_architecture "SSE" vec4_architecture_SSE4_1 vec4_architecture_SSE4a vec4_architecture_FMA4 " (32-bit precision)"
+
+
+
+// Vector type corresponding to CCTK_REAL
+#define CCTK_REAL4_VEC __m128
+
+// Number of vector elements in a CCTK_REAL_VEC
+#define CCTK_REAL4_VEC_SIZE 4
+
+// Integer and boolean types corresponding to this real type
+#define CCTK_INTEGER4 CCTK_REAL4
+#define CCTK_BOOLEAN4 CCTK_REAL4
+#define CCTK_INTEGER4_VEC CCTK_REAL4_VEC
+#define CCTK_BOOLEAN4_VEC CCTK_REAL4_VEC
+
+
+
+union k4const_t {
+ unsigned i[4];
+ float f[4];
+ __m128i vi;
+ __m128 vf;
+};
+
+#define K4_ZERO 0x00000000UL
+#define K4_IMIN 0x80000000UL
+#define K4_IMAX 0x7fffffffUL
+
+
+
+// Create vectors, extract vector elements
+
+#define vec4_set1(a) (_mm_set1_ps(a))
+#define vec4_set(a,b,c,d) (_mm_set_ps(d,c,b,a)) // note reversed arguments
+
+// original order is 0123
+#define vec4_swap1032(x_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ _mm_shuffle_ps(x,x, _MM_SHUFFLE(2,3,0,1)); \
+ })
+#define vec4_swap2301(x_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ _mm_shuffle_ps(x,x, _MM_SHUFFLE(1,0,3,2)); \
+ })
+#define vec4_swap3210(x_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ _mm_shuffle_ps(x,x, _MM_SHUFFLE(0,1,2,3)); \
+ })
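To make the shuffle patterns above concrete (lane 0 is the lowest element, and _MM_SHUFFLE lists indices from high to low), here is what the three swaps do to a vector (a0,a1,a2,a3); this is an editorial illustration, not part of the commit:

// Illustration only:
//   vec4_swap1032((a0,a1,a2,a3)) == (a1,a0,a3,a2)
//   vec4_swap2301((a0,a1,a2,a3)) == (a2,a3,a0,a1)
//   vec4_swap3210((a0,a1,a2,a3)) == (a3,a2,a1,a0)
// This is why vec4_elt1/2/3 below can be written as vec4_elt0 of a swap.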
+
+#if defined(__PGI)
+// _mm_cvtss_f32 does not exist on PGI compilers
+# define vec4_elt0(x) \
+ ({ \
+ CCTK_REAL4 a; \
+ asm ("" : "=x" (a) : "0" (x)); \
+ a; \
+ })
+#else
+# define vec4_elt0(x) (_mm_cvtss_f32(x)) // this is a no-op
+#endif
+#define vec4_elt1(x) vec4_elt0(vec4_swap1032(x))
+#define vec4_elt2(x) vec4_elt0(vec4_swap2301(x))
+#define vec4_elt3(x) vec4_elt0(vec4_swap3210(x))
+#if defined(__PGI)
+# define vec4_elt(x_,d) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4 a; \
+ if (d==0) a=vec4_elt0(x); \
+ else if (d==1) a=vec4_elt1(x); \
+ else if (d==2) a=vec4_elt2(x); \
+ else if (d==3) a=vec4_elt3(x); \
+ a; \
+ })
+#else
+# define vec4_elt(x_,d) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4 a; \
+ switch (d) { \
+ case 0: a=vec4_elt0(x); break; \
+ case 1: a=vec4_elt1(x); break; \
+ case 2: a=vec4_elt2(x); break; \
+ case 3: a=vec4_elt3(x); break; \
+ } \
+ a; \
+ })
+#endif
+
+
+
+// Load and store vectors
+
+// Load a vector from memory (aligned and unaligned); this loads from
+// a reference to a scalar
+#define vec4_load(p) (_mm_load_ps(&(p)))
+#define vec4_loadu(p) (_mm_loadu_ps(&(p)))
+#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
+# define vec4_load_off1(p) vec_loadu(p)
+# define vec4_load_off2(p) vec_loadu(p)
+# define vec4_load_off3(p) vec_loadu(p)
+#else
+# define vec4_load_off1(p_) \
+ ({ \
+ CCTK_REAL4 const& p__=(p_); \
+ CCTK_REAL4 const& p=p__; \
+ CCTK_REAL4_VEC const lo=vec4_load((&p)[-1]); \
+ CCTK_REAL4_VEC const hi=vec4_load((&p)[+3]); \
+ assert(0); \
+ CCTK_REAL4_VEC const hi2=_mm_shuffle_ps(lo,hi, _MM_SHUFFLE(0,1,2,3)); \
+ _mm_shuffle_ps(lo,hi2, _MM_SHUFFLE(2,1,3,0)); \
+ })
+# define vec4_load_off2(p_) \
+ ({ \
+ CCTK_REAL4 const& p__=(p_); \
+ CCTK_REAL4 const& p=p__; \
+ CCTK_REAL4_VEC const lo=vec4_load((&p)[-2]); \
+ CCTK_REAL4_VEC const hi=vec4_load((&p)[+2]); \
+ _mm_shuffle_ps(lo,hi, _MM_SHUFFLE(1,0,3,2)); \
+ })
+# define vec4_load_off3(p_) \
+ ({ \
+ CCTK_REAL4 const& p__=(p_); \
+ CCTK_REAL4 const& p=p__; \
+ CCTK_REAL4_VEC const lo=vec4_load((&p)[-1]); \
+ CCTK_REAL4_VEC const hi=vec4_load((&p)[+3]); \
+ assert(0); \
+ CCTK_REAL4_VEC const lo2=_mm_shuffle_ps(lo,hi, _MM_SHUFFLE(0,1,2,3)); \
+ _mm_shuffle_ps(lo2,hi, _MM_SHUFFLE(3,0,2,1)); \
+ })
+#endif
+
+// Load a vector from memory that may or may not be aligned, as
+// decided by the offset off and the vector size
+#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
+// Implementation: Always use unaligned load
+# define vec4_loadu_maybe(off,p) vec4_loadu(p)
+# define vec4_loadu_maybe3(off1,off2,off3,p) vec4_loadu(p)
+#else
+# define vec4_loadu_maybe(off,p_) \
+ ({ \
+ CCTK_REAL4 const& p__=(p_); \
+ CCTK_REAL4 const& p=p__; \
+ (off) % CCTK_REAL4_VEC_SIZE == 0 ? \
+ vec4_load(p) : \
+ vec4_loadu(p); \
+ })
+# if VECTORISE_ALIGNED_ARRAYS
+// Assume all array x sizes are multiples of the vector size
+# define vec4_loadu_maybe3(off1,off2,off3,p) \
+ vec4_loadu_maybe(off1,p)
+# else
+# define vec4_loadu_maybe3(off1,off2,off3,p) \
+ vec4_loadu_maybe((off1)|(off2)|(off3),p)
+# endif
+#endif
+
+// Store a vector to memory (aligned and non-temporal); this stores to
+// a reference to a scalar
+#define vec4_store(p,x) (_mm_store_ps(&(p),x))
+#define vec4_storeu(p,x) (_mm_storeu_ps(&(p),x))
+#if ! VECTORISE_STREAMING_STORES
+# define vec4_store_nta(p,x) vec4_store(p,x)
+#else
+# define vec4_store_nta(p,x) (_mm_stream_ps(&(p),x))
+#endif
+
+// Store a partial vector (aligned and non-temporal)
+#define vec4_store_partial_prepare(i,imin,imax) \
+ int v4stp_lo_skip = (imin)-(i); \
+  int v4stp_hi_skip = (i)+CCTK_REAL4_VEC_SIZE-(imax);                  \
+ if (CCTK_BUILTIN_EXPECT(v4stp_lo_skip < 0, true)) v4stp_lo_skip = 0; \
+ if (CCTK_BUILTIN_EXPECT(v4stp_hi_skip < 0, true)) v4stp_hi_skip = 0;
+// Ignoring VECTORISE_STREAMING_STORES for partial stores
+#define vec4_store_nta_partial(p_,x_) \
+ ({ \
+ CCTK_REAL4& p__=(p_); \
+ CCTK_REAL4& p=p__; \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ if (CCTK_BUILTIN_EXPECT(v4stp_lo_skip==0 and v4stp_hi_skip==0, true)) { \
+ vec4_store_nta(p,x); \
+ } else { \
+ /* these cases fall through */ \
+ switch (v4stp_lo_skip) { \
+ case 0: \
+ (&p)[0] = vec4_elt0(x); \
+ case 1: \
+ if (v4stp_hi_skip>=3) break; \
+ (&p)[1] = vec4_elt1(x); \
+ case 2: \
+ if (v4stp_hi_skip>=2) break; \
+ (&p)[2] = vec4_elt2(x); \
+ case 3: \
+ if (v4stp_hi_skip>=1) break; \
+ (&p)[3] = vec4_elt3(x); \
+ } \
+ } \
+ })
+
+// Ignoring VECTORISE_STREAMING_STORES for partial stores
+#define vec4_store_nta_partial_lo(p_,x_,n) \
+ ({ \
+ CCTK_REAL4 & p__=(p_); \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4 & p=p__; \
+ CCTK_REAL4_VEC const x=x__; \
+ /* these cases fall through */ \
+ switch (n) { \
+ case 3: (&p)[2] = vec4_elt2(x); \
+ case 2: (&p)[1] = vec4_elt1(x); \
+ case 1: (&p)[0] = vec4_elt0(x); \
+ } \
+ })
+#define vec4_store_nta_partial_hi(p_,x_,n) \
+ ({ \
+ CCTK_REAL4 & p__=(p_); \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4 & p=p__; \
+ CCTK_REAL4_VEC const x=x__; \
+ /* these cases fall through */ \
+ switch (n) { \
+ case 3: (&p)[1]=vec4_elt1(x); \
+ case 2: (&p)[2]=vec4_elt2(x); \
+ case 1: (&p)[3]=vec4_elt3(x); \
+ } \
+ })
+#define vec4_store_nta_partial_mid(p_,x_,nlo,nhi) \
+ ({ \
+ CCTK_REAL4 & p__=(p_); \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4 & p=p__; \
+ CCTK_REAL4_VEC const x=x__; \
+ /* these cases fall through */ \
+ switch (nhi) { \
+ case 3: if (nlo<2) break; (&p)[1] = vec4_elt1(x); \
+ case 2: if (nlo<3) break; (&p)[2] = vec4_elt2(x); \
+ } \
+ })
+
+
+
+// Functions and operators
+
+static const k4const_t k4sign_mask = {{ K4_IMIN, K4_IMIN, K4_IMIN, K4_IMIN, }};
+
+// Operators
+#define k4neg(x) (_mm_xor_ps(k4sign_mask.vf,x))
+// #define k4inv(x)
+// TODO: provide k4inv via rcp and Newton-Raphson
+// This is described in AMD's publication 47414.
+// This should apply for AVX as well.
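As a concrete reading of the TODO above, a possible k4inv would seed the reciprocal with rcpps and refine it with one Newton-Raphson step, y1 = y0*(2 - x*y0). This is only an editorial sketch, not part of the commit: the macro name and the single refinement step are assumptions, and the result is not a correctly rounded 1/x.

// Sketch only: rcpps gives roughly 12 bits of precision; one
// Newton-Raphson step about doubles that.
#define k4inv(x_)                                               \
  ({                                                            \
    CCTK_REAL4_VEC const x__=(x_);                              \
    CCTK_REAL4_VEC const x=x__;                                 \
    CCTK_REAL4_VEC const y0=_mm_rcp_ps(x);                      \
    k4mul(y0, k4sub(vec4_set1(2.0f), k4mul(x,y0)));             \
  })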
+
+#define k4add(x,y) (_mm_add_ps(x,y))
+#define k4sub(x,y) (_mm_sub_ps(x,y))
+#define k4mul(x,y) (_mm_mul_ps(x,y))
+// TODO: use k4inv and k4mul instead
+#define k4div(x,y) (_mm_div_ps(x,y))
+
+// Fused multiply-add, defined as [+-]x*y[+-]z
+#ifdef __FMA4__
+# define k4madd(x,y,z) (_mm_macc_ps(x,y,z))
+# define k4msub(x,y,z) (_mm_msub_ps(x,y,z))
+# define k4nmadd(x,y,z) (_mm_nmsub_ps(x,y,z))
+# define k4nmsub(x,y,z) (_mm_nmacc_ps(x,y,z))
+#else
+# define k4madd(x,y,z) (k4add(k4mul(x,y),z))
+# define k4msub(x,y,z) (k4sub(k4mul(x,y),z))
+# define k4nmadd(x,y,z) (k4sub(k4neg(z),k4mul(x,y)))
+# define k4nmsub(x,y,z) (k4sub(z,k4mul(x,y)))
+#endif
+
+// Cheap functions
+#define k4copysign(x,y) \
+ (_mm_or_ps(_mm_andnot_ps(k4sign_mask.vf,x), \
+ _mm_and_ps(k4sign_mask.vf,y)))
+#define k4fabs(x) (_mm_andnot_ps(k4sign_mask.vf,x))
+#define k4fmax(x,y) (_mm_max_ps(x,y))
+#define k4fmin(x,y) (_mm_min_ps(x,y))
+#define k4fnabs(x) (_mm_or_ps(k4sign_mask.vf,x))
+#define k4sgn(x_) \
+ ({ \
+    CCTK_REAL4_VEC const x__=(x_);                                     \
+    CCTK_REAL4_VEC const x=x__;                                        \
+    CCTK_REAL4_VEC const iszero = _mm_cmpeq_ps(vec4_set1(0.0f), x);    \
+    CCTK_REAL4_VEC const sign = _mm_and_ps(k4sign_mask.vf, x);         \
+    CCTK_REAL4_VEC const signedone = _mm_or_ps(vec4_set1(1.0f), sign); \
+ k4ifthen(iszero, vec4_set1(0.0f), signedone); \
+ })
+// TODO: maybe use rsqrt and Newton-Raphson
+#define k4sqrt(x) (_mm_sqrt_ps(x))
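For reference, the rsqrt-based variant mentioned in the TODO would look roughly as follows. This is an editorial sketch only, not part of the commit: the helper name is hypothetical, and x == 0 needs special handling because rsqrtps(0) is infinite, which would turn x*r1 into a NaN.

// Sketch only: sqrt(x) ~= x*r1, where r0 = rsqrtps(x) and
// r1 = r0*(1.5 - 0.5*x*r0*r0) is one Newton-Raphson refinement.
#define k4sqrt_rsqrt(x_)                                                  \
  ({                                                                      \
    CCTK_REAL4_VEC const x__=(x_);                                        \
    CCTK_REAL4_VEC const x=x__;                                           \
    CCTK_REAL4_VEC const r0=_mm_rsqrt_ps(x);                              \
    CCTK_REAL4_VEC const r1=                                              \
      k4mul(r0, k4sub(vec4_set1(1.5f),                                    \
                      k4mul(vec4_set1(0.5f), k4mul(x, k4mul(r0,r0)))));   \
    k4mul(x, r1);                                                         \
  })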
+
+// Expensive functions
+#define K4REPL(f,x_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const x=x__; \
+ vec4_set(f(vec4_elt0(x)), \
+ f(vec4_elt1(x)), \
+ f(vec4_elt2(x)), \
+ f(vec4_elt3(x))); \
+ })
+#define K4REPL2S(f,x_,a_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4 const a__=(a_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4 const a=a__; \
+ vec4_set(f(vec4_elt0(x),a), \
+ f(vec4_elt1(x),a), \
+ f(vec4_elt2(x),a), \
+ f(vec4_elt3(x),a)); \
+ })
+#define K4REPL2(f,x_,y_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const y__=(y_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4_VEC const y=y__; \
+ vec4_set(f(vec4_elt0(x),vec4_elt0(y)), \
+ f(vec4_elt1(x),vec4_elt1(y)), \
+ f(vec4_elt2(x),vec4_elt2(y)), \
+ f(vec4_elt3(x),vec4_elt3(y))); \
+ })
+
+#define k4acos(x) K4REPL(acosf,x)
+#define k4acosh(x) K4REPL(acoshf,x)
+#define k4asin(x) K4REPL(asinf,x)
+#define k4asinh(x) K4REPL(asinhf,x)
+#define k4atan(x) K4REPL(atanf,x)
+#define k4atan2(x,y) K4REPL2(atan2f,x,y)
+#define k4atanh(x) K4REPL(atanhf,x)
+#define k4cos(x) K4REPL(cosf,x)
+#define k4cosh(x) K4REPL(coshf,x)
+#define k4exp(x) K4REPL(expf,x)
+#define k4log(x) K4REPL(logf,x)
+#define k4pow(x,a) K4REPL2S(powf,x,a)
+#define k4sin(x) K4REPL(sinf,x)
+#define k4sinh(x) K4REPL(sinhf,x)
+#define k4tan(x) K4REPL(tanf,x)
+#define k4tanh(x) K4REPL(tanhf,x)
+
+static const k4const_t k4lfalse_ = {{ 0U, 0U, 0U, 0U, }};
+static const k4const_t k4ltrue_ = {{ ~0U, ~0U, ~0U, ~0U, }};
+#define k4lfalse (k4lfalse_.vf)
+#define k4ltrue (k4ltrue_.vf)
+#define k4lnot(x) (_mm_xor_ps(k4ltrue,x))
+#define k4land(x,y) (_mm_and_ps(x,y))
+#define k4lor(x,y) (_mm_or_ps(x,y))
+#define k4lxor(x,y) (_mm_xor_ps(x,y))
+
+#ifdef __SSE4_1__
+# define k4ifthen(x,y,z) (_mm_blendv_ps(z,y,x))
+#elif 0
+# ifdef __cplusplus
+# define k4signbit(x) ({ using namespace std; signbit(x); })
+# else
+# define k4signbit(x) (signbitf(x))
+# endif
+# define k4ifthen(x_,y_,z_)                                            \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const y__=(y_); \
+ CCTK_REAL4_VEC const z__=(z_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4_VEC const y=y__; \
+ CCTK_REAL4_VEC const z=z__; \
+ vec4_set(k4signbit(vec4_elt0(x)) ? vec4_elt0(y) : vec4_elt0(z), \
+ k4signbit(vec4_elt1(x)) ? vec4_elt1(y) : vec4_elt1(z), \
+ k4signbit(vec4_elt2(x)) ? vec4_elt2(y) : vec4_elt2(z), \
+ k4signbit(vec4_elt3(x)) ? vec4_elt3(y) : vec4_elt3(z)); \
+ })
+#elif 0
+// We don't need to shift -- the condition (mask) will be either all
+// zeros or all ones
+# define k4ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const y__=(y_); \
+ CCTK_REAL4_VEC const z__=(z_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4_VEC const y=y__; \
+ CCTK_REAL4_VEC const z=z__; \
+ CCTK_REAL4_VEC const mask = \
+ (__m128)_mm_srai_epi32((__m128i)x, 31); \
+ /* (z & ~mask) | (y & mask) */ \
+ _mm_or_ps(_mm_andnot_ps(mask, z), _mm_and_ps(mask, y)); \
+ })
+#else
+# define k4ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL4_VEC const x__=(x_); \
+ CCTK_REAL4_VEC const y__=(y_); \
+ CCTK_REAL4_VEC const z__=(z_); \
+ CCTK_REAL4_VEC const x=x__; \
+ CCTK_REAL4_VEC const y=y__; \
+ CCTK_REAL4_VEC const z=z__; \
+    /* (z & ~x) | (y & x); x is used directly as the mask */           \
+ _mm_or_ps(_mm_and_ps(x, y), _mm_andnot_ps(x, z)); \
+ })
+#endif
+
+#define k4cmpeq(x,y) (_mm_cmpeq_ps(x,y))
+#define k4cmpne(x,y) (_mm_cmpneq_ps(x,y))
+#define k4cmpgt(x,y) (_mm_cmpgt_ps(x,y))
+#define k4cmpge(x,y) (_mm_cmpge_ps(x,y))
+#define k4cmplt(x,y) (_mm_cmplt_ps(x,y))
+#define k4cmple(x,y) (_mm_cmple_ps(x,y))
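To show how the partial-store macros above fit together, here is an editorial usage sketch (not part of the commit): a loop over one row that touches only the index range [imin,imax). The array names, helper function, and loop structure are illustrative assumptions; the Cactus headers (for CCTK_REAL4, CCTK_BUILTIN_EXPECT, ptrdiff_t) are taken as given, and a and b are assumed 16-byte aligned.

// Sketch only: vec4_store_partial_prepare computes how many leading and
// trailing elements to skip, and vec4_store_nta_partial then stores only
// the elements that fall inside [imin,imax).
static inline void scale_row(CCTK_REAL4* a, CCTK_REAL4 const* b,
                             ptrdiff_t imin, ptrdiff_t imax)
{
  for (ptrdiff_t i = imin & ~(ptrdiff_t)(CCTK_REAL4_VEC_SIZE-1);
       i < imax; i += CCTK_REAL4_VEC_SIZE) {
    vec4_store_partial_prepare(i, imin, imax);
    CCTK_REAL4_VEC const x = vec4_load(b[i]);           // aligned load
    CCTK_REAL4_VEC const y = k4mul(x, vec4_set1(2.0f));
    vec4_store_nta_partial(a[i], y);                    // masked at the edges
  }
}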
diff --git a/src/macros/vectors-4-default.h b/src/macros/vectors-4-default.h
new file mode 100644
index 0000000..0cd49ac
--- /dev/null
+++ b/src/macros/vectors-4-default.h
@@ -0,0 +1,134 @@
+// Fallback vectorisation implementation: Do not vectorise
+
+// We use macros here, so that we are not surprised by compilers which
+// don't like to inline functions. This should also make debug builds
+// (which may not inline) more efficient.
+
+
+
+#include <assert.h>
+#include <math.h>
+
+
+
+#define vec4_architecture "scalar (no vectorisation, 32-bit precision)"
+
+// Use CCTK_REAL4
+#define CCTK_REAL4_VEC CCTK_REAL4
+
+// Number of vector elements in a vector
+#define CCTK_REAL4_VEC_SIZE 1
+
+// Integer and boolean types corresponding to this real type
+#define CCTK_INTEGER4 CCTK_REAL4
+#define CCTK_BOOLEAN4 CCTK_REAL4
+#define CCTK_INTEGER4_VEC CCTK_REAL4_VEC
+#define CCTK_BOOLEAN4_VEC CCTK_REAL4_VEC
+
+
+
+// Create a vector replicating a scalar
+#define vec4_set1(a) (a)
+// Create a vector from N scalars
+#define vec4_set(a) (a)
+
+// Access vector elements
+#define vec4_elt0(x) (x)
+#define vec4_elt(x,d) (x)
+
+
+
+// Load an aligned vector from memory
+#define vec4_load(p) (p)
+// Load an unaligned vector from memory
+#define vec4_loadu(p) (p)
+
+// Load a vector from memory that may or may not be aligned, as
+// decided by the offset and the vector size. These functions are
+// useful e.g. for loading neighbouring grid points while evaluating
+// finite differencing stencils.
+#define vec4_loadu_maybe(off,p) (p)
+#define vec4_loadu_maybe3(off1,off2,off3,p) (p)
+
+// Aligned store
+#define vec4_store(p,x)  ((p)=(x))
+// Unaligned store
+#define vec4_storeu(p,x) ((p)=(x))
+
+// Non-temporal store (the streaming hint is ignored in the fallback)
+#define vec4_store_nta(p,x) ((p)=(x))
+
+#define vec4_store_partial_prepare(i,imin,imax) ((void)0)
+#define vec4_store_nta_partial(p,x) (vec4_store_nta(p,x))
+// Store the n lower elements of a vector to memory
+#define vec4_store_nta_partial_lo(p,x,n) (assert(0))
+// Store the n higher elements of a vector into memory. This stores
+// the vector elements into memory locations as if element 0 were
+// stored at p.
+#define vec4_store_nta_partial_hi(p,x,n) (assert(0))
+#define vec4_store_nta_partial_mid(p,x,nlo,nhi) (assert(0))
+
+
+
+// Operators
+#define k4neg(x) (-(x))
+
+#define k4add(x,y) ((x)+(y))
+#define k4sub(x,y) ((x)-(y))
+#define k4mul(x,y) ((x)*(y))
+#define k4div(x,y) ((x)/(y))
+
+// Fused multiply-add, defined as [+-]x*y[+-]z
+#define k4madd(x,y,z) (+(x)*(y)+(z))
+#define k4msub(x,y,z) (+(x)*(y)-(z))
+#define k4nmadd(x,y,z) (-(x)*(y)-(z))
+#define k4nmsub(x,y,z) (-(x)*(y)+(z))
+
+// Functions
+#define k4acos(x) (acosf(x))
+#define k4acosh(x) (acoshf(x))
+#define k4asin(x) (asinf(x))
+#define k4asinh(x) (asinhf(x))
+#define k4atan(x) (atanf(x))
+#define k4atan2(x,y) (atan2f(x,y))
+#define k4atanh(x) (atanhf(x))
+#define k4copysign(x,y) (copysignf(x,y))
+#define k4cos(x) (cosf(x))
+#define k4cosh(x) (coshf(x))
+#define k4exp(x) (expf(x))
+#define k4fabs(x) (fabsf(x))
+#define k4fmax(x,y) (fmaxf(x,y))
+#define k4fmin(x,y) (fminf(x,y))
+#define k4fnabs(x) (-fabsf(x))
+#define k4log(x) (logf(x))
+#define k4pow(x,a) (powf(x,a))
+#define k4sin(x) (sinf(x))
+#define k4sinh(x) (sinhf(x))
+#define k4sqrt(x) (sqrtf(x))
+#define k4tan(x) (tanf(x))
+#define k4tanh(x) (tanhf(x))
+
+#define k4sgn(x_) \
+ ({ \
+ CCTK_REAL x__=(x_); \
+ CCTK_REAL x=x__; \
+ x==(CCTK_REAL)0.0 ? (CCTK_REAL)0.0 : std::copysign((CCTK_REAL)1.0, x); \
+ })
+#define k4signbit(x) (std::signbit(x))
+
+#define k4l2r(x_) ({ CCTK_INT4 x__=(x_); CCTK_INT4 x=x__; *(CCTK_REAL4*)&x; })
+#define k4r2l(x_) ({ CCTK_REAL4 x__=(x_); CCTK_REAL4 x=x__; *(CCTK_INT4*)&x; })
+#define k4lfalse k4l2r(0)
+#define k4ltrue k4l2r(1)
+#define k4lnot(x) k4l2r(!k4r2l(x))
+#define k4land(x,y) k4l2r(k4r2l(x) && k4r2l(y))
+#define k4lor(x,y) k4l2r(k4r2l(x) || k4r2l(y))
+#define k4lxor(x,y) k4l2r(!k4r2l(x) != !k4r2l(y))
+
+#define k4ifthen(x,y,z) (k4r2l(x)?(y):(z))
+
+#define k4cmpeq(x,y) k4l2r((x)==(y))
+#define k4cmpne(x,y) k4l2r((x)!=(y))
+#define k4cmpgt(x,y) k4l2r((x)>(y))
+#define k4cmpge(x,y) k4l2r((x)>=(y))
+#define k4cmplt(x,y) k4l2r((x)<(y))
+#define k4cmple(x,y) k4l2r((x)<=(y))
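A brief editorial note on why the bit-punning above (k4l2r/k4r2l) matters: kernel code written against the generic macros runs unchanged whether CCTK_BOOLEAN4 is an SSE comparison mask or this scalar encoding. The helper below is an illustrative sketch only, not part of the commit.

// Sketch only: clamp x from below.  With vectors-4-SSE.h the comparison
// yields an all-ones/all-zeros lane mask; with this fallback it yields a
// CCTK_REAL4 whose bit pattern is 1 or 0, which k4ifthen decodes via k4r2l.
static inline CCTK_REAL4_VEC clamp_lo(CCTK_REAL4_VEC x, CCTK_REAL4_VEC lo)
{
  return k4ifthen(k4cmplt(x, lo), lo, x);
}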
diff --git a/src/macros/vectors-8-AVX.h b/src/macros/vectors-8-AVX.h
new file mode 100644
index 0000000..6882523
--- /dev/null
+++ b/src/macros/vectors-8-AVX.h
@@ -0,0 +1,325 @@
+// Vectorise using Intel's or AMD's AVX
+
+// Use the type __m256d directly, without introducing a wrapper class
+// Use macros instead of inline functions
+
+
+
+#if VECTORISE_EMULATE_AVX
+# include "avxintrin_emu.h"
+#else
+# include <immintrin.h>
+#endif
+#ifdef __FMA4__
+# include <fma4intrin.h>
+#endif
+
+
+
+#ifdef __FMA4__
+# define vec8_architecture_FMA4 "+FMA4"
+#else
+# define vec8_architecture_FMA4 ""
+#endif
+#define vec8_architecture "AVX" vec8_architecture_FMA4 " (64-bit precision)"
+
+
+
+// Vector type corresponding to CCTK_REAL
+#define CCTK_REAL8_VEC __m256d
+
+// Number of vector elements in a CCTK_REAL_VEC
+#define CCTK_REAL8_VEC_SIZE 4
+
+// Integer and boolean types corresponding to this real type
+#define CCTK_INTEGER8 CCTK_REAL8
+#define CCTK_BOOLEAN8 CCTK_REAL8
+#define CCTK_INTEGER8_VEC CCTK_REAL8_VEC
+#define CCTK_BOOLEAN8_VEC CCTK_REAL8_VEC
+
+
+
+union k8const_t {
+ unsigned long long i[4];
+ double f[4];
+ __m256i vi;
+ __m256d vf;
+};
+
+#define K8_ZERO 0x0000000000000000ULL
+#define K8_NOTZERO 0xffffffffffffffffULL
+#define K8_IMIN 0x8000000000000000ULL
+#define K8_IMAX 0x7fffffffffffffffULL
+
+
+
+// Create vectors, extract vector elements
+
+#define vec8_set1(a) (_mm256_set1_pd(a))
+#define vec8_set(a,b,c,d) (_mm256_set_pd(d,c,b,a)) // note reversed arguments
+
+#define vec8_elt0(x) (((CCTK_REAL8 const*)&(x))[0])
+#define vec8_elt1(x) (((CCTK_REAL8 const*)&(x))[1])
+#define vec8_elt2(x) (((CCTK_REAL8 const*)&(x))[2])
+#define vec8_elt3(x) (((CCTK_REAL8 const*)&(x))[3])
+#define vec8_elt(x,d) (((CCTK_REAL8 const*)&(x))[d])
+
+
+
+// Load and store vectors
+
+// Load a vector from memory (aligned and unaligned); this loads from
+// a reference to a scalar
+#define vec8_load(p) (_mm256_load_pd(&(p)))
+#define vec8_loadu(p) (_mm256_loadu_pd(&(p)))
+#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
+# define vec8_load_off1(p) vec_loadu(p)
+#else
+# error "VECTORISE_ALWAYS_USE_ALIGNED_LOADS not yet supported"
+#endif
+
+// Load a vector from memory that may or may not be aligned, as
+// decided by the offset off and the vector size
+#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
+// Implementation: Always use unaligned load
+# define vec8_loadu_maybe(off,p) (vec8_loadu(p))
+# define vec8_loadu_maybe3(off1,off2,off3,p) (vec8_loadu(p))
+#else
+# define vec8_loadu_maybe(off,p_) \
+ ({ \
+ CCTK_REAL8 const& p__=(p_); \
+ CCTK_REAL8 const& p=p__; \
+ (off) % CCTK_REAL8_VEC_SIZE == 0 ? \
+ vec8_load(p) : \
+ vec8_load_off1(p); \
+ })
+# if VECTORISE_ALIGNED_ARRAYS
+// Assume all array x sizes are multiples of the vector size
+# define vec8_loadu_maybe3(off1,off2,off3,p) \
+ vec8_loadu_maybe(off1,p)
+# else
+# define vec8_loadu_maybe3(off1,off2,off3,p_) \
+ ({ \
+ CCTK_REAL8 const& p__=(p_); \
+ CCTK_REAL8 const& p=p__; \
+ ((off2) % CCTK_REAL8_VEC_SIZE != 0 or \
+ (off3) % CCTK_REAL8_VEC_SIZE != 0) ? \
+ vec8_loadu(p) : \
+ vec8_loadu_maybe(off1,p); \
+ })
+# endif
+#endif
+
+// Store a vector to memory (aligned and non-temporal); this stores to
+// a reference to a scalar
+#define vec8_store(p,x) (_mm256_store_pd(&(p),x))
+#define vec8_storeu(p,x) (_mm256_storeu_pd(&(p),x))
+#if ! VECTORISE_STREAMING_STORES
+# define vec8_store_nta(p,x) (vec8_store(p,x))
+#else
+# define vec8_store_nta(p,x) (_mm256_stream_pd(&(p),x))
+#endif
+
+// Store a partial vector (aligned and non-temporal)
+#define vec8_store_partial_prepare(i,imin_,imax_) \
+ bool v8stp_all; \
+ __m256i v8stp_mask; \
+ ({ \
+ ptrdiff_t const imin__=(imin_); \
+ ptrdiff_t const imin=imin__; \
+ ptrdiff_t const imax__=(imax_); \
+ ptrdiff_t const imax=imax__; \
+ \
+    v8stp_all = i>=imin and i+CCTK_REAL8_VEC_SIZE-1<imax;              \
+ \
+ if (not CCTK_BUILTIN_EXPECT(v8stp_all, true)) { \
+ /* \
+ __m256i const v8stp_mask = \
+ _mm256_andnot_pd(_mm256_add_epi64(_mm256_set1_epi64x(i-imin), \
+ vec_index), \
+ _mm256_add_epi64(_mm256_set1_epi64x(i-imax), \
+ vec_index)); \
+ */ \
+ __m128i const termlo0 = \
+ _mm_add_epi64(_mm_set1_epi64x(i-imin), _mm_set_epi64x(1, 0)); \
+ __m128i const termup0 = \
+ _mm_add_epi64(_mm_set1_epi64x(i-imax), _mm_set_epi64x(1, 0)); \
+ __m128i const term0 = _mm_andnot_si128(termlo0, termup0); \
+ __m128i const termlo1 = \
+ _mm_add_epi64(_mm_set1_epi64x(i-imin), _mm_set_epi64x(3, 2)); \
+ __m128i const termup1 = \
+ _mm_add_epi64(_mm_set1_epi64x(i-imax), _mm_set_epi64x(3, 2)); \
+ __m128i const term1 = _mm_andnot_si128(termlo1, termup1); \
+ v8stp_mask = \
+ _mm256_insertf128_si256(_mm256_castsi128_si256(term0), term1, 1); \
+ } \
+ })
+
+#define vec8_store_nta_partial(p,x) \
+ ({ \
+ if (CCTK_BUILTIN_EXPECT(v8stp_all, true)) { \
+ vec8_store_nta(p,x); \
+ } else { \
+ _mm256_maskstore_pd(&p,v8stp_mask,x); \
+ } \
+ })
+
+// Store a lower or higher partial vector (aligned and non-temporal);
+// the non-temporal hint is probably ignored
+// Masks indicating which vector elements should be stored:
+static const k8const_t k8store_lo[5] =
+ {
+ {{ K8_ZERO , K8_ZERO , K8_ZERO , K8_ZERO , }},
+ {{ K8_NOTZERO, K8_ZERO , K8_ZERO , K8_ZERO , }},
+ {{ K8_NOTZERO, K8_NOTZERO, K8_ZERO , K8_ZERO , }},
+ {{ K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, K8_ZERO , }},
+ {{ K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, }},
+ };
+static const k8const_t k8store_hi[5] =
+ {
+ {{ K8_ZERO , K8_ZERO , K8_ZERO , K8_ZERO , }},
+ {{ K8_ZERO , K8_ZERO , K8_ZERO , K8_NOTZERO, }},
+ {{ K8_ZERO , K8_ZERO , K8_NOTZERO, K8_NOTZERO, }},
+ {{ K8_ZERO , K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, }},
+ {{ K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, }},
+ };
+#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && __GNUC__==4 && __GNUC_MINOR__<=4
+// gcc 4.4 uses a wrong prototype for _mm256_maskstore_pd
+# define vec8_store_nta_partial_lo(p,x,n) \
+ (_mm256_maskstore_pd(&(p),_mm256_castsi256_pd(k8store_lo[n].vi),x))
+# define vec8_store_nta_partial_hi(p,x,n) \
+ (_mm256_maskstore_pd(&(p),_mm256_castsi256_pd(k8store_hi[n].vi),x))
+# define vec8_store_nta_partial_mid(p,x,nlo,nhi) \
+ (_mm256_maskstore_pd \
+ (&(p), \
+ _mm256_castsi256_pd(k8store_lo[nlo].vi & k8store_hi[nhi].vi), \
+ x))
+#else
+# define vec8_store_nta_partial_lo(p,x,n) \
+ (_mm256_maskstore_pd(&(p),k8store_lo[n].vi,x))
+# define vec8_store_nta_partial_hi(p,x,n) \
+ (_mm256_maskstore_pd(&(p),k8store_hi[n].vi,x))
+# define vec8_store_nta_partial_mid(p,x,nlo,nhi) \
+ (_mm256_maskstore_pd \
+ (&(p), \
+ _mm256_castpd_si256(_mm256_and_pd(k8store_lo[nlo].vf, \
+ k8store_hi[nhi].vf)), \
+ x))
+#endif
+
+
+
+// Functions and operators
+
+static const k8const_t k8sign_mask = {{ K8_IMIN, K8_IMIN, K8_IMIN, K8_IMIN, }};
+
+// Operators
+#define k8neg(x) (_mm256_xor_pd(x,k8sign_mask.vf))
+
+#define k8add(x,y) (_mm256_add_pd(x,y))
+#define k8sub(x,y) (_mm256_sub_pd(x,y))
+#define k8mul(x,y) (_mm256_mul_pd(x,y))
+#define k8div(x,y) (_mm256_div_pd(x,y))
+
+// Fused multiply-add, defined as [+-]x*y[+-]z
+#ifdef __FMA4__
+# define k8madd(x,y,z) (_mm256_macc_pd(x,y,z))
+# define k8msub(x,y,z) (_mm256_msub_pd(x,y,z))
+# define k8nmadd(x,y,z) (_mm256_nmsub_pd(x,y,z))
+# define k8nmsub(x,y,z) (_mm256_nmacc_pd(x,y,z))
+#else
+# define k8madd(x,y,z) (k8add(k8mul(x,y),z))
+# define k8msub(x,y,z) (k8sub(k8mul(x,y),z))
+# define k8nmadd(x,y,z) (k8sub(k8neg(z),k8mul(x,y)))
+# define k8nmsub(x,y,z) (k8sub(z,k8mul(x,y)))
+#endif
+
+// Cheap functions
+#define k8copysign(x,y) \
+ (_mm256_or_pd(_mm256_andnot_pd(k8sign_mask.vf,x), \
+ _mm256_and_pd(k8sign_mask.vf,y)))
+#define k8fabs(x) (_mm256_andnot_pd(k8sign_mask.vf,x))
+#define k8fmax(x,y) (_mm256_max_pd(x,y))
+#define k8fmin(x,y) (_mm256_min_pd(x,y))
+#define k8fnabs(x) (_mm256_or_pd(x,k8sign_mask.vf))
+static const k8const_t k8zero = { f: { 0.0, 0.0, 0.0, 0.0, }};
+static const k8const_t k8one = { f: { 1.0, 1.0, 1.0, 1.0, }};
+#define k8sgn(x_) \
+ ({ \
+    CCTK_REAL8_VEC x__=(x_);                                            \
+    CCTK_REAL8_VEC x=x__;                                               \
+    CCTK_REAL8_VEC iszero = _mm256_cmp_pd(x, k8zero.vf, _CMP_EQ_OQ);    \
+    CCTK_REAL8_VEC sign = _mm256_and_pd(k8sign_mask.vf, x);             \
+    CCTK_REAL8_VEC signedone = _mm256_or_pd(sign, k8one.vf);            \
+ k8ifthen(iszero, k8zero.vf, signedone); \
+ })
+#define k8sqrt(x) (_mm256_sqrt_pd(x))
+
+// Expensive functions
+#define K8REPL(f,x_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const x=x__; \
+ vec8_set(f(vec8_elt0(x)), \
+ f(vec8_elt1(x)), \
+ f(vec8_elt2(x)), \
+ f(vec8_elt3(x))); \
+ })
+#define K8REPL2S(f,x_,a_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8 const a__=(a_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8 const a=a__; \
+ vec8_set(f(vec8_elt0(x),a), \
+ f(vec8_elt1(x),a), \
+ f(vec8_elt2(x),a), \
+ f(vec8_elt3(x),a)); \
+ })
+#define K8REPL2(f,x_,y_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ vec8_set(f(vec8_elt0(x),vec8_elt0(y)), \
+ f(vec8_elt1(x),vec8_elt1(y)), \
+ f(vec8_elt2(x),vec8_elt2(y)), \
+ f(vec8_elt3(x),vec8_elt3(y))); \
+ })
+
+#define k8acos(x) K8REPL(acos,x)
+#define k8acosh(x) K8REPL(acosh,x)
+#define k8asin(x) K8REPL(asin,x)
+#define k8asinh(x) K8REPL(asinh,x)
+#define k8atan(x) K8REPL(atan,x)
+#define k8atan2(x,y) K8REPL2(atan2,x,y)
+#define k8atanh(x) K8REPL(atanh,x)
+#define k8cos(x) K8REPL(cos,x)
+#define k8cosh(x) K8REPL(cosh,x)
+#define k8exp(x) K8REPL(exp,x)
+#define k8log(x) K8REPL(log,x)
+#define k8pow(x,a) K8REPL2S(pow,x,a)
+#define k8sin(x) K8REPL(sin,x)
+#define k8sinh(x) K8REPL(sinh,x)
+#define k8tan(x) K8REPL(tan,x)
+#define k8tanh(x) K8REPL(tanh,x)
+
+static const k8const_t k8lfalse_ =
+ {{ K8_ZERO, K8_ZERO, K8_ZERO, K8_ZERO, }};
+static const k8const_t k8ltrue_ =
+ {{ K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, K8_NOTZERO, }};
+#define k8lfalse (k8lfalse_.vf)
+#define k8ltrue (k8ltrue_.vf)
+#define k8lnot(x) (_mm256_xor_pd(k8ltrue,x))
+#define k8land(x,y) (_mm256_and_pd(x,y))
+#define k8lor(x,y) (_mm256_or_pd(x,y))
+#define k8lxor(x,y) (_mm256_xor_pd(x,y))
+#define k8ifthen(x,y,z) (_mm256_blendv_pd(z,y,x))
+
+#define k8cmpeq(x,y) (_mm256_cmp_pd(x,y,_CMP_EQ_OQ))
+#define k8cmpne(x,y) (_mm256_cmp_pd(x,y,_CMP_NEQ_OQ))
+#define k8cmpgt(x,y) (_mm256_cmp_pd(x,y,_CMP_GT_OQ))
+#define k8cmpge(x,y) (_mm256_cmp_pd(x,y,_CMP_GE_OQ))
+#define k8cmplt(x,y) (_mm256_cmp_pd(x,y,_CMP_LT_OQ))
+#define k8cmple(x,y) (_mm256_cmp_pd(x,y,_CMP_LE_OQ))
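As with the SSE version, here is an editorial usage sketch of the AVX partial stores (not part of the commit; the array names and helper are illustrative): vec8_store_partial_prepare sets v8stp_all and, for edge iterations, the per-lane mask v8stp_mask that vec8_store_nta_partial hands to _mm256_maskstore_pd.

// Sketch only: copy one row, touching only indices in [imin,imax);
// a and b are assumed 32-byte aligned.
static inline void copy_row(CCTK_REAL8* a, CCTK_REAL8 const* b,
                            ptrdiff_t imin, ptrdiff_t imax)
{
  for (ptrdiff_t i = imin & ~(ptrdiff_t)(CCTK_REAL8_VEC_SIZE-1);
       i < imax; i += CCTK_REAL8_VEC_SIZE) {
    vec8_store_partial_prepare(i, imin, imax);   // sets v8stp_all, v8stp_mask
    vec8_store_nta_partial(a[i], vec8_load(b[i]));
  }
}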
diff --git a/src/macros/vectors-8-SSE2.h b/src/macros/vectors-8-SSE2.h
new file mode 100644
index 0000000..a8823ab
--- /dev/null
+++ b/src/macros/vectors-8-SSE2.h
@@ -0,0 +1,427 @@
+// Vectorise using Intel's or AMD's SSE2
+
+// Use the type __m128d directly, without introducing a wrapper class
+// Use macros instead of inline functions
+
+
+
+#include <assert.h>
+#include <math.h>
+
+#include <emmintrin.h>
+#ifdef __SSE4_1__
+// Intel's SSE 4.1
+# include <smmintrin.h>
+#endif
+#ifdef __SSE4A__
+// AMD's SSE 4a
+# include <ammintrin.h>
+
+// Intel compilers don't support SSE 4a. Here is how we can implement
+// these instructions in assembler instead:
+
+// inline void __attribute__((__always_inline__))
+// _mm_stream_sd (double *p, __m128d x)
+// {
+// asm ("movntsd %[x],%[p]" : "=m" (*p) : [p] "m" (*p), [x] "x" (x));
+// }
+
+#endif
+#ifdef __FMA4__
+# include <fma4intrin.h>
+#endif
+
+
+
+#ifdef __SSE4_1__
+# define vec8_architecture_SSE4_1 "+SSE4.1"
+#else
+# define vec8_architecture_SSE4_1 ""
+#endif
+#ifdef __SSE4A__
+# define vec8_architecture_SSE4a "+SSE4A"
+#else
+# define vec8_architecture_SSE4a ""
+#endif
+#ifdef __FMA4__
+# define vec8_architecture_FMA4 "+FMA4"
+#else
+# define vec8_architecture_FMA4 ""
+#endif
+#define vec8_architecture "SSE2" vec8_architecture_SSE4_1 vec8_architecture_SSE4a vec8_architecture_FMA4 " (64-bit precision)"
+
+
+
+// Vector type corresponding to CCTK_REAL
+#define CCTK_REAL8_VEC __m128d
+
+// Number of vector elements in a CCTK_REAL_VEC
+#define CCTK_REAL8_VEC_SIZE 2
+
+// Integer and boolean types corresponding to this real type
+#define CCTK_INTEGER8 CCTK_REAL8
+#define CCTK_BOOLEAN8 CCTK_REAL8
+#define CCTK_INTEGER8_VEC CCTK_REAL8_VEC
+#define CCTK_BOOLEAN8_VEC CCTK_REAL8_VEC
+
+
+
+union k8const_t {
+ long long i[2];
+ double f[2];
+ __m128i vi;
+ __m128d vf;
+};
+
+#define K8_IMIN ((long long)0x8000000000000000ULL)
+
+
+
+// Create vectors, extract vector elements
+
+#define vec8_set1(a) (_mm_set1_pd(a))
+#define vec8_set(a,b) (_mm_set_pd(b,a)) // note reversed arguments
+
+// original order is 01
+#define vec8_swap10(x_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const x=x__; \
+ _mm_shuffle_pd(x,x, _MM_SHUFFLE2(0,1)); \
+ })
+
+#define vec8_elt0(x) (((CCTK_REAL8 const*)&(x))[0])
+#define vec8_elt1(x) (((CCTK_REAL8 const*)&(x))[1])
+#define vec8_elt(x,d) (((CCTK_REAL8 const*)&(x))[d])
+
+
+
+// Load and store vectors
+
+// Load a vector from memory (aligned and unaligned); this loads from
+// a reference to a scalar
+#define vec8_load(p) (_mm_load_pd(&(p)))
+#define vec8_loadu(p) (_mm_loadu_pd(&(p)))
+#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
+# define vec8_load_off1(p) vec_loadu(p)
+#else
+# define vec8_load_off1(p_) \
+ ({ \
+ CCTK_REAL8 const& p__=(p_); \
+ CCTK_REAL8 const& p=p__; \
+ _mm_shuffle_pd(vec8_load((&p)[-1]), \
+ vec8_load((&p)[+1]), _MM_SHUFFLE2(0,1)); \
+ })
+#endif
+
+// Load a vector from memory that may or may not be aligned, as
+// decided by the offset off and the vector size
+#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
+// Implementation: Always use unaligned load
+# define vec8_loadu_maybe(off,p) vec8_loadu(p)
+# define vec8_loadu_maybe3(off1,off2,off3,p) vec8_loadu(p)
+#else
+# define vec8_loadu_maybe(off,p_) \
+ ({ \
+ CCTK_REAL8 const& p__=(p_); \
+ CCTK_REAL8 const& p=p__; \
+ (off) % CCTK_REAL8_VEC_SIZE == 0 ? \
+ vec8_load(p) : \
+ vec8_load_off1(p); \
+ })
+# if VECTORISE_ALIGNED_ARRAYS
+// Assume all array x sizes are multiples of the vector size
+# define vec8_loadu_maybe3(off1,off2,off3,p) \
+ vec8_loadu_maybe(off1,p)
+# else
+# define vec8_loadu_maybe3(off1,off2,off3,p_) \
+ ({ \
+ CCTK_REAL8 const& p__=(p_); \
+ CCTK_REAL8 const& p=p__; \
+ ((off2) % CCTK_REAL8_VEC_SIZE != 0 or \
+ (off3) % CCTK_REAL8_VEC_SIZE != 0) ? \
+ vec8_loadu(p) : \
+ vec8_loadu_maybe(off1,p); \
+ })
+# endif
+#endif
+
+// Store a vector to memory (aligned and non-temporal); this stores to
+// a reference to a scalar
+#define vec8_store(p,x) (_mm_store_pd(&(p),x))
+#define vec8_storeu(p,x) (_mm_storeu_pd(&(p),x))
+#if ! VECTORISE_STREAMING_STORES
+# define vec8_store_nta(p,x) vec8_store(p,x)
+#else
+# define vec8_store_nta(p,x) (_mm_stream_pd(&(p),x))
+#endif
+
+// Store a partial vector (aligned and non-temporal)
+#define vec8_store_partial_prepare(i,imin,imax) \
+ bool const v8stp_lo = (i)>=(imin); \
+  bool const v8stp_hi = (i)+CCTK_REAL8_VEC_SIZE-1<(imax)
+#if VECTORISE_STREAMING_STORES && defined(__SSE4A__)
+# define vec8_store_nta_partial(p_,x_) \
+ ({ \
+ CCTK_REAL8& p__=(p_); \
+ CCTK_REAL8& p=p__; \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const x=x__; \
+ if (CCTK_BUILTIN_EXPECT(v8stp_lo and v8stp_hi, true)) { \
+ vec8_store_nta(p,x); \
+ } else if (v8stp_lo) { \
+ _mm_stream_sd(&p,x); \
+ } else if (v8stp_hi) { \
+ _mm_stream_sd(&p+1, vec8_swap10(x)); \
+ } \
+ })
+#else
+# define vec8_store_nta_partial(p_,x_) \
+ ({ \
+ CCTK_REAL8& p__=(p_); \
+ CCTK_REAL8& p=p__; \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const x=x__; \
+ if (CCTK_BUILTIN_EXPECT(v8stp_lo and v8stp_hi, true)) { \
+ vec8_store_nta(p,x); \
+ } else if (v8stp_lo) { \
+ _mm_storel_pd(&p,x); \
+ } else if (v8stp_hi) { \
+ _mm_storeh_pd(&p+1,x); \
+ } \
+ })
+#endif
+
+// Store a lower or higher partial vector (aligned and non-temporal)
+#if ! VECTORISE_STREAMING_STORES
+# define vec8_store_nta_partial_lo(p,x,n) (_mm_storel_pd(&(p),x))
+# define vec8_store_nta_partial_hi(p,x,n) (_mm_storeh_pd(&(p)+1,x))
+#else
+# if defined(__SSE4A__)
+# define vec8_store_nta_partial_lo(p,x,n) (_mm_stream_sd(&(p),x))
+# define vec8_store_nta_partial_hi(p,x,n) \
+ (_mm_stream_sd(&(p)+1, vec8_swap10(x)))
+# else
+// TODO: use clflush once a whole cache line has been written (cache
+// lines are usually larger than the CPU vector size)
+# define vec8_store_nta_partial_lo(p_,x,n) \
+ ({ \
+ CCTK_REAL8& p__=(p_); \
+ CCTK_REAL8& p=p__; \
+ _mm_storel_pd(&p,x); \
+ /* _mm_clflush(&p); */ \
+ })
+# define vec8_store_nta_partial_hi(p_,x,n) \
+ ({ \
+ CCTK_REAL8& p__=(p_); \
+ CCTK_REAL8& p=p__; \
+ _mm_storeh_pd(&p+1,x); \
+ /* _mm_clflush(&p+1); */ \
+ })
+# endif
+#endif
+#if 0
+// This is slower; we would need a non-temporal read
+#define vec8_store_nta_partial_lo(p,x,n) \
+ vec8_store_nta(p, _mm_loadh_pd(x,&(p)+1))
+#define vec8_store_nta_partial_hi(p,x,n) \
+ vec8_store_nta(p, _mm_loadl_pd(x,&(p)))
+#endif
+#define vec8_store_nta_partial_mid(p,x,nlo,nhi) assert(0)
+
+
+
+// Functions and operators
+
+static const k8const_t k8sign_mask = {{ K8_IMIN, K8_IMIN, }};
+
+// Operators
+
+// #define k8inot(x) (_mm_xor_si128(k8all_mask,x))
+//
+// #define k8iand(x,y) (_mm_and_si128(x,y))
+// #define k8ior(x,y) (_mm_or_si128(x,y))
+// #define k8ixor(x,y) (_mm_xor_si128(x,y))
+//
+// #define k8ineg(x) (_mm_xor_pd(k8sign_mask,x))
+//
+// #define k8iadd(x,y) (_mm_add_epi64(x,y))
+// #define k8isub(x,y) (_mm_sub_epi64(x,y))
+//
+// #define k8not(x) (_mm_xor_pd(k8all_mask,x))
+//
+// #define k8and(x,y) (_mm_and_pd(x,y))
+// #define k8or(x,y) (_mm_or_pd(x,y))
+// #define k8xor(x,y) (_mm_xor_pd(x,y))
+
+#define k8neg(x) (_mm_xor_pd(k8sign_mask.vf,x))
+
+#define k8add(x,y) (_mm_add_pd(x,y))
+#define k8sub(x,y) (_mm_sub_pd(x,y))
+#define k8mul(x,y) (_mm_mul_pd(x,y))
+#define k8div(x,y) (_mm_div_pd(x,y))
+
+// Fused multiply-add, defined as [+-]x*y[+-]z
+#ifdef __FMA4__
+# define k8madd(x,y,z) (_mm_macc_pd(x,y,z))
+# define k8msub(x,y,z) (_mm_msub_pd(x,y,z))
+# define k8nmadd(x,y,z) (_mm_nmsub_pd(x,y,z))
+# define k8nmsub(x,y,z) (_mm_nmacc_pd(x,y,z))
+#else
+# define k8madd(x,y,z) (k8add(k8mul(x,y),z))
+# define k8msub(x,y,z) (k8sub(k8mul(x,y),z))
+# define k8nmadd(x,y,z) (k8sub(k8neg(z),k8mul(x,y)))
+# define k8nmsub(x,y,z) (k8sub(z,k8mul(x,y)))
+#endif
+
+// Cheap functions
+#define k8copysign(x,y) \
+ (_mm_or_pd(_mm_andnot_pd(k8sign_mask.vf,x), \
+ _mm_and_pd(k8sign_mask.vf,y)))
+#define k8fabs(x) (_mm_andnot_pd(k8sign_mask.vf,x))
+#define k8fmax(x,y) (_mm_max_pd(x,y))
+#define k8fmin(x,y) (_mm_min_pd(x,y))
+#define k8fnabs(x) (_mm_or_pd(k8sign_mask.vf,x))
+#define k8sgn(x_) \
+ ({ \
+    CCTK_REAL8_VEC const x__=(x_);                                      \
+    CCTK_REAL8_VEC const x=x__;                                         \
+    CCTK_REAL8_VEC const iszero = _mm_cmpeq_pd(vec8_set1(0.0), x);      \
+    CCTK_REAL8_VEC const sign = _mm_and_pd(k8sign_mask.vf, x);          \
+    CCTK_REAL8_VEC const signedone = _mm_or_pd(vec8_set1(1.0), sign);   \
+ k8ifthen(iszero, vec8_set1(0.0), signedone); \
+ })
+#define k8sqrt(x) (_mm_sqrt_pd(x))
+
+// Expensive functions
+#define K8REPL(f,x_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const x=x__; \
+ vec8_set(f(vec8_elt0(x)), \
+ f(vec8_elt1(x))); \
+ })
+#define K8REPL2S(f,x_,a_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8 const a__=(a_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8 const a=a__; \
+ vec8_set(f(vec8_elt0(x),a), \
+ f(vec8_elt1(x),a)); \
+ })
+#define K8REPL2(f,x_,y_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ vec8_set(f(vec8_elt0(x),vec8_elt0(y)), \
+ f(vec8_elt1(x),vec8_elt1(y))); \
+ })
+
+#define k8acos(x) K8REPL(acos,x)
+#define k8acosh(x) K8REPL(acosh,x)
+#define k8asin(x) K8REPL(asin,x)
+#define k8asinh(x) K8REPL(asinh,x)
+#define k8atan(x) K8REPL(atan,x)
+#define k8atan2(x,y) K8REPL2(atan2,x,y)
+#define k8atanh(x) K8REPL(atanh,x)
+#define k8cos(x) K8REPL(cos,x)
+#define k8cosh(x) K8REPL(cosh,x)
+#define k8exp(x) K8REPL(exp,x)
+#define k8log(x) K8REPL(log,x)
+#define k8pow(x,a) K8REPL2S(pow,x,a)
+#define k8sin(x) K8REPL(sin,x)
+#define k8sinh(x) K8REPL(sinh,x)
+#define k8tan(x) K8REPL(tan,x)
+#define k8tanh(x) K8REPL(tanh,x)
+
+static const k8const_t k8lfalse_ = {{ +0LL, +0LL, }};
+static const k8const_t k8ltrue_ = {{ -1LL, -1LL, }};
+#define k8lfalse (k8lfalse_.vf)
+#define k8ltrue (k8ltrue_.vf)
+#define k8lnot(x) (_mm_xor_pd(k8ltrue,x))
+#define k8land(x,y) (_mm_and_pd(x,y))
+#define k8lor(x,y) (_mm_or_pd(x,y))
+#define k8lxor(x,y) (_mm_xor_pd(x,y))
+
+#ifdef __SSE4_1__
+# define k8ifthen(x,y,z) (_mm_blendv_pd(z,y,x))
+#elif 0
+// This is slow (but it is what the Intel and PGI compilers generate on their own)
+# define k8ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const z__=(z_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ CCTK_REAL8_VEC const z=z__; \
+ int const m = _mm_movemask_pd(x); \
+ CCTK_REAL8_VEC r; \
+ switch (m) { \
+ case 0: r = y; break; \
+ case 1: r = _mm_move_sd(y,z); break; \
+ case 2: r = _mm_move_sd(z,y); break; \
+ case 3: r = z; break; \
+ } \
+ r; \
+ })
+#elif 0
+# ifdef __cplusplus
+# define k8signbit(x) ({ using namespace std; signbit(x); })
+# else
+# define k8signbit(x) (signbit(x))
+# endif
+# define k8ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const z__=(z_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ CCTK_REAL8_VEC const z=z__; \
+ vec8_set(k8signbit(vec8_elt0(x)) ? vec8_elt0(y) : vec8_elt0(z), \
+ k8signbit(vec8_elt1(x)) ? vec8_elt1(y) : vec8_elt1(z)); \
+ })
+#elif 0
+// We don't need to shift -- the condition (mask) will be either all
+// zeros or all ones
+static const k8const_t k8ione = {{ 0x1ULL, 0x1ULL, }};
+# define k8ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const z__=(z_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ CCTK_REAL8_VEC const z=z__; \
+ /* there is no _mm_srai_epi64(x, 63); we therefore calculate srli(x)-1 */ \
+ __m128i const x_int = *(__m128i const*)&x; \
+ __m128i const imask_int = \
+ _mm_sub_epi64(_mm_srli_epi64(x_int, 63), k8ione.vi); \
+ CCTK_REAL8_VEC const imask = *(CCTK_REAL8_VEC const*)&imask_int; \
+ /* (z & ~mask) | (y & mask) where imask = ~mask */ \
+ _mm_or_pd(_mm_and_pd(imask, z), _mm_andnot_pd(imask, y)); \
+ })
+#else
+# define k8ifthen(x_,y_,z_) \
+ ({ \
+ CCTK_REAL8_VEC const x__=(x_); \
+ CCTK_REAL8_VEC const y__=(y_); \
+ CCTK_REAL8_VEC const z__=(z_); \
+ CCTK_REAL8_VEC const x=x__; \
+ CCTK_REAL8_VEC const y=y__; \
+ CCTK_REAL8_VEC const z=z__; \
+    /* (z & ~x) | (y & x); x is used directly as the mask */           \
+ _mm_or_pd(_mm_and_pd(x, y), _mm_andnot_pd(x, z)); \
+ })
+#endif
+
+#define k8cmpeq(x,y) (_mm_cmpeq_pd(x,y))
+#define k8cmpne(x,y) (_mm_cmpneq_pd(x,y))
+#define k8cmpgt(x,y) (_mm_cmpgt_pd(x,y))
+#define k8cmpge(x,y) (_mm_cmpge_pd(x,y))
+#define k8cmplt(x,y) (_mm_cmplt_pd(x,y))
+#define k8cmple(x,y) (_mm_cmple_pd(x,y))
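An editorial illustration of the mask convention the fallback k8ifthen above relies on (not part of the commit; the helper and its argument names are hypothetical): the k8cmp* macros return lanes that are either all zeros or all ones, so selection reduces to (x & y) | (~x & z).

// Sketch only: branch-free upwind-style selection.
static inline CCTK_REAL8_VEC select_upwind(CCTK_REAL8_VEC beta,
                                           CCTK_REAL8_VEC dplus,
                                           CCTK_REAL8_VEC dminus)
{
  // Where beta > 0 the lane mask is all ones and dplus is chosen;
  // elsewhere the mask is all zeros and dminus is chosen.
  return k8ifthen(k8cmpgt(beta, vec8_set1(0.0)), dplus, dminus);
}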
diff --git a/src/macros/vectors-8-default.h b/src/macros/vectors-8-default.h
new file mode 100644
index 0000000..7ff6c8c
--- /dev/null
+++ b/src/macros/vectors-8-default.h
@@ -0,0 +1,132 @@
+// Fallback vectorisation implementation: Do not vectorise
+
+// We use macros here, so that we are not surprised by compilers which
+// don't like to inline functions. This should also make debug builds
+// (which may not inline) more efficient.
+
+
+
+#include <assert.h>
+#include <math.h>
+
+
+
+#define vec8_architecture "scalar (no vectorisation, 64-bit precision)"
+
+// Use CCTK_REAL8
+#define CCTK_REAL8_VEC CCTK_REAL8
+
+// Number of vector elements in a vector
+#define CCTK_REAL8_VEC_SIZE 1
+
+// Integer and boolean types corresponding to this real type
+#define CCTK_INTEGER8 CCTK_REAL8
+#define CCTK_BOOLEAN8 CCTK_REAL8
+#define CCTK_INTEGER8_VEC CCTK_REAL8_VEC
+#define CCTK_BOOLEAN8_VEC CCTK_REAL8_VEC
+
+
+
+// Create a vector replicating a scalar
+#define vec8_set1(a) (a)
+// Create a vector from N scalars
+#define vec8_set(a) (a)
+
+// Access vector elements
+#define vec8_elt0(x) (x)
+#define vec8_elt(x,d) (x)
+
+
+
+// Load an aligned vector from memory
+#define vec8_load(p) (p)
+// Load an unaligned vector from memory
+#define vec8_loadu(p) (p)
+
+// Load a vector from memory that may or may not be aligned, as
+// decided by the offset and the vector size. These functions are
+// useful e.g. for loading neighbouring grid points while evaluating
+// finite differencing stencils.
+#define vec8_loadu_maybe(off,p) (p)
+#define vec8_loadu_maybe3(off1,off2,off3,p) (p)
+
+// Aligned store
+#define vec8_store(p,x) ((p)=(x))
+// Non-temporal store (the streaming hint is ignored in the fallback)
+#define vec8_store_nta(p,x) ((p)=(x))
+
+#define vec8_store_partial_prepare(i,imin,imax) ((void)0)
+#define vec8_store_nta_partial(p,x) (vec8_store_nta(p,x))
+// Store the n lower elements of a vector to memory
+#define vec8_store_nta_partial_lo(p,x,n) (assert(0))
+// Store the n higher elements of a vector into memory. This stores
+// the vector elements into memory locations as if element 0 were
+// stored at p.
+#define vec8_store_nta_partial_hi(p,x,n) (assert(0))
+#define vec8_store_nta_partial_mid(p,x,nlo,nhi) (assert(0))
+
+
+
+// Operators
+#define k8neg(x) (-(x))
+
+#define k8add(x,y) ((x)+(y))
+#define k8sub(x,y) ((x)-(y))
+#define k8mul(x,y) ((x)*(y))
+#define k8div(x,y) ((x)/(y))
+
+// Fused multiply-add, defined as [+-]x*y[+-]z
+#define k8madd(x,y,z) (+(x)*(y)+(z))
+#define k8msub(x,y,z) (+(x)*(y)-(z))
+#define k8nmadd(x,y,z) (-(x)*(y)-(z))
+#define k8nmsub(x,y,z) (-(x)*(y)+(z))
+
+// Functions
+#define k8acos(x) (acos(x))
+#define k8acosh(x) (acosh(x))
+#define k8asin(x) (asin(x))
+#define k8asinh(x) (asinh(x))
+#define k8atan(x) (atan(x))
+#define k8atan2(x,y) (atan2(x,y))
+#define k8atanh(x) (atanh(x))
+#define k8copysign(x,y) (copysign(x,y))
+#define k8cos(x) (cos(x))
+#define k8cosh(x) (cosh(x))
+#define k8exp(x) (exp(x))
+#define k8fabs(x) (fabs(x))
+#define k8fmax(x,y) (fmax(x,y))
+#define k8fmin(x,y) (fmin(x,y))
+#define k8fnabs(x) (-fabs(x))
+#define k8log(x) (log(x))
+#define k8pow(x,a) (pow(x,a))
+#define k8sin(x) (sin(x))
+#define k8sinh(x) (sinh(x))
+#define k8sqrt(x) (sqrt(x))
+#define k8tan(x) (tan(x))
+#define k8tanh(x) (tanh(x))
+
+#define k8sgn(x_) \
+ ({ \
+ CCTK_REAL x__=(x_); \
+ CCTK_REAL x=x__; \
+ x==(CCTK_REAL)0.0 ? (CCTK_REAL)0.0 : std::copysign((CCTK_REAL)1.0, x); \
+ })
+#define k8signbit(x) (std::signbit(x))
+
+#define k8l2r(x_) ({ CCTK_INT8 x__=(x_); CCTK_INT8 x=x__; *(CCTK_REAL8*)&x; })
+#define k8r2l(x_) ({ CCTK_REAL8 x__=(x_); CCTK_REAL8 x=x__; *(CCTK_INT8*)&x; })
+#define k8lfalse k8l2r(0)
+#define k8ltrue k8l2r(1)
+#define k8lnot(x) k8l2r(!k8r2l(x))
+#define k8land(x,y) k8l2r(k8r2l(x) && k8r2l(y))
+#define k8lor(x,y) k8l2r(k8r2l(x) || k8r2l(y))
+#define k8lxor(x,y) k8l2r(!k8r2l(x) != !k8r2l(y))
+
+#define k8ifthen(x,y,z) (k8r2l(x)?(y):(z))
+
+#define k8cmpeq(x,y) k8l2r((x)==(y))
+#define k8cmpne(x,y) k8l2r((x)!=(y))
+#define k8cmpgt(x,y) k8l2r((x)>(y))
+#define k8cmpge(x,y) k8l2r((x)>=(y))
+#define k8cmplt(x,y) k8l2r((x)<(y))
+#define k8cmple(x,y) k8l2r((x)<=(y))
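Finally, an editorial sketch of the pattern that the vec8_loadu_maybe3 comments allude to (not part of the commit; the grid-function name, index arithmetic, and helper are illustrative): in a finite-difference stencil, the compile-time offsets tell the macro whether a neighbour access can remain an aligned load.

// Sketch only: centred first derivative in the x direction, where the
// x stride is one element and idx = 1/dx.  The offsets (+1,0,0)/(-1,0,0)
// let vec8_loadu_maybe3 choose between aligned and unaligned loads.
static inline CCTK_REAL8_VEC diff_x(CCTK_REAL8 const* u, ptrdiff_t ijk,
                                    CCTK_REAL8 idx)
{
  CCTK_REAL8_VEC const up = vec8_loadu_maybe3(+1, 0, 0, u[ijk + 1]);
  CCTK_REAL8_VEC const um = vec8_loadu_maybe3(-1, 0, 0, u[ijk - 1]);
  return k8mul(vec8_set1((CCTK_REAL8)0.5 * idx), k8sub(up, um));
}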