// Vectorise using Intel's or AMD's SSE2

// Use the type __m128d directly, without introducing a wrapper class
// Use macros instead of inline functions



#include <assert.h>
#include <math.h>

#include <emmintrin.h>
#ifdef __SSE4_1__
// Intel's SSE 4.1
#  include <smmintrin.h>
#endif
#ifdef __SSE4A__
// AMD's SSE 4a
#  include <ammintrin.h>

// Intel compilers don't support SSE 4a. Here is how we can implement
// these instructions in assembler instead:

// inline void __attribute__((__always_inline__))
//   _mm_stream_sd (double *p, __m128d x)
// {
//   asm ("movntsd %[x],%[p]" : "=m" (*p) : [p] "m" (*p), [x] "x" (x));
// }

#endif
#ifdef __FMA4__
#  include <fma4intrin.h>
#endif



#ifdef __SSE4_1__
#  define vec8_architecture_SSE4_1 "+SSE4.1"
#else
#  define vec8_architecture_SSE4_1 ""
#endif
#ifdef __SSE4A__
#  define vec8_architecture_SSE4a "+SSE4A"
#else
#  define vec8_architecture_SSE4a ""
#endif
#ifdef __FMA4__
#  define vec8_architecture_FMA4 "+FMA4"
#else
#  define vec8_architecture_FMA4 ""
#endif
#define vec8_architecture "SSE2" vec8_architecture_SSE4_1 vec8_architecture_SSE4a vec8_architecture_FMA4 " (64-bit precision)"



// Vector type corresponding to CCTK_REAL
#define CCTK_REAL8_VEC __m128d

// Number of vector elements in a CCTK_REAL_VEC
#define CCTK_REAL8_VEC_SIZE 2



// Create vectors, extract vector elements

#define vec8_set1(a)  (_mm_set1_pd(a))
#define vec8_set(a,b) (_mm_set_pd(b,a)) // note reversed arguments

// original order is 01
#define vec8_swap10(x_)                         \
  ({                                            \
    CCTK_REAL8_VEC const x__=(x_);              \
    CCTK_REAL8_VEC const x=x__;                 \
    _mm_shuffle_pd(x,x, _MM_SHUFFLE2(0,1));     \
  })

#define vec8_elt0(x) (((CCTK_REAL8 const*)&(x))[0])
#define vec8_elt1(x) (((CCTK_REAL8 const*)&(x))[1])
#define vec8_elt(x,d) (((CCTK_REAL8 const*)&(x))[d])
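
// Illustrative usage sketch (not part of the interface; the function
// name is hypothetical): how the construction and extraction macros
// above fit together, assuming CCTK_REAL8 is double.
#if 0
static void example8_set_and_extract (void)
{
  CCTK_REAL8_VEC const u = vec8_set1(3.0);      // {3.0, 3.0}
  CCTK_REAL8_VEC const v = vec8_set(1.0, 2.0);  // element 0 is 1.0, element 1 is 2.0
  CCTK_REAL8_VEC const w = vec8_swap10(v);      // {2.0, 1.0}
  assert(vec8_elt0(v) == 1.0);
  assert(vec8_elt1(v) == 2.0);
  assert(vec8_elt(u,1) == 3.0);
  assert(vec8_elt0(w) == 2.0);
}
#endif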



// Load and store vectors

// Load a vector from memory (aligned and unaligned); this loads from
// a reference to a scalar
#define vec8_load(p)  (_mm_load_pd(&(p)))
#define vec8_loadu(p) (_mm_loadu_pd(&(p)))
#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
#  define vec8_load_off1(p) vec8_loadu(p)
#else
#  define vec8_load_off1(p_)                                    \
  ({                                                            \
    CCTK_REAL8 const& p__=(p_);                                 \
    CCTK_REAL8 const& p=p__;                                    \
    _mm_shuffle_pd(vec8_load((&p)[-1]),                         \
                   vec8_load((&p)[+1]), _MM_SHUFFLE2(0,1));     \
  })
#endif
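
// Illustrative sketch (hypothetical names): vec8_load_off1 loads a
// vector that is misaligned by exactly one element. With
// VECTORISE_ALWAYS_USE_ALIGNED_LOADS it is assembled from the two
// neighbouring aligned loads {a[0],a[1]} and {a[2],a[3]} via a shuffle;
// otherwise it is a plain unaligned load. Either way the result is
// {a[1],a[2]}.
#if 0
static void example8_load_off1 (void)
{
  static CCTK_REAL8 a[4] __attribute__((__aligned__(16))) =
    { 10.0, 11.0, 12.0, 13.0 };
  CCTK_REAL8_VEC const v = vec8_load_off1(a[1]);
  assert(vec8_elt0(v) == 11.0);
  assert(vec8_elt1(v) == 12.0);
}
#endif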

// Load a vector from memory that may or may not be aligned, as
// decided by the offset off and the vector size
#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
// Implementation: Always use unaligned load
#  define vec8_loadu_maybe(off,p)             vec8_loadu(p)
#  define vec8_loadu_maybe3(off1,off2,off3,p) vec8_loadu(p)
#else
#  define vec8_loadu_maybe(off,p_)              \
  ({                                            \
    CCTK_REAL8 const& p__=(p_);                 \
    CCTK_REAL8 const& p=p__;                    \
    (off) % CCTK_REAL8_VEC_SIZE == 0 ?          \
      vec8_load(p) :                            \
      vec8_load_off1(p);                        \
  })
#  if VECTORISE_ALIGNED_ARRAYS
// Assume that all array extents in the x direction are multiples of
// the vector size, so that off2 and off3 cannot change the alignment
#    define vec8_loadu_maybe3(off1,off2,off3,p) \
  vec8_loadu_maybe(off1,p)
#  else
#    define vec8_loadu_maybe3(off1,off2,off3,p_)        \
  ({                                                    \
    CCTK_REAL8 const& p__=(p_);                         \
    CCTK_REAL8 const& p=p__;                            \
    ((off2) % CCTK_REAL8_VEC_SIZE != 0 or               \
     (off3) % CCTK_REAL8_VEC_SIZE != 0) ?               \
      vec8_loadu(p) :                                   \
      vec8_loadu_maybe(off1,p);                         \
  })
#  endif
#endif
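
// Illustrative sketch (hypothetical function and variable names): in a
// stencil loop whose base index ijk is a multiple of
// CCTK_REAL8_VEC_SIZE, the compile-time offset passed to
// vec8_loadu_maybe selects the aligned path for offset 0 and the
// off-by-one (or unaligned) path for offsets +/-1.
#if 0
static CCTK_REAL8_VEC example8_central_difference (CCTK_REAL8 const* const u,
                                                   int const ijk)
{
  CCTK_REAL8_VEC const um = vec8_loadu_maybe(-1, u[ijk-1]); // off-by-one load
  CCTK_REAL8_VEC const up = vec8_loadu_maybe(+1, u[ijk+1]); // off-by-one load
  return _mm_mul_pd(vec8_set1(0.5), _mm_sub_pd(up, um));    // (up-um)/2
}
#endif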

// Store a vector to memory (aligned and non-temporal); this stores to
// a reference to a scalar
#define vec8_store(p,x)  (_mm_store_pd(&(p),x))
#define vec8_storeu(p,x) (_mm_storeu_pd(&(p),x))
#if ! VECTORISE_STREAMING_STORES
#  define vec8_store_nta(p,x) vec8_store(p,x)
#else
#  define vec8_store_nta(p,x) (_mm_stream_pd(&(p),x))
#endif

// Store a partial vector (aligned and non-temporal)
#define vec8_store_partial_prepare(i,imin,imax)                 \
  bool const v8stp_lo = (i)>=(imin);                            \
  bool const v8stp_hi = (i)+CCTK_REAL8_VEC_SIZE-1<(imax)
#if VECTORISE_STREAMING_STORES && defined(__SSE4A__)
#  define vec8_store_nta_partial(p,x)                           \
  ({                                                            \
    if (CCTK_BUILTIN_EXPECT(v8stp_lo and v8stp_hi, true)) {     \
      vec8_store_nta(p,x);                                      \
    } else if (v8stp_lo) {                                      \
      _mm_stream_sd(&p,x);                                      \
    } else if (v8stp_hi) {                                      \
      _mm_stream_sd(&p+1, vec8_swap10(x));                      \
    }                                                           \
  })
#else
#  define vec8_store_nta_partial(p,x)                           \
  ({                                                            \
    if (CCTK_BUILTIN_EXPECT(v8stp_lo and v8stp_hi, true)) {     \
      vec8_store_nta(p,x);                                      \
    } else if (v8stp_lo) {                                      \
      _mm_storel_pd(&p,x);                                      \
    } else if (v8stp_hi) {                                      \
      _mm_storeh_pd(&p+1,x);                                    \
    }                                                           \
  })
#endif
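
// Illustrative sketch (hypothetical names): typical loop-boundary use
// of the partial-store macros. vec8_store_partial_prepare declares the
// v8stp_lo/v8stp_hi flags that vec8_store_nta_partial consults. This
// assumes dst and src are vector-aligned and padded, so that the full
// vector loads at the boundaries stay within allocated memory.
#if 0
static void example8_copy (CCTK_REAL8* const dst, CCTK_REAL8 const* const src,
                           int const imin, int const imax)
{
  // start at the aligned vector boundary at or below imin; the first
  // and last iterations may reach outside [imin,imax) and therefore
  // store only part of the vector
  for (int i = imin - imin % CCTK_REAL8_VEC_SIZE;
       i < imax;
       i += CCTK_REAL8_VEC_SIZE)
  {
    vec8_store_partial_prepare(i, imin, imax);
    vec8_store_nta_partial(dst[i], vec8_load(src[i]));
  }
}
#endif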

// Store a lower or higher partial vector (aligned and non-temporal)
#if ! VECTORISE_STREAMING_STORES
#  define vec8_store_nta_partial_lo(p,x,n) (_mm_storel_pd(&(p),x))
#  define vec8_store_nta_partial_hi(p,x,n) (_mm_storeh_pd(&(p)+1,x))
#else
#  if defined(__SSE4A__)
#    define vec8_store_nta_partial_lo(p,x,n) (_mm_stream_sd(&(p),x))
#    define vec8_store_nta_partial_hi(p,x,n)    \
  (_mm_stream_sd(&(p)+1, vec8_swap10(x)))
#  else
// TODO: use clflush once a whole cache line has been written (cache
// lines are usually larger than the CPU vector size)
#    define vec8_store_nta_partial_lo(p_,x,n)   \
  ({                                            \
    CCTK_REAL8& p__=(p_);                       \
    CCTK_REAL8& p=p__;                          \
    _mm_storel_pd(&p,x);                        \
    /* _mm_clflush(&p); */                      \
  })
#    define vec8_store_nta_partial_hi(p_,x,n)   \
  ({                                            \
    CCTK_REAL8& p__=(p_);                       \
    CCTK_REAL8& p=p__;                          \
    _mm_storeh_pd(&p+1,x);                      \
    /* _mm_clflush(&p+1); */                    \
  })
#  endif
#endif
#if 0
// This is slower; we would need a non-temporal read
#define vec8_store_nta_partial_lo(p,x,n)        \
  vec8_store_nta(p, _mm_loadh_pd(x,&(p)+1))
#define vec8_store_nta_partial_hi(p,x,n)        \
  vec8_store_nta(p, _mm_loadl_pd(x,&(p)))
#endif
#define vec8_store_nta_partial_mid(p,x,nlo,nhi) assert(0)



// Functions and operators

// static const union {
//   unsigned long long i[2];
//   __m128d            v;
// } k8all_mask_union = {{ 0xffffffffffffffffULL, 0xffffffffffffffffULL }};
// #define k8all_mask (k8all_mask_union.v)
static const union {
  unsigned long long i[2];
  __m128d            v;
} k8sign_mask_union = {{ 0x8000000000000000ULL, 0x8000000000000000ULL }};
#define k8sign_mask (k8sign_mask_union.v)

// Operators

// #define k8inot(x) (_mm_xor_si128(k8all_mask,x))
// 
// #define k8iand(x,y) (_mm_and_si128(x,y))
// #define k8ior(x,y)  (_mm_or_si128(x,y))
// #define k8ixor(x,y) (_mm_xor_si128(x,y))
// 
// #define k8ineg(x) (_mm_xor_pd(k8sign_mask,x))
// 
// #define k8iadd(x,y) (_mm_add_epi64(x,y))
// #define k8isub(x,y) (_mm_sub_epi64(x,y))
// 
// #define k8not(x) (_mm_xor_pd(k8all_mask,x))
// 
// #define k8and(x,y) (_mm_and_pd(x,y))
// #define k8or(x,y)  (_mm_or_pd(x,y))
// #define k8xor(x,y) (_mm_xor_pd(x,y))

#define k8neg(x) (_mm_xor_pd(k8sign_mask,x))

#define k8add(x,y) (_mm_add_pd(x,y))
#define k8sub(x,y) (_mm_sub_pd(x,y))
#define k8mul(x,y) (_mm_mul_pd(x,y))
#define k8div(x,y) (_mm_div_pd(x,y))

// Fused multiply-add, defined as [+-]x*y[+-]z
#ifdef __FMA4__
#  define k8madd(x,y,z)  (_mm_macc_pd(x,y,z))
#  define k8msub(x,y,z)  (_mm_msub_pd(x,y,z))
#  define k8nmadd(x,y,z) (_mm_nmsub_pd(x,y,z))
#  define k8nmsub(x,y,z) (_mm_nmacc_pd(x,y,z))
#else
#  define k8madd(x,y,z)  (k8add(k8mul(x,y),z))
#  define k8msub(x,y,z)  (k8sub(k8mul(x,y),z))
#  define k8nmadd(x,y,z) (k8sub(k8neg(z),k8mul(x,y)))
#  define k8nmsub(x,y,z) (k8sub(z,k8mul(x,y)))
#endif
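
// Reading the two variants side by side, both compute
//   k8madd (x,y,z) =   x*y + z
//   k8msub (x,y,z) =   x*y - z
//   k8nmadd(x,y,z) = -(x*y) - z
//   k8nmsub(x,y,z) = -(x*y) + z
// The FMA4 form does this in one instruction with a single rounding;
// the fallback needs two instructions and rounds twice.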

// Cheap functions
#define k8fabs(x)   (_mm_andnot_pd(k8sign_mask,x))
#define k8fmax(x,y) (_mm_max_pd(x,y))
#define k8fmin(x,y) (_mm_min_pd(x,y))
#define k8fnabs(x)  (_mm_or_pd(k8sign_mask,x))
#define k8sqrt(x)   (_mm_sqrt_pd(x))
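
// k8neg, k8fabs and k8fnabs above are pure bit operations on
// k8sign_mask, which has only the IEEE double-precision sign bit set in
// each lane: xor flips the sign, andnot clears it, or sets it.
// Illustrative check (hypothetical function name):
#if 0
static void example8_sign_mask (void)
{
  CCTK_REAL8_VEC const x  = vec8_set(-3.0, +4.0);
  CCTK_REAL8_VEC const nx = k8neg(x);
  CCTK_REAL8_VEC const ax = k8fabs(x);
  CCTK_REAL8_VEC const fx = k8fnabs(x);
  assert(vec8_elt0(nx) == +3.0);  // sign bit flipped
  assert(vec8_elt0(ax) == +3.0);  // sign bit cleared
  assert(vec8_elt1(fx) == -4.0);  // sign bit set
}
#endif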

// Expensive functions
#define K8REPL(f,x_)                            \
  ({                                            \
    CCTK_REAL8_VEC const x__=(x_);              \
    CCTK_REAL8_VEC const x=x__;                 \
    vec8_set(f(vec8_elt0(x)),                   \
             f(vec8_elt1(x)));                  \
  })
#define K8REPL2(f,x_,a_)                        \
  ({                                            \
    CCTK_REAL8_VEC const x__=(x_);              \
    CCTK_REAL8     const a__=(a_);              \
    CCTK_REAL8_VEC const x=x__;                 \
    CCTK_REAL8     const a=a__;                 \
    vec8_set(f(vec8_elt0(x),a),                 \
             f(vec8_elt1(x),a));                \
  })

#define k8cos(x)   K8REPL(cos,x)
#define k8exp(x)   K8REPL(exp,x)
#define k8log(x)   K8REPL(log,x)
#define k8pow(x,a) K8REPL2(pow,x,a)
#define k8sin(x)   K8REPL(sin,x)
#define k8tan(x)   K8REPL(tan,x)
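
// There is no SSE2 implementation of these functions; K8REPL and
// K8REPL2 simply apply the scalar libm function to each element and
// repack the results. Illustrative sketch (hypothetical function name):
#if 0
static CCTK_REAL8_VEC example8_exp_of_pow (CCTK_REAL8_VEC const x,
                                           CCTK_REAL8 const a)
{
  CCTK_REAL8_VEC const p = k8pow(x,a);  // {pow(x0,a), pow(x1,a)}
  return k8exp(p);                      // {exp(p0), exp(p1)}
}
#endif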

// Choice   [signbit(x) ? y : z]  (select y where the sign bit of x is set)
#ifdef __SSE4_1__
#  define k8ifmsb(x,y,z) (_mm_blendv_pd(z,y,x))
#elif 0
// This is slow
#  define k8ifmsb(x_,y_,z_)                     \
  ({                                            \
    CCTK_REAL8_VEC const x__=(x_);              \
    CCTK_REAL8_VEC const y__=(y_);              \
    CCTK_REAL8_VEC const z__=(z_);              \
    CCTK_REAL8_VEC const x=x__;                 \
    CCTK_REAL8_VEC const y=y__;                 \
    CCTK_REAL8_VEC const z=z__;                 \
    int const m = _mm_movemask_pd(x);           \
    CCTK_REAL8_VEC r;                           \
    switch (m) {                                \
    case 0: r = z; break;                       \
    case 1: r = _mm_move_sd(z,y); break;        \
    case 2: r = _mm_move_sd(y,z); break;        \
    case 3: r = y; break;                       \
    }                                           \
    r;                                          \
  })
#elif 0
#  ifdef __cplusplus
#    define k8sgn(x) ({ using namespace std; signbit(x); })
#  else
#    define k8sgn(x) (signbit(x))
#  endif
#  define k8ifmsb(x_,y_,z_)                                             \
  ({                                                                    \
    CCTK_REAL8_VEC const x__=(x_);                                      \
    CCTK_REAL8_VEC const y__=(y_);                                      \
    CCTK_REAL8_VEC const z__=(z_);                                      \
    CCTK_REAL8_VEC const x=x__;                                         \
    CCTK_REAL8_VEC const y=y__;                                         \
    CCTK_REAL8_VEC const z=z__;                                         \
    vec8_set(k8sgn(vec8_elt0(x)) ? vec8_elt0(y) : vec8_elt0(z),         \
             k8sgn(vec8_elt1(x)) ? vec8_elt1(y) : vec8_elt1(z));        \
  })
#else
static const union {
  unsigned long long i;
  double             d;
} k8one_union = { 0x1ULL };
#  define k8one (k8one_union.d)
#  define k8ifmsb(x_,y_,z_)                                     \
  ({                                                            \
    CCTK_REAL8_VEC const x__=(x_);                              \
    CCTK_REAL8_VEC const y__=(y_);                              \
    CCTK_REAL8_VEC const z__=(z_);                              \
    CCTK_REAL8_VEC const x=x__;                                 \
    CCTK_REAL8_VEC const y=y__;                                 \
    CCTK_REAL8_VEC const z=z__;                                 \
    /* there is no _mm_srai_epi64(x, 63) */                     \
    CCTK_REAL8_VEC const imask =                                \
      (__m128d)_mm_sub_epi64(_mm_srli_epi64((__m128i)x, 63),    \
                             (__m128i)_mm_set1_pd(k8one));      \
    /* (z & ~mask) | (y & mask);   imask = ~mask */             \
    _mm_or_pd(_mm_and_pd(imask, z), _mm_andnot_pd(imask, y));   \
  })
#endif
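
// Worked through for one lane of the fallback above: if the sign bit of
// x is set, _mm_srli_epi64 yields 1 and subtracting 1 gives all-zero
// bits, so the and/andnot/or combination selects y; if the sign bit is
// clear, the subtraction gives all-one bits and z is selected. This
// matches the SSE4.1 blendv semantics (y where the sign bit of x is
// set, else z). Illustrative use (hypothetical function name):
#if 0
static CCTK_REAL8_VEC example8_signum1 (CCTK_REAL8_VEC const x)
{
  // -1 where the sign bit of x is set, +1 elsewhere
  return k8ifmsb(x, vec8_set1(-1.0), vec8_set1(+1.0));
}
#endif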