// -*-C++-*-
// Vectorise using IBM's Blue Gene/Q QPX (Power)

// Wrap the type vector4double in a thin struct instead of using it
// directly (see CCTK_REAL8_VEC below)
// Use macros instead of inline functions

// Note: bgxlC_r does not like const declarations, so we need to cast
// them away and/or omit them everywhere

// See <http://pic.dhe.ibm.com/infocenter/compbg/v121v141/index.jsp>



#include <assert.h>

// #define vec8_assert(x) ((void)0)
#define vec8_assert(x) assert(x)

#ifdef __cplusplus
#  include <builtins.h>
#endif
#include <mass_simd.h>



#define vec8_architecture "QPX"

// Vector type corresponding to CCTK_REAL
// We use a struct to avoid the "const" issue
// #define CCTK_REAL8_VEC vector4double
struct CCTK_REAL8_VEC {
  vector4double v;
  CCTK_REAL8_VEC() {}
  CCTK_REAL8_VEC(CCTK_REAL8_VEC const& x): v(x.v) {}
  CCTK_REAL8_VEC(vector4double v_): v(v_) {}
  operator vector4double() const { return v; }
};
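
// Disabled sketch of how the wrapper interoperates with the QPX
// intrinsics: vec_add takes and returns vector4double, so the
// conversion operator and converting constructor above let
// CCTK_REAL8_VEC pass through transparently. The function name is
// hypothetical.
#if 0
static CCTK_REAL8_VEC example_add(CCTK_REAL8_VEC x, CCTK_REAL8_VEC y)
{
  // x and y convert to vector4double implicitly; the result converts
  // back through the CCTK_REAL8_VEC(vector4double) constructor
  return vec_add(x, y);
}
#endif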

// Number of vector elements in a CCTK_REAL_VEC
#define CCTK_REAL8_VEC_SIZE 4

// Integer and boolean types corresponding to this real type
#define CCTK_INTEGER8     CCTK_INT8
#define CCTK_BOOLEAN8     CCTK_REAL8
#define CCTK_INTEGER8_VEC CCTK_REAL8_VEC
#define CCTK_BOOLEAN8_VEC CCTK_REAL8_VEC



// Create vectors, extract vector elements

#define vec8_set1(a)      (vec_splats(a))
#if 0
#define vec8_set(a,b,c,d)                               \
  (vec_insert                                           \
   (d,vec_insert                                        \
    (c,vec_insert                                       \
     (b,vec_insert                                      \
      (a,CCTK_REAL8_VEC(),0),1),2),3))
#endif
#define vec8_set(a_,b_,c_,d_)                   \
  ({                                            \
    CCTK_REAL8 const a__ = (a_);                \
    CCTK_REAL8 const b__ = (b_);                \
    CCTK_REAL8 const c__ = (c_);                \
    CCTK_REAL8 const d__ = (d_);                \
    CCTK_REAL8 const a = a__;                   \
    CCTK_REAL8 const b = b__;                   \
    CCTK_REAL8 const c = c__;                   \
    CCTK_REAL8 const d = d__;                   \
    CCTK_REAL8_VEC x;                           \
    ((CCTK_REAL*)&x)[0] = a;                    \
    ((CCTK_REAL*)&x)[1] = b;                    \
    ((CCTK_REAL*)&x)[2] = c;                    \
    ((CCTK_REAL*)&x)[3] = d;                    \
    x;                                          \
  })

#define vec8_b2r(b) ((b)?+1.0:-1.0)
#define vec8_setb(a,b,c,d)                                              \
  (vec8_set(vec8_b2r(a), vec8_b2r(b), vec8_b2r(c), vec8_b2r(d)))

#define vec8_elt0(x)  (vec_extract(x,0))
#define vec8_elt1(x)  (vec_extract(x,1))
#define vec8_elt2(x)  (vec_extract(x,2))
#define vec8_elt3(x)  (vec_extract(x,3))
#define vec8_elt(x,d) (vec_extract(x,d))
#define vec8_elts(x_,a,b,c,d)                   \
  ({                                            \
    CCTK_REAL8_VEC x__ = (x_);                  \
    CCTK_REAL8_VEC x = x__;                     \
    a = ((CCTK_REAL*)&x)[0];                    \
    b = ((CCTK_REAL*)&x)[1];                    \
    c = ((CCTK_REAL*)&x)[2];                    \
    d = ((CCTK_REAL*)&x)[3];                    \
  })

#define vec8_r2b(x) ((x)>=0.0)
#define vec8_eltb(x,d) (vec8_r2b(vec8_elt(x,d)))
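
// Disabled usage sketch: build a vector element-wise and read the
// elements back; booleans round-trip through the +1.0/-1.0 encoding.
#if 0
static void example_set_elt()
{
  CCTK_REAL8_VEC x = vec8_set(1.0, 2.0, 3.0, 4.0);
  vec8_assert(vec8_elt0(x) == 1.0 and vec8_elt3(x) == 4.0);
  CCTK_BOOLEAN8_VEC b = vec8_setb(true, false, true, false);
  vec8_assert(vec8_eltb(b,0) and not vec8_eltb(b,1));
}
#endif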



// Load and store vectors

// Load a vector from memory (aligned and unaligned); this loads from
// a reference to a scalar
#define vec8_load(p)   (vec_lda(0,(CCTK_REAL8*)&(p)))
#define vec8_loadu(p_)                                                  \
  ({                                                                    \
    CCTK_REAL8 const& p__=(p_);                                         \
    CCTK_REAL8& p = *(CCTK_REAL8*)&p__;                                 \
    vector4double v1, v2, vp;                                           \
    /* code taken from IBM's compiler documentation */                  \
    v1 = vec_ld(0,&p);         /* load the left part of the vector */   \
    v2 = vec_ld(31,&p);        /* load the right part of the vector */  \
    vp = vec_lvsl(0,&p);       /* generate control value */             \
    vec_perm(v1,v2,vp);        /* generate the aligned vector */        \
  })
#define vec8_loadu_off(off_,p_)                 \
  ({                                            \
    int const       off__ = (off_);             \
    CCTK_REAL8 const& p__ = (p_);               \
    int off               = off__;              \
    CCTK_REAL8& p         = *(CCTK_REAL8*)&p__; \
    vector4double v1, v2;                       \
    off &= CCTK_REAL8_VEC_SIZE-1;               \
    v1 = vec_lda(0,&p-off);                     \
    v2 = vec_lda(0,&p-off+CCTK_REAL8_VEC_SIZE); \
    off==1 ? vec_sldw(v1,v2,1) :                \
      off==2 ? vec_sldw(v1,v2,2) :              \
      off==3 ? vec_sldw(v1,v2,3) :              \
      (vec8_assert(0), v1);                     \
  })
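
// Worked example for vec8_loadu_off: with off=1, v1 holds
// {p[-1],p[0],p[1],p[2]} and v2 holds {p[3],p[4],p[5],p[6]}, so
// vec_sldw(v1,v2,1) shifts the concatenation left by one word and
// yields the desired {p[0],p[1],p[2],p[3]}.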

// Load a vector from memory that may or may not be aligned, as
// decided by the offset and the vector size
#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
// Implementation: Always use unaligned load
#  define vec8_loadu_maybe(off,p)             vec8_loadu(p)
#  define vec8_loadu_maybe3(off1,off2,off3,p) vec8_loadu(p)
#else
#  define vec8_loadu_maybe(off_,p_)             \
  ({                                            \
    CCTK_REAL8 const& p__=(p_);                 \
    int const         off__=(off_);             \
    CCTK_REAL8 const& p=p__;                    \
    int const off=off__;                        \
    off % CCTK_REAL8_VEC_SIZE == 0 ?            \
      vec8_load(p) :                            \
      vec8_loadu_off(off,p);                    \
  })
#  if VECTORISE_ALIGNED_ARRAYS
// Assume all array x sizes are multiples of the vector size
#    define vec8_loadu_maybe3(off1,off2,off3,p) vec8_loadu_maybe(off1,p)
#  else
#    define vec8_loadu_maybe3(off1,off2,off3,p_)        \
  ({                                                    \
    CCTK_REAL8 const& p__=(p_);                         \
    CCTK_REAL8 const& p=p__;                            \
    ((off2) % CCTK_REAL8_VEC_SIZE != 0 or               \
     (off3) % CCTK_REAL8_VEC_SIZE != 0) ?               \
      vec8_loadu(p) :                                   \
      vec8_loadu_maybe(off1,p);                         \
  })
#  endif
#endif
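
// Disabled sketch of the intended call pattern: reads shifted by di
// from an aligned base can use an aligned load whenever di is a
// multiple of the vector size. The function and array names are
// hypothetical.
#if 0
static CCTK_REAL8_VEC example_shifted_load(CCTK_REAL8 const* u,
                                           ptrdiff_t i, ptrdiff_t di)
{
  // u[i] is assumed to sit at a vector-aligned address, so the
  // alignment of u[i+di] depends only on di
  return vec8_loadu_maybe(di, u[i+di]);
}
#endif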

// Store a vector to memory (aligned and non-temporal); this stores to
// a reference to a scalar
#define vec8_store(p,x)     (vec_sta(x,0,&(p)))
#define vec8_storeu(p_,x_)                              \
  ({                                                    \
    CCTK_REAL8& p__=(p_);                               \
    CCTK_REAL8_VEC x__=(x_);                            \
    CCTK_REAL8& p=p__;                                  \
    CCTK_REAL8_VEC x=x__;                               \
    CCTK_REAL8_VEC v1, v2, v3, vp, m1, m2, m3;          \
    /* code taken from IBM's compiler documentation */  \
    /* generate insert masks */                         \
    vp = vec_lvsr(0,&p);                                \
    m1 = k8lfalse;                                      \
    m2 = k8ltrue;                                       \
    m3 = vec_perm(m1,m2,vp);                            \
    v3 = vec_perm(x,x,vp);                              \
    _Pragma("tm_atomic") {                              \
      /* get existing data */                           \
      v1 = vec_ld(0,&p);                                \
      v2 = vec_ld(31,&p);                               \
      /* permute and insert */                          \
      v1 = vec_sel(v1,v3,m3);                           \
      v2 = vec_sel(v3,v2,m3);                           \
      /* store data back */                             \
      vec_st(0,&p,v1);                                  \
      vec_st(31,&p,v2);                                 \
    }                                                   \
  })
#define vec8_store_nta(p,x) (vec_sta(x,0,&(p))) // this doesn't avoid the cache

#if VECTORISE_ALIGNED_ARRAYS
// Arrays are aligned; wrap-around is not an issue
#  define vec8_store_omp
#else
// Need to protect partial stores, as they may wrap around to the
// beginning of the next line in the array
#  define vec8_store_omp _Pragma("tm_atomic")
#endif

// Store a partial vector (aligned and non-temporal)
#define vec8_store_partial_prepare(i,imin_,imax_)                       \
  bool v8stp_all;                                                       \
  CCTK_BOOLEAN8_VEC v8stp_mask;                                         \
  bool v8stp_mask0, v8stp_mask1, v8stp_mask2, v8stp_mask3;              \
  ({                                                                    \
    ptrdiff_t const imin__=(imin_);                                     \
    ptrdiff_t const imax__=(imax_);                                     \
    ptrdiff_t const imin=imin__;                                        \
    ptrdiff_t const imax=imax__;                                        \
                                                                        \
    v8stp_all = i>=imin and i+CCTK_REAL8_VEC_SIZE-1<imax;		\
                                                                        \
    if (not CCTK_BUILTIN_EXPECT(v8stp_all, true)) {                     \
      CCTK_INTEGER8_VEC vp_lo, vp_hi;                                   \
      CCTK_BOOLEAN8_VEC mask_lo, mask_hi;                               \
      /* this is correct but slow */                                    \
      /*                                                                \
      mask_lo = vec8_setb(i+0>=imin, i+1>=imin, i+2>=imin, i+3>=imin);  \
      mask_hi = vec8_setb(i+0<imax, i+1<imax, i+2<imax, i+3<imax);      \
      */                                                                \
      /* Note: vec_lvsl(i,p) =  &p[i] / 8 % 4                           \
         Note: vec_lvsr(i,p) = -&p[i] / 8 % 4                           \
         /8: 8 bytes per double                                         \
         %4: 4 doubles per vector                                       \
      */                                                                \
      /* We assume p[i] is aligned */                                   \
      /* Ensure at least one vector element is inside the active region */ \
      vec8_assert(i-imin>=-(CCTK_REAL8_VEC_SIZE-1));                    \
      vp_lo = vec_lvsl(8 * (i-imin), (CCTK_REAL*)0);                    \
      mask_lo = (i-imin >= 0 ?						\
		 k8ltrue :						\
		 vec_perm(k8lfalse, k8ltrue, vp_lo));			\
      /* Ensure at least one vector element is inside the active region */ \
      vec8_assert(i<imax);                                              \
      vp_hi = vec_lvsl(8 * (i-imax), (CCTK_REAL*)0);                    \
      mask_hi =	(i-imax < -(CCTK_REAL8_VEC_SIZE-1) ?                    \
		 k8ltrue :						\
		 vec_perm(k8ltrue, k8lfalse, vp_hi));			\
      v8stp_mask = k8land(mask_lo, mask_hi);                            \
      v8stp_mask0 = vec8_eltb(v8stp_mask, 0);                           \
      v8stp_mask1 = vec8_eltb(v8stp_mask, 1);                           \
      v8stp_mask2 = vec8_eltb(v8stp_mask, 2);                           \
      v8stp_mask3 = vec8_eltb(v8stp_mask, 3);                           \
    }									\
  })
#define vec8_store_nta_partial(p_,x_)                           \
  ({                                                            \
    CCTK_REAL8&    p__=(p_);                                    \
    CCTK_REAL8_VEC x__=(x_);                                    \
    CCTK_REAL8&    p=p__;                                       \
    CCTK_REAL8_VEC x=x__;                                       \
    if (CCTK_BUILTIN_EXPECT(v8stp_all, true)) {                 \
      vec8_store(p, x);                                         \
    } else {                                                    \
      /*                                                        \
      vec8_store_omp                                            \
        vec8_store(p, k8ifthen(v8stp_mask, x, vec8_load(p)));   \
      */                                                        \
      if (VECTORISE_ALIGNED_ARRAYS) {                           \
        vec8_store(p, k8ifthen(v8stp_mask, x, vec8_load(p)));   \
      } else {                                                  \
        if (v8stp_mask0) (&p)[0] = vec8_elt0(x);                \
        if (v8stp_mask1) (&p)[1] = vec8_elt1(x);                \
        if (v8stp_mask2) (&p)[2] = vec8_elt2(x);                \
        if (v8stp_mask3) (&p)[3] = vec8_elt3(x);                \
      }                                                         \
    }                                                           \
  })
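
// Disabled sketch of the prepare/store pairing: the mask is computed
// once per vector chunk, and only the chunks straddling imin or imax
// take the slow path. All names are hypothetical.
#if 0
static void example_fill(CCTK_REAL8* a, ptrdiff_t imin, ptrdiff_t imax)
{
  // start at the aligned index at or below imin
  for (ptrdiff_t i = imin & ~(ptrdiff_t)(CCTK_REAL8_VEC_SIZE-1);
       i < imax; i += CCTK_REAL8_VEC_SIZE) {
    CCTK_REAL8_VEC x = vec8_set1(42.0);
    vec8_store_partial_prepare(i, imin, imax);
    vec8_store_nta_partial(a[i], x);
  }
}
#endif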

// Store a lower or higher partial vector (aligned and non-temporal);
// the non-temporal hint is probably ignored
#define vec8_store_nta_partial_lo(p_,x_,n)                              \
  ({                                                                    \
    CCTK_REAL8&    p__=(p_);                                            \
    CCTK_REAL8_VEC x__=(x_);                                            \
    CCTK_REAL8&    p=p__;                                               \
    CCTK_REAL8_VEC x=x__;                                               \
    CCTK_REAL8_VEC vp, mask;                                            \
    /* Ensure at least one but not all vector elements are active */    \
    vec8_assert(n>0 and n<CCTK_REAL8_VEC_SIZE);                         \
    vp = vec_lvsl(-8 * n, (CCTK_REAL*)0);                               \
    mask = vec_perm(k8ltrue, k8lfalse, vp);                             \
    vec8_store_omp                                                      \
      vec8_store(p, k8ifthen(mask, x, vec8_load(p)));                   \
  })
#define vec8_store_nta_partial_hi(p_,x_,n)                              \
  ({                                                                    \
    CCTK_REAL8&    p__=(p_);                                            \
    CCTK_REAL8_VEC x__=(x_);                                            \
    CCTK_REAL8&    p=p__;                                               \
    CCTK_REAL8_VEC x=x__;                                               \
    CCTK_REAL8_VEC vp, mask;                                            \
    /* Ensure at least one but not all vector elements are active */    \
    vec8_assert(n>0 and n<CCTK_REAL8_VEC_SIZE);                         \
    vp = vec_lvsl(8 * n, (CCTK_REAL*)0);                                \
    mask = vec_perm(k8lfalse, k8ltrue, vp);                             \
    vec8_store_omp                                                      \
      vec8_store(p, k8ifthen(mask, x, vec8_load(p)));                   \
  })
#define vec8_store_nta_partial_mid(p_,x_,nlo,nhi)                       \
  ({                                                                    \
    CCTK_REAL8&    p__=(p_);                                            \
    CCTK_REAL8_VEC x__=(x_);                                            \
    CCTK_REAL8&    p=p__;                                               \
    CCTK_REAL8_VEC x=x__;                                               \
    CCTK_REAL8_VEC vp_lo, mask_lo;                                      \
    /* Ensure at least one but not all vector elements are active */    \
    vec8_assert(nlo>0 and nlo<CCTK_REAL8_VEC_SIZE);                     \
    vp_lo = vec_lvsl(-8 * nlo, (CCTK_REAL*)0);                          \
    mask_lo = vec_perm(k8ltrue, k8lfalse, vp_lo);                       \
    CCTK_REAL8_VEC vp_hi, mask_hi;                                      \
    /* Ensure at least one but not all vector elements are active */    \
    vec8_assert(nhi>0 and nhi<CCTK_REAL8_VEC_SIZE);                     \
    vp_hi = vec_lvsl(8 * nhi, (CCTK_REAL*)0);                           \
    mask_hi = vec_perm(k8lfalse, k8ltrue, vp_hi);                       \
    CCTK_REAL8_VEC mask;                                                \
    mask = vec_and(mask_lo, mask_hi);                                   \
    vec8_store_omp                                                      \
      vec8_store(p, k8ifthen(mask, x, vec8_load(p)));                   \
  })
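
// Worked example for vec8_store_nta_partial_mid: with nlo=3, nhi=3
// the lo mask is {T,T,T,F} and the hi mask is {F,T,T,T}; their AND
// selects elements 1 and 2, the interior of the vector.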



// Functions and operators

// Operators
#define k8neg(x) (vec_neg(x))

#define k8add(x,y) (vec_add(x,y))
#define k8sub(x,y) (vec_sub(x,y))
#define k8mul(x,y) (vec_mul(x,y))
#define k8div(x,y) (vec_swdiv_nochk(x,y))

// Fused multiply-add, defined as [+-]x*y[+-]z
#define k8madd(x,y,z)  (vec_madd(x,y,z))
#define k8msub(x,y,z)  (vec_msub(x,y,z))
#define k8nmadd(x,y,z) (vec_nmadd(x,y,z))
#define k8nmsub(x,y,z) (vec_nmsub(x,y,z))
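
// Disabled sketch of the fused conventions above: k8madd(x,y,z)
// computes x*y+z in a single operation. The axpy kernel is the
// canonical use; the function name is hypothetical.
#if 0
static CCTK_REAL8_VEC example_axpy(CCTK_REAL8 alpha,
                                   CCTK_REAL8_VEC x, CCTK_REAL8_VEC y)
{
  return k8madd(vec8_set1(alpha), x, y); // alpha*x + y
}
#endif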

// Cheap functions
#define k8copysign(x,y) (vec_cpsgn(y,x))
#define k8fabs(x)       (vec_abs(x))
#define k8fmax(x_,y_)                           \
  ({                                            \
    CCTK_REAL8_VEC x__=(x_);                    \
    CCTK_REAL8_VEC y__=(y_);                    \
    CCTK_REAL8_VEC x=x__;                       \
    CCTK_REAL8_VEC y=y__;                       \
    k8ifthen(k8cmplt(x,y),y,x);                 \
  })
#define k8fmin(x_,y_)                           \
  ({                                            \
    CCTK_REAL8_VEC x__=(x_);                    \
    CCTK_REAL8_VEC y__=(y_);                    \
    CCTK_REAL8_VEC x=x__;                       \
    CCTK_REAL8_VEC y=y__;                       \
    k8ifthen(k8cmpgt(x,y),y,x);                 \
  })
#define k8fnabs(x) (vec_nabs(x))
#define k8sgn(x_)                               \
  ({                                            \
    CCTK_REAL8_VEC x__=(x_);                    \
    CCTK_REAL8_VEC x=x__;                       \
    CCTK_REAL8_VEC one, zero, iszero;           \
    one = k8ltrue;                              \
    zero = k8sub(one, one);                     \
    iszero = k8cmpeq(x, zero);                  \
    k8ifthen(iszero, zero, k8copysign(one, x)); \
  })
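// Note: k8sgn exploits the boolean encoding described below: k8ltrue
// is a vector of +1.0, so "one" is literally 1.0 and "zero" is
// one-one = 0.0.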
#define k8sqrt(x) (vec_swsqrt_nochk(x))

// Expensive functions
#define k8acos(x)    acosd4(x)
#define k8acosh(x)   acoshd4(x)
#define k8asin(x)    asind4(x)
#define k8asinh(x)   asinhd4(x)
#define k8atan(x)    atand4(x)
#define k8atan2(x,y) atan2d4(x,y)
#define k8atanh(x)   atanhd4(x)
#define k8cos(x)     cosd4(x)
#define k8cosh(x)    coshd4(x)
#define k8exp(x)     expd4(x)
#define k8log(x)     logd4(x)
#define k8pow(x,a)   powd4(x,vec8_set1(a))
#define k8sin(x)     sind4(x)
#define k8sinh(x)    sinhd4(x)
#define k8tan(x)     tand4(x)
#define k8tanh(x)    tanhd4(x)

// canonical true is +1.0, canonical false is -1.0
// >=0 is true, -0 is true, nan is false
#define k8lfalse                                                \
  ({ CCTK_REAL8_VEC dummy; vec_logical(dummy,dummy,0x0); })
#define k8ltrue                                                 \
  ({ CCTK_REAL8_VEC dummy; vec_logical(dummy,dummy,0xf); })
#define k8lnot(x)       (vec_not(x))
#define k8land(x,y)     (vec_and(x,y))
#define k8lor(x,y)      (vec_or(x,y))
#define k8lxor(x,y)     (vec_xor(x,y))
#define k8ifthen(x,y,z) (vec_sel(z,y,x))

#define k8cmpeq(x,y) (vec_cmpeq(x,y))
#define k8cmpne(x,y) (k8lnot(k8cmpeq(x,y)))
#define k8cmpgt(x,y) (vec_cmpgt(x,y))
#define k8cmpge(x,y) (k8lnot(k8cmplt(x,y)))
#define k8cmplt(x,y) (vec_cmplt(x,y))
#define k8cmple(x,y) (k8lnot(k8cmpgt(x,y)))
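
// Disabled sketch of branchless selection with the comparisons and
// k8ifthen: clamp x into [lo,hi] without branches. The function name
// is hypothetical.
#if 0
static CCTK_REAL8_VEC example_clamp(CCTK_REAL8_VEC x,
                                    CCTK_REAL8_VEC lo, CCTK_REAL8_VEC hi)
{
  x = k8ifthen(k8cmplt(x,lo), lo, x); // where x<lo, take lo
  x = k8ifthen(k8cmpgt(x,hi), hi, x); // where x>hi, take hi
  return x;
}
#endif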