path: root/src/vectors-8-AVX.h
// Vectorise using Intel's or AMD's AVX

// Use the type __m256d directly, without introducing a wrapper class
// Use macros instead of inline functions



#if VECTORISE_EMULATE_AVX
#  include "avxintrin_emu.h"
#else
#  include <immintrin.h>
#endif
#ifdef __FMA4__
#  include <fma4intrin.h>
#endif



#define vec8_architecture "AVX"

// Vector type corresponding to CCTK_REAL
#define CCTK_REAL8_VEC __m256d

// Number of vector elements in a CCTK_REAL_VEC
#define CCTK_REAL8_VEC_SIZE 4



union k8const_t {
  unsigned long long i[4];
  double             d[4];
  __m256i            vi;
  __m256d            vd;
};

#define K8_ZERO 0x0000000000000000ULL
#define K8_IMIN 0x8000000000000000ULL
#define K8_IMAX 0x7fffffffffffffffULL
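// These bit patterns are used below to build vector masks: K8_IMIN is the
// sign bit of an IEEE binary64 value, K8_IMAX is every bit except the sign
// bit, and K8_ZERO is all zeros. The k8const_t union above exposes the same
// bits both as 64-bit integers and as packed doubles.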



// Create vectors, extract vector elements

#define vec8_set1(a)      (_mm256_set1_pd(a))
#define vec8_set(a,b,c,d) (_mm256_set_pd(d,c,b,a)) // note reversed arguments

#define vec8_elt0(x) (((CCTK_REAL8 const*)&(x))[0])
#define vec8_elt1(x) (((CCTK_REAL8 const*)&(x))[1])
#define vec8_elt2(x) (((CCTK_REAL8 const*)&(x))[2])
#define vec8_elt3(x) (((CCTK_REAL8 const*)&(x))[3])
#define vec8_elt(x,d) (((CCTK_REAL8 const*)&(x))[d])
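// Usage sketch (illustrative only; "v", "x0", and "x3" are hypothetical
// locals):
//   CCTK_REAL8_VEC v  = vec8_set(1.0, 2.0, 3.0, 4.0);
//   CCTK_REAL8     x0 = vec8_elt0(v);    // 1.0: vec8_set lists elements 0..3
//   CCTK_REAL8     x3 = vec8_elt(v, 3);  // 4.0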



// Load and store vectors

// Load a vector from memory (aligned and unaligned); this loads from
// a reference to a scalar
#define vec8_load(p)  (_mm256_load_pd(&(p)))
#define vec8_loadu(p) (_mm256_loadu_pd(&(p)))
#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
#  define vec8_load_off1(p) vec8_loadu(p)
#else
#  error "VECTORISE_ALWAYS_USE_ALIGNED_LOADS not yet supported"
#endif

// Load a vector from memory that may or may not be aligned, as
// decided by the offset off and the vector size
#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
// Implementation: Always use unaligned load
#  define vec8_loadu_maybe(off,p)             (vec8_loadu(p))
#  define vec8_loadu_maybe3(off1,off2,off3,p) (vec8_loadu(p))
#else
#  define vec8_loadu_maybe(off,p_)              \
  ({                                            \
    CCTK_REAL8 const& pp=(p_);                  \
    CCTK_REAL8 const& p=pp;                     \
    (off) % CCTK_REAL8_VEC_SIZE == 0 ?          \
      vec8_load(p) :                            \
      vec8_load_off1(p);                        \
  })
#  if VECTORISE_ALIGNED_ARRAYS
// Assume all array x sizes are multiples of the vector size
#    define vec8_loadu_maybe3(off1,off2,off3,p) \
  vec8_loadu_maybe(off1,p)
#  else
#    define vec8_loadu_maybe3(off1,off2,off3,p_)        \
  ({                                                    \
    CCTK_REAL8 const& pp=(p_);                          \
    CCTK_REAL8 const& p=pp;                             \
    ((off2) % CCTK_REAL8_VEC_SIZE != 0 or               \
     (off3) % CCTK_REAL8_VEC_SIZE != 0) ?               \
      vec8_loadu(p) :                                   \
      vec8_loadu_maybe(off1,p);                         \
  })
#  endif
#endif
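// Usage sketch (hypothetical array "a", loop index "i", and compile-time
// stencil offset "off"): when a[i] itself is vector-aligned, the macro keeps
// the aligned load for offsets that are multiples of the vector size and
// falls back to an unaligned load otherwise:
//   CCTK_REAL8_VEC s = vec8_loadu_maybe(off, a[i+off]);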

// Store a vector to memory (aligned and non-temporal); this stores to
// a reference to a scalar
#define vec8_store(p,x)  (_mm256_store_pd(&(p),x))
#define vec8_storeu(p,x) (_mm256_storeu_pd(&(p),x))
#if ! VECTORISE_STREAMING_STORES
#  define vec8_store_nta(p,x) (vec8_store(p,x))
#else
#  define vec8_store_nta(p,x) (_mm256_stream_pd(&(p),x))
#endif
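// Usage sketch (hypothetical arrays "a" and "b", both starting on a 32-byte
// boundary, with "n" a multiple of CCTK_REAL8_VEC_SIZE):
//   for (int i=0; i<n; i+=CCTK_REAL8_VEC_SIZE) {
//     CCTK_REAL8_VEC x = vec8_load(a[i]);   // aligned load of a[i..i+3]
//     vec8_store_nta(b[i], x);              // aligned (possibly streaming) store
//   }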

// Store a lower or higher partial vector (aligned and non-temporal);
// the non-temporal hint is probably ignored
static const k8const_t k8store_lo_union[5] =
  {
    {{ K8_ZERO, K8_ZERO, K8_ZERO, K8_ZERO, }},
    {{ K8_IMIN, K8_ZERO, K8_ZERO, K8_ZERO, }},
    {{ K8_IMIN, K8_IMIN, K8_ZERO, K8_ZERO, }},
    {{ K8_IMIN, K8_IMIN, K8_IMIN, K8_ZERO, }},
    {{ K8_IMIN, K8_IMIN, K8_IMIN, K8_IMIN, }},
  };
static const k8const_t k8store_hi_union[5] =
  {
    {{ K8_ZERO, K8_ZERO, K8_ZERO, K8_ZERO, }},
    {{ K8_ZERO, K8_ZERO, K8_ZERO, K8_IMIN, }},
    {{ K8_ZERO, K8_ZERO, K8_IMIN, K8_IMIN, }},
    {{ K8_ZERO, K8_IMIN, K8_IMIN, K8_IMIN, }},
    {{ K8_IMIN, K8_IMIN, K8_IMIN, K8_IMIN, }},
  };
#if defined(__GNUC__) && __GNUC__==4 && __GNUC_MINOR__<=4
// gcc 4.4 and earlier use a wrong prototype for _mm256_maskstore_pd
// (the mask is passed as __m256d instead of __m256i)
#  define vec8_store_nta_partial_lo(p,x,n)                              \
  (_mm256_maskstore_pd(&(p),_mm256_castsi256_pd(k8store_lo_union[n].vi),x))
#  define vec8_store_nta_partial_hi(p,x,n)                              \
  (_mm256_maskstore_pd(&(p),_mm256_castsi256_pd(k8store_hi_union[n].vi),x))
#  define vec8_store_nta_partial_mid(p,x,nlo,nhi)                       \
  (_mm256_maskstore_pd                                                  \
   (&(p),                                                               \
    _mm256_castsi256_pd(k8store_lo_union[nlo].vi & k8store_hi_union[nhi].vi), \
    x))
#else
#  define vec8_store_nta_partial_lo(p,x,n)              \
  (_mm256_maskstore_pd(&(p),k8store_lo_union[n].vi,x))
#  define vec8_store_nta_partial_hi(p,x,n)              \
  (_mm256_maskstore_pd(&(p),k8store_hi_union[n].vi,x))
#  define vec8_store_nta_partial_mid(p,x,nlo,nhi)               \
  (_mm256_maskstore_pd                                          \
   (&(p),                                                       \
    k8store_lo_union[nlo].vi & k8store_hi_union[nhi].vi,        \
    x))
#endif
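// Usage sketch (hypothetical destination "a[i]", vector "x", and remainder
// count "nleft" with 0 < nleft < 4): store only the first nleft elements of
// x, leaving the rest of the destination untouched; the _hi variant stores
// the last n elements, and _mid stores an interior range selected by nlo and
// nhi:
//   vec8_store_nta_partial_lo(a[i], x, nleft);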



// Functions and operators

static const k8const_t k8sign_mask_union =
  {{ K8_IMIN, K8_IMIN, K8_IMIN, K8_IMIN, }};
static const k8const_t k8abs_mask_union =
  {{ K8_IMAX, K8_IMAX, K8_IMAX, K8_IMAX, }};

// Operators
#define k8neg(x) (_mm256_xor_pd(x,k8sign_mask_union.vd))

#define k8add(x,y) (_mm256_add_pd(x,y))
#define k8sub(x,y) (_mm256_sub_pd(x,y))
#define k8mul(x,y) (_mm256_mul_pd(x,y))
#define k8div(x,y) (_mm256_div_pd(x,y))

// Fused multiply-add, defined as [+-]x*y[+-]z
#ifdef __FMA4__
#  define k8madd(x,y,z)  (_mm256_macc_pd(x,y,z))
#  define k8msub(x,y,z)  (_mm256_msub_pd(x,y,z))
#  define k8nmadd(x,y,z) (_mm256_nmsub_pd(x,y,z))
#  define k8nmsub(x,y,z) (_mm256_nmacc_pd(x,y,z))
#else
#  define k8madd(x,y,z)  (k8add(k8mul(x,y),z))
#  define k8msub(x,y,z)  (k8sub(k8mul(x,y),z))
#  define k8nmadd(x,y,z) (k8sub(k8neg(z),k8mul(x,y)))
#  define k8nmsub(x,y,z) (k8sub(z,k8mul(x,y)))
#endif
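// Illustrative note: in each lane k8madd(x,y,z) computes x*y+z, k8msub(x,y,z)
// computes x*y-z, k8nmadd(x,y,z) computes -x*y-z, and k8nmsub(x,y,z) computes
// -x*y+z, either as a single FMA4 instruction or via the multiply/add
// fallback above. Example (hypothetical operands):
//   CCTK_REAL8_VEC r = k8madd(dx, rhs, u0);   // r = dx*rhs + u0 elementwise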

// Cheap functions
#define k8fabs(x)   (_mm256_and_pd(x,k8abs_mask_union.vd))
#define k8fmax(x,y) (_mm256_max_pd(x,y))
#define k8fmin(x,y) (_mm256_min_pd(x,y))
#define k8fnabs(x)  (_mm256_or_pd(x,k8sign_mask_union.vd))
#define k8sqrt(x)   (_mm256_sqrt_pd(x))

// Expensive functions
#define K8REPL(f,x)                             \
({                                              \
  CCTK_REAL8_VEC const xfunc=(x);               \
  vec8_set(f(vec8_elt0(xfunc)),                 \
           f(vec8_elt1(xfunc)),                 \
           f(vec8_elt2(xfunc)),                 \
           f(vec8_elt3(xfunc)));                \
})
#define K8REPL2(f,x,a)                          \
({                                              \
  CCTK_REAL8_VEC const xfunc=(x);               \
  CCTK_REAL8     const afunc=(a);               \
  vec8_set(f(vec8_elt0(xfunc),afunc),           \
           f(vec8_elt1(xfunc),afunc),           \
           f(vec8_elt2(xfunc),afunc),           \
           f(vec8_elt3(xfunc),afunc));          \
})
#define k8exp(x)   K8REPL(exp,x)
#define k8log(x)   K8REPL(log,x)
#define k8pow(x,a) K8REPL2(pow,x,a)
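// Note: these "expensive" functions are not vectorised; K8REPL/K8REPL2 simply
// call the scalar libm routine once per element and repack the four results
// with vec8_set.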

#define k8ifpos(x,y,z) (_mm256_blendv_pd(y,z,x))
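
// Usage sketch (hypothetical vectors "x", "a", "b"): branchless select that
// picks each element of a where the sign bit of the corresponding element of
// x is clear (non-negative values) and the element of b where it is set:
//   CCTK_REAL8_VEC r = k8ifpos(x, a, b);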