// Vectorise using Intel's or AMD's AVX

// Use the type __m256d directly, without introducing a wrapper class
// Use macros instead of inline functions

#if VECTORISE_EMULATE_AVX
#  include "avxintrin_emu.h"
#else
#  include <immintrin.h>  // restored: header name was lost in extraction (AVX intrinsics)
#endif
#ifdef __FMA4__
// NOTE(review): header name was lost in extraction; <x86intrin.h> provides the
// FMA4 _mm256_macc_pd family on GCC/Clang — confirm against the build system.
#  include <x86intrin.h>
#endif

#ifdef __FMA4__
#  define vec8_architecture_FMA4 "+FMA4"
#else
#  define vec8_architecture_FMA4 ""
#endif
#define vec8_architecture "AVX" vec8_architecture_FMA4 " (64-bit precision)"

// Vector type corresponding to CCTK_REAL
#define CCTK_REAL8_VEC __m256d

// Number of vector elements in a CCTK_REAL_VEC
#define CCTK_REAL8_VEC_SIZE 4

// Integer and boolean types corresponding to this real type
#define CCTK_INTEGER8     CCTK_REAL8
#define CCTK_BOOLEAN8     CCTK_REAL8
#define CCTK_INTEGER8_VEC CCTK_REAL8_VEC
#define CCTK_BOOLEAN8_VEC CCTK_REAL8_VEC

// Constant pool entry: lets a 4x64-bit bit pattern be viewed as integers,
// doubles, or either vector type without pointer-cast aliasing.
union k8const_t {
  unsigned long long i[4];
  double             f[4];
  __m256i            vi;
  __m256d            vf;
};

// Canonical 64-bit bit patterns: all-zeros, all-ones, sign bit only,
// everything but the sign bit (used for masking/abs/neg tricks)
#define K8_ZERO    0x0000000000000000ULL
#define K8_NOTZERO 0xffffffffffffffffULL
#define K8_IMIN    0x8000000000000000ULL
#define K8_IMAX    0x7fffffffffffffffULL

// Create vectors, extract vector elements

#define vec8_set1(a)      (_mm256_set1_pd(a))
#define vec8_set(a,b,c,d) (_mm256_set_pd(d,c,b,a)) // note reversed arguments

#define vec8_elt0(x)  (((CCTK_REAL8 const*)&(x))[0])
#define vec8_elt1(x)  (((CCTK_REAL8 const*)&(x))[1])
#define vec8_elt2(x)  (((CCTK_REAL8 const*)&(x))[2])
#define vec8_elt3(x)  (((CCTK_REAL8 const*)&(x))[3])
#define vec8_elt(x,d) (((CCTK_REAL8 const*)&(x))[d])

// Load and store vectors

// Load a vector from memory (aligned and unaligned); this loads from
// a reference to a scalar
#define vec8_load(p)  (_mm256_load_pd(&(p)))
#define vec8_loadu(p) (_mm256_loadu_pd(&(p)))
#if ! VECTORISE_ALWAYS_USE_ALIGNED_LOADS
// BUG FIX: expansion was "vec_loadu(p)" — the "8" was dropped, so the macro
// referenced a name that does not exist in this file.
#  define vec8_load_off1(p) vec8_loadu(p)
#else
#  error "VECTORISE_ALWAYS_USE_ALIGNED_LOADS not yet supported"
#endif

// Load a vector from memory that may or may not be aligned, as
// decided by the offset off and the vector size
#if VECTORISE_ALWAYS_USE_UNALIGNED_LOADS
// Implementation: Always use unaligned load
#  define vec8_loadu_maybe(off,p)             (vec8_loadu(p))
#  define vec8_loadu_maybe3(off1,off2,off3,p) (vec8_loadu(p))
#else
// Statement expression (GNU extension): bind p_ once to avoid multiple
// evaluation, then pick the aligned load when the offset is a multiple of
// the vector size.
#  define vec8_loadu_maybe(off,p_)              \
  ({                                            \
    CCTK_REAL8 const& p__=(p_);                 \
    CCTK_REAL8 const& p=p__;                    \
    (off) % CCTK_REAL8_VEC_SIZE == 0 ?          \
      vec8_load(p) :                            \
      vec8_load_off1(p);                        \
  })
#  if VECTORISE_ALIGNED_ARRAYS
// Assume all array x sizes are multiples of the vector size
#    define vec8_loadu_maybe3(off1,off2,off3,p) \
  vec8_loadu_maybe(off1,p)
#  else
#    define vec8_loadu_maybe3(off1,off2,off3,p_)        \
  ({                                                    \
    CCTK_REAL8 const& p__=(p_);                         \
    CCTK_REAL8 const& p=p__;                            \
    ((off2) % CCTK_REAL8_VEC_SIZE != 0 or               \
     (off3) % CCTK_REAL8_VEC_SIZE != 0) ?               \
      vec8_loadu(p) :                                   \
      vec8_loadu_maybe(off1,p);                         \
  })
#  endif
#endif

// Store a vector to memory (aligned and non-temporal); this stores to
// a reference to a scalar
#define vec8_store(p,x)  (_mm256_store_pd(&(p),x))
#define vec8_storeu(p,x) (_mm256_storeu_pd(&(p),x))
#if ! VECTORISE_STREAMING_STORES
#  define vec8_store_nta(p,x) (vec8_store(p,x))
#else
#  define vec8_store_nta(p,x) (_mm256_stream_pd(&(p),x))
#endif

// Store a partial vector (aligned and non-temporal)
// NOTE(review): this macro is truncated at the end of the visible chunk —
// its definition continues past this point and is preserved as-is below.
#define vec8_store_partial_prepare(i,imin_,imax_)       \
  bool v8stp_all;                                       \
  __m256i v8stp_mask;                                   \
  ({                                                    \
    ptrdiff_t const imin__=(imin_);                     \
    ptrdiff_t const imin=imin__;                        \
    ptrdiff_t const imax__=(imax_);                     \
    ptrdiff_t const imax=imax__;                        \
                                                        \
    v8stp_all = i>=imin and i+CCTK_REAL_VEC_SIZE-1