annotate src/fftw-3.3.5/simd-support/simd-avx2-128.h @ 168:ceec0dd9ec9c

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam <cannam@all-day-breakfast.com>
date Fri, 07 Feb 2020 11:51:13 +0000
parents 7867fa7e1b6b
children
/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * 128-bit AVX2 support by Erik Lindahl, 2015.
 * Erik Lindahl hereby places his modifications in the public domain.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "AVX2 only works in single or double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## s
#else
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## d
#endif

#define SIMD_SUFFIX _avx2_128 /* for renaming */
#define VL DS(1,2) /* SIMD vector length, in terms of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__AVX2__) /* sanity check */
#error "compiling simd-avx2-128.h without avx2 support"
#endif

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

#include <immintrin.h>

typedef DS(__m128d,__m128) V;
#define VADD SUFF(_mm_add_p)
#define VSUB SUFF(_mm_sub_p)
#define VMUL SUFF(_mm_mul_p)
#define VXOR SUFF(_mm_xor_p)
#define SHUF SUFF(_mm_shuffle_p)
#define VPERM1 SUFF(_mm_permute_p)
#define UNPCKL SUFF(_mm_unpacklo_p)
#define UNPCKH SUFF(_mm_unpackhi_p)

#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
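/* SHUFVALS packs four 2-bit lane selectors into the 8-bit immediate expected
   by SHUF/VPERM1; fp0 occupies the low two bits and therefore controls the
   lowest result lane */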

#define VDUPL(x) DS(_mm_permute_pd(x,0), _mm_moveldup_ps(x))
#define VDUPH(x) DS(_mm_permute_pd(x,3), _mm_movehdup_ps(x))
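/* VDUPL broadcasts the real (low/even) element of each complex pair to both
   slots of that pair; VDUPH does the same with the imaginary (high/odd)
   element */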
#define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#define LOADL(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
#define STOREH(a, v) DS(_mm_storeh_pd(a, v), _mm_storeh_pi((__m64 *)(a), v))
#define STOREL(a, v) DS(_mm_storel_pd(a, v), _mm_storel_pi((__m64 *)(a), v))

#define VLIT(x0, x1) DS(_mm_set_pd(x0, x1), _mm_set_ps(x0, x1, x0, x1))
#define DVK(var, val) V var = VLIT(val, val)
#define LDK(x) x
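/* note: _mm_set_pd/_mm_set_ps list elements from high to low, so
   VLIT(x0, x1) places x1 in the real (low) slot and x0 in the imaginary
   (high) slot of each complex pair */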

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs; /* UNUSED */
     return *(const V *)x;
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     *(V *)x = v;
}

#ifdef FFTW_SINGLE

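/* LD gathers two single-precision complex numbers, one from x and one from
   x + ivs, into a single vector; with newer compilers the merge operand is
   _mm_undefined_ps() (via SUFF) so no uninitialized variable is read */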
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     __m128 l0, l1;
     (void)aligned_like; /* UNUSED */
#if defined(__ICC) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)
     l0 = LOADL(x, SUFF(_mm_undefined_p)());
     l1 = LOADL(x + ivs, SUFF(_mm_undefined_p)());
#else
     l0 = LOADL(x, l0);
     l1 = LOADL(x + ivs, l1);
#endif
     return SUFF(_mm_movelh_p)(l0,l1);
}

#  ifdef _MSC_VER
#    pragma warning(default : 4700)
#    pragma runtime_checks("u", restore)
#  endif

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
        after STOREH */
     STOREH(x + ovs, v);
     STOREL(x, v);
}

#else /* ! FFTW_SINGLE */
# define LD LDA
# define ST STA
#endif

#define STM2 DS(STA,ST)
#define STN2(x, v0, v1, ovs) /* nop */

#ifdef FFTW_SINGLE
# define STM4(x, v, ovs, aligned_like) /* no-op */
/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  3 parameters ought to be enough
   for anybody. */
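/* the UNPCKL/UNPCKH sequence below is a standard 4x4 transpose: output row k,
   stored at x + k*ovs, collects lane k from each of the four input vectors */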
# define STN4(x, v0, v1, v2, v3, ovs)               \
{                                                   \
     V xxx0, xxx1, xxx2, xxx3;                      \
     xxx0 = UNPCKL(v0, v2);                         \
     xxx1 = UNPCKH(v0, v2);                         \
     xxx2 = UNPCKL(v1, v3);                         \
     xxx3 = UNPCKH(v1, v3);                         \
     STA(x, UNPCKL(xxx0, xxx2), 0, 0);              \
     STA(x + ovs, UNPCKH(xxx0, xxx2), 0, 0);        \
     STA(x + 2 * ovs, UNPCKL(xxx1, xxx3), 0, 0);    \
     STA(x + 3 * ovs, UNPCKH(xxx1, xxx3), 0, 0);    \
}
#else /* !FFTW_SINGLE */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     STOREL(x, v);
     STOREH(x + ovs, v);
}
# define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
#endif

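/* FLIP_RI swaps the real and imaginary slot of each complex pair, VCONJ
   negates the imaginary slots (complex conjugate), and VBYI(x) combines the
   two to compute i*x */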
static inline V FLIP_RI(V x)
{
     return VPERM1(x, DS(1, SHUFVALS(1, 0, 3, 2)));
}

static inline V VCONJ(V x)
{
     V pmpm = VLIT(-0.0, 0.0);
     return VXOR(pmpm, x);
}

static inline V VBYI(V x)
{
     x = VCONJ(x);
     x = FLIP_RI(x);
     return x;
}

/* FMA support */
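/* conventions: VFMA(a,b,c) = a*b + c, VFMS(a,b,c) = a*b - c,
   VFNMS(a,b,c) = c - a*b, VFMAI(b,c) = c + i*b, VFNMSI(b,c) = c - i*b,
   VFMACONJ(b,c) = conj(b) + c, VFMSCONJ(b,c) = conj(b) - c,
   VFNMSCONJ(b,c) = c - conj(b) */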
#define VFMA(a, b, c) SUFF(_mm_fmadd_p)(a,b,c)
#define VFNMS(a, b, c) SUFF(_mm_fnmadd_p)(a,b,c)
#define VFMS(a, b, c) SUFF(_mm_fmsub_p)(a,b,c)
#define VFMAI(b, c) SUFF(_mm_addsub_p)(c,FLIP_RI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c) VADD(VCONJ(b),c)
#define VFMSCONJ(b,c) VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) SUFF(_mm_addsub_p)(c,b)

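/* complex multiplication helpers: VZMUL(t,s) = t*s, VZMULJ(t,s) = conj(t)*s,
   VZMULI(t,s) = i*t*s, VZMULIJ(t,s) = i*conj(t)*s, built from VDUPL/VDUPH
   plus fmaddsub/fmsubadd so the real and imaginary lanes receive the opposite
   signs required by the complex product */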
static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_fmaddsub_p)(tr,sr,ti);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_fmsubadd_p)(tr,sr,ti);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(tr, FLIP_RI(sr));
     return SUFF(_mm_fmaddsub_p)(ti,sr,tr);
}

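/* the VTW* macros describe how twiddle factors are laid out in memory for
   each storage scheme, and the BYTWn/BYTWJn helpers multiply sr by the stored
   twiddle (respectively by its conjugate) loaded from t */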
/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
static inline V BYTW1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_fmaddsub_p)(tr,sr,ti);
}
static inline V BYTWJ1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_fmsubadd_p)(tr,sr,ti);
}
#else /* !FFTW_SINGLE */
# define VTW1(v,x) {TW_CEXP, v, x}
static inline V BYTW1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMUL(tx, sr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMULJ(tx, sr);
}
#endif
#define TWVL1 (VL)

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x) \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
  {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#else /* !FFTW_SINGLE */
# define VTW2(v,x) \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
#endif
#define TWVL2 (2 * VL)
static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}
static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#ifdef FFTW_SINGLE
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)
#else
# define VTW3(v,x) VTW1(v,x)
# define TWVL3 TWVL1
#endif

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#else
# define VTWS(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"