annotate src/fftw-3.3.8/simd-support/simd-avx2-128.h @ 84:08ae793730bd

Add null config files
author Chris Cannam
date Mon, 02 Mar 2020 14:03:47 +0000
parents d0c2a83c1364
children
rev   line source
Chris@82 1 /*
Chris@82 2 * Copyright (c) 2003, 2007-14 Matteo Frigo
Chris@82 3 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
Chris@82 4 *
Chris@82 5 * 128-bit AVX2 support by Erik Lindahl, 2015.
Chris@82 6 * Erik Lindahl hereby places his modifications in the public domain.
Chris@82 7 *
Chris@82 8 * This program is free software; you can redistribute it and/or modify
Chris@82 9 * it under the terms of the GNU General Public License as published by
Chris@82 10 * the Free Software Foundation; either version 2 of the License, or
Chris@82 11 * (at your option) any later version.
Chris@82 12 *
Chris@82 13 * This program is distributed in the hope that it will be useful,
Chris@82 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@82 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@82 16 * GNU General Public License for more details.
Chris@82 17 *
Chris@82 18 * You should have received a copy of the GNU General Public License
Chris@82 19 * along with this program; if not, write to the Free Software
Chris@82 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@82 21 *
Chris@82 22 */
Chris@82 23
Chris@82 24 #if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
Chris@82 25 #error "AVX2 only works in single or double precision"
Chris@82 26 #endif
Chris@82 27
Chris@82 28 #ifdef FFTW_SINGLE
Chris@82 29 # define DS(d,s) s /* single-precision option */
Chris@82 30 # define SUFF(name) name ## s
Chris@82 31 #else
Chris@82 32 # define DS(d,s) d /* double-precision option */
Chris@82 33 # define SUFF(name) name ## d
Chris@82 34 #endif
Chris@82 35
Chris@82 36 #define SIMD_SUFFIX _avx2_128 /* for renaming */
Chris@82 37 #define VL DS(1,2) /* SIMD vector length, in term of complex numbers */
Chris@82 38 #define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
Chris@82 39 #define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK
Chris@82 40
Chris@82 41 #if defined(__GNUC__) && !defined(__AVX2__) /* sanity check */
Chris@82 42 #error "compiling simd-avx2-128.h without avx2 support"
Chris@82 43 #endif
Chris@82 44
Chris@82 45 #ifdef _MSC_VER
Chris@82 46 #ifndef inline
Chris@82 47 #define inline __inline
Chris@82 48 #endif
Chris@82 49 #endif
Chris@82 50
Chris@82 51 #include <immintrin.h>
Chris@82 52
Chris@82 53 typedef DS(__m128d,__m128) V;
Chris@82 54 #define VADD SUFF(_mm_add_p)
Chris@82 55 #define VSUB SUFF(_mm_sub_p)
Chris@82 56 #define VMUL SUFF(_mm_mul_p)
Chris@82 57 #define VXOR SUFF(_mm_xor_p)
Chris@82 58 #define SHUF SUFF(_mm_shuffle_p)
Chris@82 59 #define VPERM1 SUFF(_mm_permute_p)
Chris@82 60 #define UNPCKL SUFF(_mm_unpacklo_p)
Chris@82 61 #define UNPCKH SUFF(_mm_unpackhi_p)
Chris@82 62
Chris@82 63 #define SHUFVALS(fp0,fp1,fp2,fp3) \
Chris@82 64 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
Chris@82 65
Chris@82 66 #define VDUPL(x) DS(_mm_permute_pd(x,0), _mm_moveldup_ps(x))
Chris@82 67 #define VDUPH(x) DS(_mm_permute_pd(x,3), _mm_movehdup_ps(x))
Chris@82 68 #define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
Chris@82 69 #define LOADL(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
Chris@82 70 #define STOREH(a, v) DS(_mm_storeh_pd(a, v), _mm_storeh_pi((__m64 *)(a), v))
Chris@82 71 #define STOREL(a, v) DS(_mm_storel_pd(a, v), _mm_storel_pi((__m64 *)(a), v))
Chris@82 72
Chris@82 73 #define VLIT(x0, x1) DS(_mm_set_pd(x0, x1), _mm_set_ps(x0, x1, x0, x1))
Chris@82 74 #define DVK(var, val) V var = VLIT(val, val)
Chris@82 75 #define LDK(x) x
Chris@82 76
Chris@82 77 static inline V LDA(const R *x, INT ivs, const R *aligned_like)
Chris@82 78 {
Chris@82 79 (void)aligned_like; /* UNUSED */
Chris@82 80 (void)ivs; /* UNUSED */
Chris@82 81 return *(const V *)x;
Chris@82 82 }
Chris@82 83
Chris@82 84 static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
Chris@82 85 {
Chris@82 86 (void)aligned_like; /* UNUSED */
Chris@82 87 (void)ovs; /* UNUSED */
Chris@82 88 *(V *)x = v;
Chris@82 89 }
Chris@82 90
#ifdef FFTW_SINGLE

# ifdef _MSC_VER
/* Temporarily disable the warning "uninitialized local variable
   'name' used" and runtime checks for using a variable before it
   is defined, which are erroneously triggered by the LOADL /
   LOADH macros: each of them writes only half of its
   destination register. */
#  ifndef __INTEL_COMPILER
#   pragma warning(disable : 4700)
#   pragma runtime_checks("u", off)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(disable : 592)
# endif

/* Gather two complex floats, one from x and one from x + ivs,
   into { x[0], x[1], x[ivs], x[ivs+1] }. */
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     __m128 lo, hi;
     (void)aligned_like; /* UNUSED */
#if defined(__ICC) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)
     /* newer compilers provide _mm_undefined_ps(), which avoids
	formally reading an uninitialized variable */
     lo = LOADL(x, SUFF(_mm_undefined_p)());
     hi = LOADL(x + ivs, SUFF(_mm_undefined_p)());
#else
     lo = LOADL(x, lo);
     hi = LOADL(x + ivs, hi);
#endif
     return SUFF(_mm_movelh_p)(lo, hi);
}

# ifdef _MSC_VER
#  ifndef __INTEL_COMPILER
#   pragma warning(default : 4700)
#   pragma runtime_checks("u", restore)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(default : 592)
# endif

/* Scatter the two complex floats in v to x and x + ovs. */
static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
	after STOREH */
     STOREH(x + ovs, v);
     STOREL(x, v);
}

#else /* ! FFTW_SINGLE */
/* In double precision one vector is exactly one complex number,
   so the plain aligned load/store suffice. */
# define LD LDA
# define ST STA
#endif
Chris@82 144
Chris@82 145 #define STM2 DS(STA,ST)
Chris@82 146 #define STN2(x, v0, v1, ovs) /* nop */
Chris@82 147
Chris@82 148 #ifdef FFTW_SINGLE
Chris@82 149 # define STM4(x, v, ovs, aligned_like) /* no-op */
Chris@82 150 /* STN4 is a macro, not a function, thanks to Visual C++ developers
Chris@82 151 deciding "it would be infrequent that people would want to pass more
Chris@82 152 than 3 [__m128 parameters] by value." 3 parameters ought to be enough
Chris@82 153 for anybody. */
Chris@82 154 # define STN4(x, v0, v1, v2, v3, ovs) \
Chris@82 155 { \
Chris@82 156 V xxx0, xxx1, xxx2, xxx3; \
Chris@82 157 xxx0 = UNPCKL(v0, v2); \
Chris@82 158 xxx1 = UNPCKH(v0, v2); \
Chris@82 159 xxx2 = UNPCKL(v1, v3); \
Chris@82 160 xxx3 = UNPCKH(v1, v3); \
Chris@82 161 STA(x, UNPCKL(xxx0, xxx2), 0, 0); \
Chris@82 162 STA(x + ovs, UNPCKH(xxx0, xxx2), 0, 0); \
Chris@82 163 STA(x + 2 * ovs, UNPCKL(xxx1, xxx3), 0, 0); \
Chris@82 164 STA(x + 3 * ovs, UNPCKH(xxx1, xxx3), 0, 0); \
Chris@82 165 }
Chris@82 166 #else /* !FFTW_SINGLE */
Chris@82 167 static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
Chris@82 168 {
Chris@82 169 (void)aligned_like; /* UNUSED */
Chris@82 170 STOREL(x, v);
Chris@82 171 STOREH(x + ovs, v);
Chris@82 172 }
Chris@82 173 # define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
Chris@82 174 #endif
Chris@82 175
Chris@82 176 static inline V FLIP_RI(V x)
Chris@82 177 {
Chris@82 178 return VPERM1(x, DS(1, SHUFVALS(1, 0, 3, 2)));
Chris@82 179 }
Chris@82 180
Chris@82 181 static inline V VCONJ(V x)
Chris@82 182 {
Chris@82 183 /* Produce a SIMD vector[VL] of (0 + -0i).
Chris@82 184
Chris@82 185 We really want to write this:
Chris@82 186
Chris@82 187 V pmpm = VLIT(-0.0, 0.0);
Chris@82 188
Chris@82 189 but historically some compilers have ignored the distiction
Chris@82 190 between +0 and -0. It looks like 'gcc-8 -fast-math' treats -0
Chris@82 191 as 0 too.
Chris@82 192 */
Chris@82 193 union uvec {
Chris@82 194 unsigned u[4];
Chris@82 195 V v;
Chris@82 196 };
Chris@82 197 static const union uvec pmpm = {
Chris@82 198 #ifdef FFTW_SINGLE
Chris@82 199 { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
Chris@82 200 #else
Chris@82 201 { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }
Chris@82 202 #endif
Chris@82 203 };
Chris@82 204 return VXOR(pmpm.v, x);
Chris@82 205 }
Chris@82 206
Chris@82 207 static inline V VBYI(V x)
Chris@82 208 {
Chris@82 209 x = VCONJ(x);
Chris@82 210 x = FLIP_RI(x);
Chris@82 211 return x;
Chris@82 212 }
Chris@82 213
/* FMA support */
#define VFMA(a, b, c)  SUFF(_mm_fmadd_p)(a,b,c)   /*  a*b + c */
#define VFNMS(a, b, c) SUFF(_mm_fnmadd_p)(a,b,c)  /* -a*b + c */
#define VFMS(a, b, c)  SUFF(_mm_fmsub_p)(a,b,c)   /*  a*b - c */
/* addsub subtracts in even lanes and adds in odd lanes, which
   implements c +/- i*b style combinations directly. */
#define VFMAI(b, c)    SUFF(_mm_addsub_p)(c,FLIP_RI(b))
#define VFNMSI(b, c)   VSUB(c, VBYI(b))
#define VFMACONJ(b,c)  VADD(VCONJ(b),c)
#define VFMSCONJ(b,c)  VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) SUFF(_mm_addsub_p)(c,b)
Chris@82 223
Chris@82 224
Chris@82 225 static inline V VZMUL(V tx, V sr)
Chris@82 226 {
Chris@82 227 V tr = VDUPL(tx);
Chris@82 228 V ti = VDUPH(tx);
Chris@82 229 ti = VMUL(ti, FLIP_RI(sr));
Chris@82 230 return SUFF(_mm_fmaddsub_p)(tr,sr,ti);
Chris@82 231 }
Chris@82 232
Chris@82 233 static inline V VZMULJ(V tx, V sr)
Chris@82 234 {
Chris@82 235 V tr = VDUPL(tx);
Chris@82 236 V ti = VDUPH(tx);
Chris@82 237 ti = VMUL(ti, FLIP_RI(sr));
Chris@82 238 return SUFF(_mm_fmsubadd_p)(tr,sr,ti);
Chris@82 239 }
Chris@82 240
Chris@82 241 static inline V VZMULI(V tx, V sr)
Chris@82 242 {
Chris@82 243 V tr = VDUPL(tx);
Chris@82 244 V ti = VDUPH(tx);
Chris@82 245 ti = VMUL(ti, sr);
Chris@82 246 sr = VBYI(sr);
Chris@82 247 return VFMS(tr, sr, ti);
Chris@82 248 }
Chris@82 249
Chris@82 250 static inline V VZMULIJ(V tx, V sr)
Chris@82 251 {
Chris@82 252 V tr = VDUPL(tx);
Chris@82 253 V ti = VDUPH(tx);
Chris@82 254 tr = VMUL(tr, FLIP_RI(sr));
Chris@82 255 return SUFF(_mm_fmaddsub_p)(ti,sr,tr);
Chris@82 256 }
Chris@82 257
Chris@82 258 /* twiddle storage #1: compact, slower */
Chris@82 259 #ifdef FFTW_SINGLE
Chris@82 260 # define VTW1(v,x) \
Chris@82 261 {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
Chris@82 262 static inline V BYTW1(const R *t, V sr)
Chris@82 263 {
Chris@82 264 const V *twp = (const V *)t;
Chris@82 265 V tx = twp[0];
Chris@82 266 V tr = UNPCKL(tx, tx);
Chris@82 267 V ti = UNPCKH(tx, tx);
Chris@82 268 ti = VMUL(ti, FLIP_RI(sr));
Chris@82 269 return SUFF(_mm_fmaddsub_p)(tr,sr,ti);
Chris@82 270 }
Chris@82 271 static inline V BYTWJ1(const R *t, V sr)
Chris@82 272 {
Chris@82 273 const V *twp = (const V *)t;
Chris@82 274 V tx = twp[0];
Chris@82 275 V tr = UNPCKL(tx, tx);
Chris@82 276 V ti = UNPCKH(tx, tx);
Chris@82 277 ti = VMUL(ti, FLIP_RI(sr));
Chris@82 278 return SUFF(_mm_fmsubadd_p)(tr,sr,ti);
Chris@82 279 }
Chris@82 280 #else /* !FFTW_SINGLE */
Chris@82 281 # define VTW1(v,x) {TW_CEXP, v, x}
Chris@82 282 static inline V BYTW1(const R *t, V sr)
Chris@82 283 {
Chris@82 284 V tx = LD(t, 1, t);
Chris@82 285 return VZMUL(tx, sr);
Chris@82 286 }
Chris@82 287 static inline V BYTWJ1(const R *t, V sr)
Chris@82 288 {
Chris@82 289 V tx = LD(t, 1, t);
Chris@82 290 return VZMULJ(tx, sr);
Chris@82 291 }
Chris@82 292 #endif
Chris@82 293 #define TWVL1 (VL)
Chris@82 294
Chris@82 295 /* twiddle storage #2: twice the space, faster (when in cache) */
Chris@82 296 #ifdef FFTW_SINGLE
Chris@82 297 # define VTW2(v,x) \
Chris@82 298 {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
Chris@82 299 {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
Chris@82 300 #else /* !FFTW_SINGLE */
Chris@82 301 # define VTW2(v,x) \
Chris@82 302 {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
Chris@82 303 #endif
Chris@82 304 #define TWVL2 (2 * VL)
Chris@82 305 static inline V BYTW2(const R *t, V sr)
Chris@82 306 {
Chris@82 307 const V *twp = (const V *)t;
Chris@82 308 V si = FLIP_RI(sr);
Chris@82 309 V tr = twp[0], ti = twp[1];
Chris@82 310 return VFMA(tr, sr, VMUL(ti, si));
Chris@82 311 }
Chris@82 312 static inline V BYTWJ2(const R *t, V sr)
Chris@82 313 {
Chris@82 314 const V *twp = (const V *)t;
Chris@82 315 V si = FLIP_RI(sr);
Chris@82 316 V tr = twp[0], ti = twp[1];
Chris@82 317 return VFNMS(ti, si, VMUL(tr, sr));
Chris@82 318 }
Chris@82 319
/* twiddle storage #3 */
#ifdef FFTW_SINGLE
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)
#else
/* double precision: identical to storage #1 */
# define VTW3(v,x) VTW1(v,x)
# define TWVL3 TWVL1
#endif
Chris@82 328
Chris@82 329 /* twiddle storage for split arrays */
Chris@82 330 #ifdef FFTW_SINGLE
Chris@82 331 # define VTWS(v,x) \
Chris@82 332 {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
Chris@82 333 {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
Chris@82 334 #else
Chris@82 335 # define VTWS(v,x) \
Chris@82 336 {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
Chris@82 337 #endif
Chris@82 338 #define TWVLS (2 * VL)
Chris@82 339
Chris@82 340 #define VLEAVE() /* nothing */
Chris@82 341
Chris@82 342 #include "simd-common.h"