annotate src/fftw-3.3.8/simd-support/simd-avx-128-fma.h @ 84:08ae793730bd

Add null config files
author Chris Cannam
date Mon, 02 Mar 2020 14:03:47 +0000
parents d0c2a83c1364
children
/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * 128-bit AVX support by Erik Lindahl, 2015.
 * Erik Lindahl hereby places his modifications in the public domain.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "AVX only works in single or double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## s
#else
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## d
#endif
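
/* For example, SUFF(_mm_add_p) expands to _mm_add_ps in single precision
   and to _mm_add_pd in double precision; DS(1,2) evaluates to 2 and 1,
   respectively. */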

#define SIMD_SUFFIX _avx_128_fma /* for renaming */
#define VL DS(1,2) /* SIMD vector length, in terms of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

#include <immintrin.h>
#ifdef _MSC_VER
# include <intrin.h>
#elif defined (__GNUC__)
# include <x86intrin.h>
#endif

#if !(defined(__AVX__) && defined(__FMA4__)) /* sanity check */
#error "compiling simd-avx-128-fma.h without -mavx or -mfma4"
#endif

typedef DS(__m128d,__m128) V;
#define VADD SUFF(_mm_add_p)
#define VSUB SUFF(_mm_sub_p)
#define VMUL SUFF(_mm_mul_p)
#define VXOR SUFF(_mm_xor_p)
#define SHUF SUFF(_mm_shuffle_p)
#define VPERM1 SUFF(_mm_permute_p)
#define UNPCKL SUFF(_mm_unpacklo_p)
#define UNPCKH SUFF(_mm_unpackhi_p)

#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
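
/* VDUPL/VDUPH broadcast the real (low) and imaginary (high) part of each
   complex element across its pair of slots.  LOADL/LOADH fill the low/high
   64-bit half of a vector from memory (used by the single-precision LD
   below); STOREL/STOREH store the low/high half. */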
#define VDUPL(x) DS(_mm_permute_pd(x,0), _mm_moveldup_ps(x))
#define VDUPH(x) DS(_mm_permute_pd(x,3), _mm_movehdup_ps(x))
#define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#define LOADL(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
#define STOREH(a, v) DS(_mm_storeh_pd(a, v), _mm_storeh_pi((__m64 *)(a), v))
#define STOREL(a, v) DS(_mm_storel_pd(a, v), _mm_storel_pi((__m64 *)(a), v))

#define VLIT(x0, x1) DS(_mm_set_pd(x0, x1), _mm_set_ps(x0, x1, x0, x1))
#define DVK(var, val) V var = VLIT(val, val)
#define LDK(x) x
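
/* aligned load/store of one full SIMD vector; the stride arguments and
   aligned_like hints are unused */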
static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs; /* UNUSED */
     return *(const V *)x;
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     *(V *)x = v;
}

#ifdef FFTW_SINGLE
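
/* A single-precision vector holds two complex numbers, so LD gathers the
   complex number at x into the low half and the one at x + ivs into the
   high half.  Newer compilers (first branch below) start from
   _mm_undefined_ps(); older ones fall back to reading the
   not-yet-initialized variable. */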
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
#if defined(__ICC) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)
     var = LOADL(x, SUFF(_mm_undefined_p)());
     var = LOADH(x + ivs, var);
#else
     var = LOADL(x, var);
     var = LOADH(x + ivs, var);
#endif
     return var;
}

# ifdef _MSC_VER
#  pragma warning(default : 4700)
#  pragma runtime_checks("u", restore)
# endif

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
        after STOREH */
     STOREH(x + ovs, v);
     STOREL(x, v);
}

#else /* ! FFTW_SINGLE */
# define LD LDA
# define ST STA
#endif

#define STM2 DS(STA,ST)
#define STN2(x, v0, v1, ovs) /* nop */

#ifdef FFTW_SINGLE
# define STM4(x, v, ovs, aligned_like) /* no-op */
/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  3 parameters ought to be enough
   for anybody. */
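/* STN4 treats v0..v3 as the rows of a 4x4 matrix of floats and stores its
   columns to x, x + ovs, x + 2*ovs and x + 3*ovs via two rounds of unpack
   operations. */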
# define STN4(x, v0, v1, v2, v3, ovs)                   \
  {                                                     \
       V xxx0, xxx1, xxx2, xxx3;                        \
       xxx0 = UNPCKL(v0, v2);                           \
       xxx1 = UNPCKH(v0, v2);                           \
       xxx2 = UNPCKL(v1, v3);                           \
       xxx3 = UNPCKH(v1, v3);                           \
       STA(x, UNPCKL(xxx0, xxx2), 0, 0);                \
       STA(x + ovs, UNPCKH(xxx0, xxx2), 0, 0);          \
       STA(x + 2 * ovs, UNPCKL(xxx1, xxx3), 0, 0);      \
       STA(x + 3 * ovs, UNPCKH(xxx1, xxx3), 0, 0);      \
  }
#else /* !FFTW_SINGLE */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     STOREL(x, v);
     STOREH(x + ovs, v);
}
# define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
#endif
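
/* swap the real and imaginary part of each complex element */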
static inline V FLIP_RI(V x)
{
     return VPERM1(x, DS(1, SHUFVALS(1, 0, 3, 2)));
}


static inline V VCONJ(V x)
{
     /* Produce a SIMD vector[VL] of (0 + -0i).

        We really want to write this:

           V pmpm = VLIT(-0.0, 0.0);

        but historically some compilers have ignored the distinction
        between +0 and -0.  It looks like 'gcc-8 -ffast-math' treats -0
        as 0 too.
     */
     union uvec {
          unsigned u[4];
          V v;
     };
     static const union uvec pmpm = {
#ifdef FFTW_SINGLE
          { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
#else
          { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }
#endif
     };
     return VXOR(pmpm.v, x);
}
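
/* multiply by +i: VBYI(a + b*i) = -b + a*i */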
static inline V VBYI(V x)
{
     x = VCONJ(x);
     x = FLIP_RI(x);
     return x;
}

/* FMA support */
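/* VFMA(a,b,c) = a*b + c, VFMS(a,b,c) = a*b - c and VFNMS(a,b,c) = c - a*b,
   via the FMA4 macc/msub/nmacc intrinsics.  The complex helpers below are
   built from addsub (subtract in the real slot, add in the imaginary slot),
   VBYI and VCONJ: VFMAI(b,c) = c + i*b, VFNMSI(b,c) = c - i*b,
   VFMACONJ(b,c) = conj(b) + c, VFMSCONJ(b,c) = conj(b) - c and
   VFNMSCONJ(b,c) = c - conj(b). */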
#define VFMA(a, b, c) SUFF(_mm_macc_p)(a,b,c)
#define VFNMS(a, b, c) SUFF(_mm_nmacc_p)(a,b,c)
#define VFMS(a, b, c) SUFF(_mm_msub_p)(a,b,c)
#define VFMAI(b, c) SUFF(_mm_addsub_p)(c,FLIP_RI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c) VADD(VCONJ(b),c)
#define VFMSCONJ(b,c) VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) SUFF(_mm_addsub_p)(c,b)
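
/* Complex multiplication by a twiddle-like factor tx: broadcast the real
   and imaginary parts of tx, multiply by sr and FLIP_RI(sr), and recombine
   with addsub.  VZMUL(tx,sr) = tx*sr, VZMULJ(tx,sr) = conj(tx)*sr,
   VZMULI(tx,sr) = i*tx*sr and VZMULIJ(tx,sr) = i*conj(tx)*sr. */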
static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(tr, sr);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_addsub_p)(tr,ti);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     tr = VMUL(tr, FLIP_RI(sr));
     return SUFF(_mm_addsub_p)(ti,tr);
}

/* twiddle storage #1: compact, slower */
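/* In single precision each twiddle vector stores the cosines of two
   consecutive twiddle factors followed by their sines, which BYTW1/BYTWJ1
   unpack before the complex multiply; in double precision the twiddle is a
   plain complex number handled with VZMUL/VZMULJ. */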
#ifdef FFTW_SINGLE
# define VTW1(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
static inline V BYTW1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     ti = VMUL(ti, FLIP_RI(sr));
     return SUFF(_mm_addsub_p)(tr,ti);
}
static inline V BYTWJ1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}
#else /* !FFTW_SINGLE */
# define VTW1(v,x) {TW_CEXP, v, x}
static inline V BYTW1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMUL(tx, sr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMULJ(tx, sr);
}
#endif
#define TWVL1 (VL)

/* twiddle storage #2: twice the space, faster (when in cache) */
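/* Cosines and sines are stored pre-duplicated, with the sine negated in the
   real slot, so BYTW2/BYTWJ2 reduce to one FLIP_RI, one multiply and a
   single FMA-family operation. */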
#ifdef FFTW_SINGLE
# define VTW2(v,x) \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
  {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#else /* !FFTW_SINGLE */
# define VTW2(v,x) \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
#endif
#define TWVL2 (2 * VL)
static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}
static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#ifdef FFTW_SINGLE
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)
#else
# define VTW3(v,x) VTW1(v,x)
# define TWVL3 TWVL1
#endif

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#else
# define VTWS(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"