/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "AVX only works in single or double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## s
#else
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## d
#endif

#define SIMD_SUFFIX _avx /* for renaming */
#define VL DS(2, 4)      /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__AVX__) /* sanity check */
#error "compiling simd-avx.h without -mavx"
#endif

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

#include <immintrin.h>

typedef DS(__m256d, __m256) V;
#define VADD SUFF(_mm256_add_p)
#define VSUB SUFF(_mm256_sub_p)
#define VMUL SUFF(_mm256_mul_p)
#define VXOR SUFF(_mm256_xor_p)
#define VSHUF SUFF(_mm256_shuffle_p)

#define SHUFVALD(fp0,fp1) \
   (((fp1) << 3) | ((fp0) << 2) | ((fp1) << 1) | ((fp0)))
#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))

#define VDUPL(x) DS(_mm256_unpacklo_pd(x, x), VSHUF(x, x, SHUFVALS(0, 0, 2, 2)))
#define VDUPH(x) DS(_mm256_unpackhi_pd(x, x), VSHUF(x, x, SHUFVALS(1, 1, 3, 3)))

#define VLIT(x0, x1) DS(_mm256_set_pd(x0, x1, x0, x1), _mm256_set_ps(x0, x1, x0, x1, x0, x1, x0, x1))
#define DVK(var, val) V var = VLIT(val, val)
#define LDK(x) x

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs;          /* UNUSED */
     return SUFF(_mm256_loadu_p)(x);
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs;          /* UNUSED */
     SUFF(_mm256_storeu_p)(x, v);
}
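/* Illustrative sketch, never compiled here (guarded by #if 0): a V holds
   VL complex numbers stored interleaved as {r0, i0, r1, i1, ...}.  The
   standalone program below shows, for the double-precision case, that
   VDUPL duplicates the real part of each complex slot and VDUPH the
   imaginary part, using the same intrinsics the macros above expand to.
   Build it separately with -mavx if you want to check the lane order. */
#if 0
#include <stdio.h>
#include <immintrin.h>

int main(void)
{
     /* two complex doubles, (1 + 2i) and (3 + 4i), interleaved in memory */
     double z[4] = { 1.0, 2.0, 3.0, 4.0 };
     double out[4];
     __m256d v = _mm256_loadu_pd(z);

     /* VDUPL(v) in double precision: duplicated real parts -> {1, 1, 3, 3} */
     _mm256_storeu_pd(out, _mm256_unpacklo_pd(v, v));
     printf("VDUPL: %g %g %g %g\n", out[0], out[1], out[2], out[3]);

     /* VDUPH(v): duplicated imaginary parts -> {2, 2, 4, 4} */
     _mm256_storeu_pd(out, _mm256_unpackhi_pd(v, v));
     printf("VDUPH: %g %g %g %g\n", out[0], out[1], out[2], out[3]);
     return 0;
}
#endif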
#if FFTW_SINGLE

# ifdef _MSC_VER
  /* Temporarily disable the warning "uninitialized local variable
     'name' used" and the runtime checks for using a variable before it
     is defined, which are erroneously triggered by the LOADL / LOADH
     macros below, since each of them writes only half of its
     destination value. */
#  ifndef __INTEL_COMPILER
#    pragma warning(disable : 4700)
#    pragma runtime_checks("u", off)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(disable : 592)
# endif

#define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#define LOADL(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
#define STOREH(addr, val) _mm_storeh_pi((__m64 *)(addr), val)
#define STOREL(addr, val) _mm_storel_pi((__m64 *)(addr), val)

/* it seems like the only AVX way to store 4 complex floats is to
   extract two pairs of complex floats into two __m128 registers, and
   then use SSE-like half-stores.  Similarly, to load 4 complex
   floats, we load two pairs of complex floats into two __m128
   registers, and then pack the two __m128 registers into one __m256
   value. */
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     __m128 l, h;
     V v;
     (void)aligned_like; /* UNUSED */
     l = LOADL(x, l);
     l = LOADH(x + ivs, l);
     h = LOADL(x + 2*ivs, h);
     h = LOADH(x + 3*ivs, h);
     v = _mm256_castps128_ps256(l);
     v = _mm256_insertf128_ps(v, h, 1);
     return v;
}

# ifdef _MSC_VER
#  ifndef __INTEL_COMPILER
#    pragma warning(default : 4700)
#    pragma runtime_checks("u", restore)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(default : 592)
# endif

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     __m128 h = _mm256_extractf128_ps(v, 1);
     __m128 l = _mm256_castps256_ps128(v);
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
        after STOREH */
     STOREH(x + 3*ovs, h);
     STOREL(x + 2*ovs, h);
     STOREH(x + ovs, l);
     STOREL(x, l);
}
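/* Illustrative sketch, never compiled here (guarded by #if 0): a plain
   scalar model of the single-precision LD/ST above, spelling out which
   memory locations land in which lanes of a V.  LD gathers four complex
   floats spaced ivs floats apart; ST scatters them back out ovs floats
   apart, high element first.  The names ld_ref/st_ref are invented for
   this sketch and are not used by FFTW. */
#if 0
static void ld_ref(float v[8], const float *x, int ivs)
{
     int k;
     for (k = 0; k < 4; ++k) {        /* four complex floats per vector */
          v[2*k]     = x[k*ivs];      /* real part */
          v[2*k + 1] = x[k*ivs + 1];  /* imaginary part */
     }
}

static void st_ref(float *x, const float v[8], int ovs)
{
     int k;
     for (k = 3; k >= 0; --k) {       /* high element first, as in ST */
          x[k*ovs]     = v[2*k];
          x[k*ovs + 1] = v[2*k + 1];
     }
}
#endif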
#define STM2(x, v, ovs, aligned_like) /* no-op */
static inline void STN2(R *x, V v0, V v1, INT ovs)
{
     V x0 = VSHUF(v0, v1, SHUFVALS(0, 1, 0, 1));
     V x1 = VSHUF(v0, v1, SHUFVALS(2, 3, 2, 3));
     __m128 h0 = _mm256_extractf128_ps(x0, 1);
     __m128 l0 = _mm256_castps256_ps128(x0);
     __m128 h1 = _mm256_extractf128_ps(x1, 1);
     __m128 l1 = _mm256_castps256_ps128(x1);

     *(__m128 *)(x + 3*ovs) = h1;
     *(__m128 *)(x + 2*ovs) = h0;
     *(__m128 *)(x + 1*ovs) = l1;
     *(__m128 *)(x + 0*ovs) = l0;
}

#define STM4(x, v, ovs, aligned_like) /* no-op */
#define STN4(x, v0, v1, v2, v3, ovs)                            \
{                                                               \
     V xxx0, xxx1, xxx2, xxx3;                                  \
     V yyy0, yyy1, yyy2, yyy3;                                  \
     xxx0 = _mm256_unpacklo_ps(v0, v2);                         \
     xxx1 = _mm256_unpackhi_ps(v0, v2);                         \
     xxx2 = _mm256_unpacklo_ps(v1, v3);                         \
     xxx3 = _mm256_unpackhi_ps(v1, v3);                         \
     yyy0 = _mm256_unpacklo_ps(xxx0, xxx2);                     \
     yyy1 = _mm256_unpackhi_ps(xxx0, xxx2);                     \
     yyy2 = _mm256_unpacklo_ps(xxx1, xxx3);                     \
     yyy3 = _mm256_unpackhi_ps(xxx1, xxx3);                     \
     *(__m128 *)(x + 0 * ovs) = _mm256_castps256_ps128(yyy0);   \
     *(__m128 *)(x + 4 * ovs) = _mm256_extractf128_ps(yyy0, 1); \
     *(__m128 *)(x + 1 * ovs) = _mm256_castps256_ps128(yyy1);   \
     *(__m128 *)(x + 5 * ovs) = _mm256_extractf128_ps(yyy1, 1); \
     *(__m128 *)(x + 2 * ovs) = _mm256_castps256_ps128(yyy2);   \
     *(__m128 *)(x + 6 * ovs) = _mm256_extractf128_ps(yyy2, 1); \
     *(__m128 *)(x + 3 * ovs) = _mm256_castps256_ps128(yyy3);   \
     *(__m128 *)(x + 7 * ovs) = _mm256_extractf128_ps(yyy3, 1); \
}

#else
static inline __m128d VMOVAPD_LD(const R *x)
{
     /* gcc-4.6 miscompiles the combination _mm256_castpd128_pd256(VMOVAPD_LD(x))
        into a 256-bit vmovapd, which requires 32-byte alignment instead of
        16-byte alignment.

        Force the use of vmovapd via asm until compilers stabilize. */
#if defined(__GNUC__)
     __m128d var;
     __asm__("vmovapd %1, %0\n" : "=x"(var) : "m"(x[0]));
     return var;
#else
     return *(const __m128d *)x;
#endif
}

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
     var = _mm256_castpd128_pd256(VMOVAPD_LD(x));
     var = _mm256_insertf128_pd(var, *(const __m128d *)(x+ivs), 1);
     return var;
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon the store of the low
        part occurring after the store of the high part */
     *(__m128d *)(x + ovs) = _mm256_extractf128_pd(v, 1);
     *(__m128d *)x = _mm256_castpd256_pd128(v);
}


#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop */
#define STM4(x, v, ovs, aligned_like) /* no-op */

/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  Even though the comment
   was made about __m128 parameters, it appears to apply to __m256
   parameters as well. */
#define STN4(x, v0, v1, v2, v3, ovs)                                   \
{                                                                      \
     V xxx0, xxx1, xxx2, xxx3;                                         \
     xxx0 = _mm256_unpacklo_pd(v0, v1);                                \
     xxx1 = _mm256_unpackhi_pd(v0, v1);                                \
     xxx2 = _mm256_unpacklo_pd(v2, v3);                                \
     xxx3 = _mm256_unpackhi_pd(v2, v3);                                \
     STA(x,           _mm256_permute2f128_pd(xxx0, xxx2, 0x20), 0, 0); \
     STA(x + ovs,     _mm256_permute2f128_pd(xxx1, xxx3, 0x20), 0, 0); \
     STA(x + 2 * ovs, _mm256_permute2f128_pd(xxx0, xxx2, 0x31), 0, 0); \
     STA(x + 3 * ovs, _mm256_permute2f128_pd(xxx1, xxx3, 0x31), 0, 0); \
}
#endif
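/* Illustrative sketch, never compiled here (guarded by #if 0): both STN4
   variants perform a transpose, writing element j of each of the four
   input vectors contiguously at x + j*ovs.  The scalar model below shows
   the double-precision case (four scalars per vector); the single-precision
   version scatters across eight output rows in the same way.  The name
   stn4_ref is invented for this sketch. */
#if 0
static void stn4_ref(double *x, const double v0[4], const double v1[4],
                     const double v2[4], const double v3[4], int ovs)
{
     int j;
     for (j = 0; j < 4; ++j) {
          x[j*ovs + 0] = v0[j];
          x[j*ovs + 1] = v1[j];
          x[j*ovs + 2] = v2[j];
          x[j*ovs + 3] = v3[j];
     }
}
#endif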
static inline V FLIP_RI(V x)
{
     return VSHUF(x, x,
                  DS(SHUFVALD(1, 0),
                     SHUFVALS(1, 0, 3, 2)));
}

static inline V VCONJ(V x)
{
     /* Produce a SIMD vector[VL] of (0 + -0i).

        We really want to write this:

           V pmpm = VLIT(-0.0, 0.0);

        but historically some compilers have ignored the distinction
        between +0 and -0.  It looks like 'gcc-8 -ffast-math' treats -0
        as 0 too.
     */
     union uvec {
          unsigned u[8];
          V v;
     };
     static const union uvec pmpm = {
#ifdef FFTW_SINGLE
          { 0x00000000, 0x80000000, 0x00000000, 0x80000000,
            0x00000000, 0x80000000, 0x00000000, 0x80000000 }
#else
          { 0x00000000, 0x00000000, 0x00000000, 0x80000000,
            0x00000000, 0x00000000, 0x00000000, 0x80000000 }
#endif
     };
     return VXOR(pmpm.v, x);
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}

/* FMA support */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c)  VADD(VCONJ(b),c)
#define VFMSCONJ(b,c)  VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))

static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}
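/* Illustrative sketch, never compiled here (guarded by #if 0): a scalar
   model of VZMUL for one complex element.  VZMUL(t, s) forms the complex
   product t*s by splitting t into duplicated real (VDUPL) and imaginary
   (VDUPH) parts and using VBYI to turn {sr, si} into {-si, sr}; VZMULJ
   computes conj(t)*s the same way, while VZMULI and VZMULIJ additionally
   multiply the result by i.  The name vzmul_ref is invented for this
   sketch. */
#if 0
static void vzmul_ref(const double t[2], const double s[2], double out[2])
{
     double tr = t[0], ti = t[1];   /* t = tr + i*ti */
     double sr = s[0], si = s[1];   /* s = sr + i*si */
     out[0] = tr*sr - ti*si;        /* real part of t*s */
     out[1] = tr*si + ti*sr;        /* imaginary part of t*s */
}
#endif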
/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}
#else
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#endif
#define TWVL1 (VL)

static inline V BYTW1(const R *t, V sr)
{
     return VZMUL(LDA(t, 2, t), sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     return VZMULJ(LDA(t, 2, t), sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x)                                                         \
   {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},     \
   {TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
   {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x},   \
   {TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}
#else
# define VTW2(v,x)                                                         \
   {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},     \
   {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#endif
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}

static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#define VTW3 VTW1
#define TWVL3 TWVL1

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x)                                                        \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},   \
  {TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x},   \
  {TW_SIN, v+4, x}, {TW_SIN, v+5, x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, x}
#else
# define VTWS(v,x)                                                        \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},   \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#endif
#define TWVLS (2 * VL)


/* Use VZEROUPPER to avoid the penalty of switching from AVX to SSE.
   See Intel Optimization Manual (April 2011, version 248966), Section
   11.3 */
#define VLEAVE _mm256_zeroupper

#include "simd-common.h"