/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "AVX only works in single or double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## s
#else
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## d
#endif

#define SIMD_SUFFIX  _avx  /* for renaming */
#define VL DS(2, 4)        /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__AVX__) /* sanity check */
#error "compiling simd-avx.h without -mavx"
#endif

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

#include <immintrin.h>

typedef DS(__m256d, __m256) V;
#define VADD SUFF(_mm256_add_p)
#define VSUB SUFF(_mm256_sub_p)
#define VMUL SUFF(_mm256_mul_p)
#define VXOR SUFF(_mm256_xor_p)
#define VSHUF SUFF(_mm256_shuffle_p)

#define SHUFVALD(fp0,fp1) \
   (((fp1) << 3) | ((fp0) << 2) | ((fp1) << 1) | ((fp0)))
#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))

#define VDUPL(x) DS(_mm256_unpacklo_pd(x, x), VSHUF(x, x, SHUFVALS(0, 0, 2, 2)))
#define VDUPH(x) DS(_mm256_unpackhi_pd(x, x), VSHUF(x, x, SHUFVALS(1, 1, 3, 3)))

#define VLIT(x0, x1) DS(_mm256_set_pd(x0, x1, x0, x1), _mm256_set_ps(x0, x1, x0, x1, x0, x1, x0, x1))
#define DVK(var, val) V var = VLIT(val, val)
#define LDK(x) x
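
/* Data layout note (added annotation): a V holds VL complex numbers in
   interleaved (re, im) order, e.g. a double-precision V is
   [re0, im0, re1, im1].  VDUPL broadcasts the real part across each
   pair and VDUPH the imaginary part:

       x        = [re0, im0, re1, im1]
       VDUPL(x) = [re0, re0, re1, re1]
       VDUPH(x) = [im0, im0, im1, im1]

   Single precision behaves the same way, with four pairs per vector.
   These feed the complex-multiply helpers further below. */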

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs; /* UNUSED */
     return SUFF(_mm256_loadu_p)(x);
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     SUFF(_mm256_storeu_p)(x, v);
}

#if FFTW_SINGLE

#define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#define LOADL(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
#define STOREH(addr, val) _mm_storeh_pi((__m64 *)(addr), val)
#define STOREL(addr, val) _mm_storel_pi((__m64 *)(addr), val)

/* it seems like the only AVX way to store 4 complex floats is to
   extract two pairs of complex floats into two __m128 registers, and
   then use SSE-like half-stores.  Similarly, to load 4 complex
   floats, we load two pairs of complex floats into two __m128
   registers, and then pack the two __m128 registers into one __m256
   value. */
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     __m128 l, h;
     V v;
     (void)aligned_like; /* UNUSED */
     l = LOADL(x, l);
     l = LOADH(x + ivs, l);
     h = LOADL(x + 2*ivs, h);
     h = LOADH(x + 3*ivs, h);
     v = _mm256_castps128_ps256(l);
     v = _mm256_insertf128_ps(v, h, 1);
     return v;
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     __m128 h = _mm256_extractf128_ps(v, 1);
     __m128 l = _mm256_castps256_ps128(v);
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
        after STOREH */
     STOREH(x + 3*ovs, h);
     STOREL(x + 2*ovs, h);
     STOREH(x + ovs, l);
     STOREL(x, l);
}

#define STM2(x, v, ovs, aligned_like) /* no-op */
static inline void STN2(R *x, V v0, V v1, INT ovs)
{
     V x0 = VSHUF(v0, v1, SHUFVALS(0, 1, 0, 1));
     V x1 = VSHUF(v0, v1, SHUFVALS(2, 3, 2, 3));
     __m128 h0 = _mm256_extractf128_ps(x0, 1);
     __m128 l0 = _mm256_castps256_ps128(x0);
     __m128 h1 = _mm256_extractf128_ps(x1, 1);
     __m128 l1 = _mm256_castps256_ps128(x1);
     *(__m128 *)(x + 3*ovs) = h1;
     *(__m128 *)(x + 2*ovs) = h0;
     *(__m128 *)(x + 1*ovs) = l1;
     *(__m128 *)(x + 0*ovs) = l0;
}

#define STM4(x, v, ovs, aligned_like) /* no-op */
#define STN4(x, v0, v1, v2, v3, ovs)                            \
{                                                               \
     V xxx0, xxx1, xxx2, xxx3;                                  \
     V yyy0, yyy1, yyy2, yyy3;                                  \
     xxx0 = _mm256_unpacklo_ps(v0, v2);                         \
     xxx1 = _mm256_unpackhi_ps(v0, v2);                         \
     xxx2 = _mm256_unpacklo_ps(v1, v3);                         \
     xxx3 = _mm256_unpackhi_ps(v1, v3);                         \
     yyy0 = _mm256_unpacklo_ps(xxx0, xxx2);                     \
     yyy1 = _mm256_unpackhi_ps(xxx0, xxx2);                     \
     yyy2 = _mm256_unpacklo_ps(xxx1, xxx3);                     \
     yyy3 = _mm256_unpackhi_ps(xxx1, xxx3);                     \
     *(__m128 *)(x + 0 * ovs) = _mm256_castps256_ps128(yyy0);   \
     *(__m128 *)(x + 4 * ovs) = _mm256_extractf128_ps(yyy0, 1); \
     *(__m128 *)(x + 1 * ovs) = _mm256_castps256_ps128(yyy1);   \
     *(__m128 *)(x + 5 * ovs) = _mm256_extractf128_ps(yyy1, 1); \
     *(__m128 *)(x + 2 * ovs) = _mm256_castps256_ps128(yyy2);   \
     *(__m128 *)(x + 6 * ovs) = _mm256_extractf128_ps(yyy2, 1); \
     *(__m128 *)(x + 3 * ovs) = _mm256_castps256_ps128(yyy3);   \
     *(__m128 *)(x + 7 * ovs) = _mm256_extractf128_ps(yyy3, 1); \
}
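
/* Added annotation: the unpack network in STN4 is the standard in-lane
   4x4 transpose.  Viewing v0..v3 as the rows of a 4x8 float matrix,
   the low half of yyy[j] holds column j and its high half holds column
   j+4, so the eight stores write column j to x + j*ovs; i.e. the j-th
   output row is { v0[j], v1[j], v2[j], v3[j] }. */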

#else
static inline __m128d VMOVAPD_LD(const R *x)
{
     /* gcc-4.6 miscompiles the combination _mm256_castpd128_pd256(VMOVAPD_LD(x))
        into a 256-bit vmovapd, which requires 32-byte alignment instead of
        16-byte alignment.

        Force the use of vmovapd via asm until compilers stabilize.
     */
#if defined(__GNUC__)
     __m128d var;
     __asm__("vmovapd %1, %0\n" : "=x"(var) : "m"(x[0]));
     return var;
#else
     return *(const __m128d *)x;
#endif
}

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
     var = _mm256_castpd128_pd256(VMOVAPD_LD(x));
     var = _mm256_insertf128_pd(var, *(const __m128d *)(x+ivs), 1);
     return var;
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon the store of the low
        part occurring after the store of the high part */
     *(__m128d *)(x + ovs) = _mm256_extractf128_pd(v, 1);
     *(__m128d *)x = _mm256_castpd256_pd128(v);
}


#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop */
#define STM4(x, v, ovs, aligned_like) /* no-op */

/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  Even though the comment
   was made about __m128 parameters, it appears to apply to __m256
   parameters as well. */
#define STN4(x, v0, v1, v2, v3, ovs)                                   \
{                                                                      \
     V xxx0, xxx1, xxx2, xxx3;                                         \
     xxx0 = _mm256_unpacklo_pd(v0, v1);                                \
     xxx1 = _mm256_unpackhi_pd(v0, v1);                                \
     xxx2 = _mm256_unpacklo_pd(v2, v3);                                \
     xxx3 = _mm256_unpackhi_pd(v2, v3);                                \
     STA(x, _mm256_permute2f128_pd(xxx0, xxx2, 0x20), 0, 0);           \
     STA(x + ovs, _mm256_permute2f128_pd(xxx1, xxx3, 0x20), 0, 0);     \
     STA(x + 2 * ovs, _mm256_permute2f128_pd(xxx0, xxx2, 0x31), 0, 0); \
     STA(x + 3 * ovs, _mm256_permute2f128_pd(xxx1, xxx3, 0x31), 0, 0); \
}
#endif

static inline V FLIP_RI(V x)
{
     return VSHUF(x, x,
                  DS(SHUFVALD(1, 0),
                     SHUFVALS(1, 0, 3, 2)));
}

static inline V VCONJ(V x)
{
     V pmpm = VLIT(-0.0, 0.0);
     return VXOR(pmpm, x);
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}

/* FMA support */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c)  VADD(VCONJ(b),c)
#define VFMSCONJ(b,c)  VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))
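
/* Added annotation: the VZMUL* helpers below multiply a vector of
   complex numbers sr, element by element, by a twiddle vector tx.
   Writing the twiddle as t = tr + i*ti and an element of sr as
   s = x + i*y, and recalling that VBYI(s) = i*s, expanding the
   definitions gives

       VZMUL(t, s)   = t * s        = (tr*x - ti*y) + i*(tr*y + ti*x)
       VZMULJ(t, s)  = conj(t) * s
       VZMULI(t, s)  = i * (t * s)
       VZMULIJ(t, s) = i * (conj(t) * s)

   For example, VZMUL forms tr*s with VMUL and then adds ti*(i*s) with
   VFMA, which is exactly the expansion above. */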

static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}

/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}
#else
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#endif
#define TWVL1 (VL)

static inline V BYTW1(const R *t, V sr)
{
     return VZMUL(LDA(t, 2, t), sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     return VZMULJ(LDA(t, 2, t), sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x)                                                       \
   {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},   \
   {TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
   {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}, \
   {TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}
#else
# define VTW2(v,x)                                                       \
   {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},   \
   {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#endif
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}

static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#define VTW3 VTW1
#define TWVL3 TWVL1

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x)                                                       \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},  \
  {TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x},  \
  {TW_SIN, v+4, x}, {TW_SIN, v+5, x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, x}
#else
# define VTWS(v,x)                                                       \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},  \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#endif
#define TWVLS (2 * VL)


/* Use VZEROUPPER to avoid the penalty of switching from AVX to SSE.
   See Intel Optimization Manual (April 2011, version 248966), Section
   11.3 */
#define VLEAVE _mm256_zeroupper

#include "simd-common.h"
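
/* Illustrative sketch only (not part of FFTW and never compiled): it
   shows how the primitives above compose to multiply a contiguous
   complex array in place by per-element twiddle factors.  The function
   name and loop structure are hypothetical; the real codelets are
   machine-generated and use BYTW1/BYTWJ1 (or BYTW2/BYTWJ2) together
   with the VTW* twiddle layouts instead. */
#if 0
static void twiddle_sketch(R *x, const R *t, INT n)
{
     INT i;
     for (i = 0; i < n; i += VL) {
          V s = LD(x + 2*i, 2, x);        /* load VL complex numbers */
          V w = LDA(t + 2*i, 2, t);       /* one twiddle per element */
          ST(x + 2*i, VZMUL(w, s), 2, x); /* x[i..] <- t[i..] * x[i..] */
     }
     VLEAVE();  /* vzeroupper before any subsequent SSE code */
}
#endif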