/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
# error "SSE/SSE2 only works in single/double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## s
#else
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## d
#endif

#define SIMD_SUFFIX _sse2  /* for renaming */
#define VL DS(1,2)         /* SIMD vector length, in terms of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(FFTW_SINGLE) && !defined(__SSE2__)
# error "compiling simd-sse2.h in double precision without -msse2"
#elif defined(__GNUC__) && defined(FFTW_SINGLE) && !defined(__SSE__)
# error "compiling simd-sse2.h in single precision without -msse"
#endif

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

/* some versions of glibc's sys/cdefs.h define __inline to be empty,
   which is wrong because emmintrin.h defines several inline
   procedures */
#ifndef _MSC_VER
#undef __inline
#endif

#ifdef FFTW_SINGLE
# include <xmmintrin.h>
#else
# include <emmintrin.h>
#endif

typedef DS(__m128d,__m128) V;
#define VADD SUFF(_mm_add_p)
#define VSUB SUFF(_mm_sub_p)
#define VMUL SUFF(_mm_mul_p)
#define VXOR SUFF(_mm_xor_p)
#define SHUF SUFF(_mm_shuffle_p)
#define UNPCKL SUFF(_mm_unpacklo_p)
#define UNPCKH SUFF(_mm_unpackhi_p)

#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))

#define VDUPL(x) DS(UNPCKL(x, x), SHUF(x, x, SHUFVALS(0, 0, 2, 2)))
#define VDUPH(x) DS(UNPCKH(x, x), SHUF(x, x, SHUFVALS(1, 1, 3, 3)))
#define STOREH(a, v) DS(_mm_storeh_pd(a, v), _mm_storeh_pi((__m64 *)(a), v))
#define STOREL(a, v) DS(_mm_storel_pd(a, v), _mm_storel_pi((__m64 *)(a), v))
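
/* Layout note (illustrative): a V packs VL complex numbers as adjacent
   (real, imag) pairs.  In single precision, a vector {r0, i0, r1, i1}
   gives VDUPL = {r0, r0, r1, r1} and VDUPH = {i0, i0, i1, i1}; in double
   precision a V holds one complex number {r0, i0} and VDUPL/VDUPH
   duplicate its real/imaginary part.  STOREL/STOREH write the low/high
   half of a V: one complex number each in single precision, the real
   and imaginary scalar respectively in double precision. */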
#ifdef __GNUC__
/*
 * gcc-3.3 generates slow code for mm_set_ps (write all elements to
 * the stack and load __m128 from the stack).
 *
 * gcc-3.[34] generates slow code for mm_set_ps1 (load into low element
 * and shuffle).
 *
 * This hack forces gcc to generate a constant __m128 at compile time.
 */
union rvec {
     R r[DS(2,4)];
     V v;
};

# ifdef FFTW_SINGLE
#  define DVK(var, val) V var = __extension__ ({ \
     static const union rvec _var = { {val,val,val,val} }; _var.v; })
# else
#  define DVK(var, val) V var = __extension__ ({ \
     static const union rvec _var = { {val,val} }; _var.v; })
# endif
# define LDK(x) x
#else
# define DVK(var, val) const R var = K(val)
# define LDK(x) DS(_mm_set1_pd,_mm_set_ps1)(x)
#endif

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs; /* UNUSED */
     return *(const V *)x;
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     *(V *)x = v;
}

#ifdef FFTW_SINGLE

# ifdef _MSC_VER
     /* Temporarily disable the warning "uninitialized local variable
        'name' used" and runtime checks for using a variable before it is
        defined which is erroneously triggered by the LOADL0 / LOADH macros
        as they only modify VAL partly each. */
#  ifndef __INTEL_COMPILER
#   pragma warning(disable : 4700)
#   pragma runtime_checks("u", off)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(disable : 592)
# endif

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
# ifdef __GNUC__
     /* We use inline asm because gcc-3.x generates slow code for
        _mm_loadh_pi().  gcc-3.x insists upon having an existing variable for
        VAL, which is however never used.  Thus, it generates code to move
        values in and out the variable.  Worse still, gcc-4.0 stores VAL on
        the stack, causing valgrind to complain about uninitialized reads. */
     __asm__("movlps %1, %0\n\tmovhps %2, %0"
             : "=x"(var) : "m"(x[0]), "m"(x[ivs]));
# else
#  define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#  define LOADL0(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
     var = LOADL0(x, var);
     var = LOADH(x + ivs, var);
# endif
     return var;
}

# ifdef _MSC_VER
#  ifndef __INTEL_COMPILER
#   pragma warning(default : 4700)
#   pragma runtime_checks("u", restore)
#  endif
# endif
# ifdef __INTEL_COMPILER
#  pragma warning(default : 592)
# endif

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
        after STOREH */
     STOREH(x + ovs, v);
     STOREL(x, v);
}
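
/* Illustrative example (hypothetical values; R is float here): in single
   precision LD gathers two complex numbers that lie ivs reals apart and
   ST scatters them back, e.g.

       float a[6] = {1, 2, 0, 0, 3, 4};
       V v = LD(a, 4, 0);   -- v = {1, 2, 3, 4}
       ST(a, v, 4, 0);      -- writes {1, 2} to a[0..1] and {3, 4} to a[4..5]
*/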
#else /* !FFTW_SINGLE */
# define LD LDA
# define ST STA
#endif

#define STM2 DS(STA,ST)
#define STN2(x, v0, v1, ovs) /* nop */

#ifdef FFTW_SINGLE
# define STM4(x, v, ovs, aligned_like) /* no-op */
/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  3 parameters ought to be enough
   for anybody. */
# define STN4(x, v0, v1, v2, v3, ovs)                   \
{                                                       \
     V xxx0, xxx1, xxx2, xxx3;                          \
     xxx0 = UNPCKL(v0, v2);                             \
     xxx1 = UNPCKH(v0, v2);                             \
     xxx2 = UNPCKL(v1, v3);                             \
     xxx3 = UNPCKH(v1, v3);                             \
     STA(x, UNPCKL(xxx0, xxx2), 0, 0);                  \
     STA(x + ovs, UNPCKH(xxx0, xxx2), 0, 0);            \
     STA(x + 2 * ovs, UNPCKL(xxx1, xxx3), 0, 0);        \
     STA(x + 3 * ovs, UNPCKH(xxx1, xxx3), 0, 0);        \
}
#else /* !FFTW_SINGLE */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     STOREL(x, v);
     STOREH(x + ovs, v);
}
# define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
#endif
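
/* Illustrative note: the single-precision STN4 is a 4x4 transpose of
   v0..v3 done with unpack instructions, so the row at x + j*ovs receives
   scalar element j of each vector, i.e. {v0[j], v1[j], v2[j], v3[j]}.
   In double precision the stores are done by the individual STM4 calls,
   so STN4 expands to nothing. */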
static inline V FLIP_RI(V x)
{
     return SHUF(x, x, DS(1, SHUFVALS(1, 0, 3, 2)));
}

static inline V VCONJ(V x)
{
     /* This will produce -0.0f (or -0.0d) even on broken
        compilers that do not distinguish +0.0 from -0.0.
        I bet some are still around. */
     union uvec {
          unsigned u[4];
          V v;
     };
     /* it looks like gcc-3.3.5 produces slow code unless PM is
        declared static. */
     static const union uvec pm = {
#ifdef FFTW_SINGLE
          { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
#else
          { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }
#endif
     };
     return VXOR(pm.v, x);
}

static inline V VBYI(V x)
{
     x = VCONJ(x);
     x = FLIP_RI(x);
     return x;
}

/* FMA support */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c)  VADD(VCONJ(b),c)
#define VFMSCONJ(b,c)  VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))

static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}
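
/* Illustrative note: VZMUL computes the complex product t*s.  With
   tr = VDUPL(tx) holding the real parts of t, ti = VDUPH(tx) the
   imaginary parts, and VBYI(sr) = i*s (each pair {sr, si} becomes
   {-si, sr}), VFMA(ti, VBYI(sr), VMUL(sr, tr)) yields
   {tr*sr - ti*si, tr*si + ti*sr}.  Likewise VZMULJ computes conj(t)*s,
   VZMULI computes i*(t*s), and VZMULIJ computes i*(conj(t)*s). */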
/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
static inline V BYTW1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}
#else /* !FFTW_SINGLE */
# define VTW1(v,x) {TW_CEXP, v, x}
static inline V BYTW1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMUL(tx, sr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMULJ(tx, sr);
}
#endif
#define TWVL1 (VL)

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x)                                                      \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},   \
  {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#else /* !FFTW_SINGLE */
# define VTW2(v,x) \
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
#endif
#define TWVL2 (2 * VL)
static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}
static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#ifdef FFTW_SINGLE
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)
#else
# define VTW3(v,x) VTW1(v,x)
# define TWVL3 TWVL1
#endif

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x)                                                      \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#else
# define VTWS(v,x) \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"