/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * Double-precision support added by Romain Dolbeau.
 * Romain Dolbeau hereby places his modifications in the public domain.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if !defined(FFTW_SINGLE) && !defined(__aarch64__)
#error "NEON only works in single precision on 32-bit ARM"
#endif
#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "NEON only works in single or double precision"
#endif

#ifdef FFTW_SINGLE
#  define DS(d,s) s /* single-precision option */
#  define SUFF(name) name ## _f32
#else
#  define DS(d,s) d /* double-precision option */
#  define SUFF(name) name ## _f64
#endif

/* define these unconditionally, because they are used by
   taint.c, which is compiled without NEON */
#define SIMD_SUFFIX _neon /* for renaming */
#define VL DS(1,2) /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__ARM_NEON__) && !defined(__ARM_NEON)
#error "compiling simd-neon.h requires -mfpu=neon or equivalent"
#endif

#include <arm_neon.h>

/* FIXME: I am not sure whether this code assumes little-endian
   ordering.  VLIT may or may not be wrong for big-endian systems. */
typedef DS(float64x2_t, float32x4_t) V;

#ifdef FFTW_SINGLE
#  define VLIT(x0, x1) {x0, x1, x0, x1}
#else
#  define VLIT(x0, x1) {x0, x1}
#endif
#define LDK(x) x
#define DVK(var, val) const V var = VLIT(val, val)

/* NEON has FMA, but a three-operand FMA is not too useful
   for FFT purposes.  We normally compute

      t0 = a + b * c
      t1 = a - b * c

   In a three-operand instruction set this translates into

      t0 = a
      t0 += b * c
      t1 = a
      t1 -= b * c

   At least one move must be implemented, negating the advantage of
   the FMA in the first place.  At least some versions of gcc generate
   both moves.  So we are better off generating t = b * c; t0 = a + t;
   t1 = a - t. */
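/* For example, with the arithmetic macros defined below, the
   computation above becomes

      V t  = VMUL(b, c);
      V t0 = VADD(a, t);
      V t1 = VSUB(a, t);

   so the product is formed once and no extra register move is
   needed. */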
#if ARCH_PREFERS_FMA
#warning "--enable-fma on NEON is probably a bad idea (see source code)"
#endif

#define VADD(a, b) SUFF(vaddq)(a, b)
#define VSUB(a, b) SUFF(vsubq)(a, b)
#define VMUL(a, b) SUFF(vmulq)(a, b)
#define VFMA(a, b, c) SUFF(vmlaq)(c, a, b)  /* a*b+c */
#define VFNMS(a, b, c) SUFF(vmlsq)(c, a, b) /* FNMS = -(a*b-c) in powerpc terminology;
                                               MLS = c-a*b in ARM terminology */
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)   /* FMS = a*b-c in powerpc terminology;
                                               no equivalent ARM instruction (?) */

#define STOREH(a, v) SUFF(vst1)((a), SUFF(vget_high)(v))
#define STOREL(a, v) SUFF(vst1)((a), SUFF(vget_low)(v))

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return SUFF(vld1q)(x);
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     SUFF(vst1q)(x, v);
}

#ifdef FFTW_SINGLE
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return SUFF(vcombine)(SUFF(vld1)(x), SUFF(vld1)(x + ivs));
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon store-low occurring
        after store-high */
     STOREH(x + ovs, v);
     STOREL(x, v);
}
#else /* !FFTW_SINGLE */
#  define LD LDA
#  define ST STA
#endif

/* 2x2 complex transpose and store */
#define STM2 DS(STA,ST)
#define STN2(x, v0, v1, ovs) /* nop */

#ifdef FFTW_SINGLE
/* store and 4x4 real transpose */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     SUFF(vst1_lane)(x,           SUFF(vget_low)(v), 0);
     SUFF(vst1_lane)(x + ovs,     SUFF(vget_low)(v), 1);
     SUFF(vst1_lane)(x + 2 * ovs, SUFF(vget_high)(v), 0);
     SUFF(vst1_lane)(x + 3 * ovs, SUFF(vget_high)(v), 1);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* use STM4 */
#else /* !FFTW_SINGLE */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     STOREL(x, v);
     STOREH(x + ovs, v);
}
#  define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
#endif

#ifdef FFTW_SINGLE
#define FLIP_RI(x) SUFF(vrev64q)(x)
#else
/* FIXME */
#define FLIP_RI(x) SUFF(vcombine)(SUFF(vget_high)(x), SUFF(vget_low)(x))
#endif
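/* For example, in single precision a vector holds two complex numbers
   laid out as {r0, i0, r1, i1}.  FLIP_RI (vrev64q_f32) swaps each
   real/imaginary pair, giving {i0, r0, i1, r1}; VCONJ below XORs the
   sign bit of each imaginary lane, giving {r0, -i0, r1, -i1}; and
   VBYI(x) = FLIP_RI(VCONJ(x)) multiplies each complex lane by i,
   giving {-i0, r0, -i1, r1}. */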
static inline V VCONJ(V x)
{
#ifdef FFTW_SINGLE
     static const uint32x4_t pm = {0, 0x80000000u, 0, 0x80000000u};
     return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), pm));
#else
     static const uint64x2_t pm = {0, 0x8000000000000000ull};
     /* gcc-4.9.2 still does not include vreinterpretq_f64_u64, but
        simple casts generate the correct assembly. */
     return (float64x2_t)(veorq_u64((uint64x2_t)(x), (uint64x2_t)(pm)));
#endif
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}

static inline V VFMAI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0);
     return VFMA(FLIP_RI(b), mp, c);
}

static inline V VFNMSI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0);
     return VFNMS(FLIP_RI(b), mp, c);
}

static inline V VFMACONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0);
     return VFMA(b, pm, c);
}

static inline V VFNMSCONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0);
     return VFNMS(b, pm, c);
}

static inline V VFMSCONJ(V b, V c)
{
     return VSUB(VCONJ(b), c);
}

#ifdef FFTW_SINGLE
#if 1
#define VEXTRACT_REIM(tr, ti, tx)                                       \
     {                                                                  \
          tr = SUFF(vcombine)(SUFF(vdup_lane)(SUFF(vget_low)(tx), 0),   \
                              SUFF(vdup_lane)(SUFF(vget_high)(tx), 0)); \
          ti = SUFF(vcombine)(SUFF(vdup_lane)(SUFF(vget_low)(tx), 1),   \
                              SUFF(vdup_lane)(SUFF(vget_high)(tx), 1)); \
     }
#else
/* this alternative might be faster in an ideal world, but gcc likes
   to spill vvv onto the stack */
#define VEXTRACT_REIM(tr, ti, tx)                                       \
     {                                                                  \
          float32x4x2_t vvv = SUFF(vtrnq)(tx, tx);                      \
          tr = vvv.val[0];                                              \
          ti = vvv.val[1];                                              \
     }
#endif
#else
#define VEXTRACT_REIM(tr, ti, tx)                                       \
     {                                                                  \
          tr = SUFF(vtrn1q)(tx, tx);                                    \
          ti = SUFF(vtrn2q)(tx, tx);                                    \
     }
#endif

static inline V VZMUL(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}
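/* For illustration: VZMUL(t, s) computes the complex product t*s in
   each complex lane.  VEXTRACT_REIM splits the twiddle t into
   tr = {t.r, t.r, ...} and ti = {t.i, t.i, ...}; then

      VMUL(s, tr) = {s.r*t.r, s.i*t.r, ...}
      VBYI(s)     = {-s.i, s.r, ...}                       (i * s)
      VFMA(ti, VBYI(s), VMUL(s, tr))
                  = {t.r*s.r - t.i*s.i, t.r*s.i + t.i*s.r, ...}

   which is (t.r + i*t.i) * (s.r + i*s.i).  Likewise VZMULJ computes
   conj(t)*s, VZMULI computes i*t*s, and VZMULIJ computes i*conj(t)*s.
   The twiddle-storage variants below (BYTW1, BYTW2, ...) differ only
   in how the twiddle factors t are laid out in memory. */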
/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
#define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#else
#define VTW1(v,x) {TW_CEXP, v, x}
#endif
#define TWVL1 VL

static inline V BYTW1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMUL(tx, sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMULJ(tx, sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
#  define VTW2(v,x)                                                      \
     {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
     {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#else
#  define VTW2(v,x)                                                      \
     {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
#endif
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t + 2 * VL, 2, 0);
     return VFMA(ti, si, VMUL(tr, sr));
}

static inline V BYTWJ2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t + 2 * VL, 2, 0);
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
#ifdef FFTW_SINGLE
#  define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#else
#  define VTW3(v,x) {TW_CEXP, v, x}
#endif
#define TWVL3 (VL)

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
#  define VTWS(v,x)                                                        \
     {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
     {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#else
#  define VTWS(v,x)                                                        \
     {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"