/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef FFTW_SINGLE
#error "NEON only works in single precision"
#endif

/* define these unconditionally, because they are used by
   taint.c, which is compiled without NEON */
#define SIMD_SUFFIX _neon       /* for renaming */
#define VL 2                    /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__ARM_NEON__)
#error "compiling simd-neon.h requires -mfpu=neon or equivalent"
#endif

#include <arm_neon.h>

/* FIXME: I am not sure whether this code assumes little-endian
   ordering.  VLIT may or may not be wrong for big-endian systems. */
typedef float32x4_t V;

#define VLIT(x0, x1, x2, x3) {x0, x1, x2, x3}
#define LDK(x) x
#define DVK(var, val) const V var = VLIT(val, val, val, val)

/* NEON has FMA, but a three-operand FMA is not too useful
   for FFT purposes.  We normally compute

        t0 = a + b * c
        t1 = a - b * c

   In a three-operand instruction set this translates into

        t0 = a
        t0 += b * c
        t1 = a
        t1 -= b * c

   At least one move must be implemented, negating the advantage of
   the FMA in the first place.  At least some versions of gcc generate
   both moves.  So we are better off generating t = b*c; t0 = a+t; t1 = a-t. */
#if HAVE_FMA
#warning "--enable-fma on NEON is probably a bad idea (see source code)"
#endif

#define VADD(a, b) vaddq_f32(a, b)
#define VSUB(a, b) vsubq_f32(a, b)
#define VMUL(a, b) vmulq_f32(a, b)
#define VFMA(a, b, c) vmlaq_f32(c, a, b)   /* a*b + c */
#define VFNMS(a, b, c) vmlsq_f32(c, a, b)  /* FNMS = -(a*b - c) in PowerPC terminology;
                                              MLS = c - a*b in ARM terminology */
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)  /* FMS = a*b - c in PowerPC terminology; no
                                              equivalent ARM instruction (?) */
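
/* Illustrative sketch of the strategy described above: one shared
   multiply followed by an add and a subtract.  The function name and
   operands are hypothetical and the block is kept under #if 0 so it
   does not affect the header. */
#if 0
static inline void example_butterfly(V a, V b, V c, V *t0, V *t1)
{
     V t = VMUL(b, c);    /* t = b*c, computed once */
     *t0 = VADD(a, t);    /* t0 = a + b*c */
     *t1 = VSUB(a, t);    /* t1 = a - b*c */
}
#endif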
static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return vld1q_f32((const float32_t *)x);
}

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return vcombine_f32(vld1_f32((float32_t *)x), vld1_f32((float32_t *)(x + ivs)));
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     vst1q_f32((float32_t *)x, v);
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon store-low occurring
        after store-high */
     vst1_f32((float32_t *)(x + ovs), vget_high_f32(v));
     vst1_f32((float32_t *)x, vget_low_f32(v));
}

/* 2x2 complex transpose and store */
#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop */

/* store and 4x4 real transpose */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     vst1_lane_f32((float32_t *)(x)          , vget_low_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + ovs)    , vget_low_f32(v), 1);
     vst1_lane_f32((float32_t *)(x + 2 * ovs), vget_high_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + 3 * ovs), vget_high_f32(v), 1);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* use STM4 */

#define FLIP_RI(x) vrev64q_f32(x)

static inline V VCONJ(V x)
{
#if 1
     static const uint32x4_t pm = {0, 0x80000000u, 0, 0x80000000u};
     return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), pm));
#else
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VMUL(x, pm);
#endif
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}

static inline V VFMAI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0, -1.0, 1.0);
     return VFMA(FLIP_RI(b), mp, c);
}

static inline V VFNMSI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0, -1.0, 1.0);
     return VFNMS(FLIP_RI(b), mp, c);
}

static inline V VFMACONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFMA(b, pm, c);
}

static inline V VFNMSCONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFNMS(b, pm, c);
}

static inline V VFMSCONJ(V b, V c)
{
     return VSUB(VCONJ(b), c);
}

#if 1
#define VEXTRACT_REIM(tr, ti, tx)                                  \
     {                                                             \
          tr = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 0),    \
                            vdup_lane_f32(vget_high_f32(tx), 0));  \
          ti = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 1),    \
                            vdup_lane_f32(vget_high_f32(tx), 1));  \
     }
#else
/* this alternative might be faster in an ideal world, but gcc likes
   to spill VVV onto the stack */
#define VEXTRACT_REIM(tr, ti, tx)                  \
     {                                             \
          float32x4x2_t vvv = vtrnq_f32(tx, tx);   \
          tr = vvv.val[0];                         \
          ti = vvv.val[1];                         \
     }
#endif
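
/* Complex multiplication helpers.  Each V holds VL = 2 complex numbers
   laid out as (r0, i0, r1, i1).  The VZMUL* routines below use the
   decomposition

        t * s = Re(t)*s + Im(t)*(i*s)

   where VEXTRACT_REIM broadcasts Re(t) and Im(t) across each complex
   slot and VBYI(s) computes i*s: VZMUL yields t*s, VZMULJ yields
   conj(t)*s, VZMULI yields i*t*s, and VZMULIJ yields i*conj(t)*s. */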
static inline V VZMUL(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}

/* twiddle storage #1: compact, slower */
#define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#define TWVL1 VL
static inline V BYTW1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMUL(tx, sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMULJ(tx, sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
# define VTW2(v,x)                                                          \
     {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},    \
     {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t + 2 * VL, 2, 0);
     return VFMA(ti, si, VMUL(tr, sr));
}

static inline V BYTWJ2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t + 2 * VL, 2, 0);
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)

/* twiddle storage for split arrays */
# define VTWS(v,x)                                                          \
     {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},  \
     {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"
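
/* Usage sketch (hypothetical; nothing in the library relies on it):
   applying a contiguous, interleaved twiddle table to a strided complex
   array with the primitives above.  `n' is assumed even to match VL = 2,
   and `ivs' is the stride in reals between consecutive complex elements. */
#if 0
static void example_apply_twiddles(R *x, const R *w, INT n, INT ivs)
{
     INT i;
     for (i = 0; i < n; i += 2) {
          V s = LD(x + i * ivs, ivs, 0);  /* two complex elements of x */
          V t = LDA(w + 2 * i, 2, 0);     /* two interleaved twiddles */
          ST(x + i * ivs, VZMUL(t, s), ivs, 0);
     }
}
#endif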