/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef FFTW_SINGLE
#error "NEON only works in single precision"
#endif

/* define these unconditionally, because they are used by
   taint.c which is compiled without neon */
#define SIMD_SUFFIX _neon  /* for renaming */
#define VL 2               /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__ARM_NEON__)
#error "compiling simd-neon.h requires -mfpu=neon or equivalent"
#endif

#include <arm_neon.h>

/* FIXME: I am not sure whether this code assumes little-endian
   ordering.  VLIT may or may not be wrong for big-endian systems. */
typedef float32x4_t V;

#define VLIT(x0, x1, x2, x3) {x0, x1, x2, x3}
#define LDK(x) x
#define DVK(var, val) const V var = VLIT(val, val, val, val)

/* NEON has FMA, but a three-operand FMA is not too useful
   for FFT purposes.  We normally compute

      t0=a+b*c
      t1=a-b*c

   In a three-operand instruction set this translates into

      t0=a
      t0+=b*c
      t1=a
      t1-=b*c

   At least one move must be implemented, negating the advantage of
   the FMA in the first place.  At least some versions of gcc generate
   both moves.  So we are better off generating t=b*c;t0=a+t;t1=a-t; */
#if HAVE_FMA
#warning "--enable-fma on NEON is probably a bad idea (see source code)"
#endif

#define VADD(a, b) vaddq_f32(a, b)
#define VSUB(a, b) vsubq_f32(a, b)
#define VMUL(a, b) vmulq_f32(a, b)
#define VFMA(a, b, c) vmlaq_f32(c, a, b)  /* a*b+c */
#define VFNMS(a, b, c) vmlsq_f32(c, a, b) /* FNMS=-(a*b-c) in powerpc terminology; MLS=c-a*b
                                             in ARM terminology */
#define VFMS(a, b, c) VSUB(VMUL(a, b), c) /* FMS=a*b-c in powerpc terminology; no equivalent
                                             arm instruction (?) */
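/* Example (illustrative only): using the macros above, the preferred form
   t=b*c; t0=a+t; t1=a-t from the FMA discussion reads

        V t  = VMUL(b, c);
        V t0 = VADD(a, t);
        V t1 = VSUB(a, t);

   instead of two fused multiply-accumulates, each of which would require
   an extra register move. */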
static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return vld1q_f32((const float32_t *)x);
}

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return vcombine_f32(vld1_f32((float32_t *)x), vld1_f32((float32_t *)(x + ivs)));
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     vst1q_f32((float32_t *)x, v);
}

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon store-low occurring
        after store-high */
     vst1_f32((float32_t *)(x + ovs), vget_high_f32(v));
     vst1_f32((float32_t *)x, vget_low_f32(v));
}

/* 2x2 complex transpose and store */
#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop */

/* store and 4x4 real transpose */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     vst1_lane_f32((float32_t *)(x)          , vget_low_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + ovs)    , vget_low_f32(v), 1);
     vst1_lane_f32((float32_t *)(x + 2 * ovs), vget_high_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + 3 * ovs), vget_high_f32(v), 1);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* use STM4 */

#define FLIP_RI(x) vrev64q_f32(x)

static inline V VCONJ(V x)
{
#if 1
     static const uint32x4_t pm = {0, 0x80000000u, 0, 0x80000000u};
     return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), pm));
#else
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VMUL(x, pm);
#endif
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}

static inline V VFMAI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0, -1.0, 1.0);
     return VFMA(FLIP_RI(b), mp, c);
}

static inline V VFNMSI(V b, V c)
{
     const V mp = VLIT(-1.0, 1.0, -1.0, 1.0);
     return VFNMS(FLIP_RI(b), mp, c);
}

static inline V VFMACONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFMA(b, pm, c);
}

static inline V VFNMSCONJ(V b, V c)
{
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFNMS(b, pm, c);
}

static inline V VFMSCONJ(V b, V c)
{
     return VSUB(VCONJ(b), c);
}

#if 1
#define VEXTRACT_REIM(tr, ti, tx)                                \
{                                                                \
     tr = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 0),       \
                       vdup_lane_f32(vget_high_f32(tx), 0));     \
     ti = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 1),       \
                       vdup_lane_f32(vget_high_f32(tx), 1));     \
}
#else
/* this alternative might be faster in an ideal world, but gcc likes
   to spill VVV onto the stack */
#define VEXTRACT_REIM(tr, ti, tx)              \
{                                              \
     float32x4x2_t vvv = vtrnq_f32(tx, tx);    \
     tr = vvv.val[0];                          \
     ti = vvv.val[1];                          \
}
#endif
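/* Example (illustrative only): with VL = 2 complex numbers per vector, a
   twiddle vector tx = {r0, i0, r1, i1} is split by VEXTRACT_REIM into

        tr = {r0, r0, r1, r1}
        ti = {i0, i0, i1, i1}

   The VZMUL family below combines these with sr = {a0, b0, a1, b1} and
   VBYI(sr) = {-b0, a0, -b1, a1}; e.g. VZMUL returns the complex products
   {r0*a0 - i0*b0, r0*b0 + i0*a0, r1*a1 - i1*b1, r1*b1 + i1*a1}. */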
static inline V VZMUL(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr, ti;
     VEXTRACT_REIM(tr, ti, tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}

/* twiddle storage #1: compact, slower */
#define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#define TWVL1 VL
static inline V BYTW1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMUL(tx, sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LDA(t, 2, 0);
     return VZMULJ(tx, sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
# define VTW2(v,x)                                                      \
   {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},  \
   {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t+2*VL, 2, 0);
     return VFMA(ti, si, VMUL(tr, sr));
}

static inline V BYTWJ2(const R *t, V sr)
{
     V si = FLIP_RI(sr);
     V tr = LDA(t, 2, 0), ti = LDA(t+2*VL, 2, 0);
     return VFNMS(ti, si, VMUL(tr, sr));
}

/* twiddle storage #3 */
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)

/* twiddle storage for split arrays */
# define VTWS(v,x)                                                        \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},   \
  {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"