annotate src/fftw-3.3.3/simd-support/simd-neon.h @ 23:619f715526df sv_v2.1

Update Vamp plugin SDK to 2.5
author Chris Cannam
date Thu, 09 May 2013 10:52:46 +0100
parents 37bf6b4a2645
children
rev   line source
Chris@10 1 /*
Chris@10 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
Chris@10 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
Chris@10 4 *
Chris@10 5 * This program is free software; you can redistribute it and/or modify
Chris@10 6 * it under the terms of the GNU General Public License as published by
Chris@10 7 * the Free Software Foundation; either version 2 of the License, or
Chris@10 8 * (at your option) any later version.
Chris@10 9 *
Chris@10 10 * This program is distributed in the hope that it will be useful,
Chris@10 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@10 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@10 13 * GNU General Public License for more details.
Chris@10 14 *
Chris@10 15 * You should have received a copy of the GNU General Public License
Chris@10 16 * along with this program; if not, write to the Free Software
Chris@10 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@10 18 *
Chris@10 19 */
Chris@10 20
#ifndef FFTW_SINGLE
#error "NEON only works in single precision"
#endif

/* define these unconditionally, because they are used by
   taint.c which is compiled without neon */
#define SIMD_SUFFIX _neon  /* for renaming */
#define VL 2               /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(__ARM_NEON__)
#error "compiling simd-neon.h requires -mfpu=neon or equivalent"
#endif

#include <arm_neon.h>

/* FIXME: I am not sure whether this code assumes little-endian
   ordering.  VLIT may or may not be wrong for big-endian systems. */
/* one V holds VL = 2 complex numbers, interleaved as (r0, i0, r1, i1);
   the odd-lane sign mask in VCONJ depends on this layout */
typedef float32x4_t V;

#define VLIT(x0, x1, x2, x3) {x0, x1, x2, x3}  /* 4-lane vector literal */
#define LDK(x) x                               /* "load constant": no-op on NEON */
#define DVK(var, val) const V var = VLIT(val, val, val, val) /* splat scalar constant */
Chris@10 45
/* NEON has FMA, but a three-operand FMA is not too useful
   for FFT purposes.  We normally compute

      t0=a+b*c
      t1=a-b*c

   In a three-operand instruction set this translates into

      t0=a
      t0+=b*c
      t1=a
      t1-=b*c

   At least one move must be implemented, negating the advantage of
   the FMA in the first place.  At least some versions of gcc generate
   both moves.  So we are better off generating t=b*c;t0=a+t;t1=a-t;*/
#if HAVE_FMA
#warning "--enable-fma on NEON is probably a bad idea (see source code)"
#endif

/* elementwise vector arithmetic on 4 floats at a time */
#define VADD(a, b) vaddq_f32(a, b)
#define VSUB(a, b) vsubq_f32(a, b)
#define VMUL(a, b) vmulq_f32(a, b)
/* note the operand rotation: vmlaq_f32(c, a, b) computes c + a*b */
#define VFMA(a, b, c) vmlaq_f32(c, a, b)  /* a*b+c */
#define VFNMS(a, b, c) vmlsq_f32(c, a, b) /* FNMS=-(a*b-c) in powerpc terminology; MLS=c-a*b
					     in ARM terminology */
#define VFMS(a, b, c) VSUB(VMUL(a, b), c) /* FMS=a*b-c in powerpc terminology; no equivalent
					     arm instruction (?) */
Chris@10 74
/* Load one aligned SIMD vector (VL = 2 complexes, 4 floats) from x.
   ivs and aligned_like belong to the common simd-support interface
   but are not needed for an aligned full-vector load. */
static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void) ivs;          /* UNUSED */
     (void) aligned_like; /* UNUSED */
     return vld1q_f32((const float32_t *)x);
}
Chris@10 80
/* Load two complex numbers (2 floats each) located ivs reals apart,
   packing them into one SIMD vector: low half from x, high half from
   x + ivs.  Fix: the casts no longer strip const from x (the original
   cast to plain float32_t *, inconsistent with LDA). */
static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     return vcombine_f32(vld1_f32((const float32_t *)x),
			 vld1_f32((const float32_t *)(x + ivs)));
}
Chris@10 86
/* Store one aligned SIMD vector (4 floats) to x.
   ovs and aligned_like belong to the common simd-support interface
   but are not needed for an aligned full-vector store. */
static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) ovs;          /* UNUSED */
     (void) aligned_like; /* UNUSED */
     vst1q_f32((float32_t *)x, v);
}
Chris@10 92
/* Store the SIMD vector v as two complex numbers: low half to x,
   high half to x + ovs.  The high half is deliberately written
   first — do not reorder these stores. */
static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon store-low occurring
	after store-high */
     vst1_f32((float32_t *)(x + ovs), vget_high_f32(v));
     vst1_f32((float32_t *)x, vget_low_f32(v));
}
Chris@10 101
/* 2x2 complex transpose and store */
#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop: STM2 (== ST) already writes both halves */
Chris@10 105
/* store and 4x4 real transpose */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void) aligned_like; /* UNUSED */
     /* scatter the four lanes of v to x, x+ovs, x+2*ovs, x+3*ovs */
     vst1_lane_f32((float32_t *)(x)          , vget_low_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + ovs)    , vget_low_f32(v), 1);
     vst1_lane_f32((float32_t *)(x + 2 * ovs), vget_high_f32(v), 0);
     vst1_lane_f32((float32_t *)(x + 3 * ovs), vget_high_f32(v), 1);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* use STM4 */
Chris@10 116
/* swap the real and imaginary part of each complex number:
   (r0, i0, r1, i1) -> (i0, r0, i1, r1) */
#define FLIP_RI(x) vrev64q_f32(x)

/* complex conjugate: negate the imaginary (odd-index) lanes */
static inline V VCONJ(V x)
{
#if 1
     /* flip the sign bit of lanes 1 and 3 via an integer xor */
     static const uint32x4_t pm = {0, 0x80000000u, 0, 0x80000000u};
     return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), pm));
#else
     /* alternative: multiply by (1, -1, 1, -1) */
     const V pm = VLIT(1.0, -1.0, 1.0, -1.0);
     return VMUL(x, pm);
#endif
}
Chris@10 129
/* multiply each complex number by i: (a + bi) -> (-b + ai),
   implemented as conjugate followed by a re/im swap */
static inline V VBYI(V x)
{
     V xc = VCONJ(x);
     return FLIP_RI(xc);
}
Chris@10 134
/* compute c + i*b: swap b's re/im parts, then fused
   multiply-add with the alternating sign vector */
static inline V VFMAI(V b, V c)
{
     const V msign = VLIT(-1.0, 1.0, -1.0, 1.0);
     V bswap = FLIP_RI(b);
     return VFMA(bswap, msign, c);
}
Chris@10 140
/* compute c - i*b: swap b's re/im parts, then fused
   negated multiply-subtract with the alternating sign vector */
static inline V VFNMSI(V b, V c)
{
     const V msign = VLIT(-1.0, 1.0, -1.0, 1.0);
     V bswap = FLIP_RI(b);
     return VFNMS(bswap, msign, c);
}
Chris@10 146
/* compute conj(b) + c, with the conjugation expressed as a
   multiply by (1, -1, 1, -1) so it fuses into the FMA */
static inline V VFMACONJ(V b, V c)
{
     const V sign = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFMA(b, sign, c);
}
Chris@10 152
/* compute c - conj(b), with the conjugation expressed as a
   multiply by (1, -1, 1, -1) so it fuses into the FNMS */
static inline V VFNMSCONJ(V b, V c)
{
     const V sign = VLIT(1.0, -1.0, 1.0, -1.0);
     return VFNMS(b, sign, c);
}
Chris@10 158
/* compute conj(b) - c */
static inline V VFMSCONJ(V b, V c)
{
     V bc = VCONJ(b);
     return VSUB(bc, c);
}
Chris@10 163
/* VEXTRACT_REIM(tr, ti, tx): for tx = (r0, i0, r1, i1), set
   tr = (r0, r0, r1, r1) and ti = (i0, i0, i1, i1) */
#if 1
#define VEXTRACT_REIM(tr, ti, tx)                                 \
     {                                                            \
	  tr = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 0),   \
			    vdup_lane_f32(vget_high_f32(tx), 0)); \
	  ti = vcombine_f32(vdup_lane_f32(vget_low_f32(tx), 1),   \
			    vdup_lane_f32(vget_high_f32(tx), 1)); \
     }
#else
/* this alternative might be faster in an ideal world, but gcc likes
   to spill VVV onto the stack */
#define VEXTRACT_REIM(tr, ti, tx)                                 \
     {                                                            \
	  float32x4x2_t vvv = vtrnq_f32(tx, tx);                  \
	  tr = vvv.val[0];                                        \
	  ti = vvv.val[1];                                        \
     }
#endif
Chris@10 182
/* elementwise complex multiplication tx * sr
   (tx is typically a twiddle factor) */
static inline V VZMUL(V tx, V sr)
{
     V txre, txim, prod, sr_rot;
     VEXTRACT_REIM(txre, txim, tx);
     prod = VMUL(sr, txre);       /* Re(tx) * sr */
     sr_rot = VBYI(sr);           /* i * sr */
     return VFMA(txim, sr_rot, prod);
}
Chris@10 191
/* elementwise complex multiplication conj(tx) * sr */
static inline V VZMULJ(V tx, V sr)
{
     V txre, txim, prod, sr_rot;
     VEXTRACT_REIM(txre, txim, tx);
     prod = VMUL(sr, txre);       /* Re(tx) * sr */
     sr_rot = VBYI(sr);           /* i * sr */
     return VFNMS(txim, sr_rot, prod);
}
Chris@10 200
/* elementwise complex multiplication i * tx * sr */
static inline V VZMULI(V tx, V sr)
{
     V txre, txim, prod, sr_rot;
     VEXTRACT_REIM(txre, txim, tx);
     prod = VMUL(txim, sr);       /* Im(tx) * sr */
     sr_rot = VBYI(sr);           /* i * sr */
     return VFMS(txre, sr_rot, prod);
}
Chris@10 209
/* elementwise complex multiplication i * conj(tx) * sr */
static inline V VZMULIJ(V tx, V sr)
{
     V txre, txim, prod, sr_rot;
     VEXTRACT_REIM(txre, txim, tx);
     prod = VMUL(txim, sr);       /* Im(tx) * sr */
     sr_rot = VBYI(sr);           /* i * sr */
     return VFMA(txre, sr_rot, prod);
}
Chris@10 218
/* twiddle storage #1: compact, slower
   (one interleaved complex exponential per vector element) */
#define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#define TWVL1 VL
/* multiply sr by the twiddle factors at t (storage scheme #1) */
static inline V BYTW1(const R *t, V sr)
{
     V w = LDA(t, 2, 0);
     return VZMUL(w, sr);
}
Chris@10 227
/* multiply sr by the conjugated twiddle factors at t (scheme #1) */
static inline V BYTWJ1(const R *t, V sr)
{
     V w = LDA(t, 2, 0);
     return VZMULJ(w, sr);
}
Chris@10 233
/* twiddle storage #2: twice the space, faster (when in cache)
   (cosines and sines stored in separate, pre-duplicated vectors) */
# define VTW2(v,x)                                                          \
	{TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
	{TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#define TWVL2 (2 * VL)
Chris@10 239
/* multiply sr by the twiddle factors at t (storage scheme #2):
   result = wr*sr + wi*FLIP_RI(sr) */
static inline V BYTW2(const R *t, V sr)
{
     V wr, wi, sflip, prod;
     wr = LDA(t, 2, 0);
     wi = LDA(t + 2 * VL, 2, 0);
     sflip = FLIP_RI(sr);
     prod = VMUL(wr, sr);
     return VFMA(wi, sflip, prod);
}
Chris@10 246
/* multiply sr by the conjugated twiddle factors at t (scheme #2):
   result = wr*sr - wi*FLIP_RI(sr) */
static inline V BYTWJ2(const R *t, V sr)
{
     V wr, wi, sflip, prod;
     wr = LDA(t, 2, 0);
     wi = LDA(t + 2 * VL, 2, 0);
     sflip = FLIP_RI(sr);
     prod = VMUL(wr, sr);
     return VFNMS(wi, sflip, prod);
}
Chris@10 253
/* twiddle storage #3: same layout as #1 */
# define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
# define TWVL3 (VL)

/* twiddle storage for split arrays
   (all cosines first, then all sines) */
# define VTWS(v,x)                                                            \
	{TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
	{TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */
Chris@10 265
Chris@10 266 #include "simd-common.h"