annotate src/fftw-3.3.5/simd-support/simd-generic256.h @ 84:08ae793730bd

Add null config files
author Chris Cannam
date Mon, 02 Mar 2020 14:03:47 +0000
parents 2cd0e3b3e1fd
children
rev   line source
Chris@42 1 /*
Chris@42 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
Chris@42 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
Chris@42 4 *
Chris@42 5 * Generic256d added by Romain Dolbeau, and turned into simd-generic256.h
Chris@42 6 * with single & double precision by Erik Lindahl.
Chris@42 7 * Romain Dolbeau hereby places his modifications in the public domain.
Chris@42 8 * Erik Lindahl hereby places his modifications in the public domain.
Chris@42 9 *
Chris@42 10 * This program is free software; you can redistribute it and/or modify
Chris@42 11 * it under the terms of the GNU General Public License as published by
Chris@42 12 * the Free Software Foundation; either version 2 of the License, or
Chris@42 13 * (at your option) any later version.
Chris@42 14 *
Chris@42 15 * This program is distributed in the hope that it will be useful,
Chris@42 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@42 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@42 18 * GNU General Public License for more details.
Chris@42 19 *
Chris@42 20 * You should have received a copy of the GNU General Public License
Chris@42 21 * along with this program; if not, write to the Free Software
Chris@42 22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@42 23 *
Chris@42 24 */
Chris@42 25
/* Reject precisions this layer cannot model: a 32-byte vector holds
   exactly 4 doubles or 8 floats, nothing else. */
#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
# error "Generic simd256 only works in single or double precision"
#endif

#define SIMD_SUFFIX _generic_simd256 /* for renaming */

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
/* VDUPL/VDUPH: brace lists that duplicate the even-index (low/real) or
   odd-index (high/imaginary) elements of x into adjacent pairs. */
# define VDUPL(x) {x[0],x[0],x[2],x[2],x[4],x[4],x[6],x[6]}
# define VDUPH(x) {x[1],x[1],x[3],x[3],x[5],x[5],x[7],x[7]}
/* DVK: declare a vector constant with all lanes set to val. */
# define DVK(var, val) V var = {val,val,val,val,val,val,val,val}
#else
# define DS(d,s) d /* double-precision option */
# define VDUPL(x) {x[0],x[0],x[2],x[2]}
# define VDUPH(x) {x[1],x[1],x[3],x[3]}
# define DVK(var, val) V var = {val, val, val, val}
#endif

#define VL DS(2,4) /* SIMD vector length, in term of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

/* V: 32-byte GCC vector of R (4 doubles or 8 floats), supporting
   element-wise arithmetic and subscripting via GNU vector extensions. */
typedef DS(double,float) V __attribute__ ((vector_size(32)));

/* Element-wise vector arithmetic (GCC vector-extension operators). */
#define VADD(a,b) ((a)+(b))
#define VSUB(a,b) ((a)-(b))
#define VMUL(a,b) ((a)*(b))

#define LDK(x) x
Chris@42 55
Chris@42 56 static inline V LDA(const R *x, INT ivs, const R *aligned_like)
Chris@42 57 {
Chris@42 58 V var;
Chris@42 59 (void)aligned_like; /* UNUSED */
Chris@42 60 return *(const V *)x;
Chris@42 61 }
Chris@42 62
Chris@42 63 static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
Chris@42 64 {
Chris@42 65 (void)aligned_like; /* UNUSED */
Chris@42 66 (void)ovs; /* UNUSED */
Chris@42 67 *(V *)x = v;
Chris@42 68 }
Chris@42 69
Chris@42 70 static inline V LD(const R *x, INT ivs, const R *aligned_like)
Chris@42 71 {
Chris@42 72 V var;
Chris@42 73 (void)aligned_like; /* UNUSED */
Chris@42 74 var[0] = x[0];
Chris@42 75 var[1] = x[1];
Chris@42 76 var[2] = x[ivs];
Chris@42 77 var[3] = x[ivs+1];
Chris@42 78 #ifdef FFTW_SINGLE
Chris@42 79 var[4] = x[2*ivs];
Chris@42 80 var[5] = x[2*ivs+1];
Chris@42 81 var[6] = x[3*ivs];
Chris@42 82 var[7] = x[3*ivs+1];
Chris@42 83 #endif
Chris@42 84 return var;
Chris@42 85 }
Chris@42 86
Chris@42 87
/* ST has to be separate due to the storage hack requiring reverse order */

/* Strided store: scatter one vector of VL complex numbers, writing
   complex number k to x[k*ovs] / x[k*ovs + 1].  The stores are issued
   highest-offset first; do not reorder them (see comment above —
   the "storage hack" elsewhere in FFTW depends on this order). */
static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
#ifdef FFTW_SINGLE
     *(x + 3*ovs ) = v[6];
     *(x + 3*ovs + 1) = v[7];
     *(x + 2*ovs ) = v[4];
     *(x + 2*ovs + 1) = v[5];
     *(x + ovs ) = v[2];
     *(x + ovs + 1) = v[3];
     *(x ) = v[0];
     *(x + 1) = v[1];
#else
     *(x + ovs ) = v[2];
     *(x + ovs + 1) = v[3];
     *(x ) = v[0];
     *(x + 1) = v[1];
#endif
}
Chris@42 109
Chris@42 110 #ifdef FFTW_SINGLE
Chris@42 111 #define STM2(x, v, ovs, a) /* no-op */
Chris@42 112 static inline void STN2(R *x, V v0, V v1, INT ovs)
Chris@42 113 {
Chris@42 114 x[ 0] = v0[0];
Chris@42 115 x[ 1] = v0[1];
Chris@42 116 x[ 2] = v1[0];
Chris@42 117 x[ 3] = v1[1];
Chris@42 118 x[ ovs ] = v0[2];
Chris@42 119 x[ ovs + 1] = v0[3];
Chris@42 120 x[ ovs + 2] = v1[2];
Chris@42 121 x[ ovs + 3] = v1[3];
Chris@42 122 x[2*ovs ] = v0[4];
Chris@42 123 x[2*ovs + 1] = v0[5];
Chris@42 124 x[2*ovs + 2] = v1[4];
Chris@42 125 x[2*ovs + 3] = v1[5];
Chris@42 126 x[3*ovs ] = v0[6];
Chris@42 127 x[3*ovs + 1] = v0[7];
Chris@42 128 x[3*ovs + 2] = v1[6];
Chris@42 129 x[3*ovs + 3] = v1[7];
Chris@42 130 }
Chris@42 131
Chris@42 132 # define STM4(x, v, ovs, aligned_like) /* no-op */
Chris@42 133 static inline void STN4(R *x, V v0, V v1, V v2, V v3, INT ovs)
Chris@42 134 {
Chris@42 135 *(x ) = v0[0];
Chris@42 136 *(x + 1) = v1[0];
Chris@42 137 *(x + 2) = v2[0];
Chris@42 138 *(x + 3) = v3[0];
Chris@42 139 *(x + ovs ) = v0[1];
Chris@42 140 *(x + ovs + 1) = v1[1];
Chris@42 141 *(x + ovs + 2) = v2[1];
Chris@42 142 *(x + ovs + 3) = v3[1];
Chris@42 143 *(x + 2 * ovs ) = v0[2];
Chris@42 144 *(x + 2 * ovs + 1) = v1[2];
Chris@42 145 *(x + 2 * ovs + 2) = v2[2];
Chris@42 146 *(x + 2 * ovs + 3) = v3[2];
Chris@42 147 *(x + 3 * ovs ) = v0[3];
Chris@42 148 *(x + 3 * ovs + 1) = v1[3];
Chris@42 149 *(x + 3 * ovs + 2) = v2[3];
Chris@42 150 *(x + 3 * ovs + 3) = v3[3];
Chris@42 151 *(x + 4 * ovs ) = v0[4];
Chris@42 152 *(x + 4 * ovs + 1) = v1[4];
Chris@42 153 *(x + 4 * ovs + 2) = v2[4];
Chris@42 154 *(x + 4 * ovs + 3) = v3[4];
Chris@42 155 *(x + 5 * ovs ) = v0[5];
Chris@42 156 *(x + 5 * ovs + 1) = v1[5];
Chris@42 157 *(x + 5 * ovs + 2) = v2[5];
Chris@42 158 *(x + 5 * ovs + 3) = v3[5];
Chris@42 159 *(x + 6 * ovs ) = v0[6];
Chris@42 160 *(x + 6 * ovs + 1) = v1[6];
Chris@42 161 *(x + 6 * ovs + 2) = v2[6];
Chris@42 162 *(x + 6 * ovs + 3) = v3[6];
Chris@42 163 *(x + 7 * ovs ) = v0[7];
Chris@42 164 *(x + 7 * ovs + 1) = v1[7];
Chris@42 165 *(x + 7 * ovs + 2) = v2[7];
Chris@42 166 *(x + 7 * ovs + 3) = v3[7];
Chris@42 167 }
Chris@42 168
Chris@42 169 #else
Chris@42 170 /* FFTW_DOUBLE */
Chris@42 171
Chris@42 172 #define STM2 ST
Chris@42 173 #define STN2(x, v0, v1, ovs) /* nop */
Chris@42 174 #define STM4(x, v, ovs, aligned_like) /* no-op */
Chris@42 175
Chris@42 176 static inline void STN4(R *x, V v0, V v1, V v2, V v3, INT ovs) {
Chris@42 177 *(x ) = v0[0];
Chris@42 178 *(x + 1) = v1[0];
Chris@42 179 *(x + 2) = v2[0];
Chris@42 180 *(x + 3) = v3[0];
Chris@42 181 *(x + ovs ) = v0[1];
Chris@42 182 *(x + ovs + 1) = v1[1];
Chris@42 183 *(x + ovs + 2) = v2[1];
Chris@42 184 *(x + ovs + 3) = v3[1];
Chris@42 185 *(x + 2 * ovs ) = v0[2];
Chris@42 186 *(x + 2 * ovs + 1) = v1[2];
Chris@42 187 *(x + 2 * ovs + 2) = v2[2];
Chris@42 188 *(x + 2 * ovs + 3) = v3[2];
Chris@42 189 *(x + 3 * ovs ) = v0[3];
Chris@42 190 *(x + 3 * ovs + 1) = v1[3];
Chris@42 191 *(x + 3 * ovs + 2) = v2[3];
Chris@42 192 *(x + 3 * ovs + 3) = v3[3];
Chris@42 193 }
Chris@42 194 #endif
Chris@42 195
Chris@42 196 static inline V FLIP_RI(V x)
Chris@42 197 {
Chris@42 198 #ifdef FFTW_SINGLE
Chris@42 199 return (V){x[1],x[0],x[3],x[2],x[5],x[4],x[7],x[6]};
Chris@42 200 #else
Chris@42 201 return (V){x[1],x[0],x[3],x[2]};
Chris@42 202 #endif
Chris@42 203 }
Chris@42 204
Chris@42 205 static inline V VCONJ(V x)
Chris@42 206 {
Chris@42 207 #ifdef FFTW_SINGLE
Chris@42 208 return (x * (V){1.0,-1.0,1.0,-1.0,1.0,-1.0,1.0,-1.0});
Chris@42 209 #else
Chris@42 210 return (x * (V){1.0,-1.0,1.0,-1.0});
Chris@42 211 #endif
Chris@42 212 }
Chris@42 213
Chris@42 214 static inline V VBYI(V x)
Chris@42 215 {
Chris@42 216 return FLIP_RI(VCONJ(x));
Chris@42 217 }
Chris@42 218
/* FMA support */
/* Emulated fused multiply-add family, built from plain vector ops:
     VFMA(a,b,c)    = a*b + c
     VFNMS(a,b,c)   = c - a*b
     VFMS(a,b,c)    = a*b - c
     VFMAI(b,c)     = c + i*b        (per complex element)
     VFNMSI(b,c)    = c - i*b
     VFMACONJ(b,c)  = conj(b) + c
     VFMSCONJ(b,c)  = conj(b) - c
     VFNMSCONJ(b,c) = c - conj(b) */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c) VADD(VCONJ(b),c)
#define VFMSCONJ(b,c) VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))
Chris@42 228
Chris@42 229 static inline V VZMUL(V tx, V sr)
Chris@42 230 {
Chris@42 231 V tr = VDUPL(tx);
Chris@42 232 V ti = VDUPH(tx);
Chris@42 233 tr = VMUL(sr, tr);
Chris@42 234 sr = VBYI(sr);
Chris@42 235 return VFMA(ti, sr, tr);
Chris@42 236 }
Chris@42 237
Chris@42 238 static inline V VZMULJ(V tx, V sr)
Chris@42 239 {
Chris@42 240 V tr = VDUPL(tx);
Chris@42 241 V ti = VDUPH(tx);
Chris@42 242 tr = VMUL(sr, tr);
Chris@42 243 sr = VBYI(sr);
Chris@42 244 return VFNMS(ti, sr, tr);
Chris@42 245 }
Chris@42 246
Chris@42 247 static inline V VZMULI(V tx, V sr)
Chris@42 248 {
Chris@42 249 V tr = VDUPL(tx);
Chris@42 250 V ti = VDUPH(tx);
Chris@42 251 ti = VMUL(ti, sr);
Chris@42 252 sr = VBYI(sr);
Chris@42 253 return VFMS(tr, sr, ti);
Chris@42 254 }
Chris@42 255
Chris@42 256 static inline V VZMULIJ(V tx, V sr)
Chris@42 257 {
Chris@42 258 V tr = VDUPL(tx);
Chris@42 259 V ti = VDUPH(tx);
Chris@42 260 ti = VMUL(ti, sr);
Chris@42 261 sr = VBYI(sr);
Chris@42 262 return VFMA(tr, sr, ti);
Chris@42 263 }
Chris@42 264
/* twiddle storage #1: compact, slower */
/* One TW_CEXP descriptor per complex element of the vector (VL of
   them), so a twiddle vector occupies TWVL1 = VL slots. */
#ifdef FFTW_SINGLE
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}
#else
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#endif
#define TWVL1 (VL)
Chris@42 272
Chris@42 273 static inline V BYTW1(const R *t, V sr)
Chris@42 274 {
Chris@42 275 return VZMUL(LDA(t, 2, t), sr);
Chris@42 276 }
Chris@42 277
Chris@42 278 static inline V BYTWJ1(const R *t, V sr)
Chris@42 279 {
Chris@42 280 return VZMULJ(LDA(t, 2, t), sr);
Chris@42 281 }
Chris@42 282
/* twiddle storage #2: twice the space, faster (when in cache) */
/* Layout: one full vector of duplicated cosines followed by one full
   vector of sines with alternating -x/+x signs, so BYTW2/BYTWJ2 can
   multiply without calling VBYI.  Occupies TWVL2 = 2*VL slots. */
#ifdef FFTW_SINGLE
# define VTW2(v,x) \
{TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
{TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
{TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}, \
{TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}
#else
# define VTW2(v,x) \
{TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
{TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#endif
#define TWVL2 (2 * VL)
Chris@42 296
Chris@42 297 static inline V BYTW2(const R *t, V sr)
Chris@42 298 {
Chris@42 299 const V *twp = (const V *)t;
Chris@42 300 V si = FLIP_RI(sr);
Chris@42 301 V tr = twp[0], ti = twp[1];
Chris@42 302 return VFMA(tr, sr, VMUL(ti, si));
Chris@42 303 }
Chris@42 304
Chris@42 305 static inline V BYTWJ2(const R *t, V sr)
Chris@42 306 {
Chris@42 307 const V *twp = (const V *)t;
Chris@42 308 V si = FLIP_RI(sr);
Chris@42 309 V tr = twp[0], ti = twp[1];
Chris@42 310 return VFNMS(ti, si, VMUL(tr, sr));
Chris@42 311 }
Chris@42 312
/* twiddle storage #3 */
/* Same layout as storage #1. */
#define VTW3 VTW1
#define TWVL3 TWVL1

/* twiddle storage for split arrays */
/* A block of cosines (one per vector lane) followed by a block of
   sines, for codelets with split real/imaginary arrays.
   Occupies TWVLS = 2*VL slots. */
#ifdef FFTW_SINGLE
# define VTWS(v,x) \
{TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
{TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, \
{TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}, \
{TW_SIN, v+4, x}, {TW_SIN, v+5, x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, x}
#else
# define VTWS(v,x) \
{TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
{TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */
Chris@42 332
Chris@42 333 #include "simd-common.h"