annotate src/fftw-3.3.8/simd-support/simd-generic256.h @ 84:08ae793730bd

Add null config files
author Chris Cannam
date Mon, 02 Mar 2020 14:03:47 +0000
parents d0c2a83c1364
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * Generic256d added by Romain Dolbeau, and turned into simd-generic256.h
 * with single & double precision by Erik Lindahl.
 * Romain Dolbeau hereby places his modifications in the public domain.
 * Erik Lindahl hereby places his modifications in the public domain.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
# error "Generic simd256 only works in single or double precision"
#endif

#define SIMD_SUFFIX _generic_simd256 /* for renaming */

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define VDUPL(x) {x[0],x[0],x[2],x[2],x[4],x[4],x[6],x[6]}
# define VDUPH(x) {x[1],x[1],x[3],x[3],x[5],x[5],x[7],x[7]}
# define DVK(var, val) V var = {val,val,val,val,val,val,val,val}
#else
# define DS(d,s) d /* double-precision option */
# define VDUPL(x) {x[0],x[0],x[2],x[2]}
# define VDUPH(x) {x[1],x[1],x[3],x[3]}
# define DVK(var, val) V var = {val, val, val, val}
#endif
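
/*
 * Note (editorial, derived from the definitions above): a vector V holds
 * VL complex numbers stored as interleaved pairs {re0, im0, re1, im1, ...}
 * (4 complex values in single precision, 2 in double).  VDUPL(x) broadcasts
 * each real part into both halves of its complex slot, VDUPH(x) does the
 * same with each imaginary part, and DVK declares a vector variable with
 * every element set to val.
 */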

#define VL DS(2,4)  /* SIMD vector length, in terms of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

typedef DS(double,float) V __attribute__ ((vector_size(32)));

#define VADD(a,b) ((a)+(b))
#define VSUB(a,b) ((a)-(b))
#define VMUL(a,b) ((a)*(b))
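
/*
 * Note: V uses the GCC/Clang generic vector extension: a 32-byte vector of
 * 4 doubles or 8 floats on which +, - and * act element-wise.  VADD, VSUB
 * and VMUL are therefore plain operators, and the compiler lowers them to
 * whatever 256-bit (or narrower) instructions the target provides, so this
 * header needs no machine-specific intrinsics.
 */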

#define LDK(x) x

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
     return *(const V *)x;
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     *(V *)x = v;
}

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
     var[0] = x[0];
     var[1] = x[1];
     var[2] = x[ivs];
     var[3] = x[ivs+1];
#ifdef FFTW_SINGLE
     var[4] = x[2*ivs];
     var[5] = x[2*ivs+1];
     var[6] = x[3*ivs];
     var[7] = x[3*ivs+1];
#endif
     return var;
}
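
/*
 * Note on the loads above: LDA assumes the VL complex values are contiguous
 * and simply dereferences x as a whole vector (ivs is ignored), while LD
 * gathers VL complex numbers whose real parts are spaced ivs reals apart,
 * i.e. complex element k comes from x[k*ivs] and x[k*ivs + 1].
 */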

/* ST has to be separate due to the storage hack requiring reverse order */

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
#ifdef FFTW_SINGLE
     *(x + 3*ovs    ) = v[6];
     *(x + 3*ovs + 1) = v[7];
     *(x + 2*ovs    ) = v[4];
     *(x + 2*ovs + 1) = v[5];
     *(x +   ovs    ) = v[2];
     *(x +   ovs + 1) = v[3];
     *(x            ) = v[0];
     *(x         + 1) = v[1];
#else
     *(x + ovs    ) = v[2];
     *(x + ovs + 1) = v[3];
     *(x          ) = v[0];
     *(x       + 1) = v[1];
#endif
}
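
/*
 * Note: ST scatters the VL complex numbers with output stride ovs, and, per
 * the comment above, deliberately writes the highest-addressed pair first
 * and x[0], x[1] last.
 */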

#ifdef FFTW_SINGLE
#define STM2(x, v, ovs, a) /* no-op */
static inline void STN2(R *x, V v0, V v1, INT ovs)
{
     x[0]         = v0[0];
     x[1]         = v0[1];
     x[2]         = v1[0];
     x[3]         = v1[1];
     x[ovs]       = v0[2];
     x[ovs + 1]   = v0[3];
     x[ovs + 2]   = v1[2];
     x[ovs + 3]   = v1[3];
     x[2*ovs]     = v0[4];
     x[2*ovs + 1] = v0[5];
     x[2*ovs + 2] = v1[4];
     x[2*ovs + 3] = v1[5];
     x[3*ovs]     = v0[6];
     x[3*ovs + 1] = v0[7];
     x[3*ovs + 2] = v1[6];
     x[3*ovs + 3] = v1[7];
}

# define STM4(x, v, ovs, aligned_like) /* no-op */
static inline void STN4(R *x, V v0, V v1, V v2, V v3, INT ovs)
{
     *(x)               = v0[0];
     *(x + 1)           = v1[0];
     *(x + 2)           = v2[0];
     *(x + 3)           = v3[0];
     *(x + ovs)         = v0[1];
     *(x + ovs + 1)     = v1[1];
     *(x + ovs + 2)     = v2[1];
     *(x + ovs + 3)     = v3[1];
     *(x + 2 * ovs)     = v0[2];
     *(x + 2 * ovs + 1) = v1[2];
     *(x + 2 * ovs + 2) = v2[2];
     *(x + 2 * ovs + 3) = v3[2];
     *(x + 3 * ovs)     = v0[3];
     *(x + 3 * ovs + 1) = v1[3];
     *(x + 3 * ovs + 2) = v2[3];
     *(x + 3 * ovs + 3) = v3[3];
     *(x + 4 * ovs)     = v0[4];
     *(x + 4 * ovs + 1) = v1[4];
     *(x + 4 * ovs + 2) = v2[4];
     *(x + 4 * ovs + 3) = v3[4];
     *(x + 5 * ovs)     = v0[5];
     *(x + 5 * ovs + 1) = v1[5];
     *(x + 5 * ovs + 2) = v2[5];
     *(x + 5 * ovs + 3) = v3[5];
     *(x + 6 * ovs)     = v0[6];
     *(x + 6 * ovs + 1) = v1[6];
     *(x + 6 * ovs + 2) = v2[6];
     *(x + 6 * ovs + 3) = v3[6];
     *(x + 7 * ovs)     = v0[7];
     *(x + 7 * ovs + 1) = v1[7];
     *(x + 7 * ovs + 2) = v2[7];
     *(x + 7 * ovs + 3) = v3[7];
}

#else
/* FFTW_DOUBLE */

#define STM2 ST
#define STN2(x, v0, v1, ovs) /* nop */
#define STM4(x, v, ovs, aligned_like) /* no-op */

static inline void STN4(R *x, V v0, V v1, V v2, V v3, INT ovs) {
     *(x)               = v0[0];
     *(x + 1)           = v1[0];
     *(x + 2)           = v2[0];
     *(x + 3)           = v3[0];
     *(x + ovs)         = v0[1];
     *(x + ovs + 1)     = v1[1];
     *(x + ovs + 2)     = v2[1];
     *(x + ovs + 3)     = v3[1];
     *(x + 2 * ovs)     = v0[2];
     *(x + 2 * ovs + 1) = v1[2];
     *(x + 2 * ovs + 2) = v2[2];
     *(x + 2 * ovs + 3) = v3[2];
     *(x + 3 * ovs)     = v0[3];
     *(x + 3 * ovs + 1) = v1[3];
     *(x + 3 * ovs + 2) = v2[3];
     *(x + 3 * ovs + 3) = v3[3];
}
#endif
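
/*
 * Note on the STM/STN pairs: in single precision STM2 and STM4 expand to
 * nothing and the corresponding STN2/STN4 routines write all outputs in one
 * call; in double precision STM2 is simply ST and STN2 becomes the no-op
 * instead, while STM4/STN4 keep the same split as the single-precision case.
 */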

static inline V FLIP_RI(V x)
{
#ifdef FFTW_SINGLE
     return (V){x[1],x[0],x[3],x[2],x[5],x[4],x[7],x[6]};
#else
     return (V){x[1],x[0],x[3],x[2]};
#endif
}

static inline V VCONJ(V x)
{
#ifdef FFTW_SINGLE
     return (x * (V){1.0,-1.0,1.0,-1.0,1.0,-1.0,1.0,-1.0});
#else
     return (x * (V){1.0,-1.0,1.0,-1.0});
#endif
}

static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}
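
/*
 * Note: FLIP_RI swaps the real and imaginary halves of every complex slot,
 * VCONJ negates the imaginary halves, and their composition in VBYI
 * multiplies each complex element by +i.
 */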

/* FMA support */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c) VADD(VCONJ(b),c)
#define VFMSCONJ(b,c) VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))
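
/*
 * Note: these are not fused operations.  The generic vector extension only
 * provides +, - and *, so VFMA and friends are spelled as a separate
 * multiply followed by an add/subtract; the compiler may still contract
 * them into real FMA instructions where the target allows.  In complex
 * terms, VFMAI(b,c) = c + i*b, VFNMSI(b,c) = c - i*b, VFMACONJ(b,c) =
 * conj(b) + c, VFMSCONJ(b,c) = conj(b) - c, and VFNMSCONJ(b,c) = c - conj(b).
 */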

static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}
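
/*
 * Note: the four helpers above perform element-wise complex multiplication
 * against a twiddle vector tx:
 *   VZMUL(t,s)   = t * s
 *   VZMULJ(t,s)  = conj(t) * s
 *   VZMULI(t,s)  = i * t * s
 *   VZMULIJ(t,s) = i * conj(t) * s
 *
 * Illustrative sketch only (editorial; not part of FFTW, whose codelets are
 * machine-generated): how a contiguous block of complex data could be
 * multiplied by a block of twiddle factors using these helpers.  The
 * function name is hypothetical and n is assumed to be a multiple of 2*VL.
 */
#if 0
static inline void apply_twiddles_example(R *data, const R *tw, INT n)
{
     INT k;
     for (k = 0; k < n; k += 2 * VL) {          /* 2*VL reals per vector */
          V t = LDA(tw + k, 0, tw + k);         /* VL twiddle factors    */
          V s = LDA(data + k, 0, data + k);     /* VL complex data       */
          STA(data + k, VZMUL(t, s), 0, data + k); /* data <- tw * data  */
     }
}
#endif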

/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}
#else
# define VTW1(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#endif
#define TWVL1 (VL)

static inline V BYTW1(const R *t, V sr)
{
     return VZMUL(LDA(t, 2, t), sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     return VZMULJ(LDA(t, 2, t), sr);
}
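
/*
 * Note: format #1 stores one full complex exponential (TW_CEXP) per vector
 * slot, VL of them per step (TWVL1 = VL), so a twiddle vector is loaded
 * directly with LDA; BYTW1 multiplies sr by the twiddle and BYTWJ1 by its
 * conjugate.
 */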

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x)                                                           \
     {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},     \
     {TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
     {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x},   \
     {TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}
#else
# define VTW2(v,x)                                                           \
     {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},     \
     {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#endif
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}

static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}
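
/*
 * Note: format #2 trades space for shuffles.  The cosines are stored
 * duplicated and the sines with alternating sign, so BYTW2/BYTWJ2 can form
 * the complex product from a single FLIP_RI plus element-wise multiply and
 * add, avoiding the VDUPL/VDUPH shuffles used by VZMUL; the table is twice
 * as large (TWVL2 = 2 * VL).
 */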

/* twiddle storage #3 */
#define VTW3 VTW1
#define TWVL3 TWVL1

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x)                                                           \
     {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},   \
     {TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, \
     {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x},   \
     {TW_SIN, v+4, x}, {TW_SIN, v+5, x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, x}
#else
# define VTWS(v,x)                                                           \
     {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x},   \
     {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#endif
#define TWVLS (2 * VL)
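
/*
 * Note: format #3 simply reuses the compact format #1.  VTWS stores a block
 * of cosines followed by a block of sines, as used by the split-array
 * (separate real/imaginary) transforms, and also occupies 2 * VL values per
 * twiddle step (TWVLS).
 */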

#define VLEAVE() /* nothing */

#include "simd-common.h"