/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * Knights Corner Vector Instruction support added by Romain Dolbeau.
 * Romain Dolbeau hereby places his modifications in the public domain.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#error "Knights Corner vector instructions only work in single or double precision"
#endif

#ifdef FFTW_SINGLE
# define DS(d,s) s /* single-precision option */
# define SUFF(name) name ## _ps
# define SCAL(x) x ## f
#else /* !FFTW_SINGLE */
# define DS(d,s) d /* double-precision option */
# define SUFF(name) name ## _pd
# define SCAL(x) x
#endif /* FFTW_SINGLE */
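
/*
 * Illustrative note (added commentary, not in the original sources): these
 * three macros select the precision once for the whole header.  In double
 * precision, SUFF(_mm512_add)(a,b) expands to _mm512_add_pd(a,b) and
 * SCAL(1.0) stays 1.0; in single precision the same text becomes
 * _mm512_add_ps(a,b) and 1.0f, and DS(d,s) picks the matching constant,
 * e.g. DS(4, 8) below for the complex vector length.
 */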

#define SIMD_SUFFIX _kcvi /* for renaming */
#define VL DS(4, 8) /* SIMD complex vector length */
#define SIMD_VSTRIDE_OKA(x) ((x) == 2)
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

/* configuration; KNF: 0 0 0 1 0 1 */
#define KCVI_VBYI_SINGLE_USE_MUL 0
#define KCVI_VBYI_DOUBLE_USE_MUL 0
#define KCVI_LD_DOUBLE_USE_UNPACK 1
#define KCVI_ST_DOUBLE_USE_PACK 1
#define KCVI_ST2_DOUBLE_USE_STN2 0
#define KCVI_MULZ_USE_SWIZZLE 1

#include <immintrin.h>

typedef DS(__m512d, __m512) V;

#define VADD(a,b) SUFF(_mm512_add)(a,b)
#define VSUB(a,b) SUFF(_mm512_sub)(a,b)
#define VMUL(a,b) SUFF(_mm512_mul)(a,b)

#define VFMA(a, b, c) SUFF(_mm512_fmadd)(a, b, c)   /* = VADD(c, VMUL(a, b)) */
#define VFMS(a, b, c) SUFF(_mm512_fmsub)(a, b, c)   /* = VSUB(VMUL(a, b), c) */
#define VFNMS(a, b, c) SUFF(_mm512_fnmadd)(a, b, c) /* = VSUB(c, VMUL(a, b)) */

#define LDK(x) x
#define VLIT(re, im) SUFF(_mm512_setr4)(im, re, im, re)
#define DVK(var, val) V var = SUFF(_mm512_set1)(val)

static inline V LDA(const R *x, INT ivs, const R *aligned_like) {
     (void)ivs; (void)aligned_like; /* UNUSED */
     return SUFF(_mm512_load)(x);
}
static inline void STA(R *x, V v, INT ovs, const R *aligned_like) {
     (void)ovs; (void)aligned_like; /* UNUSED */
     SUFF(_mm512_store)(x, v);
}

#ifdef FFTW_SINGLE
#define VXOR(a,b) _mm512_xor_epi32(a,b)

static inline V LDu(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __m512i index = _mm512_set_epi32(7 * ivs + 1, 7 * ivs,
                                      6 * ivs + 1, 6 * ivs,
                                      5 * ivs + 1, 5 * ivs,
                                      4 * ivs + 1, 4 * ivs,
                                      3 * ivs + 1, 3 * ivs,
                                      2 * ivs + 1, 2 * ivs,
                                      1 * ivs + 1, 1 * ivs,
                                      0 * ivs + 1, 0 * ivs);

     return _mm512_i32gather_ps(index, x, _MM_SCALE_4);
}

static inline void STu(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __m512i index = _mm512_set_epi32(7 * ovs + 1, 7 * ovs,
                                      6 * ovs + 1, 6 * ovs,
                                      5 * ovs + 1, 5 * ovs,
                                      4 * ovs + 1, 4 * ovs,
                                      3 * ovs + 1, 3 * ovs,
                                      2 * ovs + 1, 2 * ovs,
                                      1 * ovs + 1, 1 * ovs,
                                      0 * ovs + 1, 0 * ovs);

     _mm512_i32scatter_ps(x, index, v, _MM_SCALE_4);
}
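
/*
 * Worked example (added commentary): element k of the index vector is
 * k * ivs for the real part and k * ivs + 1 for the imaginary part, so for
 * ivs == 2 (contiguous interleaved complex data) the indices are simply
 * 0, 1, ..., 15 and the gather/scatter degenerates to a contiguous
 * 16-float access; larger strides fetch the eight complex elements from
 * x[k * ivs] and x[k * ivs + 1].
 */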

static inline V FLIP_RI(V x)
{
     return (V)_mm512_shuffle_epi32((__m512i)x, _MM_PERM_CDAB);
}

#define VDUPH(a) ((V)_mm512_shuffle_epi32((__m512i)(a), _MM_PERM_DDBB))
#define VDUPL(a) ((V)_mm512_shuffle_epi32((__m512i)(a), _MM_PERM_CCAA))

#else /* !FFTW_SINGLE */
#define VXOR(a,b) _mm512_xor_epi64(a,b)

#if defined(KCVI_LD_DOUBLE_USE_UNPACK) && KCVI_LD_DOUBLE_USE_UNPACK
static inline V LDu(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     V temp;
     /* no need for hq here */
     temp = _mm512_mask_loadunpacklo_pd(temp, 0x0003, x + (0 * ivs));
     temp = _mm512_mask_loadunpacklo_pd(temp, 0x000c, x + (1 * ivs));
     temp = _mm512_mask_loadunpacklo_pd(temp, 0x0030, x + (2 * ivs));
     temp = _mm512_mask_loadunpacklo_pd(temp, 0x00c0, x + (3 * ivs));
     return temp;
}
#else
static inline V LDu(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __declspec(align(64)) R temp[8];
     int i;
     for (i = 0; i < 4; i++) {
          temp[i*2] = x[i * ivs];
          temp[i*2+1] = x[i * ivs + 1];
     }
     return _mm512_load_pd(temp);
}
#endif
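
/*
 * Added commentary on the unpack/pack variants: each write mask selects one
 * complex element, i.e. two adjacent double lanes -- 0x0003, 0x000c, 0x0030
 * and 0x00c0 -- so four masked loadunpacklo (resp. packstorelo) operations
 * cover all eight lanes (0x00ff together) and move four strided complex
 * numbers without needing a gather/scatter unit.
 */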

#if defined(KCVI_ST_DOUBLE_USE_PACK) && KCVI_ST_DOUBLE_USE_PACK
static inline void STu(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* no need for hq here */
     _mm512_mask_packstorelo_pd(x + (0 * ovs), 0x0003, v);
     _mm512_mask_packstorelo_pd(x + (1 * ovs), 0x000c, v);
     _mm512_mask_packstorelo_pd(x + (2 * ovs), 0x0030, v);
     _mm512_mask_packstorelo_pd(x + (3 * ovs), 0x00c0, v);
}
#else
static inline void STu(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __declspec(align(64)) R temp[8];
     int i;
     _mm512_store_pd(temp, v);
     for (i = 0; i < 4; i++) {
          x[i * ovs] = temp[i*2];
          x[i * ovs + 1] = temp[i*2+1];
     }
}
#endif

static inline V FLIP_RI(V x)
{
     return (V)_mm512_shuffle_epi32((__m512i)x, _MM_PERM_BADC);
}

#define VDUPH(a) ((V)_mm512_shuffle_epi32((__m512i)(a), _MM_PERM_DCDC))
#define VDUPL(a) ((V)_mm512_shuffle_epi32((__m512i)(a), _MM_PERM_BABA))

#endif /* FFTW_SINGLE */

#define LD LDu
#define ST STu

#ifdef FFTW_SINGLE
#define STM2(x, v, ovs, a) ST(x, v, ovs, a)
#define STN2(x, v0, v1, ovs) /* no-op */

static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __m512i index = _mm512_set_epi32(15 * ovs, 14 * ovs,
                                      13 * ovs, 12 * ovs,
                                      11 * ovs, 10 * ovs,
                                      9 * ovs, 8 * ovs,
                                      7 * ovs, 6 * ovs,
                                      5 * ovs, 4 * ovs,
                                      3 * ovs, 2 * ovs,
                                      1 * ovs, 0 * ovs);

     _mm512_i32scatter_ps(x, index, v, _MM_SCALE_4);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* no-op */
#else /* !FFTW_SINGLE */
#if defined(KCVI_ST2_DOUBLE_USE_STN2) && KCVI_ST2_DOUBLE_USE_STN2
#define STM2(x, v, ovs, a) /* no-op */
static inline void STN2(R *x, V v0, V v1, INT ovs) {
     /* we start with
          AB CD EF GH -> *x (2 DBL), ovs between complex elements
          IJ KL MN OP -> *(x+2) (2 DBL), ovs between complex elements
        and we want
          ABIJ EFMN -> *x (4 DBL), 2 * ovs between complex pairs
          CDKL GHOP -> *(x+ovs) (4 DBL), 2 * ovs between complex pairs */
     V x00 = (V)_mm512_mask_permute4f128_epi32((__m512i)v0, 0xF0F0, (__m512i)v1, _MM_PERM_CDAB);
     V x01 = (V)_mm512_mask_permute4f128_epi32((__m512i)v1, 0x0F0F, (__m512i)v0, _MM_PERM_CDAB);
     _mm512_mask_packstorelo_pd(x + (0 * ovs) + 0, 0x000F, x00);
     /* _mm512_mask_packstorehi_pd(x + (0 * ovs) + 8, 0x000F, x00); */
     _mm512_mask_packstorelo_pd(x + (2 * ovs) + 0, 0x00F0, x00);
     /* _mm512_mask_packstorehi_pd(x + (2 * ovs) + 8, 0x00F0, x00); */
     _mm512_mask_packstorelo_pd(x + (1 * ovs) + 0, 0x000F, x01);
     /* _mm512_mask_packstorehi_pd(x + (1 * ovs) + 8, 0x000F, x01); */
     _mm512_mask_packstorelo_pd(x + (3 * ovs) + 0, 0x00F0, x01);
     /* _mm512_mask_packstorehi_pd(x + (3 * ovs) + 8, 0x00F0, x01); */
}
#else
#define STM2(x, v, ovs, a) ST(x, v, ovs, a)
#define STN2(x, v0, v1, ovs) /* no-op */
#endif

static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     __m512i index = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
                                      7 * ovs, 6 * ovs,
                                      5 * ovs, 4 * ovs,
                                      3 * ovs, 2 * ovs,
                                      1 * ovs, 0 * ovs);

     _mm512_i32loscatter_pd(x, index, v, _MM_SCALE_8);
}
#define STN4(x, v0, v1, v2, v3, ovs) /* no-op */
#endif /* FFTW_SINGLE */

static inline V VFMAI(V b, V c) {
     V mpmp = VLIT(SCAL(1.0), SCAL(-1.0));
     return SUFF(_mm512_fmadd)(mpmp, SUFF(_mm512_swizzle)(b, _MM_SWIZ_REG_CDAB), c);
}

static inline V VFNMSI(V b, V c) {
     V mpmp = VLIT(SCAL(1.0), SCAL(-1.0));
     return SUFF(_mm512_fnmadd)(mpmp, SUFF(_mm512_swizzle)(b, _MM_SWIZ_REG_CDAB), c);
}
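
/*
 * Added commentary: VLIT(re, im) places `im` in the even (real) lanes and
 * `re` in the odd (imaginary) lanes, so mpmp above is (-1, 1, -1, 1, ...).
 * The CDAB swizzle swaps each (real, imag) pair, hence
 * mpmp * swizzle(b) = (-Im b, Re b) = i*b, and therefore
 *   VFMAI(b, c)  = c + i*b
 *   VFNMSI(b, c) = c - i*b
 * each costing a single swizzled fused multiply-add.
 */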

static inline V VFMACONJ(V b, V c) {
     V pmpm = VLIT(SCAL(-1.0), SCAL(1.0));
     return SUFF(_mm512_fmadd)(pmpm, b, c);
}

static inline V VFMSCONJ(V b, V c) {
     V pmpm = VLIT(SCAL(-1.0), SCAL(1.0));
     return SUFF(_mm512_fmsub)(pmpm, b, c);
}

static inline V VFNMSCONJ(V b, V c) {
     V pmpm = VLIT(SCAL(-1.0), SCAL(1.0));
     return SUFF(_mm512_fnmadd)(pmpm, b, c);
}

static inline V VCONJ(V x)
{
     V pmpm = VLIT(SCAL(-0.0), SCAL(0.0));
     return (V)VXOR((__m512i)pmpm, (__m512i)x);
}
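
/*
 * Added commentary: pmpm in VCONJ is (0.0, -0.0, 0.0, -0.0, ...), so the
 * XOR merely flips the sign bit of every imaginary lane: conjugation costs
 * one bitwise operation instead of a multiply.  Similarly,
 * VFMACONJ(b, c) = conj(b) + c, VFMSCONJ(b, c) = conj(b) - c and
 * VFNMSCONJ(b, c) = c - conj(b), via a (1, -1, ...) pattern in a fused
 * multiply-add.
 */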

#ifdef FFTW_SINGLE
#if defined(KCVI_VBYI_SINGLE_USE_MUL) && KCVI_VBYI_SINGLE_USE_MUL
/* untested */
static inline V VBYI(V x)
{
     V mpmp = VLIT(SCAL(1.0), SCAL(-1.0));
     return _mm512_mul_ps(mpmp, _mm512_swizzle_ps(x, _MM_SWIZ_REG_CDAB));
}
#else
static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}
#endif
#else /* !FFTW_SINGLE */
#if defined(KCVI_VBYI_DOUBLE_USE_MUL) && KCVI_VBYI_DOUBLE_USE_MUL
/* on KNF, using mul_pd is slower than shuf128x32 + xor */
static inline V VBYI(V x)
{
     V mpmp = VLIT(SCAL(1.0), SCAL(-1.0));
     return _mm512_mul_pd(mpmp, _mm512_swizzle_pd(x, _MM_SWIZ_REG_CDAB));
}
#else
static inline V VBYI(V x)
{
     return FLIP_RI(VCONJ(x));
}
#endif
#endif /* FFTW_SINGLE */

#if defined(KCVI_MULZ_USE_SWIZZLE) && KCVI_MULZ_USE_SWIZZLE
static inline V VZMUL(V tx, V sr) /* tx = (a,b), sr = (c,d) */
{
     V ac = SUFF(_mm512_mul)(tx, sr); /* (a*c, b*d) */
     V ad = SUFF(_mm512_mul)(tx, SUFF(_mm512_swizzle)(sr, _MM_SWIZ_REG_CDAB)); /* (a*d, b*c) */
     V acmbd = SUFF(_mm512_sub)(ac, SUFF(_mm512_swizzle)(ac, _MM_SWIZ_REG_CDAB)); /* (a*c-b*d, b*d-a*c) */
     V res = SUFF(_mm512_mask_add)(acmbd, DS(0x00aa,0xaaaa), ad, SUFF(_mm512_swizzle)(ad, _MM_SWIZ_REG_CDAB)); /* (a*c-b*d, b*c+a*d) */
     return res;
}
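
/*
 * Added worked example for VZMUL: with tx = a+bi and sr = c+di stored as
 * lane pairs (a, b) and (c, d), the intermediates are
 *   ac    = (a*c, b*d)
 *   ad    = (a*d, b*c)
 *   acmbd = (a*c - b*d, b*d - a*c)
 * and the masked add (mask 0xaa... selects the odd, imaginary lanes)
 * overwrites the imaginary lanes with a*d + b*c, yielding
 * (a*c - b*d, a*d + b*c), i.e. the complex product (a+bi)*(c+di), using
 * only in-pair swizzles.  VZMULJ, VZMULI and VZMULIJ below follow the same
 * pattern for conj(tx)*sr, i*(tx*sr) and i*(conj(tx)*sr) respectively.
 */
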
static inline V VZMULJ(V tx, V sr) /* tx = (a,b), sr = (c,d) */
{
     V ac = SUFF(_mm512_mul)(tx, sr); /* (a*c, b*d) */
     V ad = SUFF(_mm512_mul)(tx, SUFF(_mm512_swizzle)(sr, _MM_SWIZ_REG_CDAB)); /* (a*d, b*c) */
     V acmbd = SUFF(_mm512_add)(ac, SUFF(_mm512_swizzle)(ac, _MM_SWIZ_REG_CDAB)); /* (a*c+b*d, b*d+a*c) */
     V res = SUFF(_mm512_mask_subr)(acmbd, DS(0x00aa,0xaaaa), ad, SUFF(_mm512_swizzle)(ad, _MM_SWIZ_REG_CDAB)); /* (a*c+b*d, a*d-b*c) */
     return res;
}
static inline V VZMULI(V tx, V sr) /* tx = (a,b), sr = (c,d) */
{
     DVK(zero, SCAL(0.0));
     V ac = SUFF(_mm512_mul)(tx, sr); /* (a*c, b*d) */
     V ad = SUFF(_mm512_fnmadd)(tx, SUFF(_mm512_swizzle)(sr, _MM_SWIZ_REG_CDAB), zero); /* (-a*d, -b*c) */
     V acmbd = SUFF(_mm512_subr)(ac, SUFF(_mm512_swizzle)(ac, _MM_SWIZ_REG_CDAB)); /* (b*d-a*c, a*c-b*d) */
     V res = SUFF(_mm512_mask_add)(acmbd, DS(0x0055,0x5555), ad, SUFF(_mm512_swizzle)(ad, _MM_SWIZ_REG_CDAB)); /* (-a*d-b*c, a*c-b*d) */
     return res;
}
static inline V VZMULIJ(V tx, V sr) /* tx = (a,b), sr = (c,d) */
{
     DVK(zero, SCAL(0.0));
     V ac = SUFF(_mm512_mul)(tx, sr); /* (a*c, b*d) */
     V ad = SUFF(_mm512_fnmadd)(tx, SUFF(_mm512_swizzle)(sr, _MM_SWIZ_REG_CDAB), zero); /* (-a*d, -b*c) */
     V acmbd = SUFF(_mm512_add)(ac, SUFF(_mm512_swizzle)(ac, _MM_SWIZ_REG_CDAB)); /* (b*d+a*c, a*c+b*d) */
     V res = SUFF(_mm512_mask_sub)(acmbd, DS(0x0055,0x5555), ad, SUFF(_mm512_swizzle)(ad, _MM_SWIZ_REG_CDAB)); /* (-a*d+b*c, a*c+b*d) */
     return res;
}
#else
static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}
#endif

/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
# define VTW1(v,x) \
  {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}, \
  {TW_CEXP, v+4, x}, {TW_CEXP, v+5, x}, {TW_CEXP, v+6, x}, {TW_CEXP, v+7, x}
#else /* !FFTW_SINGLE */
# define VTW1(v,x) \
  {TW_CEXP, v, x}, {TW_CEXP, v+1, x}, {TW_CEXP, v+2, x}, {TW_CEXP, v+3, x}
#endif /* FFTW_SINGLE */
#define TWVL1 (VL)

static inline V BYTW1(const R *t, V sr)
{
     return VZMUL(LDA(t, 2, t), sr);
}

static inline V BYTWJ1(const R *t, V sr)
{
     return VZMULJ(LDA(t, 2, t), sr);
}

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
# define VTW2(v,x) \
  {TW_COS, v  , x}, {TW_COS, v  , x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
  {TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
  {TW_COS, v+4, x}, {TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+5, x}, \
  {TW_COS, v+6, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, {TW_COS, v+7, x}, \
  {TW_SIN, v  , -x}, {TW_SIN, v  , x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}, \
  {TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}, \
  {TW_SIN, v+4, -x}, {TW_SIN, v+4, x}, {TW_SIN, v+5, -x}, {TW_SIN, v+5, x}, \
  {TW_SIN, v+6, -x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, -x}, {TW_SIN, v+7, x}
#else /* !FFTW_SINGLE */
# define VTW2(v,x) \
  {TW_COS, v  , x}, {TW_COS, v  , x}, {TW_COS, v+1, x}, {TW_COS, v+1, x}, \
  {TW_COS, v+2, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, {TW_COS, v+3, x}, \
  {TW_SIN, v  , -x}, {TW_SIN, v  , x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}, \
  {TW_SIN, v+2, -x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, -x}, {TW_SIN, v+3, x}
#endif /* FFTW_SINGLE */
#define TWVL2 (2 * VL)

static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     /* V tr = LD(t, 2, t), ti = LD(t + VL, 2, t + VL); */
     return VFMA(tr, sr, VMUL(ti, si));
}

static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     /* V tr = LD(t, 2, t), ti = LD(t + VL, 2, t + VL); */
     return VFNMS(ti, si, VMUL(tr, sr));
}
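
/*
 * Added commentary: storage #2 duplicates each cosine into both lanes of a
 * pair and stores the sines as (-sin, sin).  With sr = (r, s) and
 * si = FLIP_RI(sr) = (s, r),
 *   BYTW2:  tr*sr + ti*si = (cos*r - sin*s, cos*s + sin*r)
 * is the product of sr with the twiddle factor cos + i*sin, while BYTWJ2
 * (tr*sr - ti*si) multiplies by the conjugate twiddle.
 */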

/* twiddle storage #3 */
#define VTW3(v,x) VTW1(v,x)
#define TWVL3 TWVL1

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
# define VTWS(v,x) \
  {TW_COS, v   , x}, {TW_COS, v+1 , x}, {TW_COS, v+2 , x}, {TW_COS, v+3 , x}, \
  {TW_COS, v+4 , x}, {TW_COS, v+5 , x}, {TW_COS, v+6 , x}, {TW_COS, v+7 , x}, \
  {TW_COS, v+8 , x}, {TW_COS, v+9 , x}, {TW_COS, v+10, x}, {TW_COS, v+11, x}, \
  {TW_COS, v+12, x}, {TW_COS, v+13, x}, {TW_COS, v+14, x}, {TW_COS, v+15, x}, \
  {TW_SIN, v   , x}, {TW_SIN, v+1 , x}, {TW_SIN, v+2 , x}, {TW_SIN, v+3 , x}, \
  {TW_SIN, v+4 , x}, {TW_SIN, v+5 , x}, {TW_SIN, v+6 , x}, {TW_SIN, v+7 , x}, \
  {TW_SIN, v+8 , x}, {TW_SIN, v+9 , x}, {TW_SIN, v+10, x}, {TW_SIN, v+11, x}, \
  {TW_SIN, v+12, x}, {TW_SIN, v+13, x}, {TW_SIN, v+14, x}, {TW_SIN, v+15, x}
#else /* !FFTW_SINGLE */
# define VTWS(v,x) \
  {TW_COS, v  , x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
  {TW_COS, v+4, x}, {TW_COS, v+5, x}, {TW_COS, v+6, x}, {TW_COS, v+7, x}, \
  {TW_SIN, v  , x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}, \
  {TW_SIN, v+4, x}, {TW_SIN, v+5, x}, {TW_SIN, v+6, x}, {TW_SIN, v+7, x}
#endif /* FFTW_SINGLE */
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"