/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:37:48 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 16 -name n2sv_16 -with-ostride 1 -include n2s.h -store-multiple 4 */
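
/* Reading the option names above: gen_notw.native emits a "no-twiddle" DFT
   codelet of size -n 16 in SIMD form; -fma schedules the arithmetic for
   fused multiply-add machines, -with-ostride 1 fixes the output stride at 1,
   and -store-multiple 4 groups output stores in fours (the STM4/STN4 macros
   used below). */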

/*
 * This function contains 144 FP additions, 40 FP multiplications,
 * (or, 104 additions, 0 multiplications, 40 fused multiply/add),
 * 110 stack variables, 3 constants, and 72 memory accesses
 */
#include "n2s.h"

static void n2sv_16(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     DVK(KP414213562, +0.414213562373095048801688724209698078569671875);
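     /* The three constants are the trigonometric values of the size-16 DFT:
        cos(pi/8) = 0.9238795..., 1/sqrt(2) = 0.7071067..., and
        tan(pi/8) = sqrt(2) - 1 = 0.4142135.... */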
     {
          INT i;
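          /* Each iteration transforms 2*VL independent length-16 vectors
             (VL = SIMD vector length), advancing the input pointers by
             2*VL*ivs and the output pointers by 2*VL*ovs. */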
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(64, is), MAKE_VOLATILE_STRIDE(64, os)) {
               V T2p, T2q, T2r, T2s, T2x, T2y, T2z, T2A, T1M, T1N, T1L, T1P, T2F, T2G, T2H;
               V T2I, T1O, T1Q;
               {
                    V T1l, T1H, T1R, T7, T1x, TN, TC, T25, T1E, T1b, T1Z, Tt, T2h, T22, T1D;
                    V T1g, T1n, TQ, T11, Ti, Te, T26, T1m, TT, T1S, TJ, TZ, T1V, TW, Tl;
                    V T12, T13;
                    {
                         V Tq, T1c, Tp, T20, T1a, Tr, T1d, T1e;
                         {
                              V T1, T2, Tw, Tx, T4, T5, Tz, TA;
                              T1 = LD(&(ri[0]), ivs, &(ri[0]));
                              T2 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0]));
                              Tw = LD(&(ii[0]), ivs, &(ii[0]));
                              Tx = LD(&(ii[WS(is, 8)]), ivs, &(ii[0]));
                              T4 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                              T5 = LD(&(ri[WS(is, 12)]), ivs, &(ri[0]));
                              Tz = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                              TA = LD(&(ii[WS(is, 12)]), ivs, &(ii[0]));
                              {
                                   V Tn, TL, T3, T1k, Ty, T1j, T6, TM, TB, To, T18, T19;
                                   Tn = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)]));
                                   TL = VSUB(T1, T2);
                                   T3 = VADD(T1, T2);
                                   T1k = VSUB(Tw, Tx);
                                   Ty = VADD(Tw, Tx);
                                   T1j = VSUB(T4, T5);
                                   T6 = VADD(T4, T5);
                                   TM = VSUB(Tz, TA);
                                   TB = VADD(Tz, TA);
                                   To = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                                   T18 = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)]));
                                   T19 = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                                   Tq = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                                   T1l = VADD(T1j, T1k);
                                   T1H = VSUB(T1k, T1j);
                                   T1R = VSUB(T3, T6);
                                   T7 = VADD(T3, T6);
                                   T1x = VADD(TL, TM);
                                   TN = VSUB(TL, TM);
                                   TC = VADD(Ty, TB);
                                   T25 = VSUB(Ty, TB);
                                   T1c = VSUB(Tn, To);
                                   Tp = VADD(Tn, To);
                                   T20 = VADD(T18, T19);
                                   T1a = VSUB(T18, T19);
                                   Tr = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)]));
                                   T1d = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                                   T1e = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)]));
                              }
                         }
                         {
                              V Tb, Ta, TF, Tc, TG, TH, TP, TO;
                              {
                                   V T8, T9, TD, TE;
                                   T8 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                                   T9 = LD(&(ri[WS(is, 10)]), ivs, &(ri[0]));
                                   TD = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                                   TE = LD(&(ii[WS(is, 10)]), ivs, &(ii[0]));
                                   Tb = LD(&(ri[WS(is, 14)]), ivs, &(ri[0]));
                                   {
                                        V T17, Ts, T21, T1f;
                                        T17 = VSUB(Tq, Tr);
                                        Ts = VADD(Tq, Tr);
                                        T21 = VADD(T1d, T1e);
                                        T1f = VSUB(T1d, T1e);
                                        TP = VSUB(T8, T9);
                                        Ta = VADD(T8, T9);
                                        TO = VSUB(TD, TE);
                                        TF = VADD(TD, TE);
                                        T1E = VSUB(T1a, T17);
                                        T1b = VADD(T17, T1a);
                                        T1Z = VSUB(Tp, Ts);
                                        Tt = VADD(Tp, Ts);
                                        T2h = VADD(T20, T21);
                                        T22 = VSUB(T20, T21);
                                        T1D = VADD(T1c, T1f);
                                        T1g = VSUB(T1c, T1f);
                                        Tc = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                                   }
                                   TG = LD(&(ii[WS(is, 14)]), ivs, &(ii[0]));
                                   TH = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                              }
                              T1n = VADD(TP, TO);
                              TQ = VSUB(TO, TP);
                              {
                                   V Tg, Th, TX, TR, Td, TS, TI, TY, Tj, Tk;
                                   Tg = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                                   Th = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)]));
                                   TX = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                                   TR = VSUB(Tb, Tc);
                                   Td = VADD(Tb, Tc);
                                   TS = VSUB(TG, TH);
                                   TI = VADD(TG, TH);
                                   TY = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)]));
                                   Tj = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                                   T11 = VSUB(Tg, Th);
                                   Ti = VADD(Tg, Th);
                                   Tk = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)]));
                                   Te = VADD(Ta, Td);
                                   T26 = VSUB(Td, Ta);
                                   T1m = VSUB(TR, TS);
                                   TT = VADD(TR, TS);
                                   T1S = VSUB(TF, TI);
                                   TJ = VADD(TF, TI);
                                   TZ = VSUB(TX, TY);
                                   T1V = VADD(TX, TY);
                                   TW = VSUB(Tj, Tk);
                                   Tl = VADD(Tj, Tk);
                                   T12 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                                   T13 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)]));
                              }
                         }
                    }
                    {
                         V T2f, Tf, T2j, TK, Tm, T1U, T10, T1B, T14, T1W;
                         T2f = VSUB(T7, Te);
                         Tf = VADD(T7, Te);
                         T2j = VADD(TC, TJ);
                         TK = VSUB(TC, TJ);
                         Tm = VADD(Ti, Tl);
                         T1U = VSUB(Ti, Tl);
                         T10 = VADD(TW, TZ);
                         T1B = VSUB(TZ, TW);
                         T14 = VSUB(T12, T13);
                         T1W = VADD(T12, T13);
                         {
                              V T29, T1T, T27, T2d, T2b, T23, T15, T1A, T2l, T2m, T2n, T2o, T2i, T2k, T1Y;
                              V T2a;
                              {
                                   V Tv, Tu, T1X, T2g;
                                   T29 = VSUB(T1R, T1S);
                                   T1T = VADD(T1R, T1S);
                                   T27 = VSUB(T25, T26);
                                   T2d = VADD(T26, T25);
                                   T2b = VADD(T1Z, T22);
                                   T23 = VSUB(T1Z, T22);
                                   Tv = VSUB(Tt, Tm);
                                   Tu = VADD(Tm, Tt);
                                   T1X = VSUB(T1V, T1W);
                                   T2g = VADD(T1V, T1W);
                                   T15 = VSUB(T11, T14);
                                   T1A = VADD(T11, T14);
                                   T2l = VSUB(TK, Tv);
                                   STM4(&(io[12]), T2l, ovs, &(io[0]));
                                   T2m = VADD(Tv, TK);
                                   STM4(&(io[4]), T2m, ovs, &(io[0]));
                                   T2n = VADD(Tf, Tu);
                                   STM4(&(ro[0]), T2n, ovs, &(ro[0]));
                                   T2o = VSUB(Tf, Tu);
                                   STM4(&(ro[8]), T2o, ovs, &(ro[0]));
                                   T2i = VSUB(T2g, T2h);
                                   T2k = VADD(T2g, T2h);
                                   T1Y = VADD(T1U, T1X);
                                   T2a = VSUB(T1X, T1U);
                              }
                              {
                                   V T1I, T1y, T1t, T16, T1v, TV, T1r, T1p, T2t, T2u, T2v, T2w, T1h, T1s, TU;
                                   V T1o;
                                   T1I = VADD(TQ, TT);
                                   TU = VSUB(TQ, TT);
                                   T1o = VSUB(T1m, T1n);
                                   T1y = VADD(T1n, T1m);
                                   T1t = VFNMS(LDK(KP414213562), T10, T15);
                                   T16 = VFMA(LDK(KP414213562), T15, T10);
                                   T2p = VADD(T2f, T2i);
                                   STM4(&(ro[4]), T2p, ovs, &(ro[0]));
                                   T2q = VSUB(T2f, T2i);
                                   STM4(&(ro[12]), T2q, ovs, &(ro[0]));
                                   T2r = VADD(T2j, T2k);
                                   STM4(&(io[0]), T2r, ovs, &(io[0]));
                                   T2s = VSUB(T2j, T2k);
                                   STM4(&(io[8]), T2s, ovs, &(io[0]));
                                   {
                                        V T28, T24, T2e, T2c;
                                        T28 = VSUB(T23, T1Y);
                                        T24 = VADD(T1Y, T23);
                                        T2e = VADD(T2a, T2b);
                                        T2c = VSUB(T2a, T2b);
                                        T1v = VFNMS(LDK(KP707106781), TU, TN);
                                        TV = VFMA(LDK(KP707106781), TU, TN);
                                        T1r = VFMA(LDK(KP707106781), T1o, T1l);
                                        T1p = VFNMS(LDK(KP707106781), T1o, T1l);
                                        T2t = VFNMS(LDK(KP707106781), T28, T27);
                                        STM4(&(io[14]), T2t, ovs, &(io[0]));
                                        T2u = VFMA(LDK(KP707106781), T28, T27);
                                        STM4(&(io[6]), T2u, ovs, &(io[0]));
                                        T2v = VFMA(LDK(KP707106781), T24, T1T);
                                        STM4(&(ro[2]), T2v, ovs, &(ro[0]));
                                        T2w = VFNMS(LDK(KP707106781), T24, T1T);
                                        STM4(&(ro[10]), T2w, ovs, &(ro[0]));
                                        T2x = VFNMS(LDK(KP707106781), T2e, T2d);
                                        STM4(&(io[10]), T2x, ovs, &(io[0]));
                                        T2y = VFMA(LDK(KP707106781), T2e, T2d);
                                        STM4(&(io[2]), T2y, ovs, &(io[0]));
                                        T2z = VFMA(LDK(KP707106781), T2c, T29);
                                        STM4(&(ro[6]), T2z, ovs, &(ro[0]));
                                        T2A = VFNMS(LDK(KP707106781), T2c, T29);
                                        STM4(&(ro[14]), T2A, ovs, &(ro[0]));
                                        T1h = VFNMS(LDK(KP414213562), T1g, T1b);
                                        T1s = VFMA(LDK(KP414213562), T1b, T1g);
                                   }
                                   {
                                        V T1z, T1J, T1K, T1G, T2B, T2C, T2D, T2E, T1C, T1F;
                                        T1M = VFNMS(LDK(KP414213562), T1A, T1B);
                                        T1C = VFMA(LDK(KP414213562), T1B, T1A);
                                        T1F = VFNMS(LDK(KP414213562), T1E, T1D);
                                        T1N = VFMA(LDK(KP414213562), T1D, T1E);
                                        {
                                             V T1q, T1i, T1w, T1u;
                                             T1q = VADD(T16, T1h);
                                             T1i = VSUB(T16, T1h);
                                             T1w = VADD(T1t, T1s);
                                             T1u = VSUB(T1s, T1t);
                                             T1L = VFNMS(LDK(KP707106781), T1y, T1x);
                                             T1z = VFMA(LDK(KP707106781), T1y, T1x);
                                             T1P = VFMA(LDK(KP707106781), T1I, T1H);
                                             T1J = VFNMS(LDK(KP707106781), T1I, T1H);
                                             T1K = VSUB(T1F, T1C);
                                             T1G = VADD(T1C, T1F);
                                             T2B = VFMA(LDK(KP923879532), T1q, T1p);
                                             STM4(&(io[15]), T2B, ovs, &(io[1]));
                                             T2C = VFNMS(LDK(KP923879532), T1q, T1p);
                                             STM4(&(io[7]), T2C, ovs, &(io[1]));
                                             T2D = VFMA(LDK(KP923879532), T1i, TV);
                                             STM4(&(ro[3]), T2D, ovs, &(ro[1]));
                                             T2E = VFNMS(LDK(KP923879532), T1i, TV);
                                             STM4(&(ro[11]), T2E, ovs, &(ro[1]));
                                             T2F = VFMA(LDK(KP923879532), T1w, T1v);
                                             STM4(&(ro[15]), T2F, ovs, &(ro[1]));
                                             T2G = VFNMS(LDK(KP923879532), T1w, T1v);
                                             STM4(&(ro[7]), T2G, ovs, &(ro[1]));
                                             T2H = VFMA(LDK(KP923879532), T1u, T1r);
                                             STM4(&(io[3]), T2H, ovs, &(io[1]));
                                             T2I = VFNMS(LDK(KP923879532), T1u, T1r);
                                             STM4(&(io[11]), T2I, ovs, &(io[1]));
                                        }
                                        {
                                             V T2J, T2K, T2L, T2M;
                                             T2J = VFNMS(LDK(KP923879532), T1G, T1z);
                                             STM4(&(ro[9]), T2J, ovs, &(ro[1]));
                                             STN4(&(ro[8]), T2o, T2J, T2w, T2E, ovs);
                                             T2K = VFMA(LDK(KP923879532), T1G, T1z);
                                             STM4(&(ro[1]), T2K, ovs, &(ro[1]));
                                             STN4(&(ro[0]), T2n, T2K, T2v, T2D, ovs);
                                             T2L = VFNMS(LDK(KP923879532), T1K, T1J);
                                             STM4(&(io[13]), T2L, ovs, &(io[1]));
                                             STN4(&(io[12]), T2l, T2L, T2t, T2B, ovs);
                                             T2M = VFMA(LDK(KP923879532), T1K, T1J);
                                             STM4(&(io[5]), T2M, ovs, &(io[1]));
                                             STN4(&(io[4]), T2m, T2M, T2u, T2C, ovs);
                                        }
                                   }
                              }
                         }
                    }
               }
               T1O = VSUB(T1M, T1N);
               T1Q = VADD(T1M, T1N);
               {
                    V T2N, T2O, T2P, T2Q;
                    T2N = VFMA(LDK(KP923879532), T1Q, T1P);
                    STM4(&(io[1]), T2N, ovs, &(io[1]));
                    STN4(&(io[0]), T2r, T2N, T2y, T2H, ovs);
                    T2O = VFNMS(LDK(KP923879532), T1Q, T1P);
                    STM4(&(io[9]), T2O, ovs, &(io[1]));
                    STN4(&(io[8]), T2s, T2O, T2x, T2I, ovs);
                    T2P = VFMA(LDK(KP923879532), T1O, T1L);
                    STM4(&(ro[5]), T2P, ovs, &(ro[1]));
                    STN4(&(ro[4]), T2p, T2P, T2z, T2G, ovs);
                    T2Q = VFNMS(LDK(KP923879532), T1O, T1L);
                    STM4(&(ro[13]), T2Q, ovs, &(ro[1]));
                    STN4(&(ro[12]), T2q, T2Q, T2A, T2F, ovs);
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 16, XSIMD_STRING("n2sv_16"), {104, 0, 40, 0}, &GENUS, 0, 1, 0, 0 };
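/* The {104, 0, 40, 0} field repeats the operation counts from the header
   comment: 104 additions, 0 multiplications, 40 fused multiply-adds. */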

void XSIMD(codelet_n2sv_16) (planner *p) {
     X(kdft_register) (p, n2sv_16, &desc);
}

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 16 -name n2sv_16 -with-ostride 1 -include n2s.h -store-multiple 4 */
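
/* Same generator invocation as above but without -fma: multiplications by
   the twiddle constants are issued as explicit VMUL operations, which is
   why this variant counts 136 additions and 16 multiplications instead of
   104 additions and 40 fused multiply-adds. */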

/*
 * This function contains 144 FP additions, 24 FP multiplications,
 * (or, 136 additions, 16 multiplications, 8 fused multiply/add),
 * 74 stack variables, 3 constants, and 72 memory accesses
 */
#include "n2s.h"

static void n2sv_16(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP382683432, +0.382683432365089771728459984030398866761344562);
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
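     /* Here the constants are sin(pi/8) = 0.3826834..., cos(pi/8) =
        0.9238795..., and 1/sqrt(2) = 0.7071067.... */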
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(64, is), MAKE_VOLATILE_STRIDE(64, os)) {
               V T7, T1R, T25, TC, TN, T1x, T1H, T1l, Tt, T22, T2h, T1b, T1g, T1E, T1Z;
               V T1D, Te, T1S, T26, TJ, TQ, T1m, T1n, TT, Tm, T1X, T2g, T10, T15, T1B;
               V T1U, T1A;
               {
                    V T3, TL, Ty, T1k, T6, T1j, TB, TM;
                    {
                         V T1, T2, Tw, Tx;
                         T1 = LD(&(ri[0]), ivs, &(ri[0]));
                         T2 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0]));
                         T3 = VADD(T1, T2);
                         TL = VSUB(T1, T2);
                         Tw = LD(&(ii[0]), ivs, &(ii[0]));
                         Tx = LD(&(ii[WS(is, 8)]), ivs, &(ii[0]));
                         Ty = VADD(Tw, Tx);
                         T1k = VSUB(Tw, Tx);
                    }
                    {
                         V T4, T5, Tz, TA;
                         T4 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                         T5 = LD(&(ri[WS(is, 12)]), ivs, &(ri[0]));
                         T6 = VADD(T4, T5);
                         T1j = VSUB(T4, T5);
                         Tz = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                         TA = LD(&(ii[WS(is, 12)]), ivs, &(ii[0]));
                         TB = VADD(Tz, TA);
                         TM = VSUB(Tz, TA);
                    }
                    T7 = VADD(T3, T6);
                    T1R = VSUB(T3, T6);
                    T25 = VSUB(Ty, TB);
                    TC = VADD(Ty, TB);
                    TN = VSUB(TL, TM);
                    T1x = VADD(TL, TM);
                    T1H = VSUB(T1k, T1j);
                    T1l = VADD(T1j, T1k);
               }
               {
                    V Tp, T17, T1f, T20, Ts, T1c, T1a, T21;
                    {
                         V Tn, To, T1d, T1e;
                         Tn = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)]));
                         To = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                         Tp = VADD(Tn, To);
                         T17 = VSUB(Tn, To);
                         T1d = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)]));
                         T1e = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                         T1f = VSUB(T1d, T1e);
                         T20 = VADD(T1d, T1e);
                    }
                    {
                         V Tq, Tr, T18, T19;
                         Tq = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                         Tr = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)]));
                         Ts = VADD(Tq, Tr);
                         T1c = VSUB(Tq, Tr);
                         T18 = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                         T19 = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)]));
                         T1a = VSUB(T18, T19);
                         T21 = VADD(T18, T19);
                    }
                    Tt = VADD(Tp, Ts);
                    T22 = VSUB(T20, T21);
                    T2h = VADD(T20, T21);
                    T1b = VSUB(T17, T1a);
                    T1g = VADD(T1c, T1f);
                    T1E = VSUB(T1f, T1c);
                    T1Z = VSUB(Tp, Ts);
                    T1D = VADD(T17, T1a);
               }
               {
                    V Ta, TP, TF, TO, Td, TR, TI, TS;
                    {
                         V T8, T9, TD, TE;
                         T8 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                         T9 = LD(&(ri[WS(is, 10)]), ivs, &(ri[0]));
                         Ta = VADD(T8, T9);
                         TP = VSUB(T8, T9);
                         TD = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                         TE = LD(&(ii[WS(is, 10)]), ivs, &(ii[0]));
                         TF = VADD(TD, TE);
                         TO = VSUB(TD, TE);
                    }
                    {
                         V Tb, Tc, TG, TH;
                         Tb = LD(&(ri[WS(is, 14)]), ivs, &(ri[0]));
                         Tc = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                         Td = VADD(Tb, Tc);
                         TR = VSUB(Tb, Tc);
                         TG = LD(&(ii[WS(is, 14)]), ivs, &(ii[0]));
                         TH = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                         TI = VADD(TG, TH);
                         TS = VSUB(TG, TH);
                    }
                    Te = VADD(Ta, Td);
                    T1S = VSUB(TF, TI);
                    T26 = VSUB(Td, Ta);
                    TJ = VADD(TF, TI);
                    TQ = VSUB(TO, TP);
                    T1m = VSUB(TR, TS);
                    T1n = VADD(TP, TO);
                    TT = VADD(TR, TS);
               }
               {
                    V Ti, T11, TZ, T1V, Tl, TW, T14, T1W;
                    {
                         V Tg, Th, TX, TY;
                         Tg = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                         Th = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)]));
                         Ti = VADD(Tg, Th);
                         T11 = VSUB(Tg, Th);
                         TX = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                         TY = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)]));
                         TZ = VSUB(TX, TY);
                         T1V = VADD(TX, TY);
                    }
                    {
                         V Tj, Tk, T12, T13;
                         Tj = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                         Tk = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)]));
                         Tl = VADD(Tj, Tk);
                         TW = VSUB(Tj, Tk);
                         T12 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                         T13 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)]));
                         T14 = VSUB(T12, T13);
                         T1W = VADD(T12, T13);
                    }
                    Tm = VADD(Ti, Tl);
                    T1X = VSUB(T1V, T1W);
                    T2g = VADD(T1V, T1W);
                    T10 = VADD(TW, TZ);
                    T15 = VSUB(T11, T14);
                    T1B = VADD(T11, T14);
                    T1U = VSUB(Ti, Tl);
                    T1A = VSUB(TZ, TW);
               }
               {
                    V T2l, T2m, T2n, T2o, T2p, T2q, T2r, T2s;
                    {
                         V Tf, Tu, T2j, T2k;
                         Tf = VADD(T7, Te);
                         Tu = VADD(Tm, Tt);
                         T2l = VSUB(Tf, Tu);
                         STM4(&(ro[8]), T2l, ovs, &(ro[0]));
                         T2m = VADD(Tf, Tu);
                         STM4(&(ro[0]), T2m, ovs, &(ro[0]));
                         T2j = VADD(TC, TJ);
                         T2k = VADD(T2g, T2h);
                         T2n = VSUB(T2j, T2k);
                         STM4(&(io[8]), T2n, ovs, &(io[0]));
                         T2o = VADD(T2j, T2k);
                         STM4(&(io[0]), T2o, ovs, &(io[0]));
                    }
                    {
                         V Tv, TK, T2f, T2i;
                         Tv = VSUB(Tt, Tm);
                         TK = VSUB(TC, TJ);
                         T2p = VADD(Tv, TK);
                         STM4(&(io[4]), T2p, ovs, &(io[0]));
                         T2q = VSUB(TK, Tv);
                         STM4(&(io[12]), T2q, ovs, &(io[0]));
                         T2f = VSUB(T7, Te);
                         T2i = VSUB(T2g, T2h);
                         T2r = VSUB(T2f, T2i);
                         STM4(&(ro[12]), T2r, ovs, &(ro[0]));
                         T2s = VADD(T2f, T2i);
                         STM4(&(ro[4]), T2s, ovs, &(ro[0]));
                    }
                    {
                         V T2t, T2u, T2v, T2w, T2x, T2y, T2z, T2A;
                         {
                              V T1T, T27, T24, T28, T1Y, T23;
                              T1T = VADD(T1R, T1S);
                              T27 = VSUB(T25, T26);
                              T1Y = VADD(T1U, T1X);
                              T23 = VSUB(T1Z, T22);
                              T24 = VMUL(LDK(KP707106781), VADD(T1Y, T23));
                              T28 = VMUL(LDK(KP707106781), VSUB(T23, T1Y));
                              T2t = VSUB(T1T, T24);
                              STM4(&(ro[10]), T2t, ovs, &(ro[0]));
                              T2u = VADD(T27, T28);
                              STM4(&(io[6]), T2u, ovs, &(io[0]));
                              T2v = VADD(T1T, T24);
                              STM4(&(ro[2]), T2v, ovs, &(ro[0]));
                              T2w = VSUB(T27, T28);
                              STM4(&(io[14]), T2w, ovs, &(io[0]));
                         }
                         {
                              V T29, T2d, T2c, T2e, T2a, T2b;
                              T29 = VSUB(T1R, T1S);
                              T2d = VADD(T26, T25);
                              T2a = VSUB(T1X, T1U);
                              T2b = VADD(T1Z, T22);
                              T2c = VMUL(LDK(KP707106781), VSUB(T2a, T2b));
                              T2e = VMUL(LDK(KP707106781), VADD(T2a, T2b));
                              T2x = VSUB(T29, T2c);
                              STM4(&(ro[14]), T2x, ovs, &(ro[0]));
                              T2y = VADD(T2d, T2e);
                              STM4(&(io[2]), T2y, ovs, &(io[0]));
                              T2z = VADD(T29, T2c);
                              STM4(&(ro[6]), T2z, ovs, &(ro[0]));
                              T2A = VSUB(T2d, T2e);
                              STM4(&(io[10]), T2A, ovs, &(io[0]));
                         }
                         {
                              V T2B, T2C, T2D, T2E, T2F, T2G, T2H, T2I;
                              {
                                   V TV, T1r, T1p, T1v, T1i, T1q, T1u, T1w, TU, T1o;
                                   TU = VMUL(LDK(KP707106781), VSUB(TQ, TT));
                                   TV = VADD(TN, TU);
                                   T1r = VSUB(TN, TU);
                                   T1o = VMUL(LDK(KP707106781), VSUB(T1m, T1n));
                                   T1p = VSUB(T1l, T1o);
                                   T1v = VADD(T1l, T1o);
                                   {
                                        V T16, T1h, T1s, T1t;
                                        T16 = VFMA(LDK(KP923879532), T10, VMUL(LDK(KP382683432), T15));
                                        T1h = VFNMS(LDK(KP923879532), T1g, VMUL(LDK(KP382683432), T1b));
                                        T1i = VADD(T16, T1h);
                                        T1q = VSUB(T1h, T16);
                                        T1s = VFNMS(LDK(KP923879532), T15, VMUL(LDK(KP382683432), T10));
                                        T1t = VFMA(LDK(KP382683432), T1g, VMUL(LDK(KP923879532), T1b));
                                        T1u = VSUB(T1s, T1t);
                                        T1w = VADD(T1s, T1t);
                                   }
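                                   /* The block above formed the pi/8 rotations for the odd
                                      outputs, e.g. T16 = cos(pi/8)*T10 + sin(pi/8)*T15. */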
                                   T2B = VSUB(TV, T1i);
                                   STM4(&(ro[11]), T2B, ovs, &(ro[1]));
                                   T2C = VSUB(T1v, T1w);
                                   STM4(&(io[11]), T2C, ovs, &(io[1]));
                                   T2D = VADD(TV, T1i);
                                   STM4(&(ro[3]), T2D, ovs, &(ro[1]));
                                   T2E = VADD(T1v, T1w);
                                   STM4(&(io[3]), T2E, ovs, &(io[1]));
                                   T2F = VSUB(T1p, T1q);
                                   STM4(&(io[15]), T2F, ovs, &(io[1]));
                                   T2G = VSUB(T1r, T1u);
                                   STM4(&(ro[15]), T2G, ovs, &(ro[1]));
                                   T2H = VADD(T1p, T1q);
                                   STM4(&(io[7]), T2H, ovs, &(io[1]));
                                   T2I = VADD(T1r, T1u);
                                   STM4(&(ro[7]), T2I, ovs, &(ro[1]));
                              }
                              {
                                   V T1z, T1L, T1J, T1P, T1G, T1K, T1O, T1Q, T1y, T1I;
                                   T1y = VMUL(LDK(KP707106781), VADD(T1n, T1m));
                                   T1z = VADD(T1x, T1y);
                                   T1L = VSUB(T1x, T1y);
                                   T1I = VMUL(LDK(KP707106781), VADD(TQ, TT));
                                   T1J = VSUB(T1H, T1I);
                                   T1P = VADD(T1H, T1I);
                                   {
                                        V T1C, T1F, T1M, T1N;
                                        T1C = VFMA(LDK(KP382683432), T1A, VMUL(LDK(KP923879532), T1B));
                                        T1F = VFNMS(LDK(KP382683432), T1E, VMUL(LDK(KP923879532), T1D));
                                        T1G = VADD(T1C, T1F);
                                        T1K = VSUB(T1F, T1C);
                                        T1M = VFNMS(LDK(KP382683432), T1B, VMUL(LDK(KP923879532), T1A));
                                        T1N = VFMA(LDK(KP923879532), T1E, VMUL(LDK(KP382683432), T1D));
                                        T1O = VSUB(T1M, T1N);
                                        T1Q = VADD(T1M, T1N);
                                   }
                                   {
                                        V T2J, T2K, T2L, T2M;
                                        T2J = VSUB(T1z, T1G);
                                        STM4(&(ro[9]), T2J, ovs, &(ro[1]));
                                        STN4(&(ro[8]), T2l, T2J, T2t, T2B, ovs);
                                        T2K = VSUB(T1P, T1Q);
                                        STM4(&(io[9]), T2K, ovs, &(io[1]));
                                        STN4(&(io[8]), T2n, T2K, T2A, T2C, ovs);
                                        T2L = VADD(T1z, T1G);
                                        STM4(&(ro[1]), T2L, ovs, &(ro[1]));
                                        STN4(&(ro[0]), T2m, T2L, T2v, T2D, ovs);
                                        T2M = VADD(T1P, T1Q);
                                        STM4(&(io[1]), T2M, ovs, &(io[1]));
                                        STN4(&(io[0]), T2o, T2M, T2y, T2E, ovs);
                                   }
                                   {
                                        V T2N, T2O, T2P, T2Q;
                                        T2N = VSUB(T1J, T1K);
                                        STM4(&(io[13]), T2N, ovs, &(io[1]));
                                        STN4(&(io[12]), T2q, T2N, T2w, T2F, ovs);
                                        T2O = VSUB(T1L, T1O);
                                        STM4(&(ro[13]), T2O, ovs, &(ro[1]));
                                        STN4(&(ro[12]), T2r, T2O, T2x, T2G, ovs);
                                        T2P = VADD(T1J, T1K);
                                        STM4(&(io[5]), T2P, ovs, &(io[1]));
                                        STN4(&(io[4]), T2p, T2P, T2u, T2H, ovs);
                                        T2Q = VADD(T1L, T1O);
                                        STM4(&(ro[5]), T2Q, ovs, &(ro[1]));
                                        STN4(&(ro[4]), T2s, T2Q, T2z, T2I, ovs);
                                   }
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 16, XSIMD_STRING("n2sv_16"), {136, 16, 8, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_16) (planner *p) {
     X(kdft_register) (p, n2sv_16, &desc);
}

#endif /* HAVE_FMA */