/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:39:31 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_twidsq_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 8 -dif -name q1fv_8 -include q1f.h */

/*
 * This function contains 264 FP additions, 192 FP multiplications,
 * (or, 184 additions, 112 multiplications, 80 fused multiply/add),
 * 117 stack variables, 1 constants, and 128 memory accesses
 */
#include "q1f.h"
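/*
 * Reader's note (added commentary, not genfft output): judging from the
 * loads and stores below, this "twidsq" codelet appears to perform eight
 * size-8 decimation-in-frequency DFTs on an 8x8 block of complex data held
 * at strides rs and vs, multiply each nonzero-index output by a twiddle
 * factor (BYTWJ), and write the results back transposed in place; the V
 * arithmetic does this for VL such blocks (stride ms) at once.  A minimal
 * scalar sketch of that access pattern follows; the names q1_8_sketch, w,
 * and t are hypothetical, and the twiddle sign convention (BYTWJ versus
 * BYTW) is deliberately elided.
 */
#if 0
#include <complex.h>

static void q1_8_sketch(double complex *x, int rs, int vs,
                        const double complex *w)
{
     double complex t[8][8];
     int c, j, k;
     for (c = 0; c < 8; ++c)        /* row c is x[c*vs + j*rs], j = 0..7 */
          for (k = 0; k < 8; ++k) {
               double complex s = 0.0;
               for (j = 0; j < 8; ++j)
                    s += x[c * vs + j * rs] *
                         cexp(-6.283185307179586 * I * (double) (j * k) / 8.0);
               /* one twiddle per nonzero k; cf. VTW(0, 1)..VTW(0, 7) below */
               t[k][c] = (k > 0) ? s * w[k - 1] : s;
          }
     for (k = 0; k < 8; ++k)        /* write back transposed, in place */
          for (c = 0; c < 8; ++c)
               x[k * vs + c * rs] = t[k][c];
}
#endif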

static void q1fv_8(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT m;
          R *x;
          x = ri;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(16, rs), MAKE_VOLATILE_STRIDE(16, vs)) {
               V T42, T43, T1U, T1V, T2Y, T2Z, TT, TS;
               {
                    V T3, Te, T1E, T1P, Tu, Tp, T25, T20, T2b, T2m, T3M, T2x, T2C, T3X, TA;
                    V TL, T48, T4d, T17, T11, TW, T1i, T2I, T1y, T1t, T2T, T3f, T3q, T34, T39;
                    V T3G, T3B, Ts, Tv, Tf, Ta, T23, T26, T1Q, T1L, T2A, T2D, T2n, T2i, T4b;
                    V T4e, T3Y, T3T, TZ, T12, TM, TH, T35, T2L, T3j, T1w, T1z, T1j, T1e, T36;
                    V T2O, T3C, T3i, T3k;
                    {
                         V T3d, T32, T3e, T3o, T3p, T33;
                         {
                              V T2v, T2w, T3V, T46, T3W;
                              {
                                   V T1, T2, Tc, Td, T1C, T1D, T1N, T1O;
                                   T1 = LD(&(x[0]), ms, &(x[0]));
                                   T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                                   Tc = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                                   Td = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                                   T1C = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                                   T1D = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                                   T1N = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                                   T1O = LD(&(x[WS(vs, 3) + WS(rs, 6)]), ms, &(x[WS(vs, 3)]));
                                   {
                                        V T29, T1Y, T1Z, T2a, T2k, T2l, Tn, To, T3K, T3L;
                                        T29 = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                                        T3 = VSUB(T1, T2);
                                        Tn = VADD(T1, T2);
                                        Te = VSUB(Tc, Td);
                                        To = VADD(Tc, Td);
                                        T1E = VSUB(T1C, T1D);
                                        T1Y = VADD(T1C, T1D);
                                        T1P = VSUB(T1N, T1O);
                                        T1Z = VADD(T1N, T1O);
                                        T2a = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                                        T2k = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                                        T2l = LD(&(x[WS(vs, 4) + WS(rs, 6)]), ms, &(x[WS(vs, 4)]));
                                        Tu = VSUB(Tn, To);
                                        Tp = VADD(Tn, To);
                                        T3K = LD(&(x[WS(vs, 7)]), ms, &(x[WS(vs, 7)]));
                                        T3L = LD(&(x[WS(vs, 7) + WS(rs, 4)]), ms, &(x[WS(vs, 7)]));
                                        T25 = VSUB(T1Y, T1Z);
                                        T20 = VADD(T1Y, T1Z);
                                        T2v = VADD(T29, T2a);
                                        T2b = VSUB(T29, T2a);
                                        T2w = VADD(T2k, T2l);
                                        T2m = VSUB(T2k, T2l);
                                        T3V = LD(&(x[WS(vs, 7) + WS(rs, 2)]), ms, &(x[WS(vs, 7)]));
                                        T46 = VADD(T3K, T3L);
                                        T3M = VSUB(T3K, T3L);
                                        T3W = LD(&(x[WS(vs, 7) + WS(rs, 6)]), ms, &(x[WS(vs, 7)]));
                                   }
                              }
                              {
                                   V T15, TU, T16, T1g, TV, T1h;
                                   {
                                        V Ty, Tz, TJ, TK, T47;
                                        Ty = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                                        Tz = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                                        TJ = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                                        T2x = VADD(T2v, T2w);
                                        T2C = VSUB(T2v, T2w);
                                        TK = LD(&(x[WS(vs, 1) + WS(rs, 6)]), ms, &(x[WS(vs, 1)]));
                                        T47 = VADD(T3V, T3W);
                                        T3X = VSUB(T3V, T3W);
                                        T15 = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                                        TA = VSUB(Ty, Tz);
                                        TU = VADD(Ty, Tz);
                                        T16 = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                                        T1g = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                                        TL = VSUB(TJ, TK);
                                        TV = VADD(TJ, TK);
                                        T48 = VADD(T46, T47);
                                        T4d = VSUB(T46, T47);
                                        T1h = LD(&(x[WS(vs, 2) + WS(rs, 6)]), ms, &(x[WS(vs, 2)]));
                                   }
                                   {
                                        V T2G, T1r, T2H, T2R, T1s, T2S;
                                        T2G = LD(&(x[WS(vs, 5)]), ms, &(x[WS(vs, 5)]));
                                        T17 = VSUB(T15, T16);
                                        T1r = VADD(T15, T16);
                                        T2H = LD(&(x[WS(vs, 5) + WS(rs, 4)]), ms, &(x[WS(vs, 5)]));
                                        T11 = VSUB(TU, TV);
                                        TW = VADD(TU, TV);
                                        T2R = LD(&(x[WS(vs, 5) + WS(rs, 2)]), ms, &(x[WS(vs, 5)]));
                                        T1i = VSUB(T1g, T1h);
                                        T1s = VADD(T1g, T1h);
                                        T2S = LD(&(x[WS(vs, 5) + WS(rs, 6)]), ms, &(x[WS(vs, 5)]));
                                        T3d = LD(&(x[WS(vs, 6)]), ms, &(x[WS(vs, 6)]));
                                        T2I = VSUB(T2G, T2H);
                                        T32 = VADD(T2G, T2H);
                                        T3e = LD(&(x[WS(vs, 6) + WS(rs, 4)]), ms, &(x[WS(vs, 6)]));
                                        T3o = LD(&(x[WS(vs, 6) + WS(rs, 2)]), ms, &(x[WS(vs, 6)]));
                                        T3p = LD(&(x[WS(vs, 6) + WS(rs, 6)]), ms, &(x[WS(vs, 6)]));
                                        T1y = VSUB(T1r, T1s);
                                        T1t = VADD(T1r, T1s);
                                        T33 = VADD(T2R, T2S);
                                        T2T = VSUB(T2R, T2S);
                                   }
                              }
                         }
                         {
                              V T2y, T2e, T3Q, T2z, T2h, T49, T3P, T3R;
                              {
                                   V T6, Tq, T1I, Tr, T9, T21, T1H, T1J;
                                   {
                                        V T4, T3z, T3A, T5, T7, T8, T1F, T1G;
                                        T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                                        T3f = VSUB(T3d, T3e);
                                        T3z = VADD(T3d, T3e);
                                        T3q = VSUB(T3o, T3p);
                                        T3A = VADD(T3o, T3p);
                                        T5 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
                                        T7 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                                        T34 = VADD(T32, T33);
                                        T39 = VSUB(T32, T33);
                                        T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                                        T1F = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        T1G = LD(&(x[WS(vs, 3) + WS(rs, 5)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        T3G = VSUB(T3z, T3A);
                                        T3B = VADD(T3z, T3A);
                                        T6 = VSUB(T4, T5);
                                        Tq = VADD(T4, T5);
                                        T1I = LD(&(x[WS(vs, 3) + WS(rs, 7)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        Tr = VADD(T7, T8);
                                        T9 = VSUB(T7, T8);
                                        T21 = VADD(T1F, T1G);
                                        T1H = VSUB(T1F, T1G);
                                        T1J = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                   }
                                   {
                                        V T2f, T22, T1K, T2g, T2c, T2d, T3N, T3O;
                                        T2c = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T2d = LD(&(x[WS(vs, 4) + WS(rs, 5)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T2f = LD(&(x[WS(vs, 4) + WS(rs, 7)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        Ts = VADD(Tq, Tr);
                                        Tv = VSUB(Tr, Tq);
                                        Tf = VSUB(T9, T6);
                                        Ta = VADD(T6, T9);
                                        T22 = VADD(T1I, T1J);
                                        T1K = VSUB(T1I, T1J);
                                        T2y = VADD(T2c, T2d);
                                        T2e = VSUB(T2c, T2d);
                                        T2g = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T3N = LD(&(x[WS(vs, 7) + WS(rs, 1)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T3O = LD(&(x[WS(vs, 7) + WS(rs, 5)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T3Q = LD(&(x[WS(vs, 7) + WS(rs, 7)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T23 = VADD(T21, T22);
                                        T26 = VSUB(T22, T21);
                                        T1Q = VSUB(T1K, T1H);
                                        T1L = VADD(T1H, T1K);
                                        T2z = VADD(T2f, T2g);
                                        T2h = VSUB(T2f, T2g);
                                        T49 = VADD(T3N, T3O);
                                        T3P = VSUB(T3N, T3O);
                                        T3R = LD(&(x[WS(vs, 7) + WS(rs, 3)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                   }
                              }
                              {
                                   V TX, TD, T1b, TY, TG, T1u, T1a, T1c;
                                   {
                                        V TE, T4a, T3S, TF, TB, TC, T18, T19;
                                        TB = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        TC = LD(&(x[WS(vs, 1) + WS(rs, 5)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        TE = LD(&(x[WS(vs, 1) + WS(rs, 7)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        T2A = VADD(T2y, T2z);
                                        T2D = VSUB(T2z, T2y);
                                        T2n = VSUB(T2h, T2e);
                                        T2i = VADD(T2e, T2h);
                                        T4a = VADD(T3Q, T3R);
                                        T3S = VSUB(T3Q, T3R);
                                        TX = VADD(TB, TC);
                                        TD = VSUB(TB, TC);
                                        TF = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        T18 = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T19 = LD(&(x[WS(vs, 2) + WS(rs, 5)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T1b = LD(&(x[WS(vs, 2) + WS(rs, 7)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T4b = VADD(T49, T4a);
                                        T4e = VSUB(T4a, T49);
                                        T3Y = VSUB(T3S, T3P);
                                        T3T = VADD(T3P, T3S);
                                        TY = VADD(TE, TF);
                                        TG = VSUB(TE, TF);
                                        T1u = VADD(T18, T19);
                                        T1a = VSUB(T18, T19);
                                        T1c = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                   }
                                   {
                                        V T2M, T1v, T1d, T2N, T2J, T2K, T3g, T3h;
                                        T2J = LD(&(x[WS(vs, 5) + WS(rs, 1)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T2K = LD(&(x[WS(vs, 5) + WS(rs, 5)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T2M = LD(&(x[WS(vs, 5) + WS(rs, 7)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        TZ = VADD(TX, TY);
                                        T12 = VSUB(TY, TX);
                                        TM = VSUB(TG, TD);
                                        TH = VADD(TD, TG);
                                        T1v = VADD(T1b, T1c);
                                        T1d = VSUB(T1b, T1c);
                                        T35 = VADD(T2J, T2K);
                                        T2L = VSUB(T2J, T2K);
                                        T2N = LD(&(x[WS(vs, 5) + WS(rs, 3)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T3g = LD(&(x[WS(vs, 6) + WS(rs, 1)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T3h = LD(&(x[WS(vs, 6) + WS(rs, 5)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T3j = LD(&(x[WS(vs, 6) + WS(rs, 7)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T1w = VADD(T1u, T1v);
                                        T1z = VSUB(T1v, T1u);
                                        T1j = VSUB(T1d, T1a);
                                        T1e = VADD(T1a, T1d);
                                        T36 = VADD(T2M, T2N);
                                        T2O = VSUB(T2M, T2N);
                                        T3C = VADD(T3g, T3h);
                                        T3i = VSUB(T3g, T3h);
                                        T3k = LD(&(x[WS(vs, 6) + WS(rs, 3)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   }
                              }
                         }
                    }
                    {
                         V T3a, T2U, T2P, T3H, T3r, T3m, T13, T27, T3b, T4f;
                         {
                              V T37, T3E, T2B, T24;
                              {
                                   V T3D, T3l, Tt, T4c;
                                   ST(&(x[0]), VADD(Tp, Ts), ms, &(x[0]));
                                   ST(&(x[WS(rs, 2)]), VADD(T1t, T1w), ms, &(x[0]));
                                   ST(&(x[WS(rs, 7)]), VADD(T48, T4b), ms, &(x[WS(rs, 1)]));
                                   T37 = VADD(T35, T36);
                                   T3a = VSUB(T36, T35);
                                   T2U = VSUB(T2O, T2L);
                                   T2P = VADD(T2L, T2O);
                                   T3D = VADD(T3j, T3k);
                                   T3l = VSUB(T3j, T3k);
                                   ST(&(x[WS(rs, 4)]), VADD(T2x, T2A), ms, &(x[0]));
                                   ST(&(x[WS(rs, 3)]), VADD(T20, T23), ms, &(x[WS(rs, 1)]));
                                   ST(&(x[WS(rs, 5)]), VADD(T34, T37), ms, &(x[WS(rs, 1)]));
                                   ST(&(x[WS(rs, 1)]), VADD(TW, TZ), ms, &(x[WS(rs, 1)]));
                                   Tt = BYTWJ(&(W[TWVL * 6]), VSUB(Tp, Ts));
                                   T4c = BYTWJ(&(W[TWVL * 6]), VSUB(T48, T4b));
                                   T3E = VADD(T3C, T3D);
                                   T3H = VSUB(T3D, T3C);
                                   T3r = VSUB(T3l, T3i);
                                   T3m = VADD(T3i, T3l);
                                   T2B = BYTWJ(&(W[TWVL * 6]), VSUB(T2x, T2A));
                                   T24 = BYTWJ(&(W[TWVL * 6]), VSUB(T20, T23));
                                   ST(&(x[WS(vs, 4)]), Tt, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 7)]), T4c, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(rs, 6)]), VADD(T3B, T3E), ms, &(x[0]));
                              }
                              {
                                   V T38, T1A, Tw, T10, T1x, T3F, T2E, T3I;
                                   T10 = BYTWJ(&(W[TWVL * 6]), VSUB(TW, TZ));
                                   T1x = BYTWJ(&(W[TWVL * 6]), VSUB(T1t, T1w));
                                   T3F = BYTWJ(&(W[TWVL * 6]), VSUB(T3B, T3E));
                                   ST(&(x[WS(vs, 4) + WS(rs, 4)]), T2B, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 3)]), T24, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   T38 = BYTWJ(&(W[TWVL * 6]), VSUB(T34, T37));
                                   T1A = BYTWJ(&(W[TWVL * 10]), VFNMSI(T1z, T1y));
                                   Tw = BYTWJ(&(W[TWVL * 10]), VFNMSI(Tv, Tu));
                                   ST(&(x[WS(vs, 4) + WS(rs, 1)]), T10, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 2)]), T1x, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 6)]), T3F, ms, &(x[WS(vs, 4)]));
                                   T2E = BYTWJ(&(W[TWVL * 10]), VFNMSI(T2D, T2C));
                                   T3I = BYTWJ(&(W[TWVL * 10]), VFNMSI(T3H, T3G));
                                   ST(&(x[WS(vs, 4) + WS(rs, 5)]), T38, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 2)]), T1A, ms, &(x[WS(vs, 6)]));
                                   ST(&(x[WS(vs, 6)]), Tw, ms, &(x[WS(vs, 6)]));
                                   T13 = BYTWJ(&(W[TWVL * 10]), VFNMSI(T12, T11));
                                   T27 = BYTWJ(&(W[TWVL * 10]), VFNMSI(T26, T25));
                                   T3b = BYTWJ(&(W[TWVL * 10]), VFNMSI(T3a, T39));
                                   ST(&(x[WS(vs, 6) + WS(rs, 4)]), T2E, ms, &(x[WS(vs, 6)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 6)]), T3I, ms, &(x[WS(vs, 6)]));
                                   T4f = BYTWJ(&(W[TWVL * 10]), VFNMSI(T4e, T4d));
                              }
                         }
                         {
                              V Tj, Tk, T2r, T2j, Ti, Th, T2o, T2s, T1M, T1R, T41, T40;
                              {
                                   V T3c, T4g, T3J, T2F, Tx, T1B;
                                   Tx = BYTWJ(&(W[TWVL * 2]), VFMAI(Tv, Tu));
                                   T1B = BYTWJ(&(W[TWVL * 2]), VFMAI(T1z, T1y));
                                   ST(&(x[WS(vs, 6) + WS(rs, 1)]), T13, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 3)]), T27, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 5)]), T3b, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   T3c = BYTWJ(&(W[TWVL * 2]), VFMAI(T3a, T39));
                                   T4g = BYTWJ(&(W[TWVL * 2]), VFMAI(T4e, T4d));
                                   ST(&(x[WS(vs, 6) + WS(rs, 7)]), T4f, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
                                   ST(&(x[WS(vs, 2) + WS(rs, 2)]), T1B, ms, &(x[WS(vs, 2)]));
                                   T3J = BYTWJ(&(W[TWVL * 2]), VFMAI(T3H, T3G));
                                   T2F = BYTWJ(&(W[TWVL * 2]), VFMAI(T2D, T2C));
                                   {
                                        V T14, Tb, Tg, T28, T3U, T3Z;
                                        T28 = BYTWJ(&(W[TWVL * 2]), VFMAI(T26, T25));
                                        ST(&(x[WS(vs, 2) + WS(rs, 5)]), T3c, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 7)]), T4g, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T14 = BYTWJ(&(W[TWVL * 2]), VFMAI(T12, T11));
                                        Tj = VFNMS(LDK(KP707106781), Ta, T3);
                                        Tb = VFMA(LDK(KP707106781), Ta, T3);
                                        Tg = VFNMS(LDK(KP707106781), Tf, Te);
                                        Tk = VFMA(LDK(KP707106781), Tf, Te);
                                        ST(&(x[WS(vs, 2) + WS(rs, 6)]), T3J, ms, &(x[WS(vs, 2)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 4)]), T2F, ms, &(x[WS(vs, 2)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 3)]), T28, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T3U = VFMA(LDK(KP707106781), T3T, T3M);
                                        T42 = VFNMS(LDK(KP707106781), T3T, T3M);
                                        T43 = VFMA(LDK(KP707106781), T3Y, T3X);
                                        T3Z = VFNMS(LDK(KP707106781), T3Y, T3X);
                                        ST(&(x[WS(vs, 2) + WS(rs, 1)]), T14, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T2r = VFNMS(LDK(KP707106781), T2i, T2b);
                                        T2j = VFMA(LDK(KP707106781), T2i, T2b);
                                        Ti = BYTWJ(&(W[TWVL * 12]), VFMAI(Tg, Tb));
                                        Th = BYTWJ(&(W[0]), VFNMSI(Tg, Tb));
                                        T2o = VFNMS(LDK(KP707106781), T2n, T2m);
                                        T2s = VFMA(LDK(KP707106781), T2n, T2m);
                                        T1U = VFNMS(LDK(KP707106781), T1L, T1E);
                                        T1M = VFMA(LDK(KP707106781), T1L, T1E);
                                        T1R = VFNMS(LDK(KP707106781), T1Q, T1P);
                                        T1V = VFMA(LDK(KP707106781), T1Q, T1P);
                                        T41 = BYTWJ(&(W[TWVL * 12]), VFMAI(T3Z, T3U));
                                        T40 = BYTWJ(&(W[0]), VFNMSI(T3Z, T3U));
                                   }
                              }
                              {
                                   V TQ, TR, T1n, T1o, T3v, T3w;
                                   {
                                        V T1f, T1k, T3n, TP, TO, T3s, T2Q, T2V;
                                        {
                                             V TI, T2q, T2p, T1T, T1S, TN;
                                             TQ = VFNMS(LDK(KP707106781), TH, TA);
                                             TI = VFMA(LDK(KP707106781), TH, TA);
                                             ST(&(x[WS(vs, 7)]), Ti, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1)]), Th, ms, &(x[WS(vs, 1)]));
                                             T2q = BYTWJ(&(W[TWVL * 12]), VFMAI(T2o, T2j));
                                             T2p = BYTWJ(&(W[0]), VFNMSI(T2o, T2j));
                                             T1T = BYTWJ(&(W[TWVL * 12]), VFMAI(T1R, T1M));
                                             T1S = BYTWJ(&(W[0]), VFNMSI(T1R, T1M));
                                             ST(&(x[WS(vs, 7) + WS(rs, 7)]), T41, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 7)]), T40, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             TN = VFNMS(LDK(KP707106781), TM, TL);
                                             TR = VFMA(LDK(KP707106781), TM, TL);
                                             T1n = VFNMS(LDK(KP707106781), T1e, T17);
                                             T1f = VFMA(LDK(KP707106781), T1e, T17);
                                             ST(&(x[WS(vs, 7) + WS(rs, 4)]), T2q, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 4)]), T2p, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 3)]), T1T, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1S, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             T1k = VFNMS(LDK(KP707106781), T1j, T1i);
                                             T1o = VFMA(LDK(KP707106781), T1j, T1i);
                                             T3v = VFNMS(LDK(KP707106781), T3m, T3f);
                                             T3n = VFMA(LDK(KP707106781), T3m, T3f);
                                             TP = BYTWJ(&(W[TWVL * 12]), VFMAI(TN, TI));
                                             TO = BYTWJ(&(W[0]), VFNMSI(TN, TI));
                                             T3s = VFNMS(LDK(KP707106781), T3r, T3q);
                                             T3w = VFMA(LDK(KP707106781), T3r, T3q);
                                        }
                                        T2Y = VFNMS(LDK(KP707106781), T2P, T2I);
                                        T2Q = VFMA(LDK(KP707106781), T2P, T2I);
                                        T2V = VFNMS(LDK(KP707106781), T2U, T2T);
                                        T2Z = VFMA(LDK(KP707106781), T2U, T2T);
                                        {
                                             V T3u, T3t, T2X, T2W, T1m, T1l;
                                             T1m = BYTWJ(&(W[TWVL * 12]), VFMAI(T1k, T1f));
                                             T1l = BYTWJ(&(W[0]), VFNMSI(T1k, T1f));
                                             ST(&(x[WS(vs, 7) + WS(rs, 1)]), TP, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 1)]), TO, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             T3u = BYTWJ(&(W[TWVL * 12]), VFMAI(T3s, T3n));
                                             T3t = BYTWJ(&(W[0]), VFNMSI(T3s, T3n));
                                             T2X = BYTWJ(&(W[TWVL * 12]), VFMAI(T2V, T2Q));
                                             T2W = BYTWJ(&(W[0]), VFNMSI(T2V, T2Q));
                                             ST(&(x[WS(vs, 7) + WS(rs, 2)]), T1m, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 2)]), T1l, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 6)]), T3u, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 6)]), T3t, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 5)]), T2X, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 5)]), T2W, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        }
                                   }
                                   {
                                        V T2u, T2t, T3y, T3x;
                                        {
                                             V T1q, T1p, Tm, Tl;
                                             T1q = BYTWJ(&(W[TWVL * 4]), VFMAI(T1o, T1n));
                                             T1p = BYTWJ(&(W[TWVL * 8]), VFNMSI(T1o, T1n));
                                             Tm = BYTWJ(&(W[TWVL * 4]), VFMAI(Tk, Tj));
                                             Tl = BYTWJ(&(W[TWVL * 8]), VFNMSI(Tk, Tj));
                                             ST(&(x[WS(vs, 3) + WS(rs, 2)]), T1q, ms, &(x[WS(vs, 3)]));
                                             ST(&(x[WS(vs, 5) + WS(rs, 2)]), T1p, ms, &(x[WS(vs, 5)]));
                                             T2u = BYTWJ(&(W[TWVL * 4]), VFMAI(T2s, T2r));
                                             T2t = BYTWJ(&(W[TWVL * 8]), VFNMSI(T2s, T2r));
                                             T3y = BYTWJ(&(W[TWVL * 4]), VFMAI(T3w, T3v));
                                             T3x = BYTWJ(&(W[TWVL * 8]), VFNMSI(T3w, T3v));
                                             ST(&(x[WS(vs, 3)]), Tm, ms, &(x[WS(vs, 3)]));
                                             ST(&(x[WS(vs, 5)]), Tl, ms, &(x[WS(vs, 5)]));
                                        }
                                        ST(&(x[WS(vs, 3) + WS(rs, 4)]), T2u, ms, &(x[WS(vs, 3)]));
                                        ST(&(x[WS(vs, 5) + WS(rs, 4)]), T2t, ms, &(x[WS(vs, 5)]));
                                        ST(&(x[WS(vs, 3) + WS(rs, 6)]), T3y, ms, &(x[WS(vs, 3)]));
                                        ST(&(x[WS(vs, 5) + WS(rs, 6)]), T3x, ms, &(x[WS(vs, 5)]));
                                        TT = BYTWJ(&(W[TWVL * 4]), VFMAI(TR, TQ));
                                        TS = BYTWJ(&(W[TWVL * 8]), VFNMSI(TR, TQ));
                                   }
                              }
                         }
                    }
               }
               {
                    V T31, T30, T45, T44, T1X, T1W;
                    T1X = BYTWJ(&(W[TWVL * 4]), VFMAI(T1V, T1U));
                    T1W = BYTWJ(&(W[TWVL * 8]), VFNMSI(T1V, T1U));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), TT, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 1)]), TS, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    T31 = BYTWJ(&(W[TWVL * 4]), VFMAI(T2Z, T2Y));
                    T30 = BYTWJ(&(W[TWVL * 8]), VFNMSI(T2Z, T2Y));
                    T45 = BYTWJ(&(W[TWVL * 4]), VFMAI(T43, T42));
                    T44 = BYTWJ(&(W[TWVL * 8]), VFNMSI(T43, T42));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1X, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 3)]), T1W, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 5)]), T31, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 5)]), T30, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 7)]), T45, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 7)]), T44, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     {TW_NEXT, VL, 0}
};

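/*
 * Reader's note (added commentary, not genfft output): the twinstr table
 * above requests twiddle factors W^1..W^7 for each transformed row --
 * seven complex values, i.e. 14 reals per m, consistent with the
 * "W + (TWVL * 14)" advance in the main loop -- and {TW_NEXT, VL, 0}
 * ends the list, stepping to the twiddles for the next VL transforms.
 * In the descriptor below, {184, 112, 80, 0} repeats the header comment's
 * operation counts (adds, multiplies, fused multiply-adds, other), which
 * the planner uses when estimating this codelet's cost.
 */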
static const ct_desc desc = { 8, XSIMD_STRING("q1fv_8"), twinstr, &GENUS, {184, 112, 80, 0}, 0, 0, 0 };

void XSIMD(codelet_q1fv_8) (planner *p) {
     X(kdft_difsq_register) (p, q1fv_8, &desc);
}
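/*
 * Reader's note (added commentary): the registration function above appears
 * to be the codelet's only entry point -- X(kdft_difsq_register) hands
 * q1fv_8 and its descriptor to the planner, which decides when to use it;
 * nothing calls the kernel directly.
 */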
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_twidsq_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 8 -dif -name q1fv_8 -include q1f.h */

/*
 * This function contains 264 FP additions, 128 FP multiplications,
 * (or, 264 additions, 128 multiplications, 0 fused multiply/add),
 * 77 stack variables, 1 constants, and 128 memory accesses
 */
#include "q1f.h"

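/*
 * Reader's note (added commentary): the branch below is the same transform
 * generated without fused multiply-add support.  Where the HAVE_FMA variant
 * uses VFMA/VFNMS and VFMAI/VFNMSI, this one spells the butterflies out with
 * VADD/VSUB, VMUL and VBYI, which is why its header comment counts 264
 * additions and 128 multiplications but no fused multiply/adds.
 */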
static void q1fv_8(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT m;
          R *x;
          x = ri;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(16, rs), MAKE_VOLATILE_STRIDE(16, vs)) {
               V T3, Tu, Tf, Tp, T1E, T25, T1Q, T20, T2b, T2C, T2n, T2x, T3M, T4d, T3Y;
               V T48, TA, T11, TM, TW, T17, T1y, T1j, T1t, T2I, T39, T2U, T34, T3f, T3G;
               V T3r, T3B, Ta, Tv, Tc, Ts, T1L, T26, T1N, T23, T2i, T2D, T2k, T2A, T3T;
               V T4e, T3V, T4b, TH, T12, TJ, TZ, T1e, T1z, T1g, T1w, T2P, T3a, T2R, T37;
               V T3m, T3H, T3o, T3E, T28, T14;
               {
                    V T1, T2, Tn, Td, Te, To;
                    T1 = LD(&(x[0]), ms, &(x[0]));
                    T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                    Tn = VADD(T1, T2);
                    Td = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                    Te = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                    To = VADD(Td, Te);
                    T3 = VSUB(T1, T2);
                    Tu = VSUB(Tn, To);
                    Tf = VSUB(Td, Te);
                    Tp = VADD(Tn, To);
               }
               {
                    V T1C, T1D, T1Y, T1O, T1P, T1Z;
                    T1C = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                    T1D = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                    T1Y = VADD(T1C, T1D);
                    T1O = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                    T1P = LD(&(x[WS(vs, 3) + WS(rs, 6)]), ms, &(x[WS(vs, 3)]));
                    T1Z = VADD(T1O, T1P);
                    T1E = VSUB(T1C, T1D);
                    T25 = VSUB(T1Y, T1Z);
                    T1Q = VSUB(T1O, T1P);
                    T20 = VADD(T1Y, T1Z);
               }
               {
                    V T29, T2a, T2v, T2l, T2m, T2w;
                    T29 = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                    T2a = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                    T2v = VADD(T29, T2a);
                    T2l = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                    T2m = LD(&(x[WS(vs, 4) + WS(rs, 6)]), ms, &(x[WS(vs, 4)]));
                    T2w = VADD(T2l, T2m);
                    T2b = VSUB(T29, T2a);
                    T2C = VSUB(T2v, T2w);
                    T2n = VSUB(T2l, T2m);
                    T2x = VADD(T2v, T2w);
               }
               {
                    V T3K, T3L, T46, T3W, T3X, T47;
                    T3K = LD(&(x[WS(vs, 7)]), ms, &(x[WS(vs, 7)]));
                    T3L = LD(&(x[WS(vs, 7) + WS(rs, 4)]), ms, &(x[WS(vs, 7)]));
                    T46 = VADD(T3K, T3L);
                    T3W = LD(&(x[WS(vs, 7) + WS(rs, 2)]), ms, &(x[WS(vs, 7)]));
                    T3X = LD(&(x[WS(vs, 7) + WS(rs, 6)]), ms, &(x[WS(vs, 7)]));
                    T47 = VADD(T3W, T3X);
                    T3M = VSUB(T3K, T3L);
                    T4d = VSUB(T46, T47);
                    T3Y = VSUB(T3W, T3X);
                    T48 = VADD(T46, T47);
               }
               {
                    V Ty, Tz, TU, TK, TL, TV;
                    Ty = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                    Tz = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                    TU = VADD(Ty, Tz);
                    TK = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                    TL = LD(&(x[WS(vs, 1) + WS(rs, 6)]), ms, &(x[WS(vs, 1)]));
                    TV = VADD(TK, TL);
                    TA = VSUB(Ty, Tz);
                    T11 = VSUB(TU, TV);
                    TM = VSUB(TK, TL);
                    TW = VADD(TU, TV);
               }
               {
                    V T15, T16, T1r, T1h, T1i, T1s;
                    T15 = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                    T16 = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                    T1r = VADD(T15, T16);
                    T1h = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                    T1i = LD(&(x[WS(vs, 2) + WS(rs, 6)]), ms, &(x[WS(vs, 2)]));
                    T1s = VADD(T1h, T1i);
                    T17 = VSUB(T15, T16);
                    T1y = VSUB(T1r, T1s);
                    T1j = VSUB(T1h, T1i);
                    T1t = VADD(T1r, T1s);
               }
               {
                    V T2G, T2H, T32, T2S, T2T, T33;
                    T2G = LD(&(x[WS(vs, 5)]), ms, &(x[WS(vs, 5)]));
                    T2H = LD(&(x[WS(vs, 5) + WS(rs, 4)]), ms, &(x[WS(vs, 5)]));
                    T32 = VADD(T2G, T2H);
                    T2S = LD(&(x[WS(vs, 5) + WS(rs, 2)]), ms, &(x[WS(vs, 5)]));
                    T2T = LD(&(x[WS(vs, 5) + WS(rs, 6)]), ms, &(x[WS(vs, 5)]));
                    T33 = VADD(T2S, T2T);
                    T2I = VSUB(T2G, T2H);
                    T39 = VSUB(T32, T33);
                    T2U = VSUB(T2S, T2T);
                    T34 = VADD(T32, T33);
               }
               {
                    V T3d, T3e, T3z, T3p, T3q, T3A;
                    T3d = LD(&(x[WS(vs, 6)]), ms, &(x[WS(vs, 6)]));
                    T3e = LD(&(x[WS(vs, 6) + WS(rs, 4)]), ms, &(x[WS(vs, 6)]));
                    T3z = VADD(T3d, T3e);
                    T3p = LD(&(x[WS(vs, 6) + WS(rs, 2)]), ms, &(x[WS(vs, 6)]));
                    T3q = LD(&(x[WS(vs, 6) + WS(rs, 6)]), ms, &(x[WS(vs, 6)]));
                    T3A = VADD(T3p, T3q);
                    T3f = VSUB(T3d, T3e);
                    T3G = VSUB(T3z, T3A);
                    T3r = VSUB(T3p, T3q);
                    T3B = VADD(T3z, T3A);
               }
               {
                    V T6, Tq, T9, Tr;
                    {
                         V T4, T5, T7, T8;
                         T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                         T5 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
                         T6 = VSUB(T4, T5);
                         Tq = VADD(T4, T5);
                         T7 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                         T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                         T9 = VSUB(T7, T8);
                         Tr = VADD(T7, T8);
                    }
                    Ta = VMUL(LDK(KP707106781), VADD(T6, T9));
                    Tv = VBYI(VSUB(Tr, Tq));
                    Tc = VMUL(LDK(KP707106781), VSUB(T9, T6));
                    Ts = VADD(Tq, Tr);
               }
               {
                    V T1H, T21, T1K, T22;
                    {
                         V T1F, T1G, T1I, T1J;
                         T1F = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1G = LD(&(x[WS(vs, 3) + WS(rs, 5)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1H = VSUB(T1F, T1G);
                         T21 = VADD(T1F, T1G);
                         T1I = LD(&(x[WS(vs, 3) + WS(rs, 7)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1J = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1K = VSUB(T1I, T1J);
                         T22 = VADD(T1I, T1J);
                    }
                    T1L = VMUL(LDK(KP707106781), VADD(T1H, T1K));
                    T26 = VBYI(VSUB(T22, T21));
                    T1N = VMUL(LDK(KP707106781), VSUB(T1K, T1H));
                    T23 = VADD(T21, T22);
               }
               {
                    V T2e, T2y, T2h, T2z;
                    {
                         V T2c, T2d, T2f, T2g;
                         T2c = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2d = LD(&(x[WS(vs, 4) + WS(rs, 5)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2e = VSUB(T2c, T2d);
                         T2y = VADD(T2c, T2d);
                         T2f = LD(&(x[WS(vs, 4) + WS(rs, 7)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2g = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2h = VSUB(T2f, T2g);
                         T2z = VADD(T2f, T2g);
                    }
                    T2i = VMUL(LDK(KP707106781), VADD(T2e, T2h));
                    T2D = VBYI(VSUB(T2z, T2y));
                    T2k = VMUL(LDK(KP707106781), VSUB(T2h, T2e));
                    T2A = VADD(T2y, T2z);
               }
               {
                    V T3P, T49, T3S, T4a;
                    {
                         V T3N, T3O, T3Q, T3R;
                         T3N = LD(&(x[WS(vs, 7) + WS(rs, 1)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3O = LD(&(x[WS(vs, 7) + WS(rs, 5)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3P = VSUB(T3N, T3O);
                         T49 = VADD(T3N, T3O);
                         T3Q = LD(&(x[WS(vs, 7) + WS(rs, 7)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3R = LD(&(x[WS(vs, 7) + WS(rs, 3)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3S = VSUB(T3Q, T3R);
                         T4a = VADD(T3Q, T3R);
                    }
                    T3T = VMUL(LDK(KP707106781), VADD(T3P, T3S));
                    T4e = VBYI(VSUB(T4a, T49));
                    T3V = VMUL(LDK(KP707106781), VSUB(T3S, T3P));
                    T4b = VADD(T49, T4a);
               }
               {
                    V TD, TX, TG, TY;
                    {
                         V TB, TC, TE, TF;
                         TB = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TC = LD(&(x[WS(vs, 1) + WS(rs, 5)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TD = VSUB(TB, TC);
                         TX = VADD(TB, TC);
                         TE = LD(&(x[WS(vs, 1) + WS(rs, 7)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TF = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TG = VSUB(TE, TF);
                         TY = VADD(TE, TF);
                    }
                    TH = VMUL(LDK(KP707106781), VADD(TD, TG));
                    T12 = VBYI(VSUB(TY, TX));
                    TJ = VMUL(LDK(KP707106781), VSUB(TG, TD));
                    TZ = VADD(TX, TY);
               }
               {
                    V T1a, T1u, T1d, T1v;
                    {
                         V T18, T19, T1b, T1c;
                         T18 = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T19 = LD(&(x[WS(vs, 2) + WS(rs, 5)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1a = VSUB(T18, T19);
                         T1u = VADD(T18, T19);
                         T1b = LD(&(x[WS(vs, 2) + WS(rs, 7)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1c = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1d = VSUB(T1b, T1c);
                         T1v = VADD(T1b, T1c);
                    }
                    T1e = VMUL(LDK(KP707106781), VADD(T1a, T1d));
                    T1z = VBYI(VSUB(T1v, T1u));
                    T1g = VMUL(LDK(KP707106781), VSUB(T1d, T1a));
                    T1w = VADD(T1u, T1v);
               }
               {
                    V T2L, T35, T2O, T36;
                    {
                         V T2J, T2K, T2M, T2N;
                         T2J = LD(&(x[WS(vs, 5) + WS(rs, 1)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2K = LD(&(x[WS(vs, 5) + WS(rs, 5)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2L = VSUB(T2J, T2K);
                         T35 = VADD(T2J, T2K);
                         T2M = LD(&(x[WS(vs, 5) + WS(rs, 7)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2N = LD(&(x[WS(vs, 5) + WS(rs, 3)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2O = VSUB(T2M, T2N);
                         T36 = VADD(T2M, T2N);
                    }
                    T2P = VMUL(LDK(KP707106781), VADD(T2L, T2O));
                    T3a = VBYI(VSUB(T36, T35));
                    T2R = VMUL(LDK(KP707106781), VSUB(T2O, T2L));
                    T37 = VADD(T35, T36);
               }
               {
                    V T3i, T3C, T3l, T3D;
                    {
                         V T3g, T3h, T3j, T3k;
                         T3g = LD(&(x[WS(vs, 6) + WS(rs, 1)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3h = LD(&(x[WS(vs, 6) + WS(rs, 5)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3i = VSUB(T3g, T3h);
                         T3C = VADD(T3g, T3h);
                         T3j = LD(&(x[WS(vs, 6) + WS(rs, 7)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3k = LD(&(x[WS(vs, 6) + WS(rs, 3)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3l = VSUB(T3j, T3k);
                         T3D = VADD(T3j, T3k);
                    }
                    T3m = VMUL(LDK(KP707106781), VADD(T3i, T3l));
                    T3H = VBYI(VSUB(T3D, T3C));
                    T3o = VMUL(LDK(KP707106781), VSUB(T3l, T3i));
                    T3E = VADD(T3C, T3D);
               }
               ST(&(x[0]), VADD(Tp, Ts), ms, &(x[0]));
               ST(&(x[WS(rs, 2)]), VADD(T1t, T1w), ms, &(x[0]));
               ST(&(x[WS(rs, 5)]), VADD(T34, T37), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 7)]), VADD(T48, T4b), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 6)]), VADD(T3B, T3E), ms, &(x[0]));
               ST(&(x[WS(rs, 4)]), VADD(T2x, T2A), ms, &(x[0]));
               {
                    V Tt, T4c, T2B, T24;
                    ST(&(x[WS(rs, 3)]), VADD(T20, T23), ms, &(x[WS(rs, 1)]));
                    ST(&(x[WS(rs, 1)]), VADD(TW, TZ), ms, &(x[WS(rs, 1)]));
                    Tt = BYTWJ(&(W[TWVL * 6]), VSUB(Tp, Ts));
                    ST(&(x[WS(vs, 4)]), Tt, ms, &(x[WS(vs, 4)]));
                    T4c = BYTWJ(&(W[TWVL * 6]), VSUB(T48, T4b));
                    ST(&(x[WS(vs, 4) + WS(rs, 7)]), T4c, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T2B = BYTWJ(&(W[TWVL * 6]), VSUB(T2x, T2A));
                    ST(&(x[WS(vs, 4) + WS(rs, 4)]), T2B, ms, &(x[WS(vs, 4)]));
                    T24 = BYTWJ(&(W[TWVL * 6]), VSUB(T20, T23));
                    ST(&(x[WS(vs, 4) + WS(rs, 3)]), T24, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
               }
               {
                    V T10, T1x, T3F, T38, T1A, Tw;
                    T10 = BYTWJ(&(W[TWVL * 6]), VSUB(TW, TZ));
                    ST(&(x[WS(vs, 4) + WS(rs, 1)]), T10, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T1x = BYTWJ(&(W[TWVL * 6]), VSUB(T1t, T1w));
                    ST(&(x[WS(vs, 4) + WS(rs, 2)]), T1x, ms, &(x[WS(vs, 4)]));
                    T3F = BYTWJ(&(W[TWVL * 6]), VSUB(T3B, T3E));
                    ST(&(x[WS(vs, 4) + WS(rs, 6)]), T3F, ms, &(x[WS(vs, 4)]));
                    T38 = BYTWJ(&(W[TWVL * 6]), VSUB(T34, T37));
                    ST(&(x[WS(vs, 4) + WS(rs, 5)]), T38, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T1A = BYTWJ(&(W[TWVL * 10]), VSUB(T1y, T1z));
                    ST(&(x[WS(vs, 6) + WS(rs, 2)]), T1A, ms, &(x[WS(vs, 6)]));
                    Tw = BYTWJ(&(W[TWVL * 10]), VSUB(Tu, Tv));
                    ST(&(x[WS(vs, 6)]), Tw, ms, &(x[WS(vs, 6)]));
               }
               {
                    V T2E, T3I, T13, T27, T3b, T4f;
                    T2E = BYTWJ(&(W[TWVL * 10]), VSUB(T2C, T2D));
                    ST(&(x[WS(vs, 6) + WS(rs, 4)]), T2E, ms, &(x[WS(vs, 6)]));
                    T3I = BYTWJ(&(W[TWVL * 10]), VSUB(T3G, T3H));
                    ST(&(x[WS(vs, 6) + WS(rs, 6)]), T3I, ms, &(x[WS(vs, 6)]));
                    T13 = BYTWJ(&(W[TWVL * 10]), VSUB(T11, T12));
                    ST(&(x[WS(vs, 6) + WS(rs, 1)]), T13, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T27 = BYTWJ(&(W[TWVL * 10]), VSUB(T25, T26));
                    ST(&(x[WS(vs, 6) + WS(rs, 3)]), T27, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T3b = BYTWJ(&(W[TWVL * 10]), VSUB(T39, T3a));
                    ST(&(x[WS(vs, 6) + WS(rs, 5)]), T3b, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T4f = BYTWJ(&(W[TWVL * 10]), VSUB(T4d, T4e));
                    ST(&(x[WS(vs, 6) + WS(rs, 7)]), T4f, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
               }
               {
                    V Tx, T1B, T3c, T4g, T3J, T2F;
                    Tx = BYTWJ(&(W[TWVL * 2]), VADD(Tu, Tv));
                    ST(&(x[WS(vs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
                    T1B = BYTWJ(&(W[TWVL * 2]), VADD(T1y, T1z));
                    ST(&(x[WS(vs, 2) + WS(rs, 2)]), T1B, ms, &(x[WS(vs, 2)]));
                    T3c = BYTWJ(&(W[TWVL * 2]), VADD(T39, T3a));
                    ST(&(x[WS(vs, 2) + WS(rs, 5)]), T3c, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    T4g = BYTWJ(&(W[TWVL * 2]), VADD(T4d, T4e));
                    ST(&(x[WS(vs, 2) + WS(rs, 7)]), T4g, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    T3J = BYTWJ(&(W[TWVL * 2]), VADD(T3G, T3H));
                    ST(&(x[WS(vs, 2) + WS(rs, 6)]), T3J, ms, &(x[WS(vs, 2)]));
                    T2F = BYTWJ(&(W[TWVL * 2]), VADD(T2C, T2D));
                    ST(&(x[WS(vs, 2) + WS(rs, 4)]), T2F, ms, &(x[WS(vs, 2)]));
               }
               T28 = BYTWJ(&(W[TWVL * 2]), VADD(T25, T26));
               ST(&(x[WS(vs, 2) + WS(rs, 3)]), T28, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               T14 = BYTWJ(&(W[TWVL * 2]), VADD(T11, T12));
               ST(&(x[WS(vs, 2) + WS(rs, 1)]), T14, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               {
                    V Th, Ti, Tb, Tg;
                    Tb = VADD(T3, Ta);
                    Tg = VBYI(VSUB(Tc, Tf));
                    Th = BYTWJ(&(W[TWVL * 12]), VSUB(Tb, Tg));
                    Ti = BYTWJ(&(W[0]), VADD(Tb, Tg));
                    ST(&(x[WS(vs, 7)]), Th, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1)]), Ti, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T40, T41, T3U, T3Z;
                    T3U = VADD(T3M, T3T);
                    T3Z = VBYI(VSUB(T3V, T3Y));
                    T40 = BYTWJ(&(W[TWVL * 12]), VSUB(T3U, T3Z));
                    T41 = BYTWJ(&(W[0]), VADD(T3U, T3Z));
                    ST(&(x[WS(vs, 7) + WS(rs, 7)]), T40, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 7)]), T41, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T2p, T2q, T2j, T2o;
                    T2j = VADD(T2b, T2i);
                    T2o = VBYI(VSUB(T2k, T2n));
                    T2p = BYTWJ(&(W[TWVL * 12]), VSUB(T2j, T2o));
                    T2q = BYTWJ(&(W[0]), VADD(T2j, T2o));
                    ST(&(x[WS(vs, 7) + WS(rs, 4)]), T2p, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 4)]), T2q, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T1S, T1T, T1M, T1R;
                    T1M = VADD(T1E, T1L);
                    T1R = VBYI(VSUB(T1N, T1Q));
                    T1S = BYTWJ(&(W[TWVL * 12]), VSUB(T1M, T1R));
                    T1T = BYTWJ(&(W[0]), VADD(T1M, T1R));
                    ST(&(x[WS(vs, 7) + WS(rs, 3)]), T1S, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1T, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V TO, TP, TI, TN;
                    TI = VADD(TA, TH);
                    TN = VBYI(VSUB(TJ, TM));
                    TO = BYTWJ(&(W[TWVL * 12]), VSUB(TI, TN));
                    TP = BYTWJ(&(W[0]), VADD(TI, TN));
                    ST(&(x[WS(vs, 7) + WS(rs, 1)]), TO, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 1)]), TP, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T1l, T1m, T1f, T1k;
                    T1f = VADD(T17, T1e);
                    T1k = VBYI(VSUB(T1g, T1j));
                    T1l = BYTWJ(&(W[TWVL * 12]), VSUB(T1f, T1k));
                    T1m = BYTWJ(&(W[0]), VADD(T1f, T1k));
                    ST(&(x[WS(vs, 7) + WS(rs, 2)]), T1l, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 2)]), T1m, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T3t, T3u, T3n, T3s;
                    T3n = VADD(T3f, T3m);
                    T3s = VBYI(VSUB(T3o, T3r));
                    T3t = BYTWJ(&(W[TWVL * 12]), VSUB(T3n, T3s));
                    T3u = BYTWJ(&(W[0]), VADD(T3n, T3s));
                    ST(&(x[WS(vs, 7) + WS(rs, 6)]), T3t, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 6)]), T3u, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T2W, T2X, T2Q, T2V;
                    T2Q = VADD(T2I, T2P);
                    T2V = VBYI(VSUB(T2R, T2U));
                    T2W = BYTWJ(&(W[TWVL * 12]), VSUB(T2Q, T2V));
                    T2X = BYTWJ(&(W[0]), VADD(T2Q, T2V));
                    ST(&(x[WS(vs, 7) + WS(rs, 5)]), T2W, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 5)]), T2X, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T1p, T1q, T1n, T1o;
                    T1n = VSUB(T17, T1e);
                    T1o = VBYI(VADD(T1j, T1g));
                    T1p = BYTWJ(&(W[TWVL * 8]), VSUB(T1n, T1o));
                    T1q = BYTWJ(&(W[TWVL * 4]), VADD(T1n, T1o));
                    ST(&(x[WS(vs, 5) + WS(rs, 2)]), T1p, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 2)]), T1q, ms, &(x[WS(vs, 3)]));
               }
               {
                    V Tl, Tm, Tj, Tk;
                    Tj = VSUB(T3, Ta);
                    Tk = VBYI(VADD(Tf, Tc));
                    Tl = BYTWJ(&(W[TWVL * 8]), VSUB(Tj, Tk));
                    Tm = BYTWJ(&(W[TWVL * 4]), VADD(Tj, Tk));
                    ST(&(x[WS(vs, 5)]), Tl, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3)]), Tm, ms, &(x[WS(vs, 3)]));
               }
               {
                    V T2t, T2u, T2r, T2s;
                    T2r = VSUB(T2b, T2i);
                    T2s = VBYI(VADD(T2n, T2k));
                    T2t = BYTWJ(&(W[TWVL * 8]), VSUB(T2r, T2s));
                    T2u = BYTWJ(&(W[TWVL * 4]), VADD(T2r, T2s));
                    ST(&(x[WS(vs, 5) + WS(rs, 4)]), T2t, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 4)]), T2u, ms, &(x[WS(vs, 3)]));
               }
               {
                    V T3x, T3y, T3v, T3w;
                    T3v = VSUB(T3f, T3m);
                    T3w = VBYI(VADD(T3r, T3o));
                    T3x = BYTWJ(&(W[TWVL * 8]), VSUB(T3v, T3w));
                    T3y = BYTWJ(&(W[TWVL * 4]), VADD(T3v, T3w));
                    ST(&(x[WS(vs, 5) + WS(rs, 6)]), T3x, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 6)]), T3y, ms, &(x[WS(vs, 3)]));
               }
               {
                    V TS, TT, TQ, TR;
                    TQ = VSUB(TA, TH);
                    TR = VBYI(VADD(TM, TJ));
                    TS = BYTWJ(&(W[TWVL * 8]), VSUB(TQ, TR));
                    TT = BYTWJ(&(W[TWVL * 4]), VADD(TQ, TR));
                    ST(&(x[WS(vs, 5) + WS(rs, 1)]), TS, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), TT, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T1W, T1X, T1U, T1V;
                    T1U = VSUB(T1E, T1L);
                    T1V = VBYI(VADD(T1Q, T1N));
                    T1W = BYTWJ(&(W[TWVL * 8]), VSUB(T1U, T1V));
                    T1X = BYTWJ(&(W[TWVL * 4]), VADD(T1U, T1V));
                    ST(&(x[WS(vs, 5) + WS(rs, 3)]), T1W, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1X, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T30, T31, T2Y, T2Z;
                    T2Y = VSUB(T2I, T2P);
                    T2Z = VBYI(VADD(T2U, T2R));
                    T30 = BYTWJ(&(W[TWVL * 8]), VSUB(T2Y, T2Z));
                    T31 = BYTWJ(&(W[TWVL * 4]), VADD(T2Y, T2Z));
                    ST(&(x[WS(vs, 5) + WS(rs, 5)]), T30, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 5)]), T31, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T44, T45, T42, T43;
                    T42 = VSUB(T3M, T3T);
                    T43 = VBYI(VADD(T3Y, T3V));
                    T44 = BYTWJ(&(W[TWVL * 8]), VSUB(T42, T43));
                    T45 = BYTWJ(&(W[TWVL * 4]), VADD(T42, T43));
                    ST(&(x[WS(vs, 5) + WS(rs, 7)]), T44, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 7)]), T45, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 8, XSIMD_STRING("q1fv_8"), twinstr, &GENUS, {264, 128, 0, 0}, 0, 0, 0 };

void XSIMD(codelet_q1fv_8) (planner *p) {
     X(kdft_difsq_register) (p, q1fv_8, &desc);
}
#endif /* HAVE_FMA */