/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:39:33 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_twidsq_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 4 -dif -name q1bv_4 -include q1b.h -sign 1 */

/*
 * This function contains 44 FP additions, 32 FP multiplications,
 * (or, 36 additions, 24 multiplications, 8 fused multiply/add),
 * 38 stack variables, 0 constants, and 32 memory accesses
 */
#include "q1b.h"

static void q1bv_4(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     {
          INT m;
          R *x;
          x = ii;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 6)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 6), MAKE_VOLATILE_STRIDE(8, rs), MAKE_VOLATILE_STRIDE(8, vs)) {
               V Tb, Tm, Tx, TI;
               {
                    V Tc, T9, T3, TG, TA, TH, TD, Ta, T6, Td, Tn, To, Tq, Tr, Tf;
                    V Tg;
                    {
                         V T1, T2, Ty, Tz, TB, TC, T4, T5;
                         T1 = LD(&(x[0]), ms, &(x[0]));
                         T2 = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                         Ty = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                         Tz = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                         TB = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         TC = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                         T5 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                         Tc = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                         T9 = VADD(T1, T2);
                         T3 = VSUB(T1, T2);
                         TG = VADD(Ty, Tz);
                         TA = VSUB(Ty, Tz);
                         TH = VADD(TB, TC);
                         TD = VSUB(TB, TC);
                         Ta = VADD(T4, T5);
                         T6 = VSUB(T4, T5);
                         Td = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                         Tn = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                         To = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                         Tq = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         Tr = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         Tf = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         Tg = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    }
                    {
                         V Tk, Te, Tv, Tp, Tw, Ts, Tl, Th, T7, TE, Tu, TF;
                         ST(&(x[0]), VADD(T9, Ta), ms, &(x[0]));
                         Tk = VADD(Tc, Td);
                         Te = VSUB(Tc, Td);
                         Tv = VADD(Tn, To);
                         Tp = VSUB(Tn, To);
                         Tw = VADD(Tq, Tr);
                         Ts = VSUB(Tq, Tr);
                         Tl = VADD(Tf, Tg);
                         Th = VSUB(Tf, Tg);
                         ST(&(x[WS(rs, 3)]), VADD(TG, TH), ms, &(x[WS(rs, 1)]));
                         T7 = BYTW(&(W[TWVL * 4]), VFNMSI(T6, T3));
                         TE = BYTW(&(W[TWVL * 4]), VFNMSI(TD, TA));
                         {
                              V Tt, Ti, Tj, T8;
                              T8 = BYTW(&(W[0]), VFMAI(T6, T3));
                              ST(&(x[WS(rs, 2)]), VADD(Tv, Tw), ms, &(x[0]));
                              Tt = BYTW(&(W[TWVL * 4]), VFNMSI(Ts, Tp));
                              ST(&(x[WS(rs, 1)]), VADD(Tk, Tl), ms, &(x[WS(rs, 1)]));
                              Ti = BYTW(&(W[TWVL * 4]), VFNMSI(Th, Te));
                              Tj = BYTW(&(W[0]), VFMAI(Th, Te));
                              ST(&(x[WS(vs, 3)]), T7, ms, &(x[WS(vs, 3)]));
                              ST(&(x[WS(vs, 3) + WS(rs, 3)]), TE, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 1)]), T8, ms, &(x[WS(vs, 1)]));
                              Tu = BYTW(&(W[0]), VFMAI(Ts, Tp));
                              ST(&(x[WS(vs, 3) + WS(rs, 2)]), Tt, ms, &(x[WS(vs, 3)]));
                              TF = BYTW(&(W[0]), VFMAI(TD, TA));
                              ST(&(x[WS(vs, 3) + WS(rs, 1)]), Ti, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 1) + WS(rs, 1)]), Tj, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         }
                         Tb = BYTW(&(W[TWVL * 2]), VSUB(T9, Ta));
                         Tm = BYTW(&(W[TWVL * 2]), VSUB(Tk, Tl));
                         Tx = BYTW(&(W[TWVL * 2]), VSUB(Tv, Tw));
                         ST(&(x[WS(vs, 1) + WS(rs, 2)]), Tu, ms, &(x[WS(vs, 1)]));
                         TI = BYTW(&(W[TWVL * 2]), VSUB(TG, TH));
                         ST(&(x[WS(vs, 1) + WS(rs, 3)]), TF, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    }
               }
               ST(&(x[WS(vs, 2)]), Tb, ms, &(x[WS(vs, 2)]));
               ST(&(x[WS(vs, 2) + WS(rs, 1)]), Tm, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               ST(&(x[WS(vs, 2) + WS(rs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
               ST(&(x[WS(vs, 2) + WS(rs, 3)]), TI, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 4, XSIMD_STRING("q1bv_4"), twinstr, &GENUS, {36, 24, 8, 0}, 0, 0, 0 };

void XSIMD(codelet_q1bv_4) (planner *p) {
     X(kdft_difsq_register) (p, q1bv_4, &desc);
}
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_twidsq_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 4 -dif -name q1bv_4 -include q1b.h -sign 1 */

/*
 * This function contains 44 FP additions, 24 FP multiplications,
 * (or, 44 additions, 24 multiplications, 0 fused multiply/add),
 * 22 stack variables, 0 constants, and 32 memory accesses
 */
#include "q1b.h"

static void q1bv_4(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     {
          INT m;
          R *x;
          x = ii;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 6)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 6), MAKE_VOLATILE_STRIDE(8, rs), MAKE_VOLATILE_STRIDE(8, vs)) {
               V T3, T9, TA, TG, TD, TH, T6, Ta, Te, Tk, Tp, Tv, Ts, Tw, Th;
               V Tl;
               {
                    V T1, T2, Ty, Tz;
                    T1 = LD(&(x[0]), ms, &(x[0]));
                    T2 = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                    T3 = VSUB(T1, T2);
                    T9 = VADD(T1, T2);
                    Ty = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                    Tz = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                    TA = VSUB(Ty, Tz);
                    TG = VADD(Ty, Tz);
               }
               {
                    V TB, TC, T4, T5;
                    TB = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    TC = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    TD = VBYI(VSUB(TB, TC));
                    TH = VADD(TB, TC);
                    T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                    T5 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                    T6 = VBYI(VSUB(T4, T5));
                    Ta = VADD(T4, T5);
               }
               {
                    V Tc, Td, Tn, To;
                    Tc = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                    Td = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                    Te = VSUB(Tc, Td);
                    Tk = VADD(Tc, Td);
                    Tn = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                    To = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                    Tp = VSUB(Tn, To);
                    Tv = VADD(Tn, To);
               }
               {
                    V Tq, Tr, Tf, Tg;
                    Tq = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    Tr = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    Ts = VBYI(VSUB(Tq, Tr));
                    Tw = VADD(Tq, Tr);
                    Tf = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    Tg = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    Th = VBYI(VSUB(Tf, Tg));
                    Tl = VADD(Tf, Tg);
               }
               ST(&(x[0]), VADD(T9, Ta), ms, &(x[0]));
               ST(&(x[WS(rs, 1)]), VADD(Tk, Tl), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 2)]), VADD(Tv, Tw), ms, &(x[0]));
               ST(&(x[WS(rs, 3)]), VADD(TG, TH), ms, &(x[WS(rs, 1)]));
               {
                    V T7, Ti, Tt, TE;
                    T7 = BYTW(&(W[TWVL * 4]), VSUB(T3, T6));
                    ST(&(x[WS(vs, 3)]), T7, ms, &(x[WS(vs, 3)]));
                    Ti = BYTW(&(W[TWVL * 4]), VSUB(Te, Th));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), Ti, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    Tt = BYTW(&(W[TWVL * 4]), VSUB(Tp, Ts));
                    ST(&(x[WS(vs, 3) + WS(rs, 2)]), Tt, ms, &(x[WS(vs, 3)]));
                    TE = BYTW(&(W[TWVL * 4]), VSUB(TA, TD));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), TE, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T8, Tj, Tu, TF;
                    T8 = BYTW(&(W[0]), VADD(T3, T6));
                    ST(&(x[WS(vs, 1)]), T8, ms, &(x[WS(vs, 1)]));
                    Tj = BYTW(&(W[0]), VADD(Te, Th));
                    ST(&(x[WS(vs, 1) + WS(rs, 1)]), Tj, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    Tu = BYTW(&(W[0]), VADD(Tp, Ts));
                    ST(&(x[WS(vs, 1) + WS(rs, 2)]), Tu, ms, &(x[WS(vs, 1)]));
                    TF = BYTW(&(W[0]), VADD(TA, TD));
                    ST(&(x[WS(vs, 1) + WS(rs, 3)]), TF, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V Tb, Tm, Tx, TI;
                    Tb = BYTW(&(W[TWVL * 2]), VSUB(T9, Ta));
                    ST(&(x[WS(vs, 2)]), Tb, ms, &(x[WS(vs, 2)]));
                    Tm = BYTW(&(W[TWVL * 2]), VSUB(Tk, Tl));
                    ST(&(x[WS(vs, 2) + WS(rs, 1)]), Tm, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    Tx = BYTW(&(W[TWVL * 2]), VSUB(Tv, Tw));
                    ST(&(x[WS(vs, 2) + WS(rs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
                    TI = BYTW(&(W[TWVL * 2]), VSUB(TG, TH));
                    ST(&(x[WS(vs, 2) + WS(rs, 3)]), TI, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 4, XSIMD_STRING("q1bv_4"), twinstr, &GENUS, {44, 24, 0, 0}, 0, 0, 0 };

void XSIMD(codelet_q1bv_4) (planner *p) {
     X(kdft_difsq_register) (p, q1bv_4, &desc);
}
#endif /* HAVE_FMA */