src/fftw-3.3.8/dft/simd/common/n2sv_4.c @ 167:bd3cc4d1df30

Add FFTW 3.3.8 source, and a Linux build
author Chris Cannam <cannam@all-day-breakfast.com>
date Tue, 19 Nov 2019 14:52:55 +0000

/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu May 24 08:05:19 EDT 2018 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 4 -name n2sv_4 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */

/*
 * This function contains 16 FP additions, 0 FP multiplications,
 * (or, 16 additions, 0 multiplications, 0 fused multiply/add),
 * 17 stack variables, 0 constants, and 18 memory accesses
 */
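
/*
 * In outline, the length-4 butterfly stored below is (with r0..r3 and
 * i0..i3 the real and imaginary inputs, and the e^{-2*pi*i*n*k/4} sign
 * convention, so every twiddle factor is +/-1 or +/-i and no
 * multiplications are required):
 *
 *   ro[0] = (r0 + r2) + (r1 + r3)    io[0] = (i0 + i2) + (i1 + i3)
 *   ro[1] = (r0 - r2) + (i1 - i3)    io[1] = (i0 - i2) - (r1 - r3)
 *   ro[2] = (r0 + r2) - (r1 + r3)    io[2] = (i0 + i2) - (i1 + i3)
 *   ro[3] = (r0 - r2) - (i1 - i3)    io[3] = (i0 - i2) + (r1 - r3)
 *
 * which accounts for the 16 additions counted above: 8 first-stage
 * sums/differences and 8 second-stage combinations.
 */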
#include "dft/simd/n2s.h"

static void n2sv_4(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
               V T3, Tb, T9, Tf, T6, Ta, Te, Tg;
               {
                    V T1, T2, T7, T8;
                    T1 = LD(&(ri[0]), ivs, &(ri[0]));
                    T2 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                    T3 = VADD(T1, T2);
                    Tb = VSUB(T1, T2);
                    T7 = LD(&(ii[0]), ivs, &(ii[0]));
                    T8 = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
               }
               {
                    V T4, T5, Tc, Td;
                    T4 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                    T5 = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                    T6 = VADD(T4, T5);
                    Ta = VSUB(T4, T5);
                    Tc = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                    Td = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                    Te = VSUB(Tc, Td);
                    Tg = VADD(Tc, Td);
               }
               {
                    V Th, Ti, Tj, Tk;
                    Th = VSUB(T3, T6);
                    STM4(&(ro[2]), Th, ovs, &(ro[0]));
                    Ti = VSUB(Tf, Tg);
                    STM4(&(io[2]), Ti, ovs, &(io[0]));
                    Tj = VADD(T3, T6);
                    STM4(&(ro[0]), Tj, ovs, &(ro[0]));
                    Tk = VADD(Tf, Tg);
                    STM4(&(io[0]), Tk, ovs, &(io[0]));
                    {
                         V Tl, Tm, Tn, To;
                         Tl = VSUB(T9, Ta);
                         STM4(&(io[1]), Tl, ovs, &(io[1]));
                         Tm = VADD(Tb, Te);
                         STM4(&(ro[1]), Tm, ovs, &(ro[1]));
                         Tn = VADD(Ta, T9);
                         STM4(&(io[3]), Tn, ovs, &(io[1]));
                         STN4(&(io[0]), Tk, Tl, Ti, Tn, ovs);
                         To = VSUB(Tb, Te);
                         STM4(&(ro[3]), To, ovs, &(ro[1]));
                         STN4(&(ro[0]), Tj, Tm, Th, To, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 4, XSIMD_STRING("n2sv_4"), {16, 0, 0, 0}, &GENUS, 0, 1, 0, 0 };
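/* The {16, 0, 0, 0} entry matches the operation counts in the comment
   above (16 additions, 0 multiplications, 0 fused multiply-adds); 4 is
   the transform length, and "n2sv_4" is the name under which the
   codelet is registered below. */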

void XSIMD(codelet_n2sv_4) (planner *p) {
     X(kdft_register) (p, n2sv_4, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 4 -name n2sv_4 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */

/*
 * This function contains 16 FP additions, 0 FP multiplications,
 * (or, 16 additions, 0 multiplications, 0 fused multiply/add),
 * 17 stack variables, 0 constants, and 18 memory accesses
 */
#include "dft/simd/n2s.h"

static void n2sv_4(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
               V T3, Tb, T9, Tf, T6, Ta, Te, Tg;
               {
                    V T1, T2, T7, T8;
                    T1 = LD(&(ri[0]), ivs, &(ri[0]));
                    T2 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                    T3 = VADD(T1, T2);
                    Tb = VSUB(T1, T2);
                    T7 = LD(&(ii[0]), ivs, &(ii[0]));
                    T8 = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
               }
               {
                    V T4, T5, Tc, Td;
                    T4 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                    T5 = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                    T6 = VADD(T4, T5);
                    Ta = VSUB(T4, T5);
                    Tc = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                    Td = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                    Te = VSUB(Tc, Td);
                    Tg = VADD(Tc, Td);
               }
               {
                    V Th, Ti, Tj, Tk;
                    Th = VSUB(T3, T6);
                    STM4(&(ro[2]), Th, ovs, &(ro[0]));
                    Ti = VSUB(Tf, Tg);
                    STM4(&(io[2]), Ti, ovs, &(io[0]));
                    Tj = VADD(T3, T6);
                    STM4(&(ro[0]), Tj, ovs, &(ro[0]));
                    Tk = VADD(Tf, Tg);
                    STM4(&(io[0]), Tk, ovs, &(io[0]));
                    {
                         V Tl, Tm, Tn, To;
                         Tl = VSUB(T9, Ta);
                         STM4(&(io[1]), Tl, ovs, &(io[1]));
                         Tm = VADD(Tb, Te);
                         STM4(&(ro[1]), Tm, ovs, &(ro[1]));
                         Tn = VADD(Ta, T9);
                         STM4(&(io[3]), Tn, ovs, &(io[1]));
                         STN4(&(io[0]), Tk, Tl, Ti, Tn, ovs);
                         To = VSUB(Tb, Te);
                         STM4(&(ro[3]), To, ovs, &(ro[1]));
                         STN4(&(ro[0]), Tj, Tm, Th, To, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 4, XSIMD_STRING("n2sv_4"), {16, 0, 0, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_4) (planner *p) {
     X(kdft_register) (p, n2sv_4, &desc);
}

#endif