annotate src/fftw-3.3.3/dft/simd/common/n2fv_14.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 37bf6b4a2645
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:37:22 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 14 -name n2fv_14 -with-ostride 2 -include n2f.h -store-multiple 2 */
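/*
 * A rough gloss on the generator flags above (inferred, since genfft's
 * options are not documented in this file): -n 14 and -name n2fv_14 set the
 * transform size and codelet name, -simd emits vector (V-type) code, and
 * -fma together with -reorder-insns and -schedule-for-pipeline biases the
 * instruction mix and ordering toward fused multiply/adds; -with-ostride 2
 * fixes the output stride at 2, and -store-multiple 2 appears to correspond
 * to the paired STM2/STN2 stores in the body below.
 */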

/*
 * This function contains 74 FP additions, 48 FP multiplications,
 * (or, 32 additions, 6 multiplications, 42 fused multiply/add),
 * 65 stack variables, 6 constants, and 35 memory accesses
 */
#include "n2f.h"

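/*
 * A brief orientation (assumed from FFTW's usual codelet conventions, which
 * this generated file does not restate): n2fv_14 computes a size-14 forward
 * complex DFT, Y[k] = sum_{j=0}^{13} X[j] * exp(-2*pi*i*j*k/14), over SIMD
 * vectors. ri/ii and ro/io point at the real and imaginary parts of the
 * input and output, is/os are the element strides, v is the number of
 * transforms performed by the loop below, and ivs/ovs are the strides
 * between successive transforms.
 */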
static void n2fv_14(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP900968867, +0.900968867902419126236102319507445051165919162);
     DVK(KP801937735, +0.801937735804838252472204639014890102331838324);
     DVK(KP974927912, +0.974927912181823607018131682993931217232785801);
     DVK(KP692021471, +0.692021471630095869627814897002069140197260599);
     DVK(KP554958132, +0.554958132087371191422194871006410481067288862);
     DVK(KP356895867, +0.356895867892209443894399510021300583399127187);
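     /*
      * The constants above look like the size-7 twiddle factors in the form
      * the FMA generator prefers (a reading inferred from the numeric values,
      * not stated in the file): KP900968867 = cos(pi/7), KP974927912 =
      * sin(3*pi/7), and the remaining four are ratios such as
      * KP554958132 = sin(pi/7)/sin(2*pi/7), KP801937735 = sin(2*pi/7)/sin(3*pi/7),
      * KP692021471 = cos(2*pi/7)/cos(pi/7), KP356895867 = cos(3*pi/7)/cos(2*pi/7).
      */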
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(28, is), MAKE_VOLATILE_STRIDE(28, os)) {
               V TH, T3, TP, Tn, Ta, Ts, TW, TK, TO, Tk, TM, Tg, TL, Td, T1;
               V T2;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               T2 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
               {
                    V Ti, TI, T6, TJ, T9, Tj, Te, Tf, Tb, Tc;
                    {
                         V T4, T5, T7, T8, Tl, Tm;
                         T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         T5 = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                         T7 = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
                         T8 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         Tl = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                         Tm = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         Ti = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                         TH = VADD(T1, T2);
                         T3 = VSUB(T1, T2);
                         TI = VADD(T4, T5);
                         T6 = VSUB(T4, T5);
                         TJ = VADD(T7, T8);
                         T9 = VSUB(T7, T8);
                         TP = VADD(Tl, Tm);
                         Tn = VSUB(Tl, Tm);
                         Tj = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
                         Te = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         Tf = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         Tb = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         Tc = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                    }
                    Ta = VADD(T6, T9);
                    Ts = VSUB(T9, T6);
                    TW = VSUB(TJ, TI);
                    TK = VADD(TI, TJ);
                    TO = VADD(Ti, Tj);
                    Tk = VSUB(Ti, Tj);
                    TM = VADD(Te, Tf);
                    Tg = VSUB(Te, Tf);
                    TL = VADD(Tb, Tc);
                    Td = VSUB(Tb, Tc);
               }
               {
                    V T19, T1a, T18, TB, T13, TY, TG, Tw, T11, Tr, T16, TT, Tz, TE, TU;
                    V TQ;
                    TU = VSUB(TO, TP);
                    TQ = VADD(TO, TP);
                    {
                         V Tt, To, TV, TN;
                         Tt = VSUB(Tn, Tk);
                         To = VADD(Tk, Tn);
                         TV = VSUB(TL, TM);
                         TN = VADD(TL, TM);
                         {
                              V Tu, Th, TZ, T17;
                              Tu = VSUB(Tg, Td);
                              Th = VADD(Td, Tg);
                              TZ = VFNMS(LDK(KP356895867), TK, TQ);
                              T17 = VFNMS(LDK(KP554958132), TU, TW);
                              {
                                   V Tp, TA, T14, TR;
                                   Tp = VFNMS(LDK(KP356895867), Ta, To);
                                   TA = VFMA(LDK(KP554958132), Tt, Ts);
                                   T19 = VADD(TH, VADD(TK, VADD(TN, TQ)));
                                   STM2(&(xo[0]), T19, ovs, &(xo[0]));
                                   T14 = VFNMS(LDK(KP356895867), TN, TK);
                                   TR = VFNMS(LDK(KP356895867), TQ, TN);
                                   {
                                        V T12, TX, Tx, TC;
                                        T12 = VFMA(LDK(KP554958132), TV, TU);
                                        TX = VFMA(LDK(KP554958132), TW, TV);
                                        T1a = VADD(T3, VADD(Ta, VADD(Th, To)));
                                        STM2(&(xo[14]), T1a, ovs, &(xo[2]));
                                        Tx = VFNMS(LDK(KP356895867), Th, Ta);
                                        TC = VFNMS(LDK(KP356895867), To, Th);
                                        {
                                             V TF, Tv, T10, Tq;
                                             TF = VFNMS(LDK(KP554958132), Ts, Tu);
                                             Tv = VFMA(LDK(KP554958132), Tu, Tt);
                                             T10 = VFNMS(LDK(KP692021471), TZ, TN);
                                             T18 = VMUL(LDK(KP974927912), VFNMS(LDK(KP801937735), T17, TV));
                                             Tq = VFNMS(LDK(KP692021471), Tp, Th);
                                             TB = VMUL(LDK(KP974927912), VFMA(LDK(KP801937735), TA, Tu));
                                             {
                                                  V T15, TS, Ty, TD;
                                                  T15 = VFNMS(LDK(KP692021471), T14, TQ);
                                                  TS = VFNMS(LDK(KP692021471), TR, TK);
                                                  T13 = VMUL(LDK(KP974927912), VFMA(LDK(KP801937735), T12, TW));
                                                  TY = VMUL(LDK(KP974927912), VFNMS(LDK(KP801937735), TX, TU));
                                                  Ty = VFNMS(LDK(KP692021471), Tx, To);
                                                  TD = VFNMS(LDK(KP692021471), TC, Ta);
                                                  TG = VMUL(LDK(KP974927912), VFNMS(LDK(KP801937735), TF, Tt));
                                                  Tw = VMUL(LDK(KP974927912), VFNMS(LDK(KP801937735), Tv, Ts));
                                                  T11 = VFNMS(LDK(KP900968867), T10, TH);
                                                  Tr = VFNMS(LDK(KP900968867), Tq, T3);
                                                  T16 = VFNMS(LDK(KP900968867), T15, TH);
                                                  TT = VFNMS(LDK(KP900968867), TS, TH);
                                                  Tz = VFNMS(LDK(KP900968867), Ty, T3);
                                                  TE = VFNMS(LDK(KP900968867), TD, T3);
                                             }
                                        }
                                   }
                              }
                         }
                    }
                    {
                         V T1b, T1c, T1d, T1e;
                         T1b = VFNMSI(T13, T11);
                         STM2(&(xo[24]), T1b, ovs, &(xo[0]));
                         T1c = VFMAI(T13, T11);
                         STM2(&(xo[4]), T1c, ovs, &(xo[0]));
                         T1d = VFMAI(Tw, Tr);
                         STM2(&(xo[18]), T1d, ovs, &(xo[2]));
                         T1e = VFNMSI(Tw, Tr);
                         STM2(&(xo[10]), T1e, ovs, &(xo[2]));
                         {
                              V T1f, T1g, T1h, T1i;
                              T1f = VFNMSI(T18, T16);
                              STM2(&(xo[16]), T1f, ovs, &(xo[0]));
                              STN2(&(xo[16]), T1f, T1d, ovs);
                              T1g = VFMAI(T18, T16);
                              STM2(&(xo[12]), T1g, ovs, &(xo[0]));
                              STN2(&(xo[12]), T1g, T1a, ovs);
                              T1h = VFNMSI(TY, TT);
                              STM2(&(xo[20]), T1h, ovs, &(xo[0]));
                              T1i = VFMAI(TY, TT);
                              STM2(&(xo[8]), T1i, ovs, &(xo[0]));
                              STN2(&(xo[8]), T1i, T1e, ovs);
                              {
                                   V T1j, T1k, T1l, T1m;
                                   T1j = VFMAI(TB, Tz);
                                   STM2(&(xo[2]), T1j, ovs, &(xo[2]));
                                   STN2(&(xo[0]), T19, T1j, ovs);
                                   T1k = VFNMSI(TB, Tz);
                                   STM2(&(xo[26]), T1k, ovs, &(xo[2]));
                                   STN2(&(xo[24]), T1b, T1k, ovs);
                                   T1l = VFMAI(TG, TE);
                                   STM2(&(xo[6]), T1l, ovs, &(xo[2]));
                                   STN2(&(xo[4]), T1c, T1l, ovs);
                                   T1m = VFNMSI(TG, TE);
                                   STM2(&(xo[22]), T1m, ovs, &(xo[2]));
                                   STN2(&(xo[20]), T1h, T1m, ovs);
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 14, XSIMD_STRING("n2fv_14"), {32, 6, 42, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2fv_14) (planner *p) {
     X(kdft_register) (p, n2fv_14, &desc);
}
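/*
 * The {32, 6, 42, 0} entry in the descriptor mirrors the operation counts
 * quoted in the comment above the kernel (32 additions, 6 multiplications,
 * 42 fused multiply/adds); the planner presumably uses these figures when
 * costing this codelet against alternatives. XSIMD(codelet_n2fv_14) simply
 * registers the kernel with the planner via X(kdft_register).
 */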

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 14 -name n2fv_14 -with-ostride 2 -include n2f.h -store-multiple 2 */

/*
 * This function contains 74 FP additions, 36 FP multiplications,
 * (or, 50 additions, 12 multiplications, 24 fused multiply/add),
 * 39 stack variables, 6 constants, and 35 memory accesses
 */
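/*
 * Note the contrast with the HAVE_FMA variant above: this version was
 * generated without the -fma, -reorder-insns and -schedule-for-pipeline
 * flags, so it expresses the same 74 FP additions with more explicit
 * multiplications (12 vs. 6) and fewer fused multiply/adds (24 vs. 42),
 * and it needs far fewer stack variables (39 vs. 65).
 */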
#include "n2f.h"

static void n2fv_14(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP222520933, +0.222520933956314404288902564496794759466355569);
     DVK(KP900968867, +0.900968867902419126236102319507445051165919162);
     DVK(KP623489801, +0.623489801858733530525004884004239810632274731);
     DVK(KP433883739, +0.433883739117558120475768332848358754609990728);
     DVK(KP781831482, +0.781831482468029808708444526674057750232334519);
     DVK(KP974927912, +0.974927912181823607018131682993931217232785801);
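     /*
      * These read as cosines and sines of multiples of pi/7 (an inference
      * from the numeric values): KP222520933 = cos(3*pi/7), KP900968867 =
      * cos(pi/7), KP623489801 = cos(2*pi/7), KP433883739 = sin(pi/7),
      * KP781831482 = sin(2*pi/7), KP974927912 = sin(3*pi/7) -- i.e. the
      * twiddle factors of the order-7 sub-transform inside this size-14 DFT.
      */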
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(28, is), MAKE_VOLATILE_STRIDE(28, os)) {
               V T3, Ty, To, TK, Tr, TE, Ta, TJ, Tq, TB, Th, TL, Ts, TH, T1;
               V T2;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               T2 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
               T3 = VSUB(T1, T2);
               Ty = VADD(T1, T2);
               {
                    V Tk, TC, Tn, TD;
                    {
                         V Ti, Tj, Tl, Tm;
                         Ti = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                         Tj = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
                         Tk = VSUB(Ti, Tj);
                         TC = VADD(Ti, Tj);
                         Tl = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                         Tm = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         Tn = VSUB(Tl, Tm);
                         TD = VADD(Tl, Tm);
                    }
                    To = VADD(Tk, Tn);
                    TK = VSUB(TC, TD);
                    Tr = VSUB(Tn, Tk);
                    TE = VADD(TC, TD);
               }
               {
                    V T6, Tz, T9, TA;
                    {
                         V T4, T5, T7, T8;
                         T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         T5 = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                         T6 = VSUB(T4, T5);
                         Tz = VADD(T4, T5);
                         T7 = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
                         T8 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         T9 = VSUB(T7, T8);
                         TA = VADD(T7, T8);
                    }
                    Ta = VADD(T6, T9);
                    TJ = VSUB(TA, Tz);
                    Tq = VSUB(T9, T6);
                    TB = VADD(Tz, TA);
               }
               {
                    V Td, TF, Tg, TG;
                    {
                         V Tb, Tc, Te, Tf;
                         Tb = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         Tc = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                         Td = VSUB(Tb, Tc);
                         TF = VADD(Tb, Tc);
                         Te = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         Tf = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         Tg = VSUB(Te, Tf);
                         TG = VADD(Te, Tf);
                    }
                    Th = VADD(Td, Tg);
                    TL = VSUB(TF, TG);
                    Ts = VSUB(Tg, Td);
                    TH = VADD(TF, TG);
               }
               {
                    V TR, TS, TT, TU, TV, TW;
                    TR = VADD(T3, VADD(Ta, VADD(Th, To)));
                    STM2(&(xo[14]), TR, ovs, &(xo[2]));
                    TS = VADD(Ty, VADD(TB, VADD(TH, TE)));
                    STM2(&(xo[0]), TS, ovs, &(xo[0]));
                    {
                         V Tt, Tp, TP, TQ;
                         Tt = VBYI(VFNMS(LDK(KP781831482), Tr, VFNMS(LDK(KP433883739), Ts, VMUL(LDK(KP974927912), Tq))));
                         Tp = VFMA(LDK(KP623489801), To, VFNMS(LDK(KP900968867), Th, VFNMS(LDK(KP222520933), Ta, T3)));
                         TT = VSUB(Tp, Tt);
                         STM2(&(xo[10]), TT, ovs, &(xo[2]));
                         TU = VADD(Tp, Tt);
                         STM2(&(xo[18]), TU, ovs, &(xo[2]));
                         TP = VBYI(VFMA(LDK(KP974927912), TJ, VFMA(LDK(KP433883739), TL, VMUL(LDK(KP781831482), TK))));
                         TQ = VFMA(LDK(KP623489801), TE, VFNMS(LDK(KP900968867), TH, VFNMS(LDK(KP222520933), TB, Ty)));
                         TV = VADD(TP, TQ);
                         STM2(&(xo[4]), TV, ovs, &(xo[0]));
                         TW = VSUB(TQ, TP);
                         STM2(&(xo[24]), TW, ovs, &(xo[0]));
                    }
                    {
                         V Tv, Tu, TX, TY;
                         Tv = VBYI(VFMA(LDK(KP781831482), Tq, VFMA(LDK(KP974927912), Ts, VMUL(LDK(KP433883739), Tr))));
                         Tu = VFMA(LDK(KP623489801), Ta, VFNMS(LDK(KP900968867), To, VFNMS(LDK(KP222520933), Th, T3)));
                         TX = VSUB(Tu, Tv);
                         STM2(&(xo[26]), TX, ovs, &(xo[2]));
                         STN2(&(xo[24]), TW, TX, ovs);
                         TY = VADD(Tu, Tv);
                         STM2(&(xo[2]), TY, ovs, &(xo[2]));
                         STN2(&(xo[0]), TS, TY, ovs);
                    }
                    {
                         V TM, TI, TZ, T10;
                         TM = VBYI(VFNMS(LDK(KP433883739), TK, VFNMS(LDK(KP974927912), TL, VMUL(LDK(KP781831482), TJ))));
                         TI = VFMA(LDK(KP623489801), TB, VFNMS(LDK(KP900968867), TE, VFNMS(LDK(KP222520933), TH, Ty)));
                         TZ = VSUB(TI, TM);
                         STM2(&(xo[12]), TZ, ovs, &(xo[0]));
                         STN2(&(xo[12]), TZ, TR, ovs);
                         T10 = VADD(TM, TI);
                         STM2(&(xo[16]), T10, ovs, &(xo[0]));
                         STN2(&(xo[16]), T10, TU, ovs);
                    }
                    {
                         V T12, TO, TN, T11;
                         TO = VBYI(VFMA(LDK(KP433883739), TJ, VFNMS(LDK(KP974927912), TK, VMUL(LDK(KP781831482), TL))));
                         TN = VFMA(LDK(KP623489801), TH, VFNMS(LDK(KP222520933), TE, VFNMS(LDK(KP900968867), TB, Ty)));
                         T11 = VSUB(TN, TO);
                         STM2(&(xo[8]), T11, ovs, &(xo[0]));
                         STN2(&(xo[8]), T11, TT, ovs);
                         T12 = VADD(TO, TN);
                         STM2(&(xo[20]), T12, ovs, &(xo[0]));
                         {
                              V Tx, Tw, T13, T14;
                              Tx = VBYI(VFMA(LDK(KP433883739), Tq, VFNMS(LDK(KP781831482), Ts, VMUL(LDK(KP974927912), Tr))));
                              Tw = VFMA(LDK(KP623489801), Th, VFNMS(LDK(KP222520933), To, VFNMS(LDK(KP900968867), Ta, T3)));
                              T13 = VSUB(Tw, Tx);
                              STM2(&(xo[22]), T13, ovs, &(xo[2]));
                              STN2(&(xo[20]), T12, T13, ovs);
                              T14 = VADD(Tw, Tx);
                              STM2(&(xo[6]), T14, ovs, &(xo[2]));
                              STN2(&(xo[4]), TV, T14, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 14, XSIMD_STRING("n2fv_14"), {50, 12, 24, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2fv_14) (planner *p) {
     X(kdft_register) (p, n2fv_14, &desc);
}

#endif /* HAVE_FMA */