annotate src/fftw-3.3.5/dft/simd/common/n1fv_15.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 2cd0e3b3e1fd
children
rev   line source
Chris@42 1 /*
Chris@42 2 * Copyright (c) 2003, 2007-14 Matteo Frigo
Chris@42 3 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
Chris@42 4 *
Chris@42 5 * This program is free software; you can redistribute it and/or modify
Chris@42 6 * it under the terms of the GNU General Public License as published by
Chris@42 7 * the Free Software Foundation; either version 2 of the License, or
Chris@42 8 * (at your option) any later version.
Chris@42 9 *
Chris@42 10 * This program is distributed in the hope that it will be useful,
Chris@42 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@42 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@42 13 * GNU General Public License for more details.
Chris@42 14 *
Chris@42 15 * You should have received a copy of the GNU General Public License
Chris@42 16 * along with this program; if not, write to the Free Software
Chris@42 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@42 18 *
Chris@42 19 */
Chris@42 20
Chris@42 21 /* This file was automatically generated --- DO NOT EDIT */
Chris@42 22 /* Generated on Sat Jul 30 16:38:42 EDT 2016 */
Chris@42 23
Chris@42 24 #include "codelet-dft.h"
Chris@42 25
Chris@42 26 #ifdef HAVE_FMA
Chris@42 27
Chris@42 28 /* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include n1f.h */
Chris@42 29
Chris@42 30 /*
Chris@42 31 * This function contains 78 FP additions, 49 FP multiplications,
Chris@42 32 * (or, 36 additions, 7 multiplications, 42 fused multiply/add),
Chris@42 33 * 78 stack variables, 8 constants, and 30 memory accesses
Chris@42 34 */
Chris@42 35 #include "n1f.h"
Chris@42 36
Chris@42 37 static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@42 38 {
Chris@42 39 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@42 40 DVK(KP910592997, +0.910592997310029334643087372129977886038870291);
Chris@42 41 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@42 42 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@42 43 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@42 44 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@42 45 DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
Chris@42 46 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@42 47 {
Chris@42 48 INT i;
Chris@42 49 const R *xi;
Chris@42 50 R *xo;
Chris@42 51 xi = ri;
Chris@42 52 xo = ro;
Chris@42 53 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@42 54 V Tb, TX, TM, TQ, Th, TB, T5, Ti, Ta, TC, TN, Te, TG, Tq, Tj;
Chris@42 55 V T1, T2, T3;
Chris@42 56 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@42 57 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@42 58 T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@42 59 {
Chris@42 60 V T6, T7, T8, Tm, Tn, To;
Chris@42 61 T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@42 62 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@42 63 T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@42 64 Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@42 65 Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@42 66 To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@42 67 {
Chris@42 68 V T4, Tc, T9, Td, Tp;
Chris@42 69 Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@42 70 T4 = VADD(T2, T3);
Chris@42 71 TX = VSUB(T3, T2);
Chris@42 72 Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@42 73 TM = VSUB(T8, T7);
Chris@42 74 T9 = VADD(T7, T8);
Chris@42 75 Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@42 76 Tp = VADD(Tn, To);
Chris@42 77 TQ = VSUB(To, Tn);
Chris@42 78 Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@42 79 TB = VFNMS(LDK(KP500000000), T4, T1);
Chris@42 80 T5 = VADD(T1, T4);
Chris@42 81 Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@42 82 Ta = VADD(T6, T9);
Chris@42 83 TC = VFNMS(LDK(KP500000000), T9, T6);
Chris@42 84 TN = VSUB(Td, Tc);
Chris@42 85 Te = VADD(Tc, Td);
Chris@42 86 TG = VFNMS(LDK(KP500000000), Tp, Tm);
Chris@42 87 Tq = VADD(Tm, Tp);
Chris@42 88 Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@42 89 }
Chris@42 90 }
Chris@42 91 {
Chris@42 92 V TY, TO, Tf, TD, TP, Tk;
Chris@42 93 TY = VADD(TM, TN);
Chris@42 94 TO = VSUB(TM, TN);
Chris@42 95 Tf = VADD(Tb, Te);
Chris@42 96 TD = VFNMS(LDK(KP500000000), Te, Tb);
Chris@42 97 TP = VSUB(Tj, Ti);
Chris@42 98 Tk = VADD(Ti, Tj);
Chris@42 99 {
Chris@42 100 V Tx, Tg, TE, TU, TZ, TR, Tl, TF;
Chris@42 101 Tx = VSUB(Ta, Tf);
Chris@42 102 Tg = VADD(Ta, Tf);
Chris@42 103 TE = VADD(TC, TD);
Chris@42 104 TU = VSUB(TC, TD);
Chris@42 105 TZ = VADD(TP, TQ);
Chris@42 106 TR = VSUB(TP, TQ);
Chris@42 107 Tl = VADD(Th, Tk);
Chris@42 108 TF = VFNMS(LDK(KP500000000), Tk, Th);
Chris@42 109 {
Chris@42 110 V T12, T10, T18, TS, Tw, Tr, TH, TV, T11, T1g;
Chris@42 111 T12 = VSUB(TY, TZ);
Chris@42 112 T10 = VADD(TY, TZ);
Chris@42 113 T18 = VFNMS(LDK(KP618033988), TO, TR);
Chris@42 114 TS = VFMA(LDK(KP618033988), TR, TO);
Chris@42 115 Tw = VSUB(Tl, Tq);
Chris@42 116 Tr = VADD(Tl, Tq);
Chris@42 117 TH = VADD(TF, TG);
Chris@42 118 TV = VSUB(TF, TG);
Chris@42 119 T11 = VFNMS(LDK(KP250000000), T10, TX);
Chris@42 120 T1g = VMUL(LDK(KP866025403), VADD(TX, T10));
Chris@42 121 {
Chris@42 122 V TA, Ty, Tu, TK, TI, T1a, TW, T1b, T13, Tt, Ts, TJ, T1f;
Chris@42 123 TA = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Tw, Tx));
Chris@42 124 Ty = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tx, Tw));
Chris@42 125 Ts = VADD(Tg, Tr);
Chris@42 126 Tu = VSUB(Tg, Tr);
Chris@42 127 TK = VSUB(TE, TH);
Chris@42 128 TI = VADD(TE, TH);
Chris@42 129 T1a = VFNMS(LDK(KP618033988), TU, TV);
Chris@42 130 TW = VFMA(LDK(KP618033988), TV, TU);
Chris@42 131 T1b = VFNMS(LDK(KP559016994), T12, T11);
Chris@42 132 T13 = VFMA(LDK(KP559016994), T12, T11);
Chris@42 133 ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0]));
Chris@42 134 Tt = VFNMS(LDK(KP250000000), Ts, T5);
Chris@42 135 TJ = VFNMS(LDK(KP250000000), TI, TB);
Chris@42 136 T1f = VADD(TB, TI);
Chris@42 137 {
Chris@42 138 V T1c, T1e, T16, T14, Tv, Tz, T17, TL;
Chris@42 139 T1c = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T1b, T1a));
Chris@42 140 T1e = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T1b, T1a));
Chris@42 141 T16 = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T13, TW));
Chris@42 142 T14 = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T13, TW));
Chris@42 143 Tv = VFNMS(LDK(KP559016994), Tu, Tt);
Chris@42 144 Tz = VFMA(LDK(KP559016994), Tu, Tt);
Chris@42 145 T17 = VFNMS(LDK(KP559016994), TK, TJ);
Chris@42 146 TL = VFMA(LDK(KP559016994), TK, TJ);
Chris@42 147 ST(&(xo[WS(os, 10)]), VFMAI(T1g, T1f), ovs, &(xo[0]));
Chris@42 148 ST(&(xo[WS(os, 5)]), VFNMSI(T1g, T1f), ovs, &(xo[WS(os, 1)]));
Chris@42 149 {
Chris@42 150 V T19, T1d, T15, TT;
Chris@42 151 ST(&(xo[WS(os, 12)]), VFMAI(Ty, Tv), ovs, &(xo[0]));
Chris@42 152 ST(&(xo[WS(os, 3)]), VFNMSI(Ty, Tv), ovs, &(xo[WS(os, 1)]));
Chris@42 153 ST(&(xo[WS(os, 9)]), VFMAI(TA, Tz), ovs, &(xo[WS(os, 1)]));
Chris@42 154 ST(&(xo[WS(os, 6)]), VFNMSI(TA, Tz), ovs, &(xo[0]));
Chris@42 155 T19 = VFMA(LDK(KP823639103), T18, T17);
Chris@42 156 T1d = VFNMS(LDK(KP823639103), T18, T17);
Chris@42 157 T15 = VFNMS(LDK(KP823639103), TS, TL);
Chris@42 158 TT = VFMA(LDK(KP823639103), TS, TL);
Chris@42 159 ST(&(xo[WS(os, 2)]), VFMAI(T1c, T19), ovs, &(xo[0]));
Chris@42 160 ST(&(xo[WS(os, 13)]), VFNMSI(T1c, T19), ovs, &(xo[WS(os, 1)]));
Chris@42 161 ST(&(xo[WS(os, 7)]), VFMAI(T1e, T1d), ovs, &(xo[WS(os, 1)]));
Chris@42 162 ST(&(xo[WS(os, 8)]), VFNMSI(T1e, T1d), ovs, &(xo[0]));
Chris@42 163 ST(&(xo[WS(os, 4)]), VFMAI(T16, T15), ovs, &(xo[0]));
Chris@42 164 ST(&(xo[WS(os, 11)]), VFNMSI(T16, T15), ovs, &(xo[WS(os, 1)]));
Chris@42 165 ST(&(xo[WS(os, 14)]), VFMAI(T14, TT), ovs, &(xo[0]));
Chris@42 166 ST(&(xo[WS(os, 1)]), VFNMSI(T14, TT), ovs, &(xo[WS(os, 1)]));
Chris@42 167 }
Chris@42 168 }
Chris@42 169 }
Chris@42 170 }
Chris@42 171 }
Chris@42 172 }
Chris@42 173 }
Chris@42 174 }
Chris@42 175 VLEAVE();
Chris@42 176 }
Chris@42 177
Chris@42 178 static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {36, 7, 42, 0}, &GENUS, 0, 0, 0, 0 };
Chris@42 179
Chris@42 180 void XSIMD(codelet_n1fv_15) (planner *p) {
Chris@42 181 X(kdft_register) (p, n1fv_15, &desc);
Chris@42 182 }
Chris@42 183
Chris@42 184 #else /* HAVE_FMA */
Chris@42 185
Chris@42 186 /* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include n1f.h */
Chris@42 187
Chris@42 188 /*
Chris@42 189 * This function contains 78 FP additions, 25 FP multiplications,
Chris@42 190 * (or, 64 additions, 11 multiplications, 14 fused multiply/add),
Chris@42 191 * 55 stack variables, 10 constants, and 30 memory accesses
Chris@42 192 */
Chris@42 193 #include "n1f.h"
Chris@42 194
Chris@42 195 static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@42 196 {
Chris@42 197 DVK(KP216506350, +0.216506350946109661690930792688234045867850657);
Chris@42 198 DVK(KP509036960, +0.509036960455127183450980863393907648510733164);
Chris@42 199 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@42 200 DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
Chris@42 201 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@42 202 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@42 203 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@42 204 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@42 205 DVK(KP484122918, +0.484122918275927110647408174972799951354115213);
Chris@42 206 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@42 207 {
Chris@42 208 INT i;
Chris@42 209 const R *xi;
Chris@42 210 R *xo;
Chris@42 211 xi = ri;
Chris@42 212 xo = ro;
Chris@42 213 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@42 214 V T5, T10, TB, TO, TU, TV, TR, Ta, Tf, Tg, Tl, Tq, Tr, TE, TH;
Chris@42 215 V TI, TZ, T11, T1f, T1g;
Chris@42 216 {
Chris@42 217 V T1, T2, T3, T4;
Chris@42 218 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@42 219 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@42 220 T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@42 221 T4 = VADD(T2, T3);
Chris@42 222 T5 = VADD(T1, T4);
Chris@42 223 T10 = VSUB(T3, T2);
Chris@42 224 TB = VFNMS(LDK(KP500000000), T4, T1);
Chris@42 225 }
Chris@42 226 {
Chris@42 227 V T6, T9, TC, TP, Tm, Tp, TG, TN, Tb, Te, TD, TQ, Th, Tk, TF;
Chris@42 228 V TM, TX, TY;
Chris@42 229 {
Chris@42 230 V T7, T8, Tn, To;
Chris@42 231 T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@42 232 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@42 233 T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@42 234 T9 = VADD(T7, T8);
Chris@42 235 TC = VFNMS(LDK(KP500000000), T9, T6);
Chris@42 236 TP = VSUB(T8, T7);
Chris@42 237 Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@42 238 Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@42 239 To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@42 240 Tp = VADD(Tn, To);
Chris@42 241 TG = VFNMS(LDK(KP500000000), Tp, Tm);
Chris@42 242 TN = VSUB(To, Tn);
Chris@42 243 }
Chris@42 244 {
Chris@42 245 V Tc, Td, Ti, Tj;
Chris@42 246 Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@42 247 Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@42 248 Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@42 249 Te = VADD(Tc, Td);
Chris@42 250 TD = VFNMS(LDK(KP500000000), Te, Tb);
Chris@42 251 TQ = VSUB(Td, Tc);
Chris@42 252 Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@42 253 Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@42 254 Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@42 255 Tk = VADD(Ti, Tj);
Chris@42 256 TF = VFNMS(LDK(KP500000000), Tk, Th);
Chris@42 257 TM = VSUB(Tj, Ti);
Chris@42 258 }
Chris@42 259 TO = VSUB(TM, TN);
Chris@42 260 TU = VSUB(TF, TG);
Chris@42 261 TV = VSUB(TC, TD);
Chris@42 262 TR = VSUB(TP, TQ);
Chris@42 263 Ta = VADD(T6, T9);
Chris@42 264 Tf = VADD(Tb, Te);
Chris@42 265 Tg = VADD(Ta, Tf);
Chris@42 266 Tl = VADD(Th, Tk);
Chris@42 267 Tq = VADD(Tm, Tp);
Chris@42 268 Tr = VADD(Tl, Tq);
Chris@42 269 TE = VADD(TC, TD);
Chris@42 270 TH = VADD(TF, TG);
Chris@42 271 TI = VADD(TE, TH);
Chris@42 272 TX = VADD(TP, TQ);
Chris@42 273 TY = VADD(TM, TN);
Chris@42 274 TZ = VMUL(LDK(KP484122918), VSUB(TX, TY));
Chris@42 275 T11 = VADD(TX, TY);
Chris@42 276 }
Chris@42 277 T1f = VADD(TB, TI);
Chris@42 278 T1g = VBYI(VMUL(LDK(KP866025403), VADD(T10, T11)));
Chris@42 279 ST(&(xo[WS(os, 5)]), VSUB(T1f, T1g), ovs, &(xo[WS(os, 1)]));
Chris@42 280 ST(&(xo[WS(os, 10)]), VADD(T1f, T1g), ovs, &(xo[0]));
Chris@42 281 {
Chris@42 282 V Tu, Ts, Tt, Ty, TA, Tw, Tx, Tz, Tv;
Chris@42 283 Tu = VMUL(LDK(KP559016994), VSUB(Tg, Tr));
Chris@42 284 Ts = VADD(Tg, Tr);
Chris@42 285 Tt = VFNMS(LDK(KP250000000), Ts, T5);
Chris@42 286 Tw = VSUB(Tl, Tq);
Chris@42 287 Tx = VSUB(Ta, Tf);
Chris@42 288 Ty = VBYI(VFNMS(LDK(KP587785252), Tx, VMUL(LDK(KP951056516), Tw)));
Chris@42 289 TA = VBYI(VFMA(LDK(KP951056516), Tx, VMUL(LDK(KP587785252), Tw)));
Chris@42 290 ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0]));
Chris@42 291 Tz = VADD(Tu, Tt);
Chris@42 292 ST(&(xo[WS(os, 6)]), VSUB(Tz, TA), ovs, &(xo[0]));
Chris@42 293 ST(&(xo[WS(os, 9)]), VADD(TA, Tz), ovs, &(xo[WS(os, 1)]));
Chris@42 294 Tv = VSUB(Tt, Tu);
Chris@42 295 ST(&(xo[WS(os, 3)]), VSUB(Tv, Ty), ovs, &(xo[WS(os, 1)]));
Chris@42 296 ST(&(xo[WS(os, 12)]), VADD(Ty, Tv), ovs, &(xo[0]));
Chris@42 297 }
Chris@42 298 {
Chris@42 299 V TS, TW, T1b, T18, T13, T1a, TL, T17, T12, TJ, TK;
Chris@42 300 TS = VFNMS(LDK(KP509036960), TR, VMUL(LDK(KP823639103), TO));
Chris@42 301 TW = VFNMS(LDK(KP587785252), TV, VMUL(LDK(KP951056516), TU));
Chris@42 302 T1b = VFMA(LDK(KP951056516), TV, VMUL(LDK(KP587785252), TU));
Chris@42 303 T18 = VFMA(LDK(KP823639103), TR, VMUL(LDK(KP509036960), TO));
Chris@42 304 T12 = VFNMS(LDK(KP216506350), T11, VMUL(LDK(KP866025403), T10));
Chris@42 305 T13 = VSUB(TZ, T12);
Chris@42 306 T1a = VADD(TZ, T12);
Chris@42 307 TJ = VFNMS(LDK(KP250000000), TI, TB);
Chris@42 308 TK = VMUL(LDK(KP559016994), VSUB(TE, TH));
Chris@42 309 TL = VSUB(TJ, TK);
Chris@42 310 T17 = VADD(TK, TJ);
Chris@42 311 {
Chris@42 312 V TT, T14, T1d, T1e;
Chris@42 313 TT = VSUB(TL, TS);
Chris@42 314 T14 = VBYI(VSUB(TW, T13));
Chris@42 315 ST(&(xo[WS(os, 8)]), VSUB(TT, T14), ovs, &(xo[0]));
Chris@42 316 ST(&(xo[WS(os, 7)]), VADD(TT, T14), ovs, &(xo[WS(os, 1)]));
Chris@42 317 T1d = VSUB(T17, T18);
Chris@42 318 T1e = VBYI(VADD(T1b, T1a));
Chris@42 319 ST(&(xo[WS(os, 11)]), VSUB(T1d, T1e), ovs, &(xo[WS(os, 1)]));
Chris@42 320 ST(&(xo[WS(os, 4)]), VADD(T1d, T1e), ovs, &(xo[0]));
Chris@42 321 }
Chris@42 322 {
Chris@42 323 V T15, T16, T19, T1c;
Chris@42 324 T15 = VADD(TL, TS);
Chris@42 325 T16 = VBYI(VADD(TW, T13));
Chris@42 326 ST(&(xo[WS(os, 13)]), VSUB(T15, T16), ovs, &(xo[WS(os, 1)]));
Chris@42 327 ST(&(xo[WS(os, 2)]), VADD(T15, T16), ovs, &(xo[0]));
Chris@42 328 T19 = VADD(T17, T18);
Chris@42 329 T1c = VBYI(VSUB(T1a, T1b));
Chris@42 330 ST(&(xo[WS(os, 14)]), VSUB(T19, T1c), ovs, &(xo[0]));
Chris@42 331 ST(&(xo[WS(os, 1)]), VADD(T19, T1c), ovs, &(xo[WS(os, 1)]));
Chris@42 332 }
Chris@42 333 }
Chris@42 334 }
Chris@42 335 }
Chris@42 336 VLEAVE();
Chris@42 337 }
Chris@42 338
Chris@42 339 static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {64, 11, 14, 0}, &GENUS, 0, 0, 0, 0 };
Chris@42 340
Chris@42 341 void XSIMD(codelet_n1fv_15) (planner *p) {
Chris@42 342 X(kdft_register) (p, n1fv_15, &desc);
Chris@42 343 }
Chris@42 344
Chris@42 345 #endif /* HAVE_FMA */
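
For orientation only (not part of the annotated file): a codelet such as n1fv_15 is never called directly by applications. It is registered with FFTW's planner (via X(kdft_register) above), and the planner may select it internally when a plan involves a length-15 forward DFT and SIMD support is available. A minimal sketch of a caller using the standard FFTW3 public API, under the assumption of a double-precision build, looks like this:

    #include <fftw3.h>

    int main(void)
    {
        /* 15-point complex forward transform; the planner may choose a
           SIMD codelet like n1fv_15 internally, depending on the build. */
        fftw_complex *in  = fftw_malloc(sizeof(fftw_complex) * 15);
        fftw_complex *out = fftw_malloc(sizeof(fftw_complex) * 15);
        fftw_plan p = fftw_plan_dft_1d(15, in, out, FFTW_FORWARD, FFTW_ESTIMATE);

        for (int i = 0; i < 15; i++) {
            in[i][0] = (double) i;   /* real part */
            in[i][1] = 0.0;          /* imaginary part */
        }

        fftw_execute(p);

        fftw_destroy_plan(p);
        fftw_free(in);
        fftw_free(out);
        return 0;
    }

Which codelet actually runs is a planner decision; the sketch above only shows the public entry point through which a size-15 transform would reach code of this kind.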