annotate src/fftw-3.3.5/dft/simd/common/n1bv_15.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 2cd0e3b3e1fd
children
rev   line source
Chris@42 1 /*
Chris@42 2 * Copyright (c) 2003, 2007-14 Matteo Frigo
Chris@42 3 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
Chris@42 4 *
Chris@42 5 * This program is free software; you can redistribute it and/or modify
Chris@42 6 * it under the terms of the GNU General Public License as published by
Chris@42 7 * the Free Software Foundation; either version 2 of the License, or
Chris@42 8 * (at your option) any later version.
Chris@42 9 *
Chris@42 10 * This program is distributed in the hope that it will be useful,
Chris@42 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@42 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@42 13 * GNU General Public License for more details.
Chris@42 14 *
Chris@42 15 * You should have received a copy of the GNU General Public License
Chris@42 16 * along with this program; if not, write to the Free Software
Chris@42 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@42 18 *
Chris@42 19 */
Chris@42 20
Chris@42 21 /* This file was automatically generated --- DO NOT EDIT */
Chris@42 22 /* Generated on Sat Jul 30 16:39:19 EDT 2016 */
Chris@42 23
Chris@42 24 #include "codelet-dft.h"
Chris@42 25
Chris@42 26 #ifdef HAVE_FMA
Chris@42 27
Chris@42 28 /* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 15 -name n1bv_15 -include n1b.h */
Chris@42 29
Chris@42 30 /*
Chris@42 31 * This function contains 78 FP additions, 49 FP multiplications,
Chris@42 32 * (or, 36 additions, 7 multiplications, 42 fused multiply/add),
Chris@42 33 * 78 stack variables, 8 constants, and 30 memory accesses
Chris@42 34 */
Chris@42 35 #include "n1b.h"
Chris@42 36
/*
 * NOTE(review): this chunk is a Mercurial "annotate" dump — the "Chris@42 NN"
 * prefixes are VCS line annotations, not C tokens — of a codelet that FFTW's
 * genfft emitted and marked DO NOT EDIT.  The annotated lines are therefore
 * kept byte-identical; only these unprefixed review comments are added.
 *
 * FMA variant: SIMD 15-point backward (sign +1, per the genfft command line
 * above) complex DFT, applied to v independent transforms per call.
 * xi/xo are aliased to ii/io — presumably the imaginary-first addressing is
 * how the backward codelet is derived from the forward kernel; confirm
 * against the LD/ST definitions in n1b.h / the SIMD support header.
 */
Chris@42 37 static void n1bv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@42 38 {
Chris@42 39 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@42 40 DVK(KP910592997, +0.910592997310029334643087372129977886038870291);
Chris@42 41 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@42 42 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@42 43 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@42 44 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@42 45 DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
Chris@42 46 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@42 47 {
Chris@42 48 INT i;
Chris@42 49 const R *xi;
Chris@42 50 R *xo;
Chris@42 51 xi = ii;
Chris@42 52 xo = io;
/* One SIMD vector of VL transforms per iteration.  MAKE_VOLATILE_STRIDE is
   presumably a compiler-optimization workaround — confirm in the SIMD
   support headers. */
Chris@42 53 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@42 54 V Tb, TH, Tw, TA, Th, T11, T5, Ti, T12, Ta, Tx, Te, Tq, T16, Tj;
Chris@42 55 V T1, T2, T3;
/* Inputs are gathered in stride-5 triples — (0,5,10), (3,8,13), (9,14,4),
   (12,2,7), (6,11,1) — consistent with a 3x5 prime-factor split of n = 15. */
Chris@42 56 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@42 57 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@42 58 T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@42 59 {
Chris@42 60 V T6, T7, T8, Tm, Tn, To;
Chris@42 61 T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@42 62 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@42 63 T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@42 64 Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@42 65 Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@42 66 To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@42 67 {
Chris@42 68 V T4, Tc, T9, Td, Tp;
Chris@42 69 Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@42 70 T4 = VADD(T2, T3);
Chris@42 71 TH = VSUB(T2, T3);
Chris@42 72 Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@42 73 Tw = VSUB(T7, T8);
Chris@42 74 T9 = VADD(T7, T8);
Chris@42 75 Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@42 76 Tp = VADD(Tn, To);
Chris@42 77 TA = VSUB(Tn, To);
Chris@42 78 Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@42 79 T11 = VADD(T1, T4);
Chris@42 80 T5 = VFNMS(LDK(KP500000000), T4, T1);
Chris@42 81 Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@42 82 T12 = VADD(T6, T9);
Chris@42 83 Ta = VFNMS(LDK(KP500000000), T9, T6);
Chris@42 84 Tx = VSUB(Tc, Td);
Chris@42 85 Te = VADD(Tc, Td);
Chris@42 86 Tq = VFNMS(LDK(KP500000000), Tp, Tm);
Chris@42 87 T16 = VADD(Tm, Tp);
Chris@42 88 Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@42 89 }
Chris@42 90 }
Chris@42 91 {
Chris@42 92 V TI, Ty, T13, Tf, Tz, Tk;
Chris@42 93 TI = VADD(Tw, Tx);
Chris@42 94 Ty = VSUB(Tw, Tx);
Chris@42 95 T13 = VADD(Tb, Te);
Chris@42 96 Tf = VFNMS(LDK(KP500000000), Te, Tb);
Chris@42 97 Tz = VSUB(Ti, Tj);
Chris@42 98 Tk = VADD(Ti, Tj);
Chris@42 99 {
Chris@42 100 V T1d, T14, Tg, TE, TJ, TB, T15, Tl;
Chris@42 101 T1d = VSUB(T12, T13);
Chris@42 102 T14 = VADD(T12, T13);
Chris@42 103 Tg = VADD(Ta, Tf);
Chris@42 104 TE = VSUB(Ta, Tf);
Chris@42 105 TJ = VADD(Tz, TA);
Chris@42 106 TB = VSUB(Tz, TA);
Chris@42 107 T15 = VADD(Th, Tk);
Chris@42 108 Tl = VFNMS(LDK(KP500000000), Tk, Th);
Chris@42 109 {
Chris@42 110 V TM, TK, TS, TC, T1c, T17, Tr, TF, TL, T10;
Chris@42 111 TM = VSUB(TI, TJ);
Chris@42 112 TK = VADD(TI, TJ);
Chris@42 113 TS = VFNMS(LDK(KP618033988), Ty, TB);
Chris@42 114 TC = VFMA(LDK(KP618033988), TB, Ty);
Chris@42 115 T1c = VSUB(T15, T16);
Chris@42 116 T17 = VADD(T15, T16);
Chris@42 117 Tr = VADD(Tl, Tq);
Chris@42 118 TF = VSUB(Tl, Tq);
Chris@42 119 TL = VFNMS(LDK(KP250000000), TK, TH);
Chris@42 120 T10 = VMUL(LDK(KP866025403), VADD(TH, TK));
Chris@42 121 {
Chris@42 122 V T1g, T1e, T1a, Tu, Ts, TU, TG, TV, TN, T19, T18, Tt, TZ;
Chris@42 123 T1g = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), T1c, T1d));
Chris@42 124 T1e = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), T1d, T1c));
Chris@42 125 T18 = VADD(T14, T17);
Chris@42 126 T1a = VSUB(T14, T17);
Chris@42 127 Tu = VSUB(Tg, Tr);
Chris@42 128 Ts = VADD(Tg, Tr);
Chris@42 129 TU = VFNMS(LDK(KP618033988), TE, TF);
Chris@42 130 TG = VFMA(LDK(KP618033988), TF, TE);
Chris@42 131 TV = VFNMS(LDK(KP559016994), TM, TL);
Chris@42 132 TN = VFMA(LDK(KP559016994), TM, TL);
/* Output k = 0 is the plain sum of all fifteen inputs (T11 + T18). */
Chris@42 133 ST(&(xo[0]), VADD(T11, T18), ovs, &(xo[0]));
Chris@42 134 T19 = VFNMS(LDK(KP250000000), T18, T11);
Chris@42 135 Tt = VFNMS(LDK(KP250000000), Ts, T5);
Chris@42 136 TZ = VADD(T5, Ts);
Chris@42 137 {
Chris@42 138 V TW, TY, TQ, TO, T1b, T1f, TR, Tv;
Chris@42 139 TW = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), TV, TU));
Chris@42 140 TY = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), TV, TU));
Chris@42 141 TQ = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), TN, TG));
Chris@42 142 TO = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), TN, TG));
Chris@42 143 T1b = VFNMS(LDK(KP559016994), T1a, T19);
Chris@42 144 T1f = VFMA(LDK(KP559016994), T1a, T19);
Chris@42 145 TR = VFNMS(LDK(KP559016994), Tu, Tt);
Chris@42 146 Tv = VFMA(LDK(KP559016994), Tu, Tt);
/* Remaining outputs are stored in pairs (k, 15-k) that share the same two
   operands with VFMAI/VFNMSI swapped — presumably add/subtract of an
   i-rotated term; confirm those macros in the SIMD support header. */
Chris@42 147 ST(&(xo[WS(os, 10)]), VFMAI(T10, TZ), ovs, &(xo[0]));
Chris@42 148 ST(&(xo[WS(os, 5)]), VFNMSI(T10, TZ), ovs, &(xo[WS(os, 1)]));
Chris@42 149 {
Chris@42 150 V TT, TX, TP, TD;
Chris@42 151 ST(&(xo[WS(os, 12)]), VFNMSI(T1e, T1b), ovs, &(xo[0]));
Chris@42 152 ST(&(xo[WS(os, 3)]), VFMAI(T1e, T1b), ovs, &(xo[WS(os, 1)]));
Chris@42 153 ST(&(xo[WS(os, 9)]), VFNMSI(T1g, T1f), ovs, &(xo[WS(os, 1)]));
Chris@42 154 ST(&(xo[WS(os, 6)]), VFMAI(T1g, T1f), ovs, &(xo[0]));
Chris@42 155 TT = VFNMS(LDK(KP823639103), TS, TR);
Chris@42 156 TX = VFMA(LDK(KP823639103), TS, TR);
Chris@42 157 TP = VFMA(LDK(KP823639103), TC, Tv);
Chris@42 158 TD = VFNMS(LDK(KP823639103), TC, Tv);
Chris@42 159 ST(&(xo[WS(os, 13)]), VFMAI(TW, TT), ovs, &(xo[WS(os, 1)]));
Chris@42 160 ST(&(xo[WS(os, 2)]), VFNMSI(TW, TT), ovs, &(xo[0]));
Chris@42 161 ST(&(xo[WS(os, 8)]), VFMAI(TY, TX), ovs, &(xo[0]));
Chris@42 162 ST(&(xo[WS(os, 7)]), VFNMSI(TY, TX), ovs, &(xo[WS(os, 1)]));
Chris@42 163 ST(&(xo[WS(os, 11)]), VFMAI(TQ, TP), ovs, &(xo[WS(os, 1)]));
Chris@42 164 ST(&(xo[WS(os, 4)]), VFNMSI(TQ, TP), ovs, &(xo[0]));
Chris@42 165 ST(&(xo[WS(os, 14)]), VFNMSI(TO, TD), ovs, &(xo[0]));
Chris@42 166 ST(&(xo[WS(os, 1)]), VFMAI(TO, TD), ovs, &(xo[WS(os, 1)]));
Chris@42 167 }
Chris@42 168 }
Chris@42 169 }
Chris@42 170 }
Chris@42 171 }
Chris@42 172 }
Chris@42 173 }
Chris@42 174 }
Chris@42 175 VLEAVE();
Chris@42 176 }
Chris@42 177
/* Planner descriptor for the FMA variant: n = 15, op counts
   {adds = 36, muls = 7, fmas = 42} matching the operation-count comment at
   the top of this #ifdef branch. */
Chris@42 178 static const kdft_desc desc = { 15, XSIMD_STRING("n1bv_15"), {36, 7, 42, 0}, &GENUS, 0, 0, 0, 0 };
Chris@42 179
/* Entry point: registers the n1bv_15 kernel and its descriptor with the
   planner.  XSIMD/X presumably expand to the configured SIMD/precision name
   prefixes — confirm in codelet-dft.h. */
Chris@42 180 void XSIMD(codelet_n1bv_15) (planner *p) {
Chris@42 181 X(kdft_register) (p, n1bv_15, &desc);
Chris@42 182 }
Chris@42 183
Chris@42 184 #else /* HAVE_FMA */
Chris@42 185
Chris@42 186 /* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 15 -name n1bv_15 -include n1b.h */
Chris@42 187
Chris@42 188 /*
Chris@42 189 * This function contains 78 FP additions, 25 FP multiplications,
Chris@42 190 * (or, 64 additions, 11 multiplications, 14 fused multiply/add),
Chris@42 191 * 55 stack variables, 10 constants, and 30 memory accesses
Chris@42 192 */
Chris@42 193 #include "n1b.h"
Chris@42 194
/*
 * NOTE(review): Mercurial "annotate" dump (the "Chris@42 NN" prefixes are
 * VCS annotations, not C tokens) of a genfft-generated, DO-NOT-EDIT codelet.
 * Annotated lines are kept byte-identical; only these unprefixed review
 * comments are added.
 *
 * Non-FMA variant of the same SIMD 15-point backward (sign +1) complex DFT:
 * rotations use explicit VMUL plus VBYI (presumably multiply-by-i — confirm
 * in the SIMD support header) instead of the fused VFMAI/VFNMSI forms used
 * by the HAVE_FMA branch above.
 */
Chris@42 195 static void n1bv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@42 196 {
Chris@42 197 DVK(KP216506350, +0.216506350946109661690930792688234045867850657);
Chris@42 198 DVK(KP509036960, +0.509036960455127183450980863393907648510733164);
Chris@42 199 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@42 200 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@42 201 DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
Chris@42 202 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@42 203 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@42 204 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@42 205 DVK(KP484122918, +0.484122918275927110647408174972799951354115213);
Chris@42 206 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@42 207 {
Chris@42 208 INT i;
Chris@42 209 const R *xi;
Chris@42 210 R *xo;
Chris@42 211 xi = ii;
Chris@42 212 xo = io;
Chris@42 213 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@42 214 V Ti, T11, TH, Ts, TL, TM, Tz, TC, TD, TI, T12, T13, T14, T15, T16;
Chris@42 215 V T17, Tf, Tj, TZ, T10;
/* Same stride-5 input grouping as the FMA variant — (0,5,10), (3,8,13),
   (9,14,4), (12,2,7), (6,11,1) — consistent with a 3x5 prime-factor split. */
Chris@42 216 {
Chris@42 217 V TF, Tg, Th, TG;
Chris@42 218 TF = LD(&(xi[0]), ivs, &(xi[0]));
Chris@42 219 Tg = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@42 220 Th = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@42 221 TG = VADD(Tg, Th);
Chris@42 222 Ti = VSUB(Tg, Th);
Chris@42 223 T11 = VADD(TF, TG);
Chris@42 224 TH = VFNMS(LDK(KP500000000), TG, TF);
Chris@42 225 }
Chris@42 226 {
Chris@42 227 V Tm, Tn, T3, To, Tw, Tx, Td, Ty, Tp, Tq, T6, Tr, Tt, Tu, Ta;
Chris@42 228 V Tv, T7, Te;
Chris@42 229 {
Chris@42 230 V T1, T2, Tb, Tc;
Chris@42 231 Tm = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@42 232 T1 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@42 233 T2 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@42 234 Tn = VADD(T1, T2);
Chris@42 235 T3 = VSUB(T1, T2);
Chris@42 236 To = VFNMS(LDK(KP500000000), Tn, Tm);
Chris@42 237 Tw = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@42 238 Tb = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@42 239 Tc = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@42 240 Tx = VADD(Tb, Tc);
Chris@42 241 Td = VSUB(Tb, Tc);
Chris@42 242 Ty = VFNMS(LDK(KP500000000), Tx, Tw);
Chris@42 243 }
Chris@42 244 {
Chris@42 245 V T4, T5, T8, T9;
Chris@42 246 Tp = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@42 247 T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@42 248 T5 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@42 249 Tq = VADD(T4, T5);
Chris@42 250 T6 = VSUB(T4, T5);
Chris@42 251 Tr = VFNMS(LDK(KP500000000), Tq, Tp);
Chris@42 252 Tt = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@42 253 T8 = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@42 254 T9 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@42 255 Tu = VADD(T8, T9);
Chris@42 256 Ta = VSUB(T8, T9);
Chris@42 257 Tv = VFNMS(LDK(KP500000000), Tu, Tt);
Chris@42 258 }
Chris@42 259 Ts = VSUB(To, Tr);
Chris@42 260 TL = VSUB(T3, T6);
Chris@42 261 TM = VSUB(Ta, Td);
Chris@42 262 Tz = VSUB(Tv, Ty);
Chris@42 263 TC = VADD(To, Tr);
Chris@42 264 TD = VADD(Tv, Ty);
Chris@42 265 TI = VADD(TC, TD);
Chris@42 266 T12 = VADD(Tm, Tn);
Chris@42 267 T13 = VADD(Tp, Tq);
Chris@42 268 T14 = VADD(T12, T13);
Chris@42 269 T15 = VADD(Tt, Tu);
Chris@42 270 T16 = VADD(Tw, Tx);
Chris@42 271 T17 = VADD(T15, T16);
Chris@42 272 T7 = VADD(T3, T6);
Chris@42 273 Te = VADD(Ta, Td);
Chris@42 274 Tf = VMUL(LDK(KP484122918), VSUB(T7, Te));
Chris@42 275 Tj = VADD(T7, Te);
Chris@42 276 }
Chris@42 277 TZ = VADD(TH, TI);
Chris@42 278 T10 = VBYI(VMUL(LDK(KP866025403), VADD(Ti, Tj)));
/* Outputs are stored in pairs (k, 15-k) built from the same two operands
   with the VADD/VSUB roles swapped. */
Chris@42 279 ST(&(xo[WS(os, 5)]), VSUB(TZ, T10), ovs, &(xo[WS(os, 1)]));
Chris@42 280 ST(&(xo[WS(os, 10)]), VADD(T10, TZ), ovs, &(xo[0]));
Chris@42 281 {
Chris@42 282 V T1a, T18, T19, T1e, T1f, T1c, T1d, T1g, T1b;
Chris@42 283 T1a = VMUL(LDK(KP559016994), VSUB(T14, T17));
Chris@42 284 T18 = VADD(T14, T17);
Chris@42 285 T19 = VFNMS(LDK(KP250000000), T18, T11);
Chris@42 286 T1c = VSUB(T12, T13);
Chris@42 287 T1d = VSUB(T15, T16);
Chris@42 288 T1e = VBYI(VFNMS(LDK(KP951056516), T1d, VMUL(LDK(KP587785252), T1c)));
Chris@42 289 T1f = VBYI(VFMA(LDK(KP951056516), T1c, VMUL(LDK(KP587785252), T1d)));
/* Output k = 0 is the plain sum of all fifteen inputs (T11 + T18). */
Chris@42 290 ST(&(xo[0]), VADD(T11, T18), ovs, &(xo[0]));
Chris@42 291 T1g = VADD(T1a, T19);
Chris@42 292 ST(&(xo[WS(os, 6)]), VADD(T1f, T1g), ovs, &(xo[0]));
Chris@42 293 ST(&(xo[WS(os, 9)]), VSUB(T1g, T1f), ovs, &(xo[WS(os, 1)]));
Chris@42 294 T1b = VSUB(T19, T1a);
Chris@42 295 ST(&(xo[WS(os, 3)]), VSUB(T1b, T1e), ovs, &(xo[WS(os, 1)]));
Chris@42 296 ST(&(xo[WS(os, 12)]), VADD(T1e, T1b), ovs, &(xo[0]));
Chris@42 297 }
Chris@42 298 {
Chris@42 299 V TA, TN, TU, TS, Tl, TR, TK, TV, Tk, TE, TJ;
Chris@42 300 TA = VFMA(LDK(KP951056516), Ts, VMUL(LDK(KP587785252), Tz));
Chris@42 301 TN = VFMA(LDK(KP823639103), TL, VMUL(LDK(KP509036960), TM));
Chris@42 302 TU = VFNMS(LDK(KP823639103), TM, VMUL(LDK(KP509036960), TL));
Chris@42 303 TS = VFNMS(LDK(KP951056516), Tz, VMUL(LDK(KP587785252), Ts));
Chris@42 304 Tk = VFNMS(LDK(KP216506350), Tj, VMUL(LDK(KP866025403), Ti));
Chris@42 305 Tl = VADD(Tf, Tk);
Chris@42 306 TR = VSUB(Tf, Tk);
Chris@42 307 TE = VMUL(LDK(KP559016994), VSUB(TC, TD));
Chris@42 308 TJ = VFNMS(LDK(KP250000000), TI, TH);
Chris@42 309 TK = VADD(TE, TJ);
Chris@42 310 TV = VSUB(TJ, TE);
Chris@42 311 {
Chris@42 312 V TB, TO, TX, TY;
Chris@42 313 TB = VBYI(VADD(Tl, TA));
Chris@42 314 TO = VSUB(TK, TN);
Chris@42 315 ST(&(xo[WS(os, 1)]), VADD(TB, TO), ovs, &(xo[WS(os, 1)]));
Chris@42 316 ST(&(xo[WS(os, 14)]), VSUB(TO, TB), ovs, &(xo[0]));
Chris@42 317 TX = VBYI(VSUB(TS, TR));
Chris@42 318 TY = VSUB(TV, TU);
Chris@42 319 ST(&(xo[WS(os, 7)]), VADD(TX, TY), ovs, &(xo[WS(os, 1)]));
Chris@42 320 ST(&(xo[WS(os, 8)]), VSUB(TY, TX), ovs, &(xo[0]));
Chris@42 321 }
Chris@42 322 {
Chris@42 323 V TP, TQ, TT, TW;
Chris@42 324 TP = VBYI(VSUB(Tl, TA));
Chris@42 325 TQ = VADD(TN, TK);
Chris@42 326 ST(&(xo[WS(os, 4)]), VADD(TP, TQ), ovs, &(xo[0]));
Chris@42 327 ST(&(xo[WS(os, 11)]), VSUB(TQ, TP), ovs, &(xo[WS(os, 1)]));
Chris@42 328 TT = VBYI(VADD(TR, TS));
Chris@42 329 TW = VADD(TU, TV);
Chris@42 330 ST(&(xo[WS(os, 2)]), VADD(TT, TW), ovs, &(xo[0]));
Chris@42 331 ST(&(xo[WS(os, 13)]), VSUB(TW, TT), ovs, &(xo[WS(os, 1)]));
Chris@42 332 }
Chris@42 333 }
Chris@42 334 }
Chris@42 335 }
Chris@42 336 VLEAVE();
Chris@42 337 }
Chris@42 338
/* Planner descriptor for the non-FMA variant: n = 15, op counts
   {adds = 64, muls = 11, fmas = 14} matching the operation-count comment at
   the top of this #else branch. */
Chris@42 339 static const kdft_desc desc = { 15, XSIMD_STRING("n1bv_15"), {64, 11, 14, 0}, &GENUS, 0, 0, 0, 0 };
Chris@42 340
/* Entry point for the non-FMA build: registers the n1bv_15 kernel and its
   descriptor with the planner.  Only one of the two #ifdef branches is
   compiled, so the symbol is defined exactly once. */
Chris@42 341 void XSIMD(codelet_n1bv_15) (planner *p) {
Chris@42 342 X(kdft_register) (p, n1bv_15, &desc);
Chris@42 343 }
Chris@42 344
Chris@42 345 #endif /* HAVE_FMA */