annotate src/fftw-3.3.8/dft/simd/common/n1fv_15.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents d0c2a83c1364
children
rev   line source
Chris@82 1 /*
Chris@82 2 * Copyright (c) 2003, 2007-14 Matteo Frigo
Chris@82 3 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
Chris@82 4 *
Chris@82 5 * This program is free software; you can redistribute it and/or modify
Chris@82 6 * it under the terms of the GNU General Public License as published by
Chris@82 7 * the Free Software Foundation; either version 2 of the License, or
Chris@82 8 * (at your option) any later version.
Chris@82 9 *
Chris@82 10 * This program is distributed in the hope that it will be useful,
Chris@82 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@82 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@82 13 * GNU General Public License for more details.
Chris@82 14 *
Chris@82 15 * You should have received a copy of the GNU General Public License
Chris@82 16 * along with this program; if not, write to the Free Software
Chris@82 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@82 18 *
Chris@82 19 */
Chris@82 20
Chris@82 21 /* This file was automatically generated --- DO NOT EDIT */
Chris@82 22 /* Generated on Thu May 24 08:04:52 EDT 2018 */
Chris@82 23
Chris@82 24 #include "dft/codelet-dft.h"
Chris@82 25
Chris@82 26 #if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)
Chris@82 27
Chris@82 28 /* Generated by: ../../../genfft/gen_notw_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include dft/simd/n1f.h */
Chris@82 29
Chris@82 30 /*
Chris@82 31 * This function contains 78 FP additions, 49 FP multiplications,
Chris@82 32 * (or, 36 additions, 7 multiplications, 42 fused multiply/add),
Chris@82 33 * 53 stack variables, 8 constants, and 30 memory accesses
Chris@82 34 */
Chris@82 35 #include "dft/simd/n1f.h"
Chris@82 36
/*
 * n1fv_15 (FMA variant): machine-generated SIMD codelet computing a
 * size-15 forward complex DFT over v independent transforms.
 *
 * NOTE(review): only ri/ro are dereferenced in this body; ii and io are
 * unused -- presumably the SIMD complex data format packs real/imaginary
 * together (confirm against dft/simd/n1f.h, which defines V, LD, ST, etc.).
 * is/os are the element strides; ivs/ovs step between the v transforms.
 *
 * This file is auto-generated (see header above): comments only, no code
 * changes -- any fix belongs in genfft, not here.
 */
Chris@82 37 static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@82 38 {
/* Vector-broadcast trig constants used by the size-15 butterflies. */
Chris@82 39 DVK(KP910592997, +0.910592997310029334643087372129977886038870291);
Chris@82 40 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@82 41 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@82 42 DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
Chris@82 43 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@82 44 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@82 45 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@82 46 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@82 47 {
Chris@82 48 INT i;
Chris@82 49 const R *xi;
Chris@82 50 R *xo;
Chris@82 51 xi = ri;
Chris@82 52 xo = ro;
/* VL transforms are handled per iteration; xi/xo advance by the vector
   strides.  MAKE_VOLATILE_STRIDE is the usual FFTW guard macro -- its
   exact semantics live elsewhere (NOTE(review): confirm in simd support
   headers); it takes effect via the comma expression each iteration. */
Chris@82 53 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@82 54 V T5, TX, TB, TO, TU, TV, TR, Ta, Tf, Tg, Tl, Tq, Tr, TE, TH;
Chris@82 55 V TI, T10, T12, T1f, T1g;
/* Inputs are loaded in stride-5 triples -- (0,5,10) here, then
   (3,8,13), (12,2,7), (6,11,1), (9,14,4) below -- consistent with a
   3x5 decomposition of n = 15. */
Chris@82 56 {
Chris@82 57 V T1, T2, T3, T4;
Chris@82 58 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@82 59 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@82 60 T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@82 61 T4 = VADD(T2, T3);
Chris@82 62 T5 = VADD(T1, T4);
Chris@82 63 TX = VSUB(T3, T2);
Chris@82 64 TB = VFNMS(LDK(KP500000000), T4, T1);
Chris@82 65 }
Chris@82 66 {
Chris@82 67 V T6, T9, TC, TM, Tm, Tp, TG, TQ, Tb, Te, TD, TN, Th, Tk, TF;
Chris@82 68 V TP, TY, TZ;
Chris@82 69 {
Chris@82 70 V T7, T8, Tn, To;
Chris@82 71 T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@82 72 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@82 73 T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@82 74 T9 = VADD(T7, T8);
Chris@82 75 TC = VFNMS(LDK(KP500000000), T9, T6);
Chris@82 76 TM = VSUB(T8, T7);
Chris@82 77 Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@82 78 Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@82 79 To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@82 80 Tp = VADD(Tn, To);
Chris@82 81 TG = VFNMS(LDK(KP500000000), Tp, Tm);
Chris@82 82 TQ = VSUB(To, Tn);
Chris@82 83 }
Chris@82 84 {
Chris@82 85 V Tc, Td, Ti, Tj;
Chris@82 86 Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@82 87 Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@82 88 Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@82 89 Te = VADD(Tc, Td);
Chris@82 90 TD = VFNMS(LDK(KP500000000), Te, Tb);
Chris@82 91 TN = VSUB(Td, Tc);
Chris@82 92 Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@82 93 Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@82 94 Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@82 95 Tk = VADD(Ti, Tj);
Chris@82 96 TF = VFNMS(LDK(KP500000000), Tk, Th);
Chris@82 97 TP = VSUB(Tj, Ti);
Chris@82 98 }
Chris@82 99 TO = VSUB(TM, TN);
Chris@82 100 TU = VSUB(TC, TD);
Chris@82 101 TV = VSUB(TF, TG);
Chris@82 102 TR = VSUB(TP, TQ);
Chris@82 103 Ta = VADD(T6, T9);
Chris@82 104 Tf = VADD(Tb, Te);
Chris@82 105 Tg = VADD(Ta, Tf);
Chris@82 106 Tl = VADD(Th, Tk);
Chris@82 107 Tq = VADD(Tm, Tp);
Chris@82 108 Tr = VADD(Tl, Tq);
Chris@82 109 TE = VADD(TC, TD);
Chris@82 110 TH = VADD(TF, TG);
Chris@82 111 TI = VADD(TE, TH);
Chris@82 112 TY = VADD(TM, TN);
Chris@82 113 TZ = VADD(TP, TQ);
Chris@82 114 T10 = VADD(TY, TZ);
Chris@82 115 T12 = VSUB(TY, TZ);
Chris@82 116 }
/* Output bins 5 and 10 (conjugate pair via VFNMSI/VFMAI). */
Chris@82 117 T1f = VADD(TB, TI);
Chris@82 118 T1g = VMUL(LDK(KP866025403), VADD(TX, T10));
Chris@82 119 ST(&(xo[WS(os, 5)]), VFNMSI(T1g, T1f), ovs, &(xo[WS(os, 1)]));
Chris@82 120 ST(&(xo[WS(os, 10)]), VFMAI(T1g, T1f), ovs, &(xo[0]));
/* Output bins 0, 3, 6, 9, 12 (the multiples of 3, incl. the DC term). */
Chris@82 121 {
Chris@82 122 V Tu, Ts, Tt, Ty, TA, Tw, Tx, Tz, Tv;
Chris@82 123 Tu = VSUB(Tg, Tr);
Chris@82 124 Ts = VADD(Tg, Tr);
Chris@82 125 Tt = VFNMS(LDK(KP250000000), Ts, T5);
Chris@82 126 Tw = VSUB(Tl, Tq);
Chris@82 127 Tx = VSUB(Ta, Tf);
Chris@82 128 Ty = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tx, Tw));
Chris@82 129 TA = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Tw, Tx));
Chris@82 130 ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0]));
Chris@82 131 Tz = VFMA(LDK(KP559016994), Tu, Tt);
Chris@82 132 ST(&(xo[WS(os, 6)]), VFNMSI(TA, Tz), ovs, &(xo[0]));
Chris@82 133 ST(&(xo[WS(os, 9)]), VFMAI(TA, Tz), ovs, &(xo[WS(os, 1)]));
Chris@82 134 Tv = VFNMS(LDK(KP559016994), Tu, Tt);
Chris@82 135 ST(&(xo[WS(os, 3)]), VFNMSI(Ty, Tv), ovs, &(xo[WS(os, 1)]));
Chris@82 136 ST(&(xo[WS(os, 12)]), VFMAI(Ty, Tv), ovs, &(xo[0]));
Chris@82 137 }
/* Remaining output bins 1, 2, 4, 7, 8, 11, 13, 14. */
Chris@82 138 {
Chris@82 139 V TS, TW, T1a, T18, T13, T1b, TL, T17, T11, TJ, TK;
Chris@82 140 TS = VFMA(LDK(KP618033988), TR, TO);
Chris@82 141 TW = VFMA(LDK(KP618033988), TV, TU);
Chris@82 142 T1a = VFNMS(LDK(KP618033988), TU, TV);
Chris@82 143 T18 = VFNMS(LDK(KP618033988), TO, TR);
Chris@82 144 T11 = VFNMS(LDK(KP250000000), T10, TX);
Chris@82 145 T13 = VFMA(LDK(KP559016994), T12, T11);
Chris@82 146 T1b = VFNMS(LDK(KP559016994), T12, T11);
Chris@82 147 TJ = VFNMS(LDK(KP250000000), TI, TB);
Chris@82 148 TK = VSUB(TE, TH);
Chris@82 149 TL = VFMA(LDK(KP559016994), TK, TJ);
Chris@82 150 T17 = VFNMS(LDK(KP559016994), TK, TJ);
Chris@82 151 {
Chris@82 152 V TT, T14, T1d, T1e;
Chris@82 153 TT = VFMA(LDK(KP823639103), TS, TL);
Chris@82 154 T14 = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T13, TW));
Chris@82 155 ST(&(xo[WS(os, 1)]), VFNMSI(T14, TT), ovs, &(xo[WS(os, 1)]));
Chris@82 156 ST(&(xo[WS(os, 14)]), VFMAI(T14, TT), ovs, &(xo[0]));
Chris@82 157 T1d = VFNMS(LDK(KP823639103), T18, T17);
Chris@82 158 T1e = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T1b, T1a));
Chris@82 159 ST(&(xo[WS(os, 8)]), VFNMSI(T1e, T1d), ovs, &(xo[0]));
Chris@82 160 ST(&(xo[WS(os, 7)]), VFMAI(T1e, T1d), ovs, &(xo[WS(os, 1)]));
Chris@82 161 }
Chris@82 162 {
Chris@82 163 V T15, T16, T19, T1c;
Chris@82 164 T15 = VFNMS(LDK(KP823639103), TS, TL);
Chris@82 165 T16 = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T13, TW));
Chris@82 166 ST(&(xo[WS(os, 11)]), VFNMSI(T16, T15), ovs, &(xo[WS(os, 1)]));
Chris@82 167 ST(&(xo[WS(os, 4)]), VFMAI(T16, T15), ovs, &(xo[0]));
Chris@82 168 T19 = VFMA(LDK(KP823639103), T18, T17);
Chris@82 169 T1c = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T1b, T1a));
Chris@82 170 ST(&(xo[WS(os, 13)]), VFNMSI(T1c, T19), ovs, &(xo[WS(os, 1)]));
Chris@82 171 ST(&(xo[WS(os, 2)]), VFMAI(T1c, T19), ovs, &(xo[0]));
Chris@82 172 }
Chris@82 173 }
Chris@82 174 }
Chris@82 175 }
Chris@82 176 VLEAVE();
Chris@82 177 }
Chris@82 178
/* Codelet descriptor: n = 15, operation counts {36 adds, 7 muls, 42 fmas}
   matching the generated operation-count comment earlier in this branch. */
Chris@82 179 static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {36, 7, 42, 0}, &GENUS, 0, 0, 0, 0 };
Chris@82 180
/* Registration entry point: hands this codelet to the FFTW planner. */
Chris@82 181 void XSIMD(codelet_n1fv_15) (planner *p) {
Chris@82 182 X(kdft_register) (p, n1fv_15, &desc);
Chris@82 183 }
Chris@82 184
Chris@82 185 #else
Chris@82 186
Chris@82 187 /* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 15 -name n1fv_15 -include dft/simd/n1f.h */
Chris@82 188
Chris@82 189 /*
Chris@82 190 * This function contains 78 FP additions, 25 FP multiplications,
Chris@82 191 * (or, 64 additions, 11 multiplications, 14 fused multiply/add),
Chris@82 192 * 55 stack variables, 10 constants, and 30 memory accesses
Chris@82 193 */
Chris@82 194 #include "dft/simd/n1f.h"
Chris@82 195
/*
 * n1fv_15 (non-FMA variant): machine-generated SIMD codelet for the same
 * size-15 forward complex DFT as the FMA branch, but expressed with plain
 * VMUL/VADD/VSUB and VBYI (multiply by i) instead of fused multiply-add
 * macros -- hence the different constant set (10 constants vs. 8).
 *
 * NOTE(review): as in the FMA branch, ii and io are never dereferenced
 * here; only ri/ro are used.  Auto-generated file -- comments only.
 */
Chris@82 196 static void n1fv_15(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@82 197 {
/* Vector-broadcast trig constants for the size-15 butterflies. */
Chris@82 198 DVK(KP216506350, +0.216506350946109661690930792688234045867850657);
Chris@82 199 DVK(KP509036960, +0.509036960455127183450980863393907648510733164);
Chris@82 200 DVK(KP823639103, +0.823639103546331925877420039278190003029660514);
Chris@82 201 DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
Chris@82 202 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
Chris@82 203 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@82 204 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@82 205 DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
Chris@82 206 DVK(KP484122918, +0.484122918275927110647408174972799951354115213);
Chris@82 207 DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
Chris@82 208 {
Chris@82 209 INT i;
Chris@82 210 const R *xi;
Chris@82 211 R *xo;
Chris@82 212 xi = ri;
Chris@82 213 xo = ro;
/* VL transforms per iteration; see the FMA branch for the same loop shape. */
Chris@82 214 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(30, is), MAKE_VOLATILE_STRIDE(30, os)) {
Chris@82 215 V T5, T10, TB, TO, TU, TV, TR, Ta, Tf, Tg, Tl, Tq, Tr, TE, TH;
Chris@82 216 V TI, TZ, T11, T1f, T1g;
/* Inputs loaded in stride-5 triples: (0,5,10), (3,8,13), (12,2,7),
   (6,11,1), (9,14,4) -- consistent with a 3x5 decomposition of n = 15. */
Chris@82 217 {
Chris@82 218 V T1, T2, T3, T4;
Chris@82 219 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@82 220 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@82 221 T3 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
Chris@82 222 T4 = VADD(T2, T3);
Chris@82 223 T5 = VADD(T1, T4);
Chris@82 224 T10 = VSUB(T3, T2);
Chris@82 225 TB = VFNMS(LDK(KP500000000), T4, T1);
Chris@82 226 }
Chris@82 227 {
Chris@82 228 V T6, T9, TC, TP, Tm, Tp, TG, TN, Tb, Te, TD, TQ, Th, Tk, TF;
Chris@82 229 V TM, TX, TY;
Chris@82 230 {
Chris@82 231 V T7, T8, Tn, To;
Chris@82 232 T6 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@82 233 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@82 234 T8 = LD(&(xi[WS(is, 13)]), ivs, &(xi[WS(is, 1)]));
Chris@82 235 T9 = VADD(T7, T8);
Chris@82 236 TC = VFNMS(LDK(KP500000000), T9, T6);
Chris@82 237 TP = VSUB(T8, T7);
Chris@82 238 Tm = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@82 239 Tn = LD(&(xi[WS(is, 14)]), ivs, &(xi[0]));
Chris@82 240 To = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@82 241 Tp = VADD(Tn, To);
Chris@82 242 TG = VFNMS(LDK(KP500000000), Tp, Tm);
Chris@82 243 TN = VSUB(To, Tn);
Chris@82 244 }
Chris@82 245 {
Chris@82 246 V Tc, Td, Ti, Tj;
Chris@82 247 Tb = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
Chris@82 248 Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@82 249 Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@82 250 Te = VADD(Tc, Td);
Chris@82 251 TD = VFNMS(LDK(KP500000000), Te, Tb);
Chris@82 252 TQ = VSUB(Td, Tc);
Chris@82 253 Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@82 254 Ti = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
Chris@82 255 Tj = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@82 256 Tk = VADD(Ti, Tj);
Chris@82 257 TF = VFNMS(LDK(KP500000000), Tk, Th);
Chris@82 258 TM = VSUB(Tj, Ti);
Chris@82 259 }
Chris@82 260 TO = VSUB(TM, TN);
Chris@82 261 TU = VSUB(TF, TG);
Chris@82 262 TV = VSUB(TC, TD);
Chris@82 263 TR = VSUB(TP, TQ);
Chris@82 264 Ta = VADD(T6, T9);
Chris@82 265 Tf = VADD(Tb, Te);
Chris@82 266 Tg = VADD(Ta, Tf);
Chris@82 267 Tl = VADD(Th, Tk);
Chris@82 268 Tq = VADD(Tm, Tp);
Chris@82 269 Tr = VADD(Tl, Tq);
Chris@82 270 TE = VADD(TC, TD);
Chris@82 271 TH = VADD(TF, TG);
Chris@82 272 TI = VADD(TE, TH);
Chris@82 273 TX = VADD(TP, TQ);
Chris@82 274 TY = VADD(TM, TN);
Chris@82 275 TZ = VMUL(LDK(KP484122918), VSUB(TX, TY));
Chris@82 276 T11 = VADD(TX, TY);
Chris@82 277 }
/* Output bins 5 and 10 (conjugate pair; imaginary part via VBYI here). */
Chris@82 278 T1f = VADD(TB, TI);
Chris@82 279 T1g = VBYI(VMUL(LDK(KP866025403), VADD(T10, T11)));
Chris@82 280 ST(&(xo[WS(os, 5)]), VSUB(T1f, T1g), ovs, &(xo[WS(os, 1)]));
Chris@82 281 ST(&(xo[WS(os, 10)]), VADD(T1f, T1g), ovs, &(xo[0]));
/* Output bins 0, 3, 6, 9, 12 (the multiples of 3, incl. the DC term). */
Chris@82 282 {
Chris@82 283 V Tu, Ts, Tt, Ty, TA, Tw, Tx, Tz, Tv;
Chris@82 284 Tu = VMUL(LDK(KP559016994), VSUB(Tg, Tr));
Chris@82 285 Ts = VADD(Tg, Tr);
Chris@82 286 Tt = VFNMS(LDK(KP250000000), Ts, T5);
Chris@82 287 Tw = VSUB(Tl, Tq);
Chris@82 288 Tx = VSUB(Ta, Tf);
Chris@82 289 Ty = VBYI(VFNMS(LDK(KP587785252), Tx, VMUL(LDK(KP951056516), Tw)));
Chris@82 290 TA = VBYI(VFMA(LDK(KP951056516), Tx, VMUL(LDK(KP587785252), Tw)));
Chris@82 291 ST(&(xo[0]), VADD(T5, Ts), ovs, &(xo[0]));
Chris@82 292 Tz = VADD(Tu, Tt);
Chris@82 293 ST(&(xo[WS(os, 6)]), VSUB(Tz, TA), ovs, &(xo[0]));
Chris@82 294 ST(&(xo[WS(os, 9)]), VADD(TA, Tz), ovs, &(xo[WS(os, 1)]));
Chris@82 295 Tv = VSUB(Tt, Tu);
Chris@82 296 ST(&(xo[WS(os, 3)]), VSUB(Tv, Ty), ovs, &(xo[WS(os, 1)]));
Chris@82 297 ST(&(xo[WS(os, 12)]), VADD(Ty, Tv), ovs, &(xo[0]));
Chris@82 298 }
/* Remaining output bins 1, 2, 4, 7, 8, 11, 13, 14. */
Chris@82 299 {
Chris@82 300 V TS, TW, T1b, T18, T13, T1a, TL, T17, T12, TJ, TK;
Chris@82 301 TS = VFNMS(LDK(KP509036960), TR, VMUL(LDK(KP823639103), TO));
Chris@82 302 TW = VFNMS(LDK(KP587785252), TV, VMUL(LDK(KP951056516), TU));
Chris@82 303 T1b = VFMA(LDK(KP951056516), TV, VMUL(LDK(KP587785252), TU));
Chris@82 304 T18 = VFMA(LDK(KP823639103), TR, VMUL(LDK(KP509036960), TO));
Chris@82 305 T12 = VFNMS(LDK(KP216506350), T11, VMUL(LDK(KP866025403), T10));
Chris@82 306 T13 = VSUB(TZ, T12);
Chris@82 307 T1a = VADD(TZ, T12);
Chris@82 308 TJ = VFNMS(LDK(KP250000000), TI, TB);
Chris@82 309 TK = VMUL(LDK(KP559016994), VSUB(TE, TH));
Chris@82 310 TL = VSUB(TJ, TK);
Chris@82 311 T17 = VADD(TK, TJ);
Chris@82 312 {
Chris@82 313 V TT, T14, T1d, T1e;
Chris@82 314 TT = VSUB(TL, TS);
Chris@82 315 T14 = VBYI(VSUB(TW, T13));
Chris@82 316 ST(&(xo[WS(os, 8)]), VSUB(TT, T14), ovs, &(xo[0]));
Chris@82 317 ST(&(xo[WS(os, 7)]), VADD(TT, T14), ovs, &(xo[WS(os, 1)]));
Chris@82 318 T1d = VSUB(T17, T18);
Chris@82 319 T1e = VBYI(VADD(T1b, T1a));
Chris@82 320 ST(&(xo[WS(os, 11)]), VSUB(T1d, T1e), ovs, &(xo[WS(os, 1)]));
Chris@82 321 ST(&(xo[WS(os, 4)]), VADD(T1d, T1e), ovs, &(xo[0]));
Chris@82 322 }
Chris@82 323 {
Chris@82 324 V T15, T16, T19, T1c;
Chris@82 325 T15 = VADD(TL, TS);
Chris@82 326 T16 = VBYI(VADD(TW, T13));
Chris@82 327 ST(&(xo[WS(os, 13)]), VSUB(T15, T16), ovs, &(xo[WS(os, 1)]));
Chris@82 328 ST(&(xo[WS(os, 2)]), VADD(T15, T16), ovs, &(xo[0]));
Chris@82 329 T19 = VADD(T17, T18);
Chris@82 330 T1c = VBYI(VSUB(T1a, T1b));
Chris@82 331 ST(&(xo[WS(os, 14)]), VSUB(T19, T1c), ovs, &(xo[0]));
Chris@82 332 ST(&(xo[WS(os, 1)]), VADD(T19, T1c), ovs, &(xo[WS(os, 1)]));
Chris@82 333 }
Chris@82 334 }
Chris@82 335 }
Chris@82 336 }
Chris@82 337 VLEAVE();
Chris@82 338 }
Chris@82 339
/* Codelet descriptor: n = 15, operation counts {64 adds, 11 muls, 14 fmas}
   matching the generated operation-count comment earlier in this branch. */
Chris@82 340 static const kdft_desc desc = { 15, XSIMD_STRING("n1fv_15"), {64, 11, 14, 0}, &GENUS, 0, 0, 0, 0 };
Chris@82 341
/* Registration entry point for the non-FMA variant. */
Chris@82 342 void XSIMD(codelet_n1fv_15) (planner *p) {
Chris@82 343 X(kdft_register) (p, n1fv_15, &desc);
Chris@82 344 }
Chris@82 345
Chris@82 346 #endif