/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu May 24 08:04:51 EDT 2018 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 13 -name n1fv_13 -include dft/simd/n1f.h */

/*
 * This function contains 88 FP additions, 63 FP multiplications,
 * (or, 31 additions, 6 multiplications, 57 fused multiply/add),
 * 63 stack variables, 23 constants, and 26 memory accesses
 */
#include "dft/simd/n1f.h"

static void n1fv_13(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP904176221, +0.904176221990848204433795481776887926501523162);
     DVK(KP575140729, +0.575140729474003121368385547455453388461001608);
     DVK(KP957805992, +0.957805992594665126462521754605754580515587217);
     DVK(KP600477271, +0.600477271932665282925769253334763009352012849);
     DVK(KP516520780, +0.516520780623489722840901288569017135705033622);
     DVK(KP581704778, +0.581704778510515730456870384989698884939833902);
     DVK(KP300462606, +0.300462606288665774426601772289207995520941381);
     DVK(KP503537032, +0.503537032863766627246873853868466977093348562);
     DVK(KP251768516, +0.251768516431883313623436926934233488546674281);
     DVK(KP301479260, +0.301479260047709873958013540496673347309208464);
     DVK(KP083333333, +0.083333333333333333333333333333333333333333333);
     DVK(KP859542535, +0.859542535098774820163672132761689612766401925);
     DVK(KP514918778, +0.514918778086315755491789696138117261566051239);
     DVK(KP522026385, +0.522026385161275033714027226654165028300441940);
     DVK(KP853480001, +0.853480001859823990758994934970528322872359049);
     DVK(KP612264650, +0.612264650376756543746494474777125408779395514);
     DVK(KP038632954, +0.038632954644348171955506895830342264440241080);
     DVK(KP302775637, +0.302775637731994646559610633735247973125648287);
     DVK(KP769338817, +0.769338817572980603471413688209101117038278899);
     DVK(KP686558370, +0.686558370781754340655719594850823015421401653);
     DVK(KP226109445, +0.226109445035782405468510155372505010481906348);
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(26, is), MAKE_VOLATILE_STRIDE(26, os)) {
               V T1, TX, TY, To, TH, TR, TU, TB, TE, Tw, TF, TM, TT;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               {
                    V Tf, TN, Tb, Ty, Tq, T6, Tx, Tr, Ti, Tt, Tl, Tu, Tm, TO, Td;
                    V Te, Tc, Tn;
                    Td = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                    Te = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                    Tf = VADD(Td, Te);
                    TN = VSUB(Td, Te);
                    {
                         V T7, T8, T9, Ta;
                         T7 = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
                         T8 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         T9 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         Ta = VADD(T8, T9);
                         Tb = VADD(T7, Ta);
                         Ty = VFMS(LDK(KP500000000), Ta, T7);
                         Tq = VSUB(T8, T9);
                    }
                    {
                         V T2, T3, T4, T5;
                         T2 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         T3 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         T4 = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                         T5 = VADD(T3, T4);
                         T6 = VADD(T2, T5);
                         Tx = VFNMS(LDK(KP500000000), T5, T2);
                         Tr = VSUB(T4, T3);
                    }
                    {
                         V Tg, Th, Tj, Tk;
                         Tg = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                         Th = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                         Ti = VADD(Tg, Th);
                         Tt = VSUB(Tg, Th);
                         Tj = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         Tk = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         Tl = VADD(Tj, Tk);
                         Tu = VSUB(Tj, Tk);
                    }
                    Tm = VADD(Ti, Tl);
                    TO = VADD(Tt, Tu);
                    TX = VSUB(T6, Tb);
                    TY = VADD(TN, TO);
                    Tc = VADD(T6, Tb);
                    Tn = VADD(Tf, Tm);
                    To = VADD(Tc, Tn);
                    TH = VSUB(Tc, Tn);
                    {
                         V TP, TQ, Tz, TA;
                         TP = VFNMS(LDK(KP500000000), TO, TN);
                         TQ = VADD(Tr, Tq);
                         TR = VFMA(LDK(KP866025403), TQ, TP);
                         TU = VFNMS(LDK(KP866025403), TQ, TP);
                         Tz = VSUB(Tx, Ty);
                         TA = VFNMS(LDK(KP500000000), Tm, Tf);
                         TB = VADD(Tz, TA);
                         TE = VSUB(Tz, TA);
                    }
                    {
                         V Ts, Tv, TK, TL;
                         Ts = VSUB(Tq, Tr);
                         Tv = VSUB(Tt, Tu);
                         Tw = VADD(Ts, Tv);
                         TF = VSUB(Ts, Tv);
                         TK = VADD(Tx, Ty);
                         TL = VSUB(Ti, Tl);
                         TM = VFMA(LDK(KP866025403), TL, TK);
                         TT = VFNMS(LDK(KP866025403), TL, TK);
                    }
               }
               ST(&(xo[0]), VADD(T1, To), ovs, &(xo[0]));
               {
                    V T1c, T1k, T15, T14, T1e, T1n, TZ, TW, T1f, T1m, TD, T1j, TI, T19, TS;
                    V TV;
                    {
                         V T1a, T1b, T12, T13;
                         T1a = VFNMS(LDK(KP226109445), Tw, TB);
                         T1b = VFMA(LDK(KP686558370), TE, TF);
                         T1c = VFNMS(LDK(KP769338817), T1b, T1a);
                         T1k = VFMA(LDK(KP769338817), T1b, T1a);
                         T15 = VFNMS(LDK(KP302775637), TX, TY);
                         T12 = VFMA(LDK(KP038632954), TM, TR);
                         T13 = VFMA(LDK(KP612264650), TT, TU);
                         T14 = VFNMS(LDK(KP853480001), T13, T12);
                         T1e = VFNMS(LDK(KP522026385), T14, T15);
                         T1n = VFMA(LDK(KP853480001), T13, T12);
                    }
                    TZ = VFMA(LDK(KP302775637), TY, TX);
                    TS = VFNMS(LDK(KP038632954), TR, TM);
                    TV = VFNMS(LDK(KP612264650), TU, TT);
                    TW = VFNMS(LDK(KP853480001), TV, TS);
                    T1f = VFMA(LDK(KP853480001), TV, TS);
                    T1m = VFNMS(LDK(KP522026385), TW, TZ);
                    {
                         V TG, T18, Tp, TC, T17;
                         TG = VFNMS(LDK(KP514918778), TF, TE);
                         T18 = VFNMS(LDK(KP859542535), TG, TH);
                         Tp = VFNMS(LDK(KP083333333), To, T1);
                         TC = VFMA(LDK(KP301479260), TB, Tw);
                         T17 = VFNMS(LDK(KP251768516), TC, Tp);
                         TD = VFMA(LDK(KP503537032), TC, Tp);
                         T1j = VFNMS(LDK(KP300462606), T18, T17);
                         TI = VFMA(LDK(KP581704778), TH, TG);
                         T19 = VFMA(LDK(KP300462606), T18, T17);
                    }
                    {
                         V TJ, T10, T1l, T1o;
                         TJ = VFNMS(LDK(KP516520780), TI, TD);
                         T10 = VMUL(LDK(KP600477271), VFMA(LDK(KP957805992), TZ, TW));
                         ST(&(xo[WS(os, 5)]), VFNMSI(T10, TJ), ovs, &(xo[WS(os, 1)]));
                         ST(&(xo[WS(os, 8)]), VFMAI(T10, TJ), ovs, &(xo[0]));
                         {
                              V T11, T16, T1p, T1q;
                              T11 = VFMA(LDK(KP516520780), TI, TD);
                              T16 = VMUL(LDK(KP600477271), VFMA(LDK(KP957805992), T15, T14));
                              ST(&(xo[WS(os, 1)]), VFMAI(T16, T11), ovs, &(xo[WS(os, 1)]));
                              ST(&(xo[WS(os, 12)]), VFNMSI(T16, T11), ovs, &(xo[0]));
                              T1p = VFMA(LDK(KP503537032), T1k, T1j);
                              T1q = VMUL(LDK(KP575140729), VFMA(LDK(KP904176221), T1n, T1m));
                              ST(&(xo[WS(os, 2)]), VFNMSI(T1q, T1p), ovs, &(xo[0]));
                              ST(&(xo[WS(os, 11)]), VFMAI(T1q, T1p), ovs, &(xo[WS(os, 1)]));
                         }
                         T1l = VFNMS(LDK(KP503537032), T1k, T1j);
                         T1o = VMUL(LDK(KP575140729), VFNMS(LDK(KP904176221), T1n, T1m));
                         ST(&(xo[WS(os, 6)]), VFNMSI(T1o, T1l), ovs, &(xo[0]));
                         ST(&(xo[WS(os, 7)]), VFMAI(T1o, T1l), ovs, &(xo[WS(os, 1)]));
                         {
                              V T1h, T1i, T1d, T1g;
                              T1h = VFMA(LDK(KP503537032), T1c, T19);
                              T1i = VMUL(LDK(KP575140729), VFNMS(LDK(KP904176221), T1f, T1e));
                              ST(&(xo[WS(os, 3)]), VFMAI(T1i, T1h), ovs, &(xo[WS(os, 1)]));
                              ST(&(xo[WS(os, 10)]), VFNMSI(T1i, T1h), ovs, &(xo[0]));
                              T1d = VFNMS(LDK(KP503537032), T1c, T19);
                              T1g = VMUL(LDK(KP575140729), VFMA(LDK(KP904176221), T1f, T1e));
                              ST(&(xo[WS(os, 4)]), VFNMSI(T1g, T1d), ovs, &(xo[0]));
                              ST(&(xo[WS(os, 9)]), VFMAI(T1g, T1d), ovs, &(xo[WS(os, 1)]));
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 13, XSIMD_STRING("n1fv_13"), {31, 6, 57, 0}, &GENUS, 0, 0, 0, 0 };

void XSIMD(codelet_n1fv_13) (planner *p) {
     X(kdft_register) (p, n1fv_13, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 13 -name n1fv_13 -include dft/simd/n1f.h */

/*
 * This function contains 88 FP additions, 34 FP multiplications,
 * (or, 69 additions, 15 multiplications, 19 fused multiply/add),
 * 60 stack variables, 20 constants, and 26 memory accesses
 */
#include "dft/simd/n1f.h"

static void n1fv_13(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP2_000000000, +2.000000000000000000000000000000000000000000000);
     DVK(KP083333333, +0.083333333333333333333333333333333333333333333);
     DVK(KP075902986, +0.075902986037193865983102897245103540356428373);
     DVK(KP251768516, +0.251768516431883313623436926934233488546674281);
     DVK(KP132983124, +0.132983124607418643793760531921092974399165133);
     DVK(KP258260390, +0.258260390311744861420450644284508567852516811);
     DVK(KP1_732050807, +1.732050807568877293527446341505872366942805254);
     DVK(KP300238635, +0.300238635966332641462884626667381504676006424);
     DVK(KP011599105, +0.011599105605768290721655456654083252189827041);
     DVK(KP156891391, +0.156891391051584611046832726756003269660212636);
     DVK(KP256247671, +0.256247671582936600958684654061725059144125175);
     DVK(KP174138601, +0.174138601152135905005660794929264742616964676);
     DVK(KP575140729, +0.575140729474003121368385547455453388461001608);
     DVK(KP503537032, +0.503537032863766627246873853868466977093348562);
     DVK(KP113854479, +0.113854479055790798974654345867655310534642560);
     DVK(KP265966249, +0.265966249214837287587521063842185948798330267);
     DVK(KP387390585, +0.387390585467617292130675966426762851778775217);
     DVK(KP300462606, +0.300462606288665774426601772289207995520941381);
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(26, is), MAKE_VOLATILE_STRIDE(26, os)) {
               V TW, Tb, Tm, Tu, TC, TR, TX, TK, TU, Tz, TB, TN, TT;
               TW = LD(&(xi[0]), ivs, &(xi[0]));
               {
                    V T3, TH, Tl, Tw, Tp, Tg, Tv, To, T6, Tr, T9, Ts, Ta, TI, T1;
                    V T2, Tq, Tt;
                    T1 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                    T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                    T3 = VSUB(T1, T2);
                    TH = VADD(T1, T2);
                    {
                         V Th, Ti, Tj, Tk;
                         Th = LD(&(xi[WS(is, 12)]), ivs, &(xi[0]));
                         Ti = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         Tj = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         Tk = VADD(Ti, Tj);
                         Tl = VADD(Th, Tk);
                         Tw = VSUB(Ti, Tj);
                         Tp = VFNMS(LDK(KP500000000), Tk, Th);
                    }
                    {
                         V Tc, Td, Te, Tf;
                         Tc = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         Td = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         Te = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                         Tf = VADD(Td, Te);
                         Tg = VADD(Tc, Tf);
                         Tv = VSUB(Td, Te);
                         To = VFNMS(LDK(KP500000000), Tf, Tc);
                    }
                    {
                         V T4, T5, T7, T8;
                         T4 = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                         T5 = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                         T6 = VSUB(T4, T5);
                         Tr = VADD(T4, T5);
                         T7 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         T8 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         T9 = VSUB(T7, T8);
                         Ts = VADD(T7, T8);
                    }
                    Ta = VADD(T6, T9);
                    TI = VADD(Tr, Ts);
                    Tb = VADD(T3, Ta);
                    Tm = VSUB(Tg, Tl);
                    Tq = VSUB(To, Tp);
                    Tt = VMUL(LDK(KP866025403), VSUB(Tr, Ts));
                    Tu = VADD(Tq, Tt);
                    TC = VSUB(Tq, Tt);
                    {
                         V TP, TQ, TG, TJ;
                         TP = VADD(Tg, Tl);
                         TQ = VADD(TH, TI);
                         TR = VMUL(LDK(KP300462606), VSUB(TP, TQ));
                         TX = VADD(TP, TQ);
                         TG = VADD(To, Tp);
                         TJ = VFNMS(LDK(KP500000000), TI, TH);
                         TK = VSUB(TG, TJ);
                         TU = VADD(TG, TJ);
                    }
                    {
                         V Tx, Ty, TL, TM;
                         Tx = VMUL(LDK(KP866025403), VSUB(Tv, Tw));
                         Ty = VFNMS(LDK(KP500000000), Ta, T3);
                         Tz = VSUB(Tx, Ty);
                         TB = VADD(Tx, Ty);
                         TL = VADD(Tv, Tw);
                         TM = VSUB(T6, T9);
                         TN = VSUB(TL, TM);
                         TT = VADD(TL, TM);
                    }
               }
               ST(&(xo[0]), VADD(TW, TX), ovs, &(xo[0]));
               {
                    V T19, T1n, T14, T13, T1f, T1k, Tn, TE, T1e, T1j, TS, T1m, TZ, T1c, TA;
                    V TD;
                    {
                         V T17, T18, T11, T12;
                         T17 = VFMA(LDK(KP387390585), TN, VMUL(LDK(KP265966249), TK));
                         T18 = VFNMS(LDK(KP503537032), TU, VMUL(LDK(KP113854479), TT));
                         T19 = VSUB(T17, T18);
                         T1n = VADD(T17, T18);
                         T14 = VFMA(LDK(KP575140729), Tm, VMUL(LDK(KP174138601), Tb));
                         T11 = VFNMS(LDK(KP156891391), TB, VMUL(LDK(KP256247671), TC));
                         T12 = VFMA(LDK(KP011599105), Tz, VMUL(LDK(KP300238635), Tu));
                         T13 = VSUB(T11, T12);
                         T1f = VADD(T14, T13);
                         T1k = VMUL(LDK(KP1_732050807), VADD(T11, T12));
                    }
                    Tn = VFNMS(LDK(KP174138601), Tm, VMUL(LDK(KP575140729), Tb));
                    TA = VFNMS(LDK(KP300238635), Tz, VMUL(LDK(KP011599105), Tu));
                    TD = VFMA(LDK(KP256247671), TB, VMUL(LDK(KP156891391), TC));
                    TE = VSUB(TA, TD);
                    T1e = VMUL(LDK(KP1_732050807), VADD(TD, TA));
                    T1j = VSUB(Tn, TE);
                    {
                         V TO, T1b, TV, TY, T1a;
                         TO = VFNMS(LDK(KP132983124), TN, VMUL(LDK(KP258260390), TK));
                         T1b = VSUB(TR, TO);
                         TV = VFMA(LDK(KP251768516), TT, VMUL(LDK(KP075902986), TU));
                         TY = VFNMS(LDK(KP083333333), TX, TW);
                         T1a = VSUB(TY, TV);
                         TS = VFMA(LDK(KP2_000000000), TO, TR);
                         T1m = VADD(T1b, T1a);
                         TZ = VFMA(LDK(KP2_000000000), TV, TY);
                         T1c = VSUB(T1a, T1b);
                    }
                    {
                         V TF, T10, T1l, T1o;
                         TF = VBYI(VFMA(LDK(KP2_000000000), TE, Tn));
                         T10 = VADD(TS, TZ);
                         ST(&(xo[WS(os, 1)]), VADD(TF, T10), ovs, &(xo[WS(os, 1)]));
                         ST(&(xo[WS(os, 12)]), VSUB(T10, TF), ovs, &(xo[0]));
                         {
                              V T15, T16, T1p, T1q;
                              T15 = VBYI(VFMS(LDK(KP2_000000000), T13, T14));
                              T16 = VSUB(TZ, TS);
                              ST(&(xo[WS(os, 5)]), VADD(T15, T16), ovs, &(xo[WS(os, 1)]));
                              ST(&(xo[WS(os, 8)]), VSUB(T16, T15), ovs, &(xo[0]));
                              T1p = VADD(T1n, T1m);
                              T1q = VBYI(VADD(T1j, T1k));
                              ST(&(xo[WS(os, 4)]), VSUB(T1p, T1q), ovs, &(xo[0]));
                              ST(&(xo[WS(os, 9)]), VADD(T1q, T1p), ovs, &(xo[WS(os, 1)]));
                         }
                         T1l = VBYI(VSUB(T1j, T1k));
                         T1o = VSUB(T1m, T1n);
                         ST(&(xo[WS(os, 3)]), VADD(T1l, T1o), ovs, &(xo[WS(os, 1)]));
                         ST(&(xo[WS(os, 10)]), VSUB(T1o, T1l), ovs, &(xo[0]));
                         {
                              V T1h, T1i, T1d, T1g;
                              T1h = VBYI(VSUB(T1e, T1f));
                              T1i = VSUB(T1c, T19);
                              ST(&(xo[WS(os, 6)]), VADD(T1h, T1i), ovs, &(xo[0]));
                              ST(&(xo[WS(os, 7)]), VSUB(T1i, T1h), ovs, &(xo[WS(os, 1)]));
                              T1d = VADD(T19, T1c);
                              T1g = VBYI(VADD(T1e, T1f));
                              ST(&(xo[WS(os, 2)]), VSUB(T1d, T1g), ovs, &(xo[0]));
                              ST(&(xo[WS(os, 11)]), VADD(T1g, T1d), ovs, &(xo[WS(os, 1)]));
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 13, XSIMD_STRING("n1fv_13"), {69, 15, 19, 0}, &GENUS, 0, 0, 0, 0 };

void XSIMD(codelet_n1fv_13) (planner *p) {
     X(kdft_register) (p, n1fv_13, &desc);
}

#endif