/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:37:48 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 16 -name n2sv_16 -with-ostride 1 -include n2s.h -store-multiple 4 */

/*
 * This function contains 144 FP additions, 40 FP multiplications,
 * (or, 104 additions, 0 multiplications, 40 fused multiply/add),
 * 110 stack variables, 3 constants, and 72 memory accesses
 */
#include "n2s.h"

static void n2sv_16(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     DVK(KP414213562, +0.414213562373095048801688724209698078569671875);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(64, is), MAKE_VOLATILE_STRIDE(64, os)) {
               V T2p, T2q, T2r, T2s, T2x, T2y, T2z, T2A, T1M, T1N, T1L, T1P, T2F, T2G, T2H;
               V T2I, T1O, T1Q;
               {
                    V T1l, T1H, T1R, T7, T1x, TN, TC, T25, T1E, T1b, T1Z, Tt, T2h, T22, T1D;
                    V T1g, T1n, TQ, T11, Ti, Te, T26, T1m, TT, T1S, TJ, TZ, T1V, TW, Tl;
                    V T12, T13;
                    {
                         V Tq, T1c, Tp, T20, T1a, Tr, T1d, T1e;
                         {
                              V T1, T2, Tw, Tx, T4, T5, Tz, TA;
                              T1 = LD(&(ri[0]), ivs, &(ri[0]));
                              T2 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0]));
                              Tw = LD(&(ii[0]), ivs, &(ii[0]));
                              Tx = LD(&(ii[WS(is, 8)]), ivs, &(ii[0]));
                              T4 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                              T5 = LD(&(ri[WS(is, 12)]), ivs, &(ri[0]));
                              Tz = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                              TA = LD(&(ii[WS(is, 12)]), ivs, &(ii[0]));
                              {
                                   V Tn, TL, T3, T1k, Ty, T1j, T6, TM, TB, To, T18, T19;
                                   Tn = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)]));
                                   TL = VSUB(T1, T2);
                                   T3 = VADD(T1, T2);
                                   T1k = VSUB(Tw, Tx);
                                   Ty = VADD(Tw, Tx);
                                   T1j = VSUB(T4, T5);
                                   T6 = VADD(T4, T5);
                                   TM = VSUB(Tz, TA);
                                   TB = VADD(Tz, TA);
                                   To = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                                   T18 = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)]));
                                   T19 = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                                   Tq = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                                   T1l = VADD(T1j, T1k);
                                   T1H = VSUB(T1k, T1j);
                                   T1R = VSUB(T3, T6);
                                   T7 = VADD(T3, T6);
                                   T1x = VADD(TL, TM);
                                   TN = VSUB(TL, TM);
                                   TC = VADD(Ty, TB);
                                   T25 = VSUB(Ty, TB);
                                   T1c = VSUB(Tn, To);
                                   Tp = VADD(Tn, To);
                                   T20 = VADD(T18, T19);
                                   T1a = VSUB(T18, T19);
                                   Tr = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)]));
                                   T1d = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                                   T1e = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)]));
                              }
                         }
                         {
                              V Tb, Ta, TF, Tc, TG, TH, TP, TO;
                              {
                                   V T8, T9, TD, TE;
                                   T8 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                                   T9 = LD(&(ri[WS(is, 10)]), ivs, &(ri[0]));
                                   TD = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                                   TE = LD(&(ii[WS(is, 10)]), ivs, &(ii[0]));
                                   Tb = LD(&(ri[WS(is, 14)]), ivs, &(ri[0]));
                                   {
                                        V T17, Ts, T21, T1f;
                                        T17 = VSUB(Tq, Tr);
                                        Ts = VADD(Tq, Tr);
                                        T21 = VADD(T1d, T1e);
                                        T1f = VSUB(T1d, T1e);
                                        TP = VSUB(T8, T9);
                                        Ta = VADD(T8, T9);
                                        TO = VSUB(TD, TE);
                                        TF = VADD(TD, TE);
                                        T1E = VSUB(T1a, T17);
                                        T1b = VADD(T17, T1a);
                                        T1Z = VSUB(Tp, Ts);
                                        Tt = VADD(Tp, Ts);
                                        T2h = VADD(T20, T21);
                                        T22 = VSUB(T20, T21);
                                        T1D = VADD(T1c, T1f);
                                        T1g = VSUB(T1c, T1f);
                                        Tc = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                                   }
                                   TG = LD(&(ii[WS(is, 14)]), ivs, &(ii[0]));
                                   TH = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                              }
                              T1n = VADD(TP, TO);
                              TQ = VSUB(TO, TP);
                              {
                                   V Tg, Th, TX, TR, Td, TS, TI, TY, Tj, Tk;
                                   Tg = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                                   Th = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)]));
                                   TX = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                                   TR = VSUB(Tb, Tc);
                                   Td = VADD(Tb, Tc);
                                   TS = VSUB(TG, TH);
                                   TI = VADD(TG, TH);
                                   TY = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)]));
                                   Tj = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                                   T11 = VSUB(Tg, Th);
                                   Ti = VADD(Tg, Th);
                                   Tk = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)]));
                                   Te = VADD(Ta, Td);
                                   T26 = VSUB(Td, Ta);
                                   T1m = VSUB(TR, TS);
                                   TT = VADD(TR, TS);
                                   T1S = VSUB(TF, TI);
                                   TJ = VADD(TF, TI);
                                   TZ = VSUB(TX, TY);
                                   T1V = VADD(TX, TY);
                                   TW = VSUB(Tj, Tk);
                                   Tl = VADD(Tj, Tk);
                                   T12 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                                   T13 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)]));
                              }
                         }
                    }
                    {
                         V T2f, Tf, T2j, TK, Tm, T1U, T10, T1B, T14, T1W;
                         T2f = VSUB(T7, Te);
                         Tf = VADD(T7, Te);
                         T2j = VADD(TC, TJ);
                         TK = VSUB(TC, TJ);
                         Tm = VADD(Ti, Tl);
                         T1U = VSUB(Ti, Tl);
                         T10 = VADD(TW, TZ);
                         T1B = VSUB(TZ, TW);
                         T14 = VSUB(T12, T13);
                         T1W = VADD(T12, T13);
                         {
                              V T29, T1T, T27, T2d, T2b, T23, T15, T1A, T2l, T2m, T2n, T2o, T2i, T2k, T1Y;
                              V T2a;
                              {
                                   V Tv, Tu, T1X, T2g;
                                   T29 = VSUB(T1R, T1S);
                                   T1T = VADD(T1R, T1S);
                                   T27 = VSUB(T25, T26);
                                   T2d = VADD(T26, T25);
                                   T2b = VADD(T1Z, T22);
                                   T23 = VSUB(T1Z, T22);
                                   Tv = VSUB(Tt, Tm);
                                   Tu = VADD(Tm, Tt);
                                   T1X = VSUB(T1V, T1W);
                                   T2g = VADD(T1V, T1W);
                                   T15 = VSUB(T11, T14);
                                   T1A = VADD(T11, T14);
                                   T2l = VSUB(TK, Tv);
                                   STM4(&(io[12]), T2l, ovs, &(io[0]));
                                   T2m = VADD(Tv, TK);
                                   STM4(&(io[4]), T2m, ovs, &(io[0]));
                                   T2n = VADD(Tf, Tu);
                                   STM4(&(ro[0]), T2n, ovs, &(ro[0]));
                                   T2o = VSUB(Tf, Tu);
                                   STM4(&(ro[8]), T2o, ovs, &(ro[0]));
                                   T2i = VSUB(T2g, T2h);
                                   T2k = VADD(T2g, T2h);
                                   T1Y = VADD(T1U, T1X);
                                   T2a = VSUB(T1X, T1U);
                              }
                              {
                                   V T1I, T1y, T1t, T16, T1v, TV, T1r, T1p, T2t, T2u, T2v, T2w, T1h, T1s, TU;
                                   V T1o;
                                   T1I = VADD(TQ, TT);
                                   TU = VSUB(TQ, TT);
                                   T1o = VSUB(T1m, T1n);
                                   T1y = VADD(T1n, T1m);
                                   T1t = VFNMS(LDK(KP414213562), T10, T15);
                                   T16 = VFMA(LDK(KP414213562), T15, T10);
                                   T2p = VADD(T2f, T2i);
                                   STM4(&(ro[4]), T2p, ovs, &(ro[0]));
                                   T2q = VSUB(T2f, T2i);
                                   STM4(&(ro[12]), T2q, ovs, &(ro[0]));
                                   T2r = VADD(T2j, T2k);
                                   STM4(&(io[0]), T2r, ovs, &(io[0]));
                                   T2s = VSUB(T2j, T2k);
                                   STM4(&(io[8]), T2s, ovs, &(io[0]));
                                   {
                                        V T28, T24, T2e, T2c;
                                        T28 = VSUB(T23, T1Y);
                                        T24 = VADD(T1Y, T23);
                                        T2e = VADD(T2a, T2b);
                                        T2c = VSUB(T2a, T2b);
                                        T1v = VFNMS(LDK(KP707106781), TU, TN);
                                        TV = VFMA(LDK(KP707106781), TU, TN);
                                        T1r = VFMA(LDK(KP707106781), T1o, T1l);
                                        T1p = VFNMS(LDK(KP707106781), T1o, T1l);
                                        T2t = VFNMS(LDK(KP707106781), T28, T27);
                                        STM4(&(io[14]), T2t, ovs, &(io[0]));
                                        T2u = VFMA(LDK(KP707106781), T28, T27);
                                        STM4(&(io[6]), T2u, ovs, &(io[0]));
                                        T2v = VFMA(LDK(KP707106781), T24, T1T);
                                        STM4(&(ro[2]), T2v, ovs, &(ro[0]));
                                        T2w = VFNMS(LDK(KP707106781), T24, T1T);
                                        STM4(&(ro[10]), T2w, ovs, &(ro[0]));
                                        T2x = VFNMS(LDK(KP707106781), T2e, T2d);
                                        STM4(&(io[10]), T2x, ovs, &(io[0]));
                                        T2y = VFMA(LDK(KP707106781), T2e, T2d);
                                        STM4(&(io[2]), T2y, ovs, &(io[0]));
                                        T2z = VFMA(LDK(KP707106781), T2c, T29);
                                        STM4(&(ro[6]), T2z, ovs, &(ro[0]));
                                        T2A = VFNMS(LDK(KP707106781), T2c, T29);
                                        STM4(&(ro[14]), T2A, ovs, &(ro[0]));
                                        T1h = VFNMS(LDK(KP414213562), T1g, T1b);
                                        T1s = VFMA(LDK(KP414213562), T1b, T1g);
                                   }
                                   {
                                        V T1z, T1J, T1K, T1G, T2B, T2C, T2D, T2E, T1C, T1F;
                                        T1M = VFNMS(LDK(KP414213562), T1A, T1B);
                                        T1C = VFMA(LDK(KP414213562), T1B, T1A);
                                        T1F = VFNMS(LDK(KP414213562), T1E, T1D);
                                        T1N = VFMA(LDK(KP414213562), T1D, T1E);
                                        {
                                             V T1q, T1i, T1w, T1u;
                                             T1q = VADD(T16, T1h);
                                             T1i = VSUB(T16, T1h);
                                             T1w = VADD(T1t, T1s);
                                             T1u = VSUB(T1s, T1t);
                                             T1L = VFNMS(LDK(KP707106781), T1y, T1x);
                                             T1z = VFMA(LDK(KP707106781), T1y, T1x);
                                             T1P = VFMA(LDK(KP707106781), T1I, T1H);
                                             T1J = VFNMS(LDK(KP707106781), T1I, T1H);
                                             T1K = VSUB(T1F, T1C);
                                             T1G = VADD(T1C, T1F);
                                             T2B = VFMA(LDK(KP923879532), T1q, T1p);
                                             STM4(&(io[15]), T2B, ovs, &(io[1]));
                                             T2C = VFNMS(LDK(KP923879532), T1q, T1p);
                                             STM4(&(io[7]), T2C, ovs, &(io[1]));
                                             T2D = VFMA(LDK(KP923879532), T1i, TV);
                                             STM4(&(ro[3]), T2D, ovs, &(ro[1]));
                                             T2E = VFNMS(LDK(KP923879532), T1i, TV);
                                             STM4(&(ro[11]), T2E, ovs, &(ro[1]));
                                             T2F = VFMA(LDK(KP923879532), T1w, T1v);
                                             STM4(&(ro[15]), T2F, ovs, &(ro[1]));
                                             T2G = VFNMS(LDK(KP923879532), T1w, T1v);
                                             STM4(&(ro[7]), T2G, ovs, &(ro[1]));
                                             T2H = VFMA(LDK(KP923879532), T1u, T1r);
                                             STM4(&(io[3]), T2H, ovs, &(io[1]));
                                             T2I = VFNMS(LDK(KP923879532), T1u, T1r);
                                             STM4(&(io[11]), T2I, ovs, &(io[1]));
                                        }
                                        {
                                             V T2J, T2K, T2L, T2M;
                                             T2J = VFNMS(LDK(KP923879532), T1G, T1z);
                                             STM4(&(ro[9]), T2J, ovs, &(ro[1]));
                                             STN4(&(ro[8]), T2o, T2J, T2w, T2E, ovs);
                                             T2K = VFMA(LDK(KP923879532), T1G, T1z);
                                             STM4(&(ro[1]), T2K, ovs, &(ro[1]));
                                             STN4(&(ro[0]), T2n, T2K, T2v, T2D, ovs);
                                             T2L = VFNMS(LDK(KP923879532), T1K, T1J);
                                             STM4(&(io[13]), T2L, ovs, &(io[1]));
                                             STN4(&(io[12]), T2l, T2L, T2t, T2B, ovs);
                                             T2M = VFMA(LDK(KP923879532), T1K, T1J);
                                             STM4(&(io[5]), T2M, ovs, &(io[1]));
                                             STN4(&(io[4]), T2m, T2M, T2u, T2C, ovs);
                                        }
                                   }
                              }
                         }
                    }
               }
               T1O = VSUB(T1M, T1N);
               T1Q = VADD(T1M, T1N);
               {
                    V T2N, T2O, T2P, T2Q;
                    T2N = VFMA(LDK(KP923879532), T1Q, T1P);
                    STM4(&(io[1]), T2N, ovs, &(io[1]));
                    STN4(&(io[0]), T2r, T2N, T2y, T2H, ovs);
                    T2O = VFNMS(LDK(KP923879532), T1Q, T1P);
                    STM4(&(io[9]), T2O, ovs, &(io[1]));
                    STN4(&(io[8]), T2s, T2O, T2x, T2I, ovs);
                    T2P = VFMA(LDK(KP923879532), T1O, T1L);
                    STM4(&(ro[5]), T2P, ovs, &(ro[1]));
                    STN4(&(ro[4]), T2p, T2P, T2z, T2G, ovs);
                    T2Q = VFNMS(LDK(KP923879532), T1O, T1L);
                    STM4(&(ro[13]), T2Q, ovs, &(ro[1]));
                    STN4(&(ro[12]), T2q, T2Q, T2A, T2F, ovs);
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 16, XSIMD_STRING("n2sv_16"), {104, 0, 40, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_16) (planner *p) {
     X(kdft_register) (p, n2sv_16, &desc);
}

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 16 -name n2sv_16 -with-ostride 1 -include n2s.h -store-multiple 4 */

/*
 * This function contains 144 FP additions, 24 FP multiplications,
 * (or, 136 additions, 16 multiplications, 8 fused multiply/add),
 * 74 stack variables, 3 constants, and 72 memory accesses
 */
#include "n2s.h"

static void n2sv_16(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP382683432, +0.382683432365089771728459984030398866761344562);
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(64, is), MAKE_VOLATILE_STRIDE(64, os)) {
               V T7, T1R, T25, TC, TN, T1x, T1H, T1l, Tt, T22, T2h, T1b, T1g, T1E, T1Z;
               V T1D, Te, T1S, T26, TJ, TQ, T1m, T1n, TT, Tm, T1X, T2g, T10, T15, T1B;
               V T1U, T1A;
               {
                    V T3, TL, Ty, T1k, T6, T1j, TB, TM;
                    {
                         V T1, T2, Tw, Tx;
                         T1 = LD(&(ri[0]), ivs, &(ri[0]));
                         T2 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0]));
                         T3 = VADD(T1, T2);
                         TL = VSUB(T1, T2);
                         Tw = LD(&(ii[0]), ivs, &(ii[0]));
                         Tx = LD(&(ii[WS(is, 8)]), ivs, &(ii[0]));
                         Ty = VADD(Tw, Tx);
                         T1k = VSUB(Tw, Tx);
                    }
                    {
                         V T4, T5, Tz, TA;
                         T4 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                         T5 = LD(&(ri[WS(is, 12)]), ivs, &(ri[0]));
                         T6 = VADD(T4, T5);
                         T1j = VSUB(T4, T5);
                         Tz = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                         TA = LD(&(ii[WS(is, 12)]), ivs, &(ii[0]));
                         TB = VADD(Tz, TA);
                         TM = VSUB(Tz, TA);
                    }
                    T7 = VADD(T3, T6);
                    T1R = VSUB(T3, T6);
                    T25 = VSUB(Ty, TB);
                    TC = VADD(Ty, TB);
                    TN = VSUB(TL, TM);
                    T1x = VADD(TL, TM);
                    T1H = VSUB(T1k, T1j);
                    T1l = VADD(T1j, T1k);
               }
               {
                    V Tp, T17, T1f, T20, Ts, T1c, T1a, T21;
                    {
                         V Tn, To, T1d, T1e;
                         Tn = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)]));
                         To = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                         Tp = VADD(Tn, To);
                         T17 = VSUB(Tn, To);
                         T1d = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)]));
                         T1e = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                         T1f = VSUB(T1d, T1e);
                         T20 = VADD(T1d, T1e);
                    }
                    {
                         V Tq, Tr, T18, T19;
                         Tq = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                         Tr = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)]));
                         Ts = VADD(Tq, Tr);
                         T1c = VSUB(Tq, Tr);
                         T18 = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                         T19 = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)]));
                         T1a = VSUB(T18, T19);
                         T21 = VADD(T18, T19);
                    }
                    Tt = VADD(Tp, Ts);
                    T22 = VSUB(T20, T21);
                    T2h = VADD(T20, T21);
                    T1b = VSUB(T17, T1a);
                    T1g = VADD(T1c, T1f);
                    T1E = VSUB(T1f, T1c);
                    T1Z = VSUB(Tp, Ts);
                    T1D = VADD(T17, T1a);
               }
               {
                    V Ta, TP, TF, TO, Td, TR, TI, TS;
                    {
                         V T8, T9, TD, TE;
                         T8 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                         T9 = LD(&(ri[WS(is, 10)]), ivs, &(ri[0]));
                         Ta = VADD(T8, T9);
                         TP = VSUB(T8, T9);
                         TD = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                         TE = LD(&(ii[WS(is, 10)]), ivs, &(ii[0]));
                         TF = VADD(TD, TE);
                         TO = VSUB(TD, TE);
                    }
                    {
                         V Tb, Tc, TG, TH;
                         Tb = LD(&(ri[WS(is, 14)]), ivs, &(ri[0]));
                         Tc = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                         Td = VADD(Tb, Tc);
                         TR = VSUB(Tb, Tc);
                         TG = LD(&(ii[WS(is, 14)]), ivs, &(ii[0]));
                         TH = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                         TI = VADD(TG, TH);
                         TS = VSUB(TG, TH);
                    }
                    Te = VADD(Ta, Td);
                    T1S = VSUB(TF, TI);
                    T26 = VSUB(Td, Ta);
                    TJ = VADD(TF, TI);
                    TQ = VSUB(TO, TP);
                    T1m = VSUB(TR, TS);
                    T1n = VADD(TP, TO);
                    TT = VADD(TR, TS);
               }
               {
                    V Ti, T11, TZ, T1V, Tl, TW, T14, T1W;
                    {
                         V Tg, Th, TX, TY;
                         Tg = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                         Th = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)]));
                         Ti = VADD(Tg, Th);
                         T11 = VSUB(Tg, Th);
                         TX = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                         TY = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)]));
                         TZ = VSUB(TX, TY);
                         T1V = VADD(TX, TY);
                    }
                    {
                         V Tj, Tk, T12, T13;
                         Tj = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                         Tk = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)]));
                         Tl = VADD(Tj, Tk);
                         TW = VSUB(Tj, Tk);
                         T12 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                         T13 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)]));
                         T14 = VSUB(T12, T13);
                         T1W = VADD(T12, T13);
                    }
                    Tm = VADD(Ti, Tl);
                    T1X = VSUB(T1V, T1W);
                    T2g = VADD(T1V, T1W);
                    T10 = VADD(TW, TZ);
                    T15 = VSUB(T11, T14);
                    T1B = VADD(T11, T14);
                    T1U = VSUB(Ti, Tl);
                    T1A = VSUB(TZ, TW);
               }
               {
                    V T2l, T2m, T2n, T2o, T2p, T2q, T2r, T2s;
                    {
                         V Tf, Tu, T2j, T2k;
                         Tf = VADD(T7, Te);
                         Tu = VADD(Tm, Tt);
                         T2l = VSUB(Tf, Tu);
                         STM4(&(ro[8]), T2l, ovs, &(ro[0]));
                         T2m = VADD(Tf, Tu);
                         STM4(&(ro[0]), T2m, ovs, &(ro[0]));
                         T2j = VADD(TC, TJ);
                         T2k = VADD(T2g, T2h);
                         T2n = VSUB(T2j, T2k);
                         STM4(&(io[8]), T2n, ovs, &(io[0]));
                         T2o = VADD(T2j, T2k);
                         STM4(&(io[0]), T2o, ovs, &(io[0]));
                    }
                    {
                         V Tv, TK, T2f, T2i;
                         Tv = VSUB(Tt, Tm);
                         TK = VSUB(TC, TJ);
                         T2p = VADD(Tv, TK);
                         STM4(&(io[4]), T2p, ovs, &(io[0]));
                         T2q = VSUB(TK, Tv);
                         STM4(&(io[12]), T2q, ovs, &(io[0]));
                         T2f = VSUB(T7, Te);
                         T2i = VSUB(T2g, T2h);
                         T2r = VSUB(T2f, T2i);
                         STM4(&(ro[12]), T2r, ovs, &(ro[0]));
                         T2s = VADD(T2f, T2i);
                         STM4(&(ro[4]), T2s, ovs, &(ro[0]));
                    }
                    {
                         V T2t, T2u, T2v, T2w, T2x, T2y, T2z, T2A;
                         {
                              V T1T, T27, T24, T28, T1Y, T23;
                              T1T = VADD(T1R, T1S);
                              T27 = VSUB(T25, T26);
                              T1Y = VADD(T1U, T1X);
                              T23 = VSUB(T1Z, T22);
                              T24 = VMUL(LDK(KP707106781), VADD(T1Y, T23));
                              T28 = VMUL(LDK(KP707106781), VSUB(T23, T1Y));
                              T2t = VSUB(T1T, T24);
                              STM4(&(ro[10]), T2t, ovs, &(ro[0]));
                              T2u = VADD(T27, T28);
                              STM4(&(io[6]), T2u, ovs, &(io[0]));
                              T2v = VADD(T1T, T24);
                              STM4(&(ro[2]), T2v, ovs, &(ro[0]));
                              T2w = VSUB(T27, T28);
                              STM4(&(io[14]), T2w, ovs, &(io[0]));
                         }
                         {
                              V T29, T2d, T2c, T2e, T2a, T2b;
                              T29 = VSUB(T1R, T1S);
                              T2d = VADD(T26, T25);
                              T2a = VSUB(T1X, T1U);
                              T2b = VADD(T1Z, T22);
                              T2c = VMUL(LDK(KP707106781), VSUB(T2a, T2b));
                              T2e = VMUL(LDK(KP707106781), VADD(T2a, T2b));
                              T2x = VSUB(T29, T2c);
                              STM4(&(ro[14]), T2x, ovs, &(ro[0]));
                              T2y = VADD(T2d, T2e);
                              STM4(&(io[2]), T2y, ovs, &(io[0]));
                              T2z = VADD(T29, T2c);
                              STM4(&(ro[6]), T2z, ovs, &(ro[0]));
                              T2A = VSUB(T2d, T2e);
                              STM4(&(io[10]), T2A, ovs, &(io[0]));
                         }
                         {
                              V T2B, T2C, T2D, T2E, T2F, T2G, T2H, T2I;
                              {
                                   V TV, T1r, T1p, T1v, T1i, T1q, T1u, T1w, TU, T1o;
                                   TU = VMUL(LDK(KP707106781), VSUB(TQ, TT));
                                   TV = VADD(TN, TU);
                                   T1r = VSUB(TN, TU);
                                   T1o = VMUL(LDK(KP707106781), VSUB(T1m, T1n));
                                   T1p = VSUB(T1l, T1o);
                                   T1v = VADD(T1l, T1o);
                                   {
                                        V T16, T1h, T1s, T1t;
                                        T16 = VFMA(LDK(KP923879532), T10, VMUL(LDK(KP382683432), T15));
                                        T1h = VFNMS(LDK(KP923879532), T1g, VMUL(LDK(KP382683432), T1b));
                                        T1i = VADD(T16, T1h);
                                        T1q = VSUB(T1h, T16);
                                        T1s = VFNMS(LDK(KP923879532), T15, VMUL(LDK(KP382683432), T10));
                                        T1t = VFMA(LDK(KP382683432), T1g, VMUL(LDK(KP923879532), T1b));
                                        T1u = VSUB(T1s, T1t);
                                        T1w = VADD(T1s, T1t);
                                   }
                                   T2B = VSUB(TV, T1i);
                                   STM4(&(ro[11]), T2B, ovs, &(ro[1]));
                                   T2C = VSUB(T1v, T1w);
                                   STM4(&(io[11]), T2C, ovs, &(io[1]));
                                   T2D = VADD(TV, T1i);
                                   STM4(&(ro[3]), T2D, ovs, &(ro[1]));
                                   T2E = VADD(T1v, T1w);
                                   STM4(&(io[3]), T2E, ovs, &(io[1]));
                                   T2F = VSUB(T1p, T1q);
                                   STM4(&(io[15]), T2F, ovs, &(io[1]));
                                   T2G = VSUB(T1r, T1u);
                                   STM4(&(ro[15]), T2G, ovs, &(ro[1]));
                                   T2H = VADD(T1p, T1q);
                                   STM4(&(io[7]), T2H, ovs, &(io[1]));
                                   T2I = VADD(T1r, T1u);
                                   STM4(&(ro[7]), T2I, ovs, &(ro[1]));
                              }
                              {
                                   V T1z, T1L, T1J, T1P, T1G, T1K, T1O, T1Q, T1y, T1I;
                                   T1y = VMUL(LDK(KP707106781), VADD(T1n, T1m));
                                   T1z = VADD(T1x, T1y);
                                   T1L = VSUB(T1x, T1y);
                                   T1I = VMUL(LDK(KP707106781), VADD(TQ, TT));
                                   T1J = VSUB(T1H, T1I);
                                   T1P = VADD(T1H, T1I);
                                   {
                                        V T1C, T1F, T1M, T1N;
                                        T1C = VFMA(LDK(KP382683432), T1A, VMUL(LDK(KP923879532), T1B));
                                        T1F = VFNMS(LDK(KP382683432), T1E, VMUL(LDK(KP923879532), T1D));
                                        T1G = VADD(T1C, T1F);
                                        T1K = VSUB(T1F, T1C);
                                        T1M = VFNMS(LDK(KP382683432), T1B, VMUL(LDK(KP923879532), T1A));
                                        T1N = VFMA(LDK(KP923879532), T1E, VMUL(LDK(KP382683432), T1D));
                                        T1O = VSUB(T1M, T1N);
                                        T1Q = VADD(T1M, T1N);
                                   }
                                   {
                                        V T2J, T2K, T2L, T2M;
                                        T2J = VSUB(T1z, T1G);
                                        STM4(&(ro[9]), T2J, ovs, &(ro[1]));
                                        STN4(&(ro[8]), T2l, T2J, T2t, T2B, ovs);
                                        T2K = VSUB(T1P, T1Q);
                                        STM4(&(io[9]), T2K, ovs, &(io[1]));
                                        STN4(&(io[8]), T2n, T2K, T2A, T2C, ovs);
                                        T2L = VADD(T1z, T1G);
                                        STM4(&(ro[1]), T2L, ovs, &(ro[1]));
                                        STN4(&(ro[0]), T2m, T2L, T2v, T2D, ovs);
                                        T2M = VADD(T1P, T1Q);
                                        STM4(&(io[1]), T2M, ovs, &(io[1]));
                                        STN4(&(io[0]), T2o, T2M, T2y, T2E, ovs);
                                   }
                                   {
                                        V T2N, T2O, T2P, T2Q;
                                        T2N = VSUB(T1J, T1K);
                                        STM4(&(io[13]), T2N, ovs, &(io[1]));
                                        STN4(&(io[12]), T2q, T2N, T2w, T2F, ovs);
                                        T2O = VSUB(T1L, T1O);
                                        STM4(&(ro[13]), T2O, ovs, &(ro[1]));
                                        STN4(&(ro[12]), T2r, T2O, T2x, T2G, ovs);
                                        T2P = VADD(T1J, T1K);
                                        STM4(&(io[5]), T2P, ovs, &(io[1]));
                                        STN4(&(io[4]), T2p, T2P, T2u, T2H, ovs);
                                        T2Q = VADD(T1L, T1O);
                                        STM4(&(ro[5]), T2Q, ovs, &(ro[1]));
                                        STN4(&(ro[4]), T2s, T2Q, T2z, T2I, ovs);
                                   }
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 16, XSIMD_STRING("n2sv_16"), {136, 16, 8, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_16) (planner *p) {
     X(kdft_register) (p, n2sv_16, &desc);
}

#endif /* HAVE_FMA */