/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu May 24 08:05:19 EDT 2018 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name n2sv_8 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */

/*
 * This function contains 52 FP additions, 8 FP multiplications,
 * (or, 44 additions, 0 multiplications, 8 fused multiply/add),
 * 34 stack variables, 1 constants, and 36 memory accesses
 */
#include "dft/simd/n2s.h"
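
/*
 * Overview (editor's note, not part of the genfft output): this codelet
 * computes size-8 DFTs on split-format data.  ri/ii point to the real
 * and imaginary input arrays and ro/io to the outputs; is/os are the
 * strides between successive samples of one transform, while v counts
 * the independent transforms, which the loop consumes 2 * VL at a time
 * via the vector strides ivs/ovs.  For n = 8 every twiddle factor
 * exp(-2*pi*i*k/8) lies in {+-1, +-i, (+-1 +- i)/sqrt(2)}, so the only
 * arithmetic constant the kernel needs is KP707106781 = 1/sqrt(2).
 * The STM4/STN4 pairs come from -store-multiple 4: output vectors are
 * staged and committed four at a time at unit output stride
 * (-with-ostride 1).
 */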

static void n2sv_8(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(32, is), MAKE_VOLATILE_STRIDE(32, os)) {
               V T3, Tn, Ti, TC, T6, TB, Tl, To, Td, TN, Tz, TH, Ta, TM, Tu;
               V TG;
               {
                    V T1, T2, Tj, Tk;
                    T1 = LD(&(ri[0]), ivs, &(ri[0]));
                    T2 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                    T3 = VADD(T1, T2);
                    Tn = VSUB(T1, T2);
                    {
                         V Tg, Th, T4, T5;
                         Tg = LD(&(ii[0]), ivs, &(ii[0]));
                         Th = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                         Ti = VADD(Tg, Th);
                         TC = VSUB(Tg, Th);
                         T4 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                         T5 = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                         T6 = VADD(T4, T5);
                         TB = VSUB(T4, T5);
                    }
                    Tj = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                    Tk = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                    Tl = VADD(Tj, Tk);
                    To = VSUB(Tj, Tk);
                    {
                         V Tb, Tc, Tv, Tw, Tx, Ty;
                         Tb = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                         Tc = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                         Tv = VSUB(Tb, Tc);
                         Tw = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                         Tx = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                         Ty = VSUB(Tw, Tx);
                         Td = VADD(Tb, Tc);
                         TN = VADD(Tw, Tx);
                         Tz = VSUB(Tv, Ty);
                         TH = VADD(Tv, Ty);
                    }
                    {
                         V T8, T9, Tq, Tr, Ts, Tt;
                         T8 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                         T9 = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                         Tq = VSUB(T8, T9);
                         Tr = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                         Ts = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                         Tt = VSUB(Tr, Ts);
                         Ta = VADD(T8, T9);
                         TM = VADD(Tr, Ts);
                         Tu = VADD(Tq, Tt);
                         TG = VSUB(Tt, Tq);
                    }
               }
               {
                    V TR, TS, TT, TU, TV, TW, TX, TY;
                    {
                         V T7, Te, TP, TQ;
                         T7 = VADD(T3, T6);
                         Te = VADD(Ta, Td);
                         TR = VSUB(T7, Te);
                         STM4(&(ro[4]), TR, ovs, &(ro[0]));
                         TS = VADD(T7, Te);
                         STM4(&(ro[0]), TS, ovs, &(ro[0]));
                         TP = VADD(Ti, Tl);
                         TQ = VADD(TM, TN);
                         TT = VSUB(TP, TQ);
                         STM4(&(io[4]), TT, ovs, &(io[0]));
                         TU = VADD(TP, TQ);
                         STM4(&(io[0]), TU, ovs, &(io[0]));
                    }
                    {
                         V Tf, Tm, TL, TO;
                         Tf = VSUB(Td, Ta);
                         Tm = VSUB(Ti, Tl);
                         TV = VADD(Tf, Tm);
                         STM4(&(io[2]), TV, ovs, &(io[0]));
                         TW = VSUB(Tm, Tf);
                         STM4(&(io[6]), TW, ovs, &(io[0]));
                         TL = VSUB(T3, T6);
                         TO = VSUB(TM, TN);
                         TX = VSUB(TL, TO);
                         STM4(&(ro[6]), TX, ovs, &(ro[0]));
                         TY = VADD(TL, TO);
                         STM4(&(ro[2]), TY, ovs, &(ro[0]));
                    }
                    {
                         V TZ, T10, T11, T12;
                         {
                              V Tp, TA, TJ, TK;
                              Tp = VADD(Tn, To);
                              TA = VADD(Tu, Tz);
                              TZ = VFNMS(LDK(KP707106781), TA, Tp);
                              STM4(&(ro[5]), TZ, ovs, &(ro[1]));
                              T10 = VFMA(LDK(KP707106781), TA, Tp);
                              STM4(&(ro[1]), T10, ovs, &(ro[1]));
                              TJ = VSUB(TC, TB);
                              TK = VADD(TG, TH);
                              T11 = VFNMS(LDK(KP707106781), TK, TJ);
                              STM4(&(io[5]), T11, ovs, &(io[1]));
                              T12 = VFMA(LDK(KP707106781), TK, TJ);
                              STM4(&(io[1]), T12, ovs, &(io[1]));
                         }
                         {
                              V TD, TE, T13, T14;
                              TD = VADD(TB, TC);
                              TE = VSUB(Tz, Tu);
                              T13 = VFNMS(LDK(KP707106781), TE, TD);
                              STM4(&(io[7]), T13, ovs, &(io[1]));
                              STN4(&(io[4]), TT, T11, TW, T13, ovs);
                              T14 = VFMA(LDK(KP707106781), TE, TD);
                              STM4(&(io[3]), T14, ovs, &(io[1]));
                              STN4(&(io[0]), TU, T12, TV, T14, ovs);
                         }
                         {
                              V TF, TI, T15, T16;
                              TF = VSUB(Tn, To);
                              TI = VSUB(TG, TH);
                              T15 = VFNMS(LDK(KP707106781), TI, TF);
                              STM4(&(ro[7]), T15, ovs, &(ro[1]));
                              STN4(&(ro[4]), TR, TZ, TX, T15, ovs);
                              T16 = VFMA(LDK(KP707106781), TI, TF);
                              STM4(&(ro[3]), T16, ovs, &(ro[1]));
                              STN4(&(ro[0]), TS, T10, TY, T16, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 8, XSIMD_STRING("n2sv_8"), {44, 0, 8, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_8) (planner *p) {
     X(kdft_register) (p, n2sv_8, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name n2sv_8 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */

/*
 * This function contains 52 FP additions, 4 FP multiplications,
 * (or, 52 additions, 4 multiplications, 0 fused multiply/add),
 * 34 stack variables, 1 constants, and 36 memory accesses
 */
#include "dft/simd/n2s.h"
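
/*
 * This fallback variant has the same dataflow as the FMA variant above,
 * but spells the scaled butterflies out as explicit multiplications:
 * each of the four VMULs by KP707106781 (producing TA, TK, TE, TI)
 * feeds one VADD and one VSUB, whereas the FMA variant fuses those
 * pairs into eight VFMA/VFNMS operations; hence the 52 additions and
 * 4 multiplications counted here versus 44 additions and 8 fused
 * multiply/adds above.
 */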

static void n2sv_8(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(32, is), MAKE_VOLATILE_STRIDE(32, os)) {
               V T3, Tn, Ti, TC, T6, TB, Tl, To, Td, TN, Tz, TH, Ta, TM, Tu;
               V TG;
               {
                    V T1, T2, Tj, Tk;
                    T1 = LD(&(ri[0]), ivs, &(ri[0]));
                    T2 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                    T3 = VADD(T1, T2);
                    Tn = VSUB(T1, T2);
                    {
                         V Tg, Th, T4, T5;
                         Tg = LD(&(ii[0]), ivs, &(ii[0]));
                         Th = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                         Ti = VADD(Tg, Th);
                         TC = VSUB(Tg, Th);
                         T4 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                         T5 = LD(&(ri[WS(is, 6)]), ivs, &(ri[0]));
                         T6 = VADD(T4, T5);
                         TB = VSUB(T4, T5);
                    }
                    Tj = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                    Tk = LD(&(ii[WS(is, 6)]), ivs, &(ii[0]));
                    Tl = VADD(Tj, Tk);
                    To = VSUB(Tj, Tk);
                    {
                         V Tb, Tc, Tv, Tw, Tx, Ty;
                         Tb = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)]));
                         Tc = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                         Tv = VSUB(Tb, Tc);
                         Tw = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)]));
                         Tx = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                         Ty = VSUB(Tw, Tx);
                         Td = VADD(Tb, Tc);
                         TN = VADD(Tw, Tx);
                         Tz = VSUB(Tv, Ty);
                         TH = VADD(Tv, Ty);
                    }
                    {
                         V T8, T9, Tq, Tr, Ts, Tt;
                         T8 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                         T9 = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)]));
                         Tq = VSUB(T8, T9);
                         Tr = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                         Ts = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)]));
                         Tt = VSUB(Tr, Ts);
                         Ta = VADD(T8, T9);
                         TM = VADD(Tr, Ts);
                         Tu = VADD(Tq, Tt);
                         TG = VSUB(Tt, Tq);
                    }
               }
               {
                    V TR, TS, TT, TU, TV, TW, TX, TY;
                    {
                         V T7, Te, TP, TQ;
                         T7 = VADD(T3, T6);
                         Te = VADD(Ta, Td);
                         TR = VSUB(T7, Te);
                         STM4(&(ro[4]), TR, ovs, &(ro[0]));
                         TS = VADD(T7, Te);
                         STM4(&(ro[0]), TS, ovs, &(ro[0]));
                         TP = VADD(Ti, Tl);
                         TQ = VADD(TM, TN);
                         TT = VSUB(TP, TQ);
                         STM4(&(io[4]), TT, ovs, &(io[0]));
                         TU = VADD(TP, TQ);
                         STM4(&(io[0]), TU, ovs, &(io[0]));
                    }
                    {
                         V Tf, Tm, TL, TO;
                         Tf = VSUB(Td, Ta);
                         Tm = VSUB(Ti, Tl);
                         TV = VADD(Tf, Tm);
                         STM4(&(io[2]), TV, ovs, &(io[0]));
                         TW = VSUB(Tm, Tf);
                         STM4(&(io[6]), TW, ovs, &(io[0]));
                         TL = VSUB(T3, T6);
                         TO = VSUB(TM, TN);
                         TX = VSUB(TL, TO);
                         STM4(&(ro[6]), TX, ovs, &(ro[0]));
                         TY = VADD(TL, TO);
                         STM4(&(ro[2]), TY, ovs, &(ro[0]));
                    }
                    {
                         V TZ, T10, T11, T12;
                         {
                              V Tp, TA, TJ, TK;
                              Tp = VADD(Tn, To);
                              TA = VMUL(LDK(KP707106781), VADD(Tu, Tz));
                              TZ = VSUB(Tp, TA);
                              STM4(&(ro[5]), TZ, ovs, &(ro[1]));
                              T10 = VADD(Tp, TA);
                              STM4(&(ro[1]), T10, ovs, &(ro[1]));
                              TJ = VSUB(TC, TB);
                              TK = VMUL(LDK(KP707106781), VADD(TG, TH));
                              T11 = VSUB(TJ, TK);
                              STM4(&(io[5]), T11, ovs, &(io[1]));
                              T12 = VADD(TJ, TK);
                              STM4(&(io[1]), T12, ovs, &(io[1]));
                         }
                         {
                              V TD, TE, T13, T14;
                              TD = VADD(TB, TC);
                              TE = VMUL(LDK(KP707106781), VSUB(Tz, Tu));
                              T13 = VSUB(TD, TE);
                              STM4(&(io[7]), T13, ovs, &(io[1]));
                              STN4(&(io[4]), TT, T11, TW, T13, ovs);
                              T14 = VADD(TD, TE);
                              STM4(&(io[3]), T14, ovs, &(io[1]));
                              STN4(&(io[0]), TU, T12, TV, T14, ovs);
                         }
                         {
                              V TF, TI, T15, T16;
                              TF = VSUB(Tn, To);
                              TI = VMUL(LDK(KP707106781), VSUB(TG, TH));
                              T15 = VSUB(TF, TI);
                              STM4(&(ro[7]), T15, ovs, &(ro[1]));
                              STN4(&(ro[4]), TR, TZ, TX, T15, ovs);
                              T16 = VADD(TF, TI);
                              STM4(&(ro[3]), T16, ovs, &(ro[1]));
                              STN4(&(ro[0]), TS, T10, TY, T16, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}
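
/*
 * The descriptor below mirrors the operation counts in the header
 * comment: by genfft convention the opcnt quadruple reads {additions,
 * multiplications, fused multiply/adds, other}, i.e. {52, 4, 0, 0}
 * for this variant and {44, 0, 8, 0} for the FMA variant above.
 */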

static const kdft_desc desc = { 8, XSIMD_STRING("n2sv_8"), {52, 4, 0, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_8) (planner *p) {
     X(kdft_register) (p, n2sv_8, &desc);
}

#endif
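
/*
 * For reference, a scalar sketch of the transform both variants
 * compute.  This is an editorial illustration, not generated code; it
 * assumes FFTW's forward sign convention (negative exponent) and would
 * need <math.h> if compiled:
 *
 *   void dft8_ref(const double *ri, const double *ii, double *ro, double *io)
 *   {
 *       for (int k = 0; k < 8; ++k) {
 *           double re = 0.0, im = 0.0;
 *           for (int n = 0; n < 8; ++n) {
 *               double a = -2.0 * M_PI * (double) (n * k) / 8.0;
 *               re += ri[n] * cos(a) - ii[n] * sin(a);
 *               im += ri[n] * sin(a) + ii[n] * cos(a);
 *           }
 *           ro[k] = re;
 *           io[k] = im;
 *       }
 *   }
 */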