/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:39:33 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_twidsq_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 5 -dif -name q1bv_5 -include q1b.h -sign 1 */

/*
 * This function contains 100 FP additions, 95 FP multiplications,
 * (or, 55 additions, 50 multiplications, 45 fused multiply/add),
 * 69 stack variables, 4 constants, and 50 memory accesses
 */
#include "q1b.h"

static void q1bv_5(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     {
          INT m;
          R *x;
          x = ii;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 8)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 8), MAKE_VOLATILE_STRIDE(10, rs), MAKE_VOLATILE_STRIDE(10, vs)) {
               V Te, T1w, Ty, TS, TW, Tb, T1t, Tv, T1g, T1c, TP, TV, T1f, T19, TY;
               V TX;
               {
                    V T1, T1j, Tl, Ti, Ta, T8, T1A, T1q, T1s, T9, TF, T1r, TZ, TR, TL;
                    V TC, Ts, Tu, TQ, TI, T15, T1b, T10, T11, Tt;
                    {
                         V T1n, T1o, T1k, T1l, T7, Td, T4, Tc;
                         {
                              V T5, T6, T2, T3;
                              T1 = LD(&(x[0]), ms, &(x[0]));
                              T5 = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                              T6 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                              T2 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                              T3 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                              T1j = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                              T1n = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                              T1o = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                              T1k = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                              T1l = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                              T7 = VADD(T5, T6);
                              Td = VSUB(T5, T6);
                              T4 = VADD(T2, T3);
                              Tc = VSUB(T2, T3);
                         }
                         {
                              V Tm, Tn, Tr, Tx, T1v, T1p;
                              Tl = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                              T1v = VSUB(T1n, T1o);
                              T1p = VADD(T1n, T1o);
                              {
                                   V T1u, T1m, Tp, Tq;
                                   T1u = VSUB(T1k, T1l);
                                   T1m = VADD(T1k, T1l);
                                   Tp = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                                   Ti = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tc, Td));
                                   Te = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Td, Tc));
                                   Ta = VSUB(T4, T7);
                                   T8 = VADD(T4, T7);
                                   Tq = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                   T1w = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), T1v, T1u));
                                   T1A = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), T1u, T1v));
                                   T1q = VADD(T1m, T1p);
                                   T1s = VSUB(T1m, T1p);
                                   Tm = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                   T9 = VFNMS(LDK(KP250000000), T8, T1);
                                   Tn = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                                   Tr = VADD(Tp, Tq);
                                   Tx = VSUB(Tp, Tq);
                              }
                              {
                                   V TJ, TK, TG, Tw, To, TH, T13, T14;
                                   TF = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                                   T1r = VFNMS(LDK(KP250000000), T1q, T1j);
                                   TJ = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                                   TK = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                   TG = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                   Tw = VSUB(Tm, Tn);
                                   To = VADD(Tm, Tn);
                                   TH = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                                   TZ = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                                   T13 = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                                   T14 = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                   TR = VSUB(TJ, TK);
                                   TL = VADD(TJ, TK);
                                   Ty = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Tx, Tw));
                                   TC = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tw, Tx));
                                   Ts = VADD(To, Tr);
                                   Tu = VSUB(To, Tr);
                                   TQ = VSUB(TG, TH);
                                   TI = VADD(TG, TH);
                                   T15 = VADD(T13, T14);
                                   T1b = VSUB(T13, T14);
                                   T10 = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                   T11 = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                                   Tt = VFNMS(LDK(KP250000000), Ts, Tl);
                              }
                         }
                    }
                    {
                         V TO, T12, T1a, Th, T1z, TN, TM, T18, T17;
                         ST(&(x[0]), VADD(T1, T8), ms, &(x[0]));
                         TS = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), TR, TQ));
                         TW = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), TQ, TR));
                         TM = VADD(TI, TL);
                         TO = VSUB(TI, TL);
                         ST(&(x[WS(rs, 4)]), VADD(T1j, T1q), ms, &(x[0]));
                         T12 = VADD(T10, T11);
                         T1a = VSUB(T10, T11);
                         ST(&(x[WS(rs, 1)]), VADD(Tl, Ts), ms, &(x[WS(rs, 1)]));
                         Th = VFNMS(LDK(KP559016994), Ta, T9);
                         Tb = VFMA(LDK(KP559016994), Ta, T9);
                         T1t = VFMA(LDK(KP559016994), T1s, T1r);
                         T1z = VFNMS(LDK(KP559016994), T1s, T1r);
                         ST(&(x[WS(rs, 2)]), VADD(TF, TM), ms, &(x[0]));
                         TN = VFNMS(LDK(KP250000000), TM, TF);
                         {
                              V T16, Tk, Tj, T1C, T1B, TD, TE, TB;
                              TB = VFNMS(LDK(KP559016994), Tu, Tt);
                              Tv = VFMA(LDK(KP559016994), Tu, Tt);
                              T1g = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), T1a, T1b));
                              T1c = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), T1b, T1a));
                              T18 = VSUB(T12, T15);
                              T16 = VADD(T12, T15);
                              Tk = BYTW(&(W[TWVL * 4]), VFMAI(Ti, Th));
                              Tj = BYTW(&(W[TWVL * 2]), VFNMSI(Ti, Th));
                              T1C = BYTW(&(W[TWVL * 4]), VFMAI(T1A, T1z));
                              T1B = BYTW(&(W[TWVL * 2]), VFNMSI(T1A, T1z));
                              TD = BYTW(&(W[TWVL * 2]), VFNMSI(TC, TB));
                              TE = BYTW(&(W[TWVL * 4]), VFMAI(TC, TB));
                              ST(&(x[WS(rs, 3)]), VADD(TZ, T16), ms, &(x[WS(rs, 1)]));
                              T17 = VFNMS(LDK(KP250000000), T16, TZ);
                              ST(&(x[WS(vs, 3)]), Tk, ms, &(x[WS(vs, 3)]));
                              ST(&(x[WS(vs, 2)]), Tj, ms, &(x[WS(vs, 2)]));
                              ST(&(x[WS(vs, 3) + WS(rs, 4)]), T1C, ms, &(x[WS(vs, 3)]));
                              ST(&(x[WS(vs, 2) + WS(rs, 4)]), T1B, ms, &(x[WS(vs, 2)]));
                              ST(&(x[WS(vs, 2) + WS(rs, 1)]), TD, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 3) + WS(rs, 1)]), TE, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         }
                         TP = VFMA(LDK(KP559016994), TO, TN);
                         TV = VFNMS(LDK(KP559016994), TO, TN);
                         T1f = VFNMS(LDK(KP559016994), T18, T17);
                         T19 = VFMA(LDK(KP559016994), T18, T17);
                    }
               }
               TY = BYTW(&(W[TWVL * 4]), VFMAI(TW, TV));
               TX = BYTW(&(W[TWVL * 2]), VFNMSI(TW, TV));
               {
                    V T1i, T1h, TU, TT;
                    T1i = BYTW(&(W[TWVL * 4]), VFMAI(T1g, T1f));
                    T1h = BYTW(&(W[TWVL * 2]), VFNMSI(T1g, T1f));
                    TU = BYTW(&(W[TWVL * 6]), VFNMSI(TS, TP));
                    TT = BYTW(&(W[0]), VFMAI(TS, TP));
                    {
                         V Tg, Tf, TA, Tz;
                         Tg = BYTW(&(W[TWVL * 6]), VFNMSI(Te, Tb));
                         Tf = BYTW(&(W[0]), VFMAI(Te, Tb));
                         TA = BYTW(&(W[TWVL * 6]), VFNMSI(Ty, Tv));
                         Tz = BYTW(&(W[0]), VFMAI(Ty, Tv));
                         {
                              V T1e, T1d, T1y, T1x;
                              T1e = BYTW(&(W[TWVL * 6]), VFNMSI(T1c, T19));
                              T1d = BYTW(&(W[0]), VFMAI(T1c, T19));
                              T1y = BYTW(&(W[TWVL * 6]), VFNMSI(T1w, T1t));
                              T1x = BYTW(&(W[0]), VFMAI(T1w, T1t));
                              ST(&(x[WS(vs, 3) + WS(rs, 2)]), TY, ms, &(x[WS(vs, 3)]));
                              ST(&(x[WS(vs, 2) + WS(rs, 2)]), TX, ms, &(x[WS(vs, 2)]));
                              ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1i, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 2) + WS(rs, 3)]), T1h, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 4) + WS(rs, 2)]), TU, ms, &(x[WS(vs, 4)]));
                              ST(&(x[WS(vs, 1) + WS(rs, 2)]), TT, ms, &(x[WS(vs, 1)]));
                              ST(&(x[WS(vs, 4)]), Tg, ms, &(x[WS(vs, 4)]));
                              ST(&(x[WS(vs, 1)]), Tf, ms, &(x[WS(vs, 1)]));
                              ST(&(x[WS(vs, 4) + WS(rs, 1)]), TA, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 1) + WS(rs, 1)]), Tz, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 4) + WS(rs, 3)]), T1e, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1d, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                              ST(&(x[WS(vs, 4) + WS(rs, 4)]), T1y, ms, &(x[WS(vs, 4)]));
                              ST(&(x[WS(vs, 1) + WS(rs, 4)]), T1x, ms, &(x[WS(vs, 1)]));
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 5, XSIMD_STRING("q1bv_5"), twinstr, &GENUS, {55, 50, 45, 0}, 0, 0, 0 };

void XSIMD(codelet_q1bv_5) (planner *p) {
     X(kdft_difsq_register) (p, q1bv_5, &desc);
}
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_twidsq_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 5 -dif -name q1bv_5 -include q1b.h -sign 1 */

/*
 * This function contains 100 FP additions, 70 FP multiplications,
 * (or, 85 additions, 55 multiplications, 15 fused multiply/add),
 * 44 stack variables, 4 constants, and 50 memory accesses
 */
#include "q1b.h"

static void q1bv_5(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     {
          INT m;
          R *x;
          x = ii;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 8)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 8), MAKE_VOLATILE_STRIDE(10, rs), MAKE_VOLATILE_STRIDE(10, vs)) {
               V Tb, T7, Th, Ta, Tc, Td, T1t, T1p, T1z, T1s, T1u, T1v, Tv, Tr, TB;
               V Tu, Tw, Tx, TP, TL, TV, TO, TQ, TR, T19, T15, T1f, T18, T1a, T1b;
               {
                    V T6, T9, T3, T8;
                    Tb = LD(&(x[0]), ms, &(x[0]));
                    {
                         V T4, T5, T1, T2;
                         T4 = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                         T5 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                         T6 = VSUB(T4, T5);
                         T9 = VADD(T4, T5);
                         T1 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                         T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                         T3 = VSUB(T1, T2);
                         T8 = VADD(T1, T2);
                    }
                    T7 = VBYI(VFMA(LDK(KP951056516), T3, VMUL(LDK(KP587785252), T6)));
                    Th = VBYI(VFNMS(LDK(KP951056516), T6, VMUL(LDK(KP587785252), T3)));
                    Ta = VMUL(LDK(KP559016994), VSUB(T8, T9));
                    Tc = VADD(T8, T9);
                    Td = VFNMS(LDK(KP250000000), Tc, Tb);
               }
               {
                    V T1o, T1r, T1l, T1q;
                    T1t = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                    {
                         V T1m, T1n, T1j, T1k;
                         T1m = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                         T1n = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T1o = VSUB(T1m, T1n);
                         T1r = VADD(T1m, T1n);
                         T1j = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T1k = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                         T1l = VSUB(T1j, T1k);
                         T1q = VADD(T1j, T1k);
                    }
                    T1p = VBYI(VFMA(LDK(KP951056516), T1l, VMUL(LDK(KP587785252), T1o)));
                    T1z = VBYI(VFNMS(LDK(KP951056516), T1o, VMUL(LDK(KP587785252), T1l)));
                    T1s = VMUL(LDK(KP559016994), VSUB(T1q, T1r));
                    T1u = VADD(T1q, T1r);
                    T1v = VFNMS(LDK(KP250000000), T1u, T1t);
               }
               {
                    V Tq, Tt, Tn, Ts;
                    Tv = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                    {
                         V To, Tp, Tl, Tm;
                         To = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                         Tp = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         Tq = VSUB(To, Tp);
                         Tt = VADD(To, Tp);
                         Tl = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         Tm = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                         Tn = VSUB(Tl, Tm);
                         Ts = VADD(Tl, Tm);
                    }
                    Tr = VBYI(VFMA(LDK(KP951056516), Tn, VMUL(LDK(KP587785252), Tq)));
                    TB = VBYI(VFNMS(LDK(KP951056516), Tq, VMUL(LDK(KP587785252), Tn)));
                    Tu = VMUL(LDK(KP559016994), VSUB(Ts, Tt));
                    Tw = VADD(Ts, Tt);
                    Tx = VFNMS(LDK(KP250000000), Tw, Tv);
               }
               {
                    V TK, TN, TH, TM;
                    TP = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                    {
                         V TI, TJ, TF, TG;
                         TI = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                         TJ = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         TK = VSUB(TI, TJ);
                         TN = VADD(TI, TJ);
                         TF = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         TG = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                         TH = VSUB(TF, TG);
                         TM = VADD(TF, TG);
                    }
                    TL = VBYI(VFMA(LDK(KP951056516), TH, VMUL(LDK(KP587785252), TK)));
                    TV = VBYI(VFNMS(LDK(KP951056516), TK, VMUL(LDK(KP587785252), TH)));
                    TO = VMUL(LDK(KP559016994), VSUB(TM, TN));
                    TQ = VADD(TM, TN);
                    TR = VFNMS(LDK(KP250000000), TQ, TP);
               }
               {
                    V T14, T17, T11, T16;
                    T19 = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                    {
                         V T12, T13, TZ, T10;
                         T12 = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                         T13 = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T14 = VSUB(T12, T13);
                         T17 = VADD(T12, T13);
                         TZ = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T10 = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                         T11 = VSUB(TZ, T10);
                         T16 = VADD(TZ, T10);
                    }
                    T15 = VBYI(VFMA(LDK(KP951056516), T11, VMUL(LDK(KP587785252), T14)));
                    T1f = VBYI(VFNMS(LDK(KP951056516), T14, VMUL(LDK(KP587785252), T11)));
                    T18 = VMUL(LDK(KP559016994), VSUB(T16, T17));
                    T1a = VADD(T16, T17);
                    T1b = VFNMS(LDK(KP250000000), T1a, T19);
               }
               ST(&(x[0]), VADD(Tb, Tc), ms, &(x[0]));
               ST(&(x[WS(rs, 4)]), VADD(T1t, T1u), ms, &(x[0]));
               ST(&(x[WS(rs, 2)]), VADD(TP, TQ), ms, &(x[0]));
               ST(&(x[WS(rs, 3)]), VADD(T19, T1a), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 1)]), VADD(Tv, Tw), ms, &(x[WS(rs, 1)]));
               {
                    V Tj, Tk, Ti, T1B, T1C, T1A;
                    Ti = VSUB(Td, Ta);
                    Tj = BYTW(&(W[TWVL * 2]), VADD(Th, Ti));
                    Tk = BYTW(&(W[TWVL * 4]), VSUB(Ti, Th));
                    ST(&(x[WS(vs, 2)]), Tj, ms, &(x[WS(vs, 2)]));
                    ST(&(x[WS(vs, 3)]), Tk, ms, &(x[WS(vs, 3)]));
                    T1A = VSUB(T1v, T1s);
                    T1B = BYTW(&(W[TWVL * 2]), VADD(T1z, T1A));
                    T1C = BYTW(&(W[TWVL * 4]), VSUB(T1A, T1z));
                    ST(&(x[WS(vs, 2) + WS(rs, 4)]), T1B, ms, &(x[WS(vs, 2)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 4)]), T1C, ms, &(x[WS(vs, 3)]));
               }
               {
                    V T1h, T1i, T1g, TD, TE, TC;
                    T1g = VSUB(T1b, T18);
                    T1h = BYTW(&(W[TWVL * 2]), VADD(T1f, T1g));
                    T1i = BYTW(&(W[TWVL * 4]), VSUB(T1g, T1f));
                    ST(&(x[WS(vs, 2) + WS(rs, 3)]), T1h, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1i, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    TC = VSUB(Tx, Tu);
                    TD = BYTW(&(W[TWVL * 2]), VADD(TB, TC));
                    TE = BYTW(&(W[TWVL * 4]), VSUB(TC, TB));
                    ST(&(x[WS(vs, 2) + WS(rs, 1)]), TD, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), TE, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V TX, TY, TW, TT, TU, TS;
                    TW = VSUB(TR, TO);
                    TX = BYTW(&(W[TWVL * 2]), VADD(TV, TW));
                    TY = BYTW(&(W[TWVL * 4]), VSUB(TW, TV));
                    ST(&(x[WS(vs, 2) + WS(rs, 2)]), TX, ms, &(x[WS(vs, 2)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 2)]), TY, ms, &(x[WS(vs, 3)]));
                    TS = VADD(TO, TR);
                    TT = BYTW(&(W[0]), VADD(TL, TS));
                    TU = BYTW(&(W[TWVL * 6]), VSUB(TS, TL));
                    ST(&(x[WS(vs, 1) + WS(rs, 2)]), TT, ms, &(x[WS(vs, 1)]));
                    ST(&(x[WS(vs, 4) + WS(rs, 2)]), TU, ms, &(x[WS(vs, 4)]));
               }
               {
                    V Tf, Tg, Te, Tz, TA, Ty;
                    Te = VADD(Ta, Td);
                    Tf = BYTW(&(W[0]), VADD(T7, Te));
                    Tg = BYTW(&(W[TWVL * 6]), VSUB(Te, T7));
                    ST(&(x[WS(vs, 1)]), Tf, ms, &(x[WS(vs, 1)]));
                    ST(&(x[WS(vs, 4)]), Tg, ms, &(x[WS(vs, 4)]));
                    Ty = VADD(Tu, Tx);
                    Tz = BYTW(&(W[0]), VADD(Tr, Ty));
                    TA = BYTW(&(W[TWVL * 6]), VSUB(Ty, Tr));
                    ST(&(x[WS(vs, 1) + WS(rs, 1)]), Tz, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 4) + WS(rs, 1)]), TA, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
               }
               {
                    V T1d, T1e, T1c, T1x, T1y, T1w;
                    T1c = VADD(T18, T1b);
                    T1d = BYTW(&(W[0]), VADD(T15, T1c));
                    T1e = BYTW(&(W[TWVL * 6]), VSUB(T1c, T15));
                    ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1d, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 4) + WS(rs, 3)]), T1e, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T1w = VADD(T1s, T1v);
                    T1x = BYTW(&(W[0]), VADD(T1p, T1w));
                    T1y = BYTW(&(W[TWVL * 6]), VSUB(T1w, T1p));
                    ST(&(x[WS(vs, 1) + WS(rs, 4)]), T1x, ms, &(x[WS(vs, 1)]));
                    ST(&(x[WS(vs, 4) + WS(rs, 4)]), T1y, ms, &(x[WS(vs, 4)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 5, XSIMD_STRING("q1bv_5"), twinstr, &GENUS, {85, 55, 15, 0}, 0, 0, 0 };

void XSIMD(codelet_q1bv_5) (planner *p) {
     X(kdft_difsq_register) (p, q1bv_5, &desc);
}
#endif /* HAVE_FMA */