/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sat Jul 30 16:45:26 EDT 2016 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_twidsq_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 8 -dif -name q1fv_8 -include q1f.h */

/*
 * This function contains 264 FP additions, 192 FP multiplications,
 * (or, 184 additions, 112 multiplications, 80 fused multiply/add),
 * 117 stack variables, 1 constants, and 128 memory accesses
 */
#include "q1f.h"

static void q1fv_8(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT m;
          R *x;
          x = ri;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(16, rs), MAKE_VOLATILE_STRIDE(16, vs)) {
               V T42, T43, T1U, T1V, T2Y, T2Z, TT, TS;
               {
                    V T3, Te, T1E, T1P, Tu, Tp, T25, T20, T2b, T2m, T3M, T2x, T2C, T3X, TA;
                    V TL, T48, T4d, T17, T11, TW, T1i, T2I, T1y, T1t, T2T, T3f, T3q, T34, T39;
                    V T3G, T3B, Ts, Tv, Tf, Ta, T23, T26, T1Q, T1L, T2A, T2D, T2n, T2i, T4b;
                    V T4e, T3Y, T3T, TZ, T12, TM, TH, T35, T2L, T3j, T1w, T1z, T1j, T1e, T36;
                    V T2O, T3C, T3i, T3k;
                    {
                         V T3d, T32, T3e, T3o, T3p, T33;
                         {
                              V T2v, T2w, T3V, T46, T3W;
                              {
                                   V T1, T2, Tc, Td, T1C, T1D, T1N, T1O;
                                   T1 = LD(&(x[0]), ms, &(x[0]));
                                   T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                                   Tc = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                                   Td = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                                   T1C = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                                   T1D = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                                   T1N = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                                   T1O = LD(&(x[WS(vs, 3) + WS(rs, 6)]), ms, &(x[WS(vs, 3)]));
                                   {
                                        V T29, T1Y, T1Z, T2a, T2k, T2l, Tn, To, T3K, T3L;
                                        T29 = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                                        T3 = VSUB(T1, T2);
                                        Tn = VADD(T1, T2);
                                        Te = VSUB(Tc, Td);
                                        To = VADD(Tc, Td);
                                        T1E = VSUB(T1C, T1D);
                                        T1Y = VADD(T1C, T1D);
                                        T1P = VSUB(T1N, T1O);
                                        T1Z = VADD(T1N, T1O);
                                        T2a = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                                        T2k = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                                        T2l = LD(&(x[WS(vs, 4) + WS(rs, 6)]), ms, &(x[WS(vs, 4)]));
                                        Tu = VSUB(Tn, To);
                                        Tp = VADD(Tn, To);
                                        T3K = LD(&(x[WS(vs, 7)]), ms, &(x[WS(vs, 7)]));
                                        T3L = LD(&(x[WS(vs, 7) + WS(rs, 4)]), ms, &(x[WS(vs, 7)]));
                                        T25 = VSUB(T1Y, T1Z);
                                        T20 = VADD(T1Y, T1Z);
                                        T2v = VADD(T29, T2a);
                                        T2b = VSUB(T29, T2a);
                                        T2w = VADD(T2k, T2l);
                                        T2m = VSUB(T2k, T2l);
                                        T3V = LD(&(x[WS(vs, 7) + WS(rs, 2)]), ms, &(x[WS(vs, 7)]));
                                        T46 = VADD(T3K, T3L);
                                        T3M = VSUB(T3K, T3L);
                                        T3W = LD(&(x[WS(vs, 7) + WS(rs, 6)]), ms, &(x[WS(vs, 7)]));
                                   }
                              }
                              {
                                   V T15, TU, T16, T1g, TV, T1h;
                                   {
                                        V Ty, Tz, TJ, TK, T47;
                                        Ty = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                                        Tz = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                                        TJ = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                                        T2x = VADD(T2v, T2w);
                                        T2C = VSUB(T2v, T2w);
                                        TK = LD(&(x[WS(vs, 1) + WS(rs, 6)]), ms, &(x[WS(vs, 1)]));
                                        T47 = VADD(T3V, T3W);
                                        T3X = VSUB(T3V, T3W);
                                        T15 = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                                        TA = VSUB(Ty, Tz);
                                        TU = VADD(Ty, Tz);
                                        T16 = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                                        T1g = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                                        TL = VSUB(TJ, TK);
                                        TV = VADD(TJ, TK);
                                        T48 = VADD(T46, T47);
                                        T4d = VSUB(T46, T47);
                                        T1h = LD(&(x[WS(vs, 2) + WS(rs, 6)]), ms, &(x[WS(vs, 2)]));
                                   }
                                   {
                                        V T2G, T1r, T2H, T2R, T1s, T2S;
                                        T2G = LD(&(x[WS(vs, 5)]), ms, &(x[WS(vs, 5)]));
                                        T17 = VSUB(T15, T16);
                                        T1r = VADD(T15, T16);
                                        T2H = LD(&(x[WS(vs, 5) + WS(rs, 4)]), ms, &(x[WS(vs, 5)]));
                                        T11 = VSUB(TU, TV);
                                        TW = VADD(TU, TV);
                                        T2R = LD(&(x[WS(vs, 5) + WS(rs, 2)]), ms, &(x[WS(vs, 5)]));
                                        T1i = VSUB(T1g, T1h);
                                        T1s = VADD(T1g, T1h);
                                        T2S = LD(&(x[WS(vs, 5) + WS(rs, 6)]), ms, &(x[WS(vs, 5)]));
                                        T3d = LD(&(x[WS(vs, 6)]), ms, &(x[WS(vs, 6)]));
                                        T2I = VSUB(T2G, T2H);
                                        T32 = VADD(T2G, T2H);
                                        T3e = LD(&(x[WS(vs, 6) + WS(rs, 4)]), ms, &(x[WS(vs, 6)]));
                                        T3o = LD(&(x[WS(vs, 6) + WS(rs, 2)]), ms, &(x[WS(vs, 6)]));
                                        T3p = LD(&(x[WS(vs, 6) + WS(rs, 6)]), ms, &(x[WS(vs, 6)]));
                                        T1y = VSUB(T1r, T1s);
                                        T1t = VADD(T1r, T1s);
                                        T33 = VADD(T2R, T2S);
                                        T2T = VSUB(T2R, T2S);
                                   }
                              }
                         }
                         {
                              V T2y, T2e, T3Q, T2z, T2h, T49, T3P, T3R;
                              {
                                   V T6, Tq, T1I, Tr, T9, T21, T1H, T1J;
                                   {
                                        V T4, T3z, T3A, T5, T7, T8, T1F, T1G;
                                        T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                                        T3f = VSUB(T3d, T3e);
                                        T3z = VADD(T3d, T3e);
                                        T3q = VSUB(T3o, T3p);
                                        T3A = VADD(T3o, T3p);
                                        T5 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
                                        T7 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                                        T34 = VADD(T32, T33);
                                        T39 = VSUB(T32, T33);
                                        T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                                        T1F = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        T1G = LD(&(x[WS(vs, 3) + WS(rs, 5)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        T3G = VSUB(T3z, T3A);
                                        T3B = VADD(T3z, T3A);
                                        T6 = VSUB(T4, T5);
                                        Tq = VADD(T4, T5);
                                        T1I = LD(&(x[WS(vs, 3) + WS(rs, 7)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                        Tr = VADD(T7, T8);
                                        T9 = VSUB(T7, T8);
                                        T21 = VADD(T1F, T1G);
                                        T1H = VSUB(T1F, T1G);
                                        T1J = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                                   }
                                   {
                                        V T2f, T22, T1K, T2g, T2c, T2d, T3N, T3O;
                                        T2c = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T2d = LD(&(x[WS(vs, 4) + WS(rs, 5)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T2f = LD(&(x[WS(vs, 4) + WS(rs, 7)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        Ts = VADD(Tq, Tr);
                                        Tv = VSUB(Tr, Tq);
                                        Tf = VSUB(T9, T6);
                                        Ta = VADD(T6, T9);
                                        T22 = VADD(T1I, T1J);
                                        T1K = VSUB(T1I, T1J);
                                        T2y = VADD(T2c, T2d);
                                        T2e = VSUB(T2c, T2d);
                                        T2g = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                        T3N = LD(&(x[WS(vs, 7) + WS(rs, 1)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T3O = LD(&(x[WS(vs, 7) + WS(rs, 5)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T3Q = LD(&(x[WS(vs, 7) + WS(rs, 7)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                        T23 = VADD(T21, T22);
                                        T26 = VSUB(T22, T21);
                                        T1Q = VSUB(T1K, T1H);
                                        T1L = VADD(T1H, T1K);
                                        T2z = VADD(T2f, T2g);
                                        T2h = VSUB(T2f, T2g);
                                        T49 = VADD(T3N, T3O);
                                        T3P = VSUB(T3N, T3O);
                                        T3R = LD(&(x[WS(vs, 7) + WS(rs, 3)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                   }
                              }
                              {
                                   V TX, TD, T1b, TY, TG, T1u, T1a, T1c;
                                   {
                                        V TE, T4a, T3S, TF, TB, TC, T18, T19;
                                        TB = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        TC = LD(&(x[WS(vs, 1) + WS(rs, 5)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        TE = LD(&(x[WS(vs, 1) + WS(rs, 7)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        T2A = VADD(T2y, T2z);
                                        T2D = VSUB(T2z, T2y);
                                        T2n = VSUB(T2h, T2e);
                                        T2i = VADD(T2e, T2h);
                                        T4a = VADD(T3Q, T3R);
                                        T3S = VSUB(T3Q, T3R);
                                        TX = VADD(TB, TC);
                                        TD = VSUB(TB, TC);
                                        TF = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        T18 = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T19 = LD(&(x[WS(vs, 2) + WS(rs, 5)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T1b = LD(&(x[WS(vs, 2) + WS(rs, 7)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T4b = VADD(T49, T4a);
                                        T4e = VSUB(T4a, T49);
                                        T3Y = VSUB(T3S, T3P);
                                        T3T = VADD(T3P, T3S);
                                        TY = VADD(TE, TF);
                                        TG = VSUB(TE, TF);
                                        T1u = VADD(T18, T19);
                                        T1a = VSUB(T18, T19);
                                        T1c = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                   }
                                   {
                                        V T2M, T1v, T1d, T2N, T2J, T2K, T3g, T3h;
                                        T2J = LD(&(x[WS(vs, 5) + WS(rs, 1)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T2K = LD(&(x[WS(vs, 5) + WS(rs, 5)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T2M = LD(&(x[WS(vs, 5) + WS(rs, 7)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        TZ = VADD(TX, TY);
                                        T12 = VSUB(TY, TX);
                                        TM = VSUB(TG, TD);
                                        TH = VADD(TD, TG);
                                        T1v = VADD(T1b, T1c);
                                        T1d = VSUB(T1b, T1c);
                                        T35 = VADD(T2J, T2K);
                                        T2L = VSUB(T2J, T2K);
                                        T2N = LD(&(x[WS(vs, 5) + WS(rs, 3)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                                        T3g = LD(&(x[WS(vs, 6) + WS(rs, 1)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T3h = LD(&(x[WS(vs, 6) + WS(rs, 5)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T3j = LD(&(x[WS(vs, 6) + WS(rs, 7)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                        T1w = VADD(T1u, T1v);
                                        T1z = VSUB(T1v, T1u);
                                        T1j = VSUB(T1d, T1a);
                                        T1e = VADD(T1a, T1d);
                                        T36 = VADD(T2M, T2N);
                                        T2O = VSUB(T2M, T2N);
                                        T3C = VADD(T3g, T3h);
                                        T3i = VSUB(T3g, T3h);
                                        T3k = LD(&(x[WS(vs, 6) + WS(rs, 3)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   }
                              }
                         }
                    }
                    {
                         V T3a, T2U, T2P, T3H, T3r, T3m, T13, T27, T3b, T4f;
                         {
                              V T37, T3E, T2B, T24;
                              {
                                   V T3D, T3l, Tt, T4c;
                                   ST(&(x[0]), VADD(Tp, Ts), ms, &(x[0]));
                                   ST(&(x[WS(rs, 2)]), VADD(T1t, T1w), ms, &(x[0]));
                                   ST(&(x[WS(rs, 7)]), VADD(T48, T4b), ms, &(x[WS(rs, 1)]));
                                   T37 = VADD(T35, T36);
                                   T3a = VSUB(T36, T35);
                                   T2U = VSUB(T2O, T2L);
                                   T2P = VADD(T2L, T2O);
                                   T3D = VADD(T3j, T3k);
                                   T3l = VSUB(T3j, T3k);
                                   ST(&(x[WS(rs, 4)]), VADD(T2x, T2A), ms, &(x[0]));
                                   ST(&(x[WS(rs, 3)]), VADD(T20, T23), ms, &(x[WS(rs, 1)]));
                                   ST(&(x[WS(rs, 5)]), VADD(T34, T37), ms, &(x[WS(rs, 1)]));
                                   ST(&(x[WS(rs, 1)]), VADD(TW, TZ), ms, &(x[WS(rs, 1)]));
                                   Tt = BYTWJ(&(W[TWVL * 6]), VSUB(Tp, Ts));
                                   T4c = BYTWJ(&(W[TWVL * 6]), VSUB(T48, T4b));
                                   T3E = VADD(T3C, T3D);
                                   T3H = VSUB(T3D, T3C);
                                   T3r = VSUB(T3l, T3i);
                                   T3m = VADD(T3i, T3l);
                                   T2B = BYTWJ(&(W[TWVL * 6]), VSUB(T2x, T2A));
                                   T24 = BYTWJ(&(W[TWVL * 6]), VSUB(T20, T23));
                                   ST(&(x[WS(vs, 4)]), Tt, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 7)]), T4c, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(rs, 6)]), VADD(T3B, T3E), ms, &(x[0]));
                              }
                              {
                                   V T38, T1A, Tw, T10, T1x, T3F, T2E, T3I;
                                   T10 = BYTWJ(&(W[TWVL * 6]), VSUB(TW, TZ));
                                   T1x = BYTWJ(&(W[TWVL * 6]), VSUB(T1t, T1w));
                                   T3F = BYTWJ(&(W[TWVL * 6]), VSUB(T3B, T3E));
                                   ST(&(x[WS(vs, 4) + WS(rs, 4)]), T2B, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 3)]), T24, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   T38 = BYTWJ(&(W[TWVL * 6]), VSUB(T34, T37));
                                   T1A = BYTWJ(&(W[TWVL * 10]), VFNMSI(T1z, T1y));
                                   Tw = BYTWJ(&(W[TWVL * 10]), VFNMSI(Tv, Tu));
                                   ST(&(x[WS(vs, 4) + WS(rs, 1)]), T10, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 2)]), T1x, ms, &(x[WS(vs, 4)]));
                                   ST(&(x[WS(vs, 4) + WS(rs, 6)]), T3F, ms, &(x[WS(vs, 4)]));
                                   T2E = BYTWJ(&(W[TWVL * 10]), VFNMSI(T2D, T2C));
                                   T3I = BYTWJ(&(W[TWVL * 10]), VFNMSI(T3H, T3G));
                                   ST(&(x[WS(vs, 4) + WS(rs, 5)]), T38, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 2)]), T1A, ms, &(x[WS(vs, 6)]));
                                   ST(&(x[WS(vs, 6)]), Tw, ms, &(x[WS(vs, 6)]));
                                   T13 = BYTWJ(&(W[TWVL * 10]), VFNMSI(T12, T11));
                                   T27 = BYTWJ(&(W[TWVL * 10]), VFNMSI(T26, T25));
                                   T3b = BYTWJ(&(W[TWVL * 10]), VFNMSI(T3a, T39));
                                   ST(&(x[WS(vs, 6) + WS(rs, 4)]), T2E, ms, &(x[WS(vs, 6)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 6)]), T3I, ms, &(x[WS(vs, 6)]));
                                   T4f = BYTWJ(&(W[TWVL * 10]), VFNMSI(T4e, T4d));
                              }
                         }
                         {
                              V Tj, Tk, T2r, T2j, Ti, Th, T2o, T2s, T1M, T1R, T41, T40;
                              {
                                   V T3c, T4g, T3J, T2F, Tx, T1B;
                                   Tx = BYTWJ(&(W[TWVL * 2]), VFMAI(Tv, Tu));
                                   T1B = BYTWJ(&(W[TWVL * 2]), VFMAI(T1z, T1y));
                                   ST(&(x[WS(vs, 6) + WS(rs, 1)]), T13, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 3)]), T27, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 6) + WS(rs, 5)]), T3b, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   T3c = BYTWJ(&(W[TWVL * 2]), VFMAI(T3a, T39));
                                   T4g = BYTWJ(&(W[TWVL * 2]), VFMAI(T4e, T4d));
                                   ST(&(x[WS(vs, 6) + WS(rs, 7)]), T4f, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                                   ST(&(x[WS(vs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
                                   ST(&(x[WS(vs, 2) + WS(rs, 2)]), T1B, ms, &(x[WS(vs, 2)]));
                                   T3J = BYTWJ(&(W[TWVL * 2]), VFMAI(T3H, T3G));
                                   T2F = BYTWJ(&(W[TWVL * 2]), VFMAI(T2D, T2C));
                                   {
                                        V T14, Tb, Tg, T28, T3U, T3Z;
                                        T28 = BYTWJ(&(W[TWVL * 2]), VFMAI(T26, T25));
                                        ST(&(x[WS(vs, 2) + WS(rs, 5)]), T3c, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 7)]), T4g, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T14 = BYTWJ(&(W[TWVL * 2]), VFMAI(T12, T11));
                                        Tj = VFNMS(LDK(KP707106781), Ta, T3);
                                        Tb = VFMA(LDK(KP707106781), Ta, T3);
                                        Tg = VFNMS(LDK(KP707106781), Tf, Te);
                                        Tk = VFMA(LDK(KP707106781), Tf, Te);
                                        ST(&(x[WS(vs, 2) + WS(rs, 6)]), T3J, ms, &(x[WS(vs, 2)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 4)]), T2F, ms, &(x[WS(vs, 2)]));
                                        ST(&(x[WS(vs, 2) + WS(rs, 3)]), T28, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T3U = VFMA(LDK(KP707106781), T3T, T3M);
                                        T42 = VFNMS(LDK(KP707106781), T3T, T3M);
                                        T43 = VFMA(LDK(KP707106781), T3Y, T3X);
                                        T3Z = VFNMS(LDK(KP707106781), T3Y, T3X);
                                        ST(&(x[WS(vs, 2) + WS(rs, 1)]), T14, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                                        T2r = VFNMS(LDK(KP707106781), T2i, T2b);
                                        T2j = VFMA(LDK(KP707106781), T2i, T2b);
                                        Ti = BYTWJ(&(W[TWVL * 12]), VFMAI(Tg, Tb));
                                        Th = BYTWJ(&(W[0]), VFNMSI(Tg, Tb));
                                        T2o = VFNMS(LDK(KP707106781), T2n, T2m);
                                        T2s = VFMA(LDK(KP707106781), T2n, T2m);
                                        T1U = VFNMS(LDK(KP707106781), T1L, T1E);
                                        T1M = VFMA(LDK(KP707106781), T1L, T1E);
                                        T1R = VFNMS(LDK(KP707106781), T1Q, T1P);
                                        T1V = VFMA(LDK(KP707106781), T1Q, T1P);
                                        T41 = BYTWJ(&(W[TWVL * 12]), VFMAI(T3Z, T3U));
                                        T40 = BYTWJ(&(W[0]), VFNMSI(T3Z, T3U));
                                   }
                              }
                              {
                                   V TQ, TR, T1n, T1o, T3v, T3w;
                                   {
                                        V T1f, T1k, T3n, TP, TO, T3s, T2Q, T2V;
                                        {
                                             V TI, T2q, T2p, T1T, T1S, TN;
                                             TQ = VFNMS(LDK(KP707106781), TH, TA);
                                             TI = VFMA(LDK(KP707106781), TH, TA);
                                             ST(&(x[WS(vs, 7)]), Ti, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1)]), Th, ms, &(x[WS(vs, 1)]));
                                             T2q = BYTWJ(&(W[TWVL * 12]), VFMAI(T2o, T2j));
                                             T2p = BYTWJ(&(W[0]), VFNMSI(T2o, T2j));
                                             T1T = BYTWJ(&(W[TWVL * 12]), VFMAI(T1R, T1M));
                                             T1S = BYTWJ(&(W[0]), VFNMSI(T1R, T1M));
                                             ST(&(x[WS(vs, 7) + WS(rs, 7)]), T41, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 7)]), T40, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             TN = VFNMS(LDK(KP707106781), TM, TL);
                                             TR = VFMA(LDK(KP707106781), TM, TL);
                                             T1n = VFNMS(LDK(KP707106781), T1e, T17);
                                             T1f = VFMA(LDK(KP707106781), T1e, T17);
                                             ST(&(x[WS(vs, 7) + WS(rs, 4)]), T2q, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 4)]), T2p, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 3)]), T1T, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1S, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             T1k = VFNMS(LDK(KP707106781), T1j, T1i);
                                             T1o = VFMA(LDK(KP707106781), T1j, T1i);
                                             T3v = VFNMS(LDK(KP707106781), T3m, T3f);
                                             T3n = VFMA(LDK(KP707106781), T3m, T3f);
                                             TP = BYTWJ(&(W[TWVL * 12]), VFMAI(TN, TI));
                                             TO = BYTWJ(&(W[0]), VFNMSI(TN, TI));
                                             T3s = VFNMS(LDK(KP707106781), T3r, T3q);
                                             T3w = VFMA(LDK(KP707106781), T3r, T3q);
                                        }
                                        T2Y = VFNMS(LDK(KP707106781), T2P, T2I);
                                        T2Q = VFMA(LDK(KP707106781), T2P, T2I);
                                        T2V = VFNMS(LDK(KP707106781), T2U, T2T);
                                        T2Z = VFMA(LDK(KP707106781), T2U, T2T);
                                        {
                                             V T3u, T3t, T2X, T2W, T1m, T1l;
                                             T1m = BYTWJ(&(W[TWVL * 12]), VFMAI(T1k, T1f));
                                             T1l = BYTWJ(&(W[0]), VFNMSI(T1k, T1f));
                                             ST(&(x[WS(vs, 7) + WS(rs, 1)]), TP, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 1)]), TO, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                             T3u = BYTWJ(&(W[TWVL * 12]), VFMAI(T3s, T3n));
                                             T3t = BYTWJ(&(W[0]), VFNMSI(T3s, T3n));
                                             T2X = BYTWJ(&(W[TWVL * 12]), VFMAI(T2V, T2Q));
                                             T2W = BYTWJ(&(W[0]), VFNMSI(T2V, T2Q));
                                             ST(&(x[WS(vs, 7) + WS(rs, 2)]), T1m, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 2)]), T1l, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 6)]), T3u, ms, &(x[WS(vs, 7)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 6)]), T3t, ms, &(x[WS(vs, 1)]));
                                             ST(&(x[WS(vs, 7) + WS(rs, 5)]), T2X, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                                             ST(&(x[WS(vs, 1) + WS(rs, 5)]), T2W, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                                        }
                                   }
                                   {
                                        V T2u, T2t, T3y, T3x;
                                        {
                                             V T1q, T1p, Tm, Tl;
                                             T1q = BYTWJ(&(W[TWVL * 4]), VFMAI(T1o, T1n));
                                             T1p = BYTWJ(&(W[TWVL * 8]), VFNMSI(T1o, T1n));
                                             Tm = BYTWJ(&(W[TWVL * 4]), VFMAI(Tk, Tj));
                                             Tl = BYTWJ(&(W[TWVL * 8]), VFNMSI(Tk, Tj));
                                             ST(&(x[WS(vs, 3) + WS(rs, 2)]), T1q, ms, &(x[WS(vs, 3)]));
                                             ST(&(x[WS(vs, 5) + WS(rs, 2)]), T1p, ms, &(x[WS(vs, 5)]));
                                             T2u = BYTWJ(&(W[TWVL * 4]), VFMAI(T2s, T2r));
                                             T2t = BYTWJ(&(W[TWVL * 8]), VFNMSI(T2s, T2r));
                                             T3y = BYTWJ(&(W[TWVL * 4]), VFMAI(T3w, T3v));
                                             T3x = BYTWJ(&(W[TWVL * 8]), VFNMSI(T3w, T3v));
                                             ST(&(x[WS(vs, 3)]), Tm, ms, &(x[WS(vs, 3)]));
                                             ST(&(x[WS(vs, 5)]), Tl, ms, &(x[WS(vs, 5)]));
                                        }
                                        ST(&(x[WS(vs, 3) + WS(rs, 4)]), T2u, ms, &(x[WS(vs, 3)]));
                                        ST(&(x[WS(vs, 5) + WS(rs, 4)]), T2t, ms, &(x[WS(vs, 5)]));
                                        ST(&(x[WS(vs, 3) + WS(rs, 6)]), T3y, ms, &(x[WS(vs, 3)]));
                                        ST(&(x[WS(vs, 5) + WS(rs, 6)]), T3x, ms, &(x[WS(vs, 5)]));
                                        TT = BYTWJ(&(W[TWVL * 4]), VFMAI(TR, TQ));
                                        TS = BYTWJ(&(W[TWVL * 8]), VFNMSI(TR, TQ));
                                   }
                              }
                         }
                    }
               }
               {
                    V T31, T30, T45, T44, T1X, T1W;
                    T1X = BYTWJ(&(W[TWVL * 4]), VFMAI(T1V, T1U));
                    T1W = BYTWJ(&(W[TWVL * 8]), VFNMSI(T1V, T1U));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), TT, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 1)]), TS, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    T31 = BYTWJ(&(W[TWVL * 4]), VFMAI(T2Z, T2Y));
                    T30 = BYTWJ(&(W[TWVL * 8]), VFNMSI(T2Z, T2Y));
                    T45 = BYTWJ(&(W[TWVL * 4]), VFMAI(T43, T42));
                    T44 = BYTWJ(&(W[TWVL * 8]), VFNMSI(T43, T42));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1X, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 3)]), T1W, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 5)]), T31, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 5)]), T30, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 7)]), T45, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 5) + WS(rs, 7)]), T44, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 8, XSIMD_STRING("q1fv_8"), twinstr, &GENUS, {184, 112, 80, 0}, 0, 0, 0 };

void XSIMD(codelet_q1fv_8) (planner *p) {
     X(kdft_difsq_register) (p, q1fv_8, &desc);
}
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_twidsq_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 8 -dif -name q1fv_8 -include q1f.h */

/*
 * This function contains 264 FP additions, 128 FP multiplications,
 * (or, 264 additions, 128 multiplications, 0 fused multiply/add),
 * 77 stack variables, 1 constants, and 128 memory accesses
 */
#include "q1f.h"

static void q1fv_8(R *ri, R *ii, const R *W, stride rs, stride vs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT m;
          R *x;
          x = ri;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(16, rs), MAKE_VOLATILE_STRIDE(16, vs)) {
               V T3, Tu, Tf, Tp, T1E, T25, T1Q, T20, T2b, T2C, T2n, T2x, T3M, T4d, T3Y;
               V T48, TA, T11, TM, TW, T17, T1y, T1j, T1t, T2I, T39, T2U, T34, T3f, T3G;
               V T3r, T3B, Ta, Tv, Tc, Ts, T1L, T26, T1N, T23, T2i, T2D, T2k, T2A, T3T;
               V T4e, T3V, T4b, TH, T12, TJ, TZ, T1e, T1z, T1g, T1w, T2P, T3a, T2R, T37;
               V T3m, T3H, T3o, T3E, T28, T14;
               {
                    V T1, T2, Tn, Td, Te, To;
                    T1 = LD(&(x[0]), ms, &(x[0]));
                    T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                    Tn = VADD(T1, T2);
                    Td = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                    Te = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                    To = VADD(Td, Te);
                    T3 = VSUB(T1, T2);
                    Tu = VSUB(Tn, To);
                    Tf = VSUB(Td, Te);
                    Tp = VADD(Tn, To);
               }
               {
                    V T1C, T1D, T1Y, T1O, T1P, T1Z;
                    T1C = LD(&(x[WS(vs, 3)]), ms, &(x[WS(vs, 3)]));
                    T1D = LD(&(x[WS(vs, 3) + WS(rs, 4)]), ms, &(x[WS(vs, 3)]));
                    T1Y = VADD(T1C, T1D);
                    T1O = LD(&(x[WS(vs, 3) + WS(rs, 2)]), ms, &(x[WS(vs, 3)]));
                    T1P = LD(&(x[WS(vs, 3) + WS(rs, 6)]), ms, &(x[WS(vs, 3)]));
                    T1Z = VADD(T1O, T1P);
                    T1E = VSUB(T1C, T1D);
                    T25 = VSUB(T1Y, T1Z);
                    T1Q = VSUB(T1O, T1P);
                    T20 = VADD(T1Y, T1Z);
               }
               {
                    V T29, T2a, T2v, T2l, T2m, T2w;
                    T29 = LD(&(x[WS(vs, 4)]), ms, &(x[WS(vs, 4)]));
                    T2a = LD(&(x[WS(vs, 4) + WS(rs, 4)]), ms, &(x[WS(vs, 4)]));
                    T2v = VADD(T29, T2a);
                    T2l = LD(&(x[WS(vs, 4) + WS(rs, 2)]), ms, &(x[WS(vs, 4)]));
                    T2m = LD(&(x[WS(vs, 4) + WS(rs, 6)]), ms, &(x[WS(vs, 4)]));
                    T2w = VADD(T2l, T2m);
                    T2b = VSUB(T29, T2a);
                    T2C = VSUB(T2v, T2w);
                    T2n = VSUB(T2l, T2m);
                    T2x = VADD(T2v, T2w);
               }
               {
                    V T3K, T3L, T46, T3W, T3X, T47;
                    T3K = LD(&(x[WS(vs, 7)]), ms, &(x[WS(vs, 7)]));
                    T3L = LD(&(x[WS(vs, 7) + WS(rs, 4)]), ms, &(x[WS(vs, 7)]));
                    T46 = VADD(T3K, T3L);
                    T3W = LD(&(x[WS(vs, 7) + WS(rs, 2)]), ms, &(x[WS(vs, 7)]));
                    T3X = LD(&(x[WS(vs, 7) + WS(rs, 6)]), ms, &(x[WS(vs, 7)]));
                    T47 = VADD(T3W, T3X);
                    T3M = VSUB(T3K, T3L);
                    T4d = VSUB(T46, T47);
                    T3Y = VSUB(T3W, T3X);
                    T48 = VADD(T46, T47);
               }
               {
                    V Ty, Tz, TU, TK, TL, TV;
                    Ty = LD(&(x[WS(vs, 1)]), ms, &(x[WS(vs, 1)]));
                    Tz = LD(&(x[WS(vs, 1) + WS(rs, 4)]), ms, &(x[WS(vs, 1)]));
                    TU = VADD(Ty, Tz);
                    TK = LD(&(x[WS(vs, 1) + WS(rs, 2)]), ms, &(x[WS(vs, 1)]));
                    TL = LD(&(x[WS(vs, 1) + WS(rs, 6)]), ms, &(x[WS(vs, 1)]));
                    TV = VADD(TK, TL);
                    TA = VSUB(Ty, Tz);
                    T11 = VSUB(TU, TV);
                    TM = VSUB(TK, TL);
                    TW = VADD(TU, TV);
               }
               {
                    V T15, T16, T1r, T1h, T1i, T1s;
                    T15 = LD(&(x[WS(vs, 2)]), ms, &(x[WS(vs, 2)]));
                    T16 = LD(&(x[WS(vs, 2) + WS(rs, 4)]), ms, &(x[WS(vs, 2)]));
                    T1r = VADD(T15, T16);
                    T1h = LD(&(x[WS(vs, 2) + WS(rs, 2)]), ms, &(x[WS(vs, 2)]));
                    T1i = LD(&(x[WS(vs, 2) + WS(rs, 6)]), ms, &(x[WS(vs, 2)]));
                    T1s = VADD(T1h, T1i);
                    T17 = VSUB(T15, T16);
                    T1y = VSUB(T1r, T1s);
                    T1j = VSUB(T1h, T1i);
                    T1t = VADD(T1r, T1s);
               }
               {
                    V T2G, T2H, T32, T2S, T2T, T33;
                    T2G = LD(&(x[WS(vs, 5)]), ms, &(x[WS(vs, 5)]));
                    T2H = LD(&(x[WS(vs, 5) + WS(rs, 4)]), ms, &(x[WS(vs, 5)]));
                    T32 = VADD(T2G, T2H);
                    T2S = LD(&(x[WS(vs, 5) + WS(rs, 2)]), ms, &(x[WS(vs, 5)]));
                    T2T = LD(&(x[WS(vs, 5) + WS(rs, 6)]), ms, &(x[WS(vs, 5)]));
                    T33 = VADD(T2S, T2T);
                    T2I = VSUB(T2G, T2H);
                    T39 = VSUB(T32, T33);
                    T2U = VSUB(T2S, T2T);
                    T34 = VADD(T32, T33);
               }
               {
                    V T3d, T3e, T3z, T3p, T3q, T3A;
                    T3d = LD(&(x[WS(vs, 6)]), ms, &(x[WS(vs, 6)]));
                    T3e = LD(&(x[WS(vs, 6) + WS(rs, 4)]), ms, &(x[WS(vs, 6)]));
                    T3z = VADD(T3d, T3e);
                    T3p = LD(&(x[WS(vs, 6) + WS(rs, 2)]), ms, &(x[WS(vs, 6)]));
                    T3q = LD(&(x[WS(vs, 6) + WS(rs, 6)]), ms, &(x[WS(vs, 6)]));
                    T3A = VADD(T3p, T3q);
                    T3f = VSUB(T3d, T3e);
                    T3G = VSUB(T3z, T3A);
                    T3r = VSUB(T3p, T3q);
                    T3B = VADD(T3z, T3A);
               }
               {
                    V T6, Tq, T9, Tr;
                    {
                         V T4, T5, T7, T8;
                         T4 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                         T5 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
                         T6 = VSUB(T4, T5);
                         Tq = VADD(T4, T5);
                         T7 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                         T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                         T9 = VSUB(T7, T8);
                         Tr = VADD(T7, T8);
                    }
                    Ta = VMUL(LDK(KP707106781), VADD(T6, T9));
                    Tv = VBYI(VSUB(Tr, Tq));
                    Tc = VMUL(LDK(KP707106781), VSUB(T9, T6));
                    Ts = VADD(Tq, Tr);
               }
               {
                    V T1H, T21, T1K, T22;
                    {
                         V T1F, T1G, T1I, T1J;
                         T1F = LD(&(x[WS(vs, 3) + WS(rs, 1)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1G = LD(&(x[WS(vs, 3) + WS(rs, 5)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1H = VSUB(T1F, T1G);
                         T21 = VADD(T1F, T1G);
                         T1I = LD(&(x[WS(vs, 3) + WS(rs, 7)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1J = LD(&(x[WS(vs, 3) + WS(rs, 3)]), ms, &(x[WS(vs, 3) + WS(rs, 1)]));
                         T1K = VSUB(T1I, T1J);
                         T22 = VADD(T1I, T1J);
                    }
                    T1L = VMUL(LDK(KP707106781), VADD(T1H, T1K));
                    T26 = VBYI(VSUB(T22, T21));
                    T1N = VMUL(LDK(KP707106781), VSUB(T1K, T1H));
                    T23 = VADD(T21, T22);
               }
               {
                    V T2e, T2y, T2h, T2z;
                    {
                         V T2c, T2d, T2f, T2g;
                         T2c = LD(&(x[WS(vs, 4) + WS(rs, 1)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2d = LD(&(x[WS(vs, 4) + WS(rs, 5)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2e = VSUB(T2c, T2d);
                         T2y = VADD(T2c, T2d);
                         T2f = LD(&(x[WS(vs, 4) + WS(rs, 7)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2g = LD(&(x[WS(vs, 4) + WS(rs, 3)]), ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                         T2h = VSUB(T2f, T2g);
                         T2z = VADD(T2f, T2g);
                    }
                    T2i = VMUL(LDK(KP707106781), VADD(T2e, T2h));
                    T2D = VBYI(VSUB(T2z, T2y));
                    T2k = VMUL(LDK(KP707106781), VSUB(T2h, T2e));
                    T2A = VADD(T2y, T2z);
               }
               {
                    V T3P, T49, T3S, T4a;
                    {
                         V T3N, T3O, T3Q, T3R;
                         T3N = LD(&(x[WS(vs, 7) + WS(rs, 1)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3O = LD(&(x[WS(vs, 7) + WS(rs, 5)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3P = VSUB(T3N, T3O);
                         T49 = VADD(T3N, T3O);
                         T3Q = LD(&(x[WS(vs, 7) + WS(rs, 7)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3R = LD(&(x[WS(vs, 7) + WS(rs, 3)]), ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                         T3S = VSUB(T3Q, T3R);
                         T4a = VADD(T3Q, T3R);
                    }
                    T3T = VMUL(LDK(KP707106781), VADD(T3P, T3S));
                    T4e = VBYI(VSUB(T4a, T49));
                    T3V = VMUL(LDK(KP707106781), VSUB(T3S, T3P));
                    T4b = VADD(T49, T4a);
               }
               {
                    V TD, TX, TG, TY;
                    {
                         V TB, TC, TE, TF;
                         TB = LD(&(x[WS(vs, 1) + WS(rs, 1)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TC = LD(&(x[WS(vs, 1) + WS(rs, 5)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TD = VSUB(TB, TC);
                         TX = VADD(TB, TC);
                         TE = LD(&(x[WS(vs, 1) + WS(rs, 7)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TF = LD(&(x[WS(vs, 1) + WS(rs, 3)]), ms, &(x[WS(vs, 1) + WS(rs, 1)]));
                         TG = VSUB(TE, TF);
                         TY = VADD(TE, TF);
                    }
                    TH = VMUL(LDK(KP707106781), VADD(TD, TG));
                    T12 = VBYI(VSUB(TY, TX));
                    TJ = VMUL(LDK(KP707106781), VSUB(TG, TD));
                    TZ = VADD(TX, TY);
               }
               {
                    V T1a, T1u, T1d, T1v;
                    {
                         V T18, T19, T1b, T1c;
                         T18 = LD(&(x[WS(vs, 2) + WS(rs, 1)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T19 = LD(&(x[WS(vs, 2) + WS(rs, 5)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1a = VSUB(T18, T19);
                         T1u = VADD(T18, T19);
                         T1b = LD(&(x[WS(vs, 2) + WS(rs, 7)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1c = LD(&(x[WS(vs, 2) + WS(rs, 3)]), ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                         T1d = VSUB(T1b, T1c);
                         T1v = VADD(T1b, T1c);
                    }
                    T1e = VMUL(LDK(KP707106781), VADD(T1a, T1d));
                    T1z = VBYI(VSUB(T1v, T1u));
                    T1g = VMUL(LDK(KP707106781), VSUB(T1d, T1a));
                    T1w = VADD(T1u, T1v);
               }
               {
                    V T2L, T35, T2O, T36;
                    {
                         V T2J, T2K, T2M, T2N;
                         T2J = LD(&(x[WS(vs, 5) + WS(rs, 1)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2K = LD(&(x[WS(vs, 5) + WS(rs, 5)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2L = VSUB(T2J, T2K);
                         T35 = VADD(T2J, T2K);
                         T2M = LD(&(x[WS(vs, 5) + WS(rs, 7)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2N = LD(&(x[WS(vs, 5) + WS(rs, 3)]), ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                         T2O = VSUB(T2M, T2N);
                         T36 = VADD(T2M, T2N);
                    }
                    T2P = VMUL(LDK(KP707106781), VADD(T2L, T2O));
                    T3a = VBYI(VSUB(T36, T35));
                    T2R = VMUL(LDK(KP707106781), VSUB(T2O, T2L));
                    T37 = VADD(T35, T36);
               }
               {
                    V T3i, T3C, T3l, T3D;
                    {
                         V T3g, T3h, T3j, T3k;
                         T3g = LD(&(x[WS(vs, 6) + WS(rs, 1)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3h = LD(&(x[WS(vs, 6) + WS(rs, 5)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3i = VSUB(T3g, T3h);
                         T3C = VADD(T3g, T3h);
                         T3j = LD(&(x[WS(vs, 6) + WS(rs, 7)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3k = LD(&(x[WS(vs, 6) + WS(rs, 3)]), ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                         T3l = VSUB(T3j, T3k);
                         T3D = VADD(T3j, T3k);
                    }
                    T3m = VMUL(LDK(KP707106781), VADD(T3i, T3l));
                    T3H = VBYI(VSUB(T3D, T3C));
                    T3o = VMUL(LDK(KP707106781), VSUB(T3l, T3i));
                    T3E = VADD(T3C, T3D);
               }
               ST(&(x[0]), VADD(Tp, Ts), ms, &(x[0]));
               ST(&(x[WS(rs, 2)]), VADD(T1t, T1w), ms, &(x[0]));
               ST(&(x[WS(rs, 5)]), VADD(T34, T37), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 7)]), VADD(T48, T4b), ms, &(x[WS(rs, 1)]));
               ST(&(x[WS(rs, 6)]), VADD(T3B, T3E), ms, &(x[0]));
               ST(&(x[WS(rs, 4)]), VADD(T2x, T2A), ms, &(x[0]));
               {
                    V Tt, T4c, T2B, T24;
                    ST(&(x[WS(rs, 3)]), VADD(T20, T23), ms, &(x[WS(rs, 1)]));
                    ST(&(x[WS(rs, 1)]), VADD(TW, TZ), ms, &(x[WS(rs, 1)]));
                    Tt = BYTWJ(&(W[TWVL * 6]), VSUB(Tp, Ts));
                    ST(&(x[WS(vs, 4)]), Tt, ms, &(x[WS(vs, 4)]));
                    T4c = BYTWJ(&(W[TWVL * 6]), VSUB(T48, T4b));
                    ST(&(x[WS(vs, 4) + WS(rs, 7)]), T4c, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T2B = BYTWJ(&(W[TWVL * 6]), VSUB(T2x, T2A));
                    ST(&(x[WS(vs, 4) + WS(rs, 4)]), T2B, ms, &(x[WS(vs, 4)]));
                    T24 = BYTWJ(&(W[TWVL * 6]), VSUB(T20, T23));
                    ST(&(x[WS(vs, 4) + WS(rs, 3)]), T24, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
               }
               {
                    V T10, T1x, T3F, T38, T1A, Tw;
                    T10 = BYTWJ(&(W[TWVL * 6]), VSUB(TW, TZ));
                    ST(&(x[WS(vs, 4) + WS(rs, 1)]), T10, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T1x = BYTWJ(&(W[TWVL * 6]), VSUB(T1t, T1w));
                    ST(&(x[WS(vs, 4) + WS(rs, 2)]), T1x, ms, &(x[WS(vs, 4)]));
                    T3F = BYTWJ(&(W[TWVL * 6]), VSUB(T3B, T3E));
                    ST(&(x[WS(vs, 4) + WS(rs, 6)]), T3F, ms, &(x[WS(vs, 4)]));
                    T38 = BYTWJ(&(W[TWVL * 6]), VSUB(T34, T37));
                    ST(&(x[WS(vs, 4) + WS(rs, 5)]), T38, ms, &(x[WS(vs, 4) + WS(rs, 1)]));
                    T1A = BYTWJ(&(W[TWVL * 10]), VSUB(T1y, T1z));
                    ST(&(x[WS(vs, 6) + WS(rs, 2)]), T1A, ms, &(x[WS(vs, 6)]));
                    Tw = BYTWJ(&(W[TWVL * 10]), VSUB(Tu, Tv));
                    ST(&(x[WS(vs, 6)]), Tw, ms, &(x[WS(vs, 6)]));
               }
               {
                    V T2E, T3I, T13, T27, T3b, T4f;
                    T2E = BYTWJ(&(W[TWVL * 10]), VSUB(T2C, T2D));
                    ST(&(x[WS(vs, 6) + WS(rs, 4)]), T2E, ms, &(x[WS(vs, 6)]));
                    T3I = BYTWJ(&(W[TWVL * 10]), VSUB(T3G, T3H));
                    ST(&(x[WS(vs, 6) + WS(rs, 6)]), T3I, ms, &(x[WS(vs, 6)]));
                    T13 = BYTWJ(&(W[TWVL * 10]), VSUB(T11, T12));
                    ST(&(x[WS(vs, 6) + WS(rs, 1)]), T13, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T27 = BYTWJ(&(W[TWVL * 10]), VSUB(T25, T26));
                    ST(&(x[WS(vs, 6) + WS(rs, 3)]), T27, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T3b = BYTWJ(&(W[TWVL * 10]), VSUB(T39, T3a));
                    ST(&(x[WS(vs, 6) + WS(rs, 5)]), T3b, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
                    T4f = BYTWJ(&(W[TWVL * 10]), VSUB(T4d, T4e));
                    ST(&(x[WS(vs, 6) + WS(rs, 7)]), T4f, ms, &(x[WS(vs, 6) + WS(rs, 1)]));
               }
               {
                    V Tx, T1B, T3c, T4g, T3J, T2F;
                    Tx = BYTWJ(&(W[TWVL * 2]), VADD(Tu, Tv));
                    ST(&(x[WS(vs, 2)]), Tx, ms, &(x[WS(vs, 2)]));
                    T1B = BYTWJ(&(W[TWVL * 2]), VADD(T1y, T1z));
                    ST(&(x[WS(vs, 2) + WS(rs, 2)]), T1B, ms, &(x[WS(vs, 2)]));
                    T3c = BYTWJ(&(W[TWVL * 2]), VADD(T39, T3a));
                    ST(&(x[WS(vs, 2) + WS(rs, 5)]), T3c, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    T4g = BYTWJ(&(W[TWVL * 2]), VADD(T4d, T4e));
                    ST(&(x[WS(vs, 2) + WS(rs, 7)]), T4g, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
                    T3J = BYTWJ(&(W[TWVL * 2]), VADD(T3G, T3H));
                    ST(&(x[WS(vs, 2) + WS(rs, 6)]), T3J, ms, &(x[WS(vs, 2)]));
                    T2F = BYTWJ(&(W[TWVL * 2]), VADD(T2C, T2D));
                    ST(&(x[WS(vs, 2) + WS(rs, 4)]), T2F, ms, &(x[WS(vs, 2)]));
               }
               T28 = BYTWJ(&(W[TWVL * 2]), VADD(T25, T26));
               ST(&(x[WS(vs, 2) + WS(rs, 3)]), T28, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               T14 = BYTWJ(&(W[TWVL * 2]), VADD(T11, T12));
               ST(&(x[WS(vs, 2) + WS(rs, 1)]), T14, ms, &(x[WS(vs, 2) + WS(rs, 1)]));
               {
                    V Th, Ti, Tb, Tg;
                    Tb = VADD(T3, Ta);
                    Tg = VBYI(VSUB(Tc, Tf));
                    Th = BYTWJ(&(W[TWVL * 12]), VSUB(Tb, Tg));
                    Ti = BYTWJ(&(W[0]), VADD(Tb, Tg));
                    ST(&(x[WS(vs, 7)]), Th, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1)]), Ti, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T40, T41, T3U, T3Z;
                    T3U = VADD(T3M, T3T);
                    T3Z = VBYI(VSUB(T3V, T3Y));
                    T40 = BYTWJ(&(W[TWVL * 12]), VSUB(T3U, T3Z));
                    T41 = BYTWJ(&(W[0]), VADD(T3U, T3Z));
                    ST(&(x[WS(vs, 7) + WS(rs, 7)]), T40, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 7)]), T41, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T2p, T2q, T2j, T2o;
                    T2j = VADD(T2b, T2i);
                    T2o = VBYI(VSUB(T2k, T2n));
                    T2p = BYTWJ(&(W[TWVL * 12]), VSUB(T2j, T2o));
                    T2q = BYTWJ(&(W[0]), VADD(T2j, T2o));
                    ST(&(x[WS(vs, 7) + WS(rs, 4)]), T2p, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 4)]), T2q, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T1S, T1T, T1M, T1R;
                    T1M = VADD(T1E, T1L);
                    T1R = VBYI(VSUB(T1N, T1Q));
                    T1S = BYTWJ(&(W[TWVL * 12]), VSUB(T1M, T1R));
                    T1T = BYTWJ(&(W[0]), VADD(T1M, T1R));
                    ST(&(x[WS(vs, 7) + WS(rs, 3)]), T1S, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 3)]), T1T, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V TO, TP, TI, TN;
                    TI = VADD(TA, TH);
                    TN = VBYI(VSUB(TJ, TM));
                    TO = BYTWJ(&(W[TWVL * 12]), VSUB(TI, TN));
                    TP = BYTWJ(&(W[0]), VADD(TI, TN));
                    ST(&(x[WS(vs, 7) + WS(rs, 1)]), TO, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 1)]), TP, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T1l, T1m, T1f, T1k;
                    T1f = VADD(T17, T1e);
                    T1k = VBYI(VSUB(T1g, T1j));
                    T1l = BYTWJ(&(W[TWVL * 12]), VSUB(T1f, T1k));
                    T1m = BYTWJ(&(W[0]), VADD(T1f, T1k));
                    ST(&(x[WS(vs, 7) + WS(rs, 2)]), T1l, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 2)]), T1m, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T3t, T3u, T3n, T3s;
                    T3n = VADD(T3f, T3m);
                    T3s = VBYI(VSUB(T3o, T3r));
                    T3t = BYTWJ(&(W[TWVL * 12]), VSUB(T3n, T3s));
                    T3u = BYTWJ(&(W[0]), VADD(T3n, T3s));
                    ST(&(x[WS(vs, 7) + WS(rs, 6)]), T3t, ms, &(x[WS(vs, 7)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 6)]), T3u, ms, &(x[WS(vs, 1)]));
               }
               {
                    V T2W, T2X, T2Q, T2V;
                    T2Q = VADD(T2I, T2P);
                    T2V = VBYI(VSUB(T2R, T2U));
                    T2W = BYTWJ(&(W[TWVL * 12]), VSUB(T2Q, T2V));
                    T2X = BYTWJ(&(W[0]), VADD(T2Q, T2V));
                    ST(&(x[WS(vs, 7) + WS(rs, 5)]), T2W, ms, &(x[WS(vs, 7) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 1) + WS(rs, 5)]), T2X, ms, &(x[WS(vs, 1) + WS(rs, 1)]));
               }
               {
                    V T1p, T1q, T1n, T1o;
                    T1n = VSUB(T17, T1e);
                    T1o = VBYI(VADD(T1j, T1g));
                    T1p = BYTWJ(&(W[TWVL * 8]), VSUB(T1n, T1o));
                    T1q = BYTWJ(&(W[TWVL * 4]), VADD(T1n, T1o));
                    ST(&(x[WS(vs, 5) + WS(rs, 2)]), T1p, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 2)]), T1q, ms, &(x[WS(vs, 3)]));
               }
               {
                    V Tl, Tm, Tj, Tk;
                    Tj = VSUB(T3, Ta);
                    Tk = VBYI(VADD(Tf, Tc));
                    Tl = BYTWJ(&(W[TWVL * 8]), VSUB(Tj, Tk));
                    Tm = BYTWJ(&(W[TWVL * 4]), VADD(Tj, Tk));
                    ST(&(x[WS(vs, 5)]), Tl, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3)]), Tm, ms, &(x[WS(vs, 3)]));
               }
               {
                    V T2t, T2u, T2r, T2s;
                    T2r = VSUB(T2b, T2i);
                    T2s = VBYI(VADD(T2n, T2k));
                    T2t = BYTWJ(&(W[TWVL * 8]), VSUB(T2r, T2s));
                    T2u = BYTWJ(&(W[TWVL * 4]), VADD(T2r, T2s));
                    ST(&(x[WS(vs, 5) + WS(rs, 4)]), T2t, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 4)]), T2u, ms, &(x[WS(vs, 3)]));
               }
               {
                    V T3x, T3y, T3v, T3w;
                    T3v = VSUB(T3f, T3m);
                    T3w = VBYI(VADD(T3r, T3o));
                    T3x = BYTWJ(&(W[TWVL * 8]), VSUB(T3v, T3w));
                    T3y = BYTWJ(&(W[TWVL * 4]), VADD(T3v, T3w));
                    ST(&(x[WS(vs, 5) + WS(rs, 6)]), T3x, ms, &(x[WS(vs, 5)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 6)]), T3y, ms, &(x[WS(vs, 3)]));
               }
               {
                    V TS, TT, TQ, TR;
                    TQ = VSUB(TA, TH);
                    TR = VBYI(VADD(TM, TJ));
                    TS = BYTWJ(&(W[TWVL * 8]), VSUB(TQ, TR));
                    TT = BYTWJ(&(W[TWVL * 4]), VADD(TQ, TR));
                    ST(&(x[WS(vs, 5) + WS(rs, 1)]), TS, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 1)]), TT, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T1W, T1X, T1U, T1V;
                    T1U = VSUB(T1E, T1L);
                    T1V = VBYI(VADD(T1Q, T1N));
                    T1W = BYTWJ(&(W[TWVL * 8]), VSUB(T1U, T1V));
                    T1X = BYTWJ(&(W[TWVL * 4]), VADD(T1U, T1V));
                    ST(&(x[WS(vs, 5) + WS(rs, 3)]), T1W, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 3)]), T1X, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T30, T31, T2Y, T2Z;
                    T2Y = VSUB(T2I, T2P);
                    T2Z = VBYI(VADD(T2U, T2R));
                    T30 = BYTWJ(&(W[TWVL * 8]), VSUB(T2Y, T2Z));
                    T31 = BYTWJ(&(W[TWVL * 4]), VADD(T2Y, T2Z));
                    ST(&(x[WS(vs, 5) + WS(rs, 5)]), T30, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 5)]), T31, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
               {
                    V T44, T45, T42, T43;
                    T42 = VSUB(T3M, T3T);
                    T43 = VBYI(VADD(T3Y, T3V));
                    T44 = BYTWJ(&(W[TWVL * 8]), VSUB(T42, T43));
                    T45 = BYTWJ(&(W[TWVL * 4]), VADD(T42, T43));
                    ST(&(x[WS(vs, 5) + WS(rs, 7)]), T44, ms, &(x[WS(vs, 5) + WS(rs, 1)]));
                    ST(&(x[WS(vs, 3) + WS(rs, 7)]), T45, ms, &(x[WS(vs, 3) + WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 8, XSIMD_STRING("q1fv_8"), twinstr, &GENUS, {264, 128, 0, 0}, 0, 0, 0 };

void XSIMD(codelet_q1fv_8) (planner *p) {
     X(kdft_difsq_register) (p, q1fv_8, &desc);
}
#endif /* HAVE_FMA */