Chris@82: /* Chris@82: * Copyright (c) 2003, 2007-14 Matteo Frigo Chris@82: * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology Chris@82: * Chris@82: * This program is free software; you can redistribute it and/or modify Chris@82: * it under the terms of the GNU General Public License as published by Chris@82: * the Free Software Foundation; either version 2 of the License, or Chris@82: * (at your option) any later version. Chris@82: * Chris@82: * This program is distributed in the hope that it will be useful, Chris@82: * but WITHOUT ANY WARRANTY; without even the implied warranty of Chris@82: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Chris@82: * GNU General Public License for more details. Chris@82: * Chris@82: * You should have received a copy of the GNU General Public License Chris@82: * along with this program; if not, write to the Free Software Chris@82: * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Chris@82: * Chris@82: */ Chris@82: Chris@82: /* This file was automatically generated --- DO NOT EDIT */ Chris@82: /* Generated on Thu May 24 08:05:20 EDT 2018 */ Chris@82: Chris@82: #include "dft/codelet-dft.h" Chris@82: Chris@82: #if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA) Chris@82: Chris@82: /* Generated by: ../../../genfft/gen_notw.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 32 -name n2sv_32 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */ Chris@82: Chris@82: /* Chris@82: * This function contains 372 FP additions, 136 FP multiplications, Chris@82: * (or, 236 additions, 0 multiplications, 136 fused multiply/add), Chris@82: * 138 stack variables, 7 constants, and 144 memory accesses Chris@82: */ Chris@82: #include "dft/simd/n2s.h" Chris@82: Chris@82: static void n2sv_32(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs) Chris@82: { Chris@82: DVK(KP980785280, 
+0.980785280403230449126182236134239036973933731); Chris@82: DVK(KP198912367, +0.198912367379658006911597622644676228597850501); Chris@82: DVK(KP831469612, +0.831469612302545237078788377617905756738560812); Chris@82: DVK(KP668178637, +0.668178637919298919997757686523080761552472251); Chris@82: DVK(KP923879532, +0.923879532511286756128183189396788286822416626); Chris@82: DVK(KP707106781, +0.707106781186547524400844362104849039284835938); Chris@82: DVK(KP414213562, +0.414213562373095048801688724209698078569671875); Chris@82: { Chris@82: INT i; Chris@82: for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(128, is), MAKE_VOLATILE_STRIDE(128, os)) { Chris@82: V T7, T4r, T4Z, T18, T1z, T3t, T3T, T2T, Te, T1f, T50, T4s, T2W, T3u, T1G; Chris@82: V T3U, Tm, T1n, T1O, T2Z, T3y, T3X, T4w, T53, Tt, T1u, T1V, T2Y, T3B, T3W; Chris@82: V T4z, T52, T2t, T3L, T3O, T2K, TR, TY, T5F, T5G, T5H, T5I, T4R, T5k, T2E; Chris@82: V T3M, T4W, T5j, T2N, T3P, T22, T3E, T3H, T2j, TC, TJ, T5A, T5B, T5C, T5D; Chris@82: V T4G, T5h, T2d, T3F, T4L, T5g, T2m, T3I; Chris@82: { Chris@82: V T3, T1x, T14, T2R, T6, T2S, T17, T1y; Chris@82: { Chris@82: V T1, T2, T12, T13; Chris@82: T1 = LD(&(ri[0]), ivs, &(ri[0])); Chris@82: T2 = LD(&(ri[WS(is, 16)]), ivs, &(ri[0])); Chris@82: T3 = VADD(T1, T2); Chris@82: T1x = VSUB(T1, T2); Chris@82: T12 = LD(&(ii[0]), ivs, &(ii[0])); Chris@82: T13 = LD(&(ii[WS(is, 16)]), ivs, &(ii[0])); Chris@82: T14 = VADD(T12, T13); Chris@82: T2R = VSUB(T12, T13); Chris@82: } Chris@82: { Chris@82: V T4, T5, T15, T16; Chris@82: T4 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0])); Chris@82: T5 = LD(&(ri[WS(is, 24)]), ivs, &(ri[0])); Chris@82: T6 = VADD(T4, T5); Chris@82: T2S = VSUB(T4, T5); Chris@82: T15 = LD(&(ii[WS(is, 8)]), ivs, &(ii[0])); Chris@82: T16 = LD(&(ii[WS(is, 24)]), ivs, &(ii[0])); Chris@82: T17 = VADD(T15, T16); Chris@82: T1y = VSUB(T15, T16); Chris@82: } Chris@82: T7 
= VADD(T3, T6); Chris@82: T4r = VSUB(T3, T6); Chris@82: T4Z = VSUB(T14, T17); Chris@82: T18 = VADD(T14, T17); Chris@82: T1z = VADD(T1x, T1y); Chris@82: T3t = VSUB(T1x, T1y); Chris@82: T3T = VADD(T2S, T2R); Chris@82: T2T = VSUB(T2R, T2S); Chris@82: } Chris@82: { Chris@82: V Ta, T1A, T1b, T1B, Td, T1D, T1e, T1E; Chris@82: { Chris@82: V T8, T9, T19, T1a; Chris@82: T8 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0])); Chris@82: T9 = LD(&(ri[WS(is, 20)]), ivs, &(ri[0])); Chris@82: Ta = VADD(T8, T9); Chris@82: T1A = VSUB(T8, T9); Chris@82: T19 = LD(&(ii[WS(is, 4)]), ivs, &(ii[0])); Chris@82: T1a = LD(&(ii[WS(is, 20)]), ivs, &(ii[0])); Chris@82: T1b = VADD(T19, T1a); Chris@82: T1B = VSUB(T19, T1a); Chris@82: } Chris@82: { Chris@82: V Tb, Tc, T1c, T1d; Chris@82: Tb = LD(&(ri[WS(is, 28)]), ivs, &(ri[0])); Chris@82: Tc = LD(&(ri[WS(is, 12)]), ivs, &(ri[0])); Chris@82: Td = VADD(Tb, Tc); Chris@82: T1D = VSUB(Tb, Tc); Chris@82: T1c = LD(&(ii[WS(is, 28)]), ivs, &(ii[0])); Chris@82: T1d = LD(&(ii[WS(is, 12)]), ivs, &(ii[0])); Chris@82: T1e = VADD(T1c, T1d); Chris@82: T1E = VSUB(T1c, T1d); Chris@82: } Chris@82: Te = VADD(Ta, Td); Chris@82: T1f = VADD(T1b, T1e); Chris@82: T50 = VSUB(Td, Ta); Chris@82: T4s = VSUB(T1b, T1e); Chris@82: { Chris@82: V T2U, T2V, T1C, T1F; Chris@82: T2U = VSUB(T1B, T1A); Chris@82: T2V = VADD(T1D, T1E); Chris@82: T2W = VADD(T2U, T2V); Chris@82: T3u = VSUB(T2U, T2V); Chris@82: T1C = VADD(T1A, T1B); Chris@82: T1F = VSUB(T1D, T1E); Chris@82: T1G = VADD(T1C, T1F); Chris@82: T3U = VSUB(T1F, T1C); Chris@82: } Chris@82: } Chris@82: { Chris@82: V Ti, T1L, T1j, T1I, Tl, T1J, T1m, T1M, T1K, T1N; Chris@82: { Chris@82: V Tg, Th, T1h, T1i; Chris@82: Tg = LD(&(ri[WS(is, 2)]), ivs, &(ri[0])); Chris@82: Th = LD(&(ri[WS(is, 18)]), ivs, &(ri[0])); Chris@82: Ti = VADD(Tg, Th); Chris@82: T1L = VSUB(Tg, Th); Chris@82: T1h = LD(&(ii[WS(is, 2)]), ivs, &(ii[0])); Chris@82: T1i = LD(&(ii[WS(is, 18)]), ivs, &(ii[0])); Chris@82: T1j = VADD(T1h, T1i); Chris@82: T1I = VSUB(T1h, T1i); Chris@82: } 
Chris@82: { Chris@82: V Tj, Tk, T1k, T1l; Chris@82: Tj = LD(&(ri[WS(is, 10)]), ivs, &(ri[0])); Chris@82: Tk = LD(&(ri[WS(is, 26)]), ivs, &(ri[0])); Chris@82: Tl = VADD(Tj, Tk); Chris@82: T1J = VSUB(Tj, Tk); Chris@82: T1k = LD(&(ii[WS(is, 10)]), ivs, &(ii[0])); Chris@82: T1l = LD(&(ii[WS(is, 26)]), ivs, &(ii[0])); Chris@82: T1m = VADD(T1k, T1l); Chris@82: T1M = VSUB(T1k, T1l); Chris@82: } Chris@82: Tm = VADD(Ti, Tl); Chris@82: T1n = VADD(T1j, T1m); Chris@82: T1K = VSUB(T1I, T1J); Chris@82: T1N = VADD(T1L, T1M); Chris@82: T1O = VFNMS(LDK(KP414213562), T1N, T1K); Chris@82: T2Z = VFMA(LDK(KP414213562), T1K, T1N); Chris@82: { Chris@82: V T3w, T3x, T4u, T4v; Chris@82: T3w = VADD(T1J, T1I); Chris@82: T3x = VSUB(T1L, T1M); Chris@82: T3y = VFMA(LDK(KP414213562), T3x, T3w); Chris@82: T3X = VFNMS(LDK(KP414213562), T3w, T3x); Chris@82: T4u = VSUB(T1j, T1m); Chris@82: T4v = VSUB(Ti, Tl); Chris@82: T4w = VSUB(T4u, T4v); Chris@82: T53 = VADD(T4v, T4u); Chris@82: } Chris@82: } Chris@82: { Chris@82: V Tp, T1S, T1q, T1P, Ts, T1Q, T1t, T1T, T1R, T1U; Chris@82: { Chris@82: V Tn, To, T1o, T1p; Chris@82: Tn = LD(&(ri[WS(is, 30)]), ivs, &(ri[0])); Chris@82: To = LD(&(ri[WS(is, 14)]), ivs, &(ri[0])); Chris@82: Tp = VADD(Tn, To); Chris@82: T1S = VSUB(Tn, To); Chris@82: T1o = LD(&(ii[WS(is, 30)]), ivs, &(ii[0])); Chris@82: T1p = LD(&(ii[WS(is, 14)]), ivs, &(ii[0])); Chris@82: T1q = VADD(T1o, T1p); Chris@82: T1P = VSUB(T1o, T1p); Chris@82: } Chris@82: { Chris@82: V Tq, Tr, T1r, T1s; Chris@82: Tq = LD(&(ri[WS(is, 6)]), ivs, &(ri[0])); Chris@82: Tr = LD(&(ri[WS(is, 22)]), ivs, &(ri[0])); Chris@82: Ts = VADD(Tq, Tr); Chris@82: T1Q = VSUB(Tq, Tr); Chris@82: T1r = LD(&(ii[WS(is, 6)]), ivs, &(ii[0])); Chris@82: T1s = LD(&(ii[WS(is, 22)]), ivs, &(ii[0])); Chris@82: T1t = VADD(T1r, T1s); Chris@82: T1T = VSUB(T1r, T1s); Chris@82: } Chris@82: Tt = VADD(Tp, Ts); Chris@82: T1u = VADD(T1q, T1t); Chris@82: T1R = VSUB(T1P, T1Q); Chris@82: T1U = VADD(T1S, T1T); Chris@82: T1V = VFMA(LDK(KP414213562), T1U, 
T1R); Chris@82: T2Y = VFNMS(LDK(KP414213562), T1R, T1U); Chris@82: { Chris@82: V T3z, T3A, T4x, T4y; Chris@82: T3z = VADD(T1Q, T1P); Chris@82: T3A = VSUB(T1S, T1T); Chris@82: T3B = VFNMS(LDK(KP414213562), T3A, T3z); Chris@82: T3W = VFMA(LDK(KP414213562), T3z, T3A); Chris@82: T4x = VSUB(Tp, Ts); Chris@82: T4y = VSUB(T1q, T1t); Chris@82: T4z = VADD(T4x, T4y); Chris@82: T52 = VSUB(T4x, T4y); Chris@82: } Chris@82: } Chris@82: { Chris@82: V TN, T2G, T2r, T4N, TQ, T2s, T2J, T4O, TU, T2x, T2w, T4T, TX, T2z, T2C; Chris@82: V T4U; Chris@82: { Chris@82: V TL, TM, T2p, T2q; Chris@82: TL = LD(&(ri[WS(is, 31)]), ivs, &(ri[WS(is, 1)])); Chris@82: TM = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)])); Chris@82: TN = VADD(TL, TM); Chris@82: T2G = VSUB(TL, TM); Chris@82: T2p = LD(&(ii[WS(is, 31)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2q = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2r = VSUB(T2p, T2q); Chris@82: T4N = VADD(T2p, T2q); Chris@82: } Chris@82: { Chris@82: V TO, TP, T2H, T2I; Chris@82: TO = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)])); Chris@82: TP = LD(&(ri[WS(is, 23)]), ivs, &(ri[WS(is, 1)])); Chris@82: TQ = VADD(TO, TP); Chris@82: T2s = VSUB(TO, TP); Chris@82: T2H = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2I = LD(&(ii[WS(is, 23)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2J = VSUB(T2H, T2I); Chris@82: T4O = VADD(T2H, T2I); Chris@82: } Chris@82: { Chris@82: V TS, TT, T2u, T2v; Chris@82: TS = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)])); Chris@82: TT = LD(&(ri[WS(is, 19)]), ivs, &(ri[WS(is, 1)])); Chris@82: TU = VADD(TS, TT); Chris@82: T2x = VSUB(TS, TT); Chris@82: T2u = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2v = LD(&(ii[WS(is, 19)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2w = VSUB(T2u, T2v); Chris@82: T4T = VADD(T2u, T2v); Chris@82: } Chris@82: { Chris@82: V TV, TW, T2A, T2B; Chris@82: TV = LD(&(ri[WS(is, 27)]), ivs, &(ri[WS(is, 1)])); Chris@82: TW = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)])); Chris@82: TX = VADD(TV, TW); Chris@82: T2z 
= VSUB(TV, TW); Chris@82: T2A = LD(&(ii[WS(is, 27)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2B = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2C = VSUB(T2A, T2B); Chris@82: T4U = VADD(T2A, T2B); Chris@82: } Chris@82: T2t = VSUB(T2r, T2s); Chris@82: T3L = VSUB(T2G, T2J); Chris@82: T3O = VADD(T2s, T2r); Chris@82: T2K = VADD(T2G, T2J); Chris@82: TR = VADD(TN, TQ); Chris@82: TY = VADD(TU, TX); Chris@82: T5F = VSUB(TR, TY); Chris@82: { Chris@82: V T4P, T4Q, T2y, T2D; Chris@82: T5G = VADD(T4N, T4O); Chris@82: T5H = VADD(T4T, T4U); Chris@82: T5I = VSUB(T5G, T5H); Chris@82: T4P = VSUB(T4N, T4O); Chris@82: T4Q = VSUB(TX, TU); Chris@82: T4R = VSUB(T4P, T4Q); Chris@82: T5k = VADD(T4Q, T4P); Chris@82: T2y = VSUB(T2w, T2x); Chris@82: T2D = VADD(T2z, T2C); Chris@82: T2E = VADD(T2y, T2D); Chris@82: T3M = VSUB(T2D, T2y); Chris@82: { Chris@82: V T4S, T4V, T2L, T2M; Chris@82: T4S = VSUB(TN, TQ); Chris@82: T4V = VSUB(T4T, T4U); Chris@82: T4W = VSUB(T4S, T4V); Chris@82: T5j = VADD(T4S, T4V); Chris@82: T2L = VADD(T2x, T2w); Chris@82: T2M = VSUB(T2z, T2C); Chris@82: T2N = VADD(T2L, T2M); Chris@82: T3P = VSUB(T2L, T2M); Chris@82: } Chris@82: } Chris@82: } Chris@82: { Chris@82: V Ty, T2f, T20, T4C, TB, T21, T2i, T4D, TF, T26, T25, T4I, TI, T28, T2b; Chris@82: V T4J; Chris@82: { Chris@82: V Tw, Tx, T1Y, T1Z; Chris@82: Tw = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)])); Chris@82: Tx = LD(&(ri[WS(is, 17)]), ivs, &(ri[WS(is, 1)])); Chris@82: Ty = VADD(Tw, Tx); Chris@82: T2f = VSUB(Tw, Tx); Chris@82: T1Y = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)])); Chris@82: T1Z = LD(&(ii[WS(is, 17)]), ivs, &(ii[WS(is, 1)])); Chris@82: T20 = VSUB(T1Y, T1Z); Chris@82: T4C = VADD(T1Y, T1Z); Chris@82: } Chris@82: { Chris@82: V Tz, TA, T2g, T2h; Chris@82: Tz = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)])); Chris@82: TA = LD(&(ri[WS(is, 25)]), ivs, &(ri[WS(is, 1)])); Chris@82: TB = VADD(Tz, TA); Chris@82: T21 = VSUB(Tz, TA); Chris@82: T2g = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2h = 
LD(&(ii[WS(is, 25)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2i = VSUB(T2g, T2h); Chris@82: T4D = VADD(T2g, T2h); Chris@82: } Chris@82: { Chris@82: V TD, TE, T23, T24; Chris@82: TD = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)])); Chris@82: TE = LD(&(ri[WS(is, 21)]), ivs, &(ri[WS(is, 1)])); Chris@82: TF = VADD(TD, TE); Chris@82: T26 = VSUB(TD, TE); Chris@82: T23 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)])); Chris@82: T24 = LD(&(ii[WS(is, 21)]), ivs, &(ii[WS(is, 1)])); Chris@82: T25 = VSUB(T23, T24); Chris@82: T4I = VADD(T23, T24); Chris@82: } Chris@82: { Chris@82: V TG, TH, T29, T2a; Chris@82: TG = LD(&(ri[WS(is, 29)]), ivs, &(ri[WS(is, 1)])); Chris@82: TH = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)])); Chris@82: TI = VADD(TG, TH); Chris@82: T28 = VSUB(TG, TH); Chris@82: T29 = LD(&(ii[WS(is, 29)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2a = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2b = VSUB(T29, T2a); Chris@82: T4J = VADD(T29, T2a); Chris@82: } Chris@82: T22 = VSUB(T20, T21); Chris@82: T3E = VSUB(T2f, T2i); Chris@82: T3H = VADD(T21, T20); Chris@82: T2j = VADD(T2f, T2i); Chris@82: TC = VADD(Ty, TB); Chris@82: TJ = VADD(TF, TI); Chris@82: T5A = VSUB(TC, TJ); Chris@82: { Chris@82: V T4E, T4F, T27, T2c; Chris@82: T5B = VADD(T4C, T4D); Chris@82: T5C = VADD(T4I, T4J); Chris@82: T5D = VSUB(T5B, T5C); Chris@82: T4E = VSUB(T4C, T4D); Chris@82: T4F = VSUB(TI, TF); Chris@82: T4G = VSUB(T4E, T4F); Chris@82: T5h = VADD(T4F, T4E); Chris@82: T27 = VSUB(T25, T26); Chris@82: T2c = VADD(T28, T2b); Chris@82: T2d = VADD(T27, T2c); Chris@82: T3F = VSUB(T2c, T27); Chris@82: { Chris@82: V T4H, T4K, T2k, T2l; Chris@82: T4H = VSUB(Ty, TB); Chris@82: T4K = VSUB(T4I, T4J); Chris@82: T4L = VSUB(T4H, T4K); Chris@82: T5g = VADD(T4H, T4K); Chris@82: T2k = VADD(T26, T25); Chris@82: T2l = VSUB(T28, T2b); Chris@82: T2m = VADD(T2k, T2l); Chris@82: T3I = VSUB(T2k, T2l); Chris@82: } Chris@82: } Chris@82: } Chris@82: { Chris@82: V T61, T62, T63, T64, T65, T66, T67, T68, T69, T6a, T6b, T6c, T6d, 
T6e, T6f; Chris@82: V T6g, T6h, T6i, T6j, T6k, T6l, T6m, T6n, T6o, T6p, T6q, T6r, T6s, T6t, T6u; Chris@82: V T6v, T6w; Chris@82: { Chris@82: V T4B, T5b, T5a, T5c, T4Y, T56, T55, T57; Chris@82: { Chris@82: V T4t, T4A, T58, T59; Chris@82: T4t = VSUB(T4r, T4s); Chris@82: T4A = VSUB(T4w, T4z); Chris@82: T4B = VFMA(LDK(KP707106781), T4A, T4t); Chris@82: T5b = VFNMS(LDK(KP707106781), T4A, T4t); Chris@82: T58 = VFMA(LDK(KP414213562), T4R, T4W); Chris@82: T59 = VFNMS(LDK(KP414213562), T4G, T4L); Chris@82: T5a = VSUB(T58, T59); Chris@82: T5c = VADD(T59, T58); Chris@82: } Chris@82: { Chris@82: V T4M, T4X, T51, T54; Chris@82: T4M = VFMA(LDK(KP414213562), T4L, T4G); Chris@82: T4X = VFNMS(LDK(KP414213562), T4W, T4R); Chris@82: T4Y = VSUB(T4M, T4X); Chris@82: T56 = VADD(T4M, T4X); Chris@82: T51 = VSUB(T4Z, T50); Chris@82: T54 = VSUB(T52, T53); Chris@82: T55 = VFNMS(LDK(KP707106781), T54, T51); Chris@82: T57 = VFMA(LDK(KP707106781), T54, T51); Chris@82: } Chris@82: T61 = VFNMS(LDK(KP923879532), T4Y, T4B); Chris@82: STM4(&(ro[22]), T61, ovs, &(ro[0])); Chris@82: T62 = VFNMS(LDK(KP923879532), T5a, T57); Chris@82: STM4(&(io[22]), T62, ovs, &(io[0])); Chris@82: T63 = VFMA(LDK(KP923879532), T4Y, T4B); Chris@82: STM4(&(ro[6]), T63, ovs, &(ro[0])); Chris@82: T64 = VFMA(LDK(KP923879532), T5a, T57); Chris@82: STM4(&(io[6]), T64, ovs, &(io[0])); Chris@82: T65 = VFNMS(LDK(KP923879532), T56, T55); Chris@82: STM4(&(io[14]), T65, ovs, &(io[0])); Chris@82: T66 = VFNMS(LDK(KP923879532), T5c, T5b); Chris@82: STM4(&(ro[14]), T66, ovs, &(ro[0])); Chris@82: T67 = VFMA(LDK(KP923879532), T56, T55); Chris@82: STM4(&(io[30]), T67, ovs, &(io[0])); Chris@82: T68 = VFMA(LDK(KP923879532), T5c, T5b); Chris@82: STM4(&(ro[30]), T68, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T5f, T5r, T5u, T5w, T5m, T5q, T5p, T5v; Chris@82: { Chris@82: V T5d, T5e, T5s, T5t; Chris@82: T5d = VADD(T4r, T4s); Chris@82: T5e = VADD(T53, T52); Chris@82: T5f = VFMA(LDK(KP707106781), T5e, T5d); Chris@82: T5r = 
VFNMS(LDK(KP707106781), T5e, T5d); Chris@82: T5s = VFNMS(LDK(KP414213562), T5g, T5h); Chris@82: T5t = VFMA(LDK(KP414213562), T5j, T5k); Chris@82: T5u = VSUB(T5s, T5t); Chris@82: T5w = VADD(T5s, T5t); Chris@82: } Chris@82: { Chris@82: V T5i, T5l, T5n, T5o; Chris@82: T5i = VFMA(LDK(KP414213562), T5h, T5g); Chris@82: T5l = VFNMS(LDK(KP414213562), T5k, T5j); Chris@82: T5m = VADD(T5i, T5l); Chris@82: T5q = VSUB(T5l, T5i); Chris@82: T5n = VADD(T50, T4Z); Chris@82: T5o = VADD(T4w, T4z); Chris@82: T5p = VFNMS(LDK(KP707106781), T5o, T5n); Chris@82: T5v = VFMA(LDK(KP707106781), T5o, T5n); Chris@82: } Chris@82: T69 = VFNMS(LDK(KP923879532), T5m, T5f); Chris@82: STM4(&(ro[18]), T69, ovs, &(ro[0])); Chris@82: T6a = VFNMS(LDK(KP923879532), T5w, T5v); Chris@82: STM4(&(io[18]), T6a, ovs, &(io[0])); Chris@82: T6b = VFMA(LDK(KP923879532), T5m, T5f); Chris@82: STM4(&(ro[2]), T6b, ovs, &(ro[0])); Chris@82: T6c = VFMA(LDK(KP923879532), T5w, T5v); Chris@82: STM4(&(io[2]), T6c, ovs, &(io[0])); Chris@82: T6d = VFNMS(LDK(KP923879532), T5q, T5p); Chris@82: STM4(&(io[26]), T6d, ovs, &(io[0])); Chris@82: T6e = VFNMS(LDK(KP923879532), T5u, T5r); Chris@82: STM4(&(ro[26]), T6e, ovs, &(ro[0])); Chris@82: T6f = VFMA(LDK(KP923879532), T5q, T5p); Chris@82: STM4(&(io[10]), T6f, ovs, &(io[0])); Chris@82: T6g = VFMA(LDK(KP923879532), T5u, T5r); Chris@82: STM4(&(ro[10]), T6g, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T5z, T5P, T5S, T5U, T5K, T5O, T5N, T5T; Chris@82: { Chris@82: V T5x, T5y, T5Q, T5R; Chris@82: T5x = VSUB(T7, Te); Chris@82: T5y = VSUB(T1n, T1u); Chris@82: T5z = VADD(T5x, T5y); Chris@82: T5P = VSUB(T5x, T5y); Chris@82: T5Q = VSUB(T5D, T5A); Chris@82: T5R = VADD(T5F, T5I); Chris@82: T5S = VSUB(T5Q, T5R); Chris@82: T5U = VADD(T5Q, T5R); Chris@82: } Chris@82: { Chris@82: V T5E, T5J, T5L, T5M; Chris@82: T5E = VADD(T5A, T5D); Chris@82: T5J = VSUB(T5F, T5I); Chris@82: T5K = VADD(T5E, T5J); Chris@82: T5O = VSUB(T5J, T5E); Chris@82: T5L = VSUB(T18, T1f); Chris@82: T5M = VSUB(Tt, Tm); 
Chris@82: T5N = VSUB(T5L, T5M); Chris@82: T5T = VADD(T5M, T5L); Chris@82: } Chris@82: T6h = VFNMS(LDK(KP707106781), T5K, T5z); Chris@82: STM4(&(ro[20]), T6h, ovs, &(ro[0])); Chris@82: T6i = VFNMS(LDK(KP707106781), T5U, T5T); Chris@82: STM4(&(io[20]), T6i, ovs, &(io[0])); Chris@82: T6j = VFMA(LDK(KP707106781), T5K, T5z); Chris@82: STM4(&(ro[4]), T6j, ovs, &(ro[0])); Chris@82: T6k = VFMA(LDK(KP707106781), T5U, T5T); Chris@82: STM4(&(io[4]), T6k, ovs, &(io[0])); Chris@82: T6l = VFNMS(LDK(KP707106781), T5O, T5N); Chris@82: STM4(&(io[28]), T6l, ovs, &(io[0])); Chris@82: T6m = VFNMS(LDK(KP707106781), T5S, T5P); Chris@82: STM4(&(ro[28]), T6m, ovs, &(ro[0])); Chris@82: T6n = VFMA(LDK(KP707106781), T5O, T5N); Chris@82: STM4(&(io[12]), T6n, ovs, &(io[0])); Chris@82: T6o = VFMA(LDK(KP707106781), T5S, T5P); Chris@82: STM4(&(ro[12]), T6o, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V Tv, T5V, T5Y, T60, T10, T11, T1w, T5Z; Chris@82: { Chris@82: V Tf, Tu, T5W, T5X; Chris@82: Tf = VADD(T7, Te); Chris@82: Tu = VADD(Tm, Tt); Chris@82: Tv = VADD(Tf, Tu); Chris@82: T5V = VSUB(Tf, Tu); Chris@82: T5W = VADD(T5B, T5C); Chris@82: T5X = VADD(T5G, T5H); Chris@82: T5Y = VSUB(T5W, T5X); Chris@82: T60 = VADD(T5W, T5X); Chris@82: } Chris@82: { Chris@82: V TK, TZ, T1g, T1v; Chris@82: TK = VADD(TC, TJ); Chris@82: TZ = VADD(TR, TY); Chris@82: T10 = VADD(TK, TZ); Chris@82: T11 = VSUB(TZ, TK); Chris@82: T1g = VADD(T18, T1f); Chris@82: T1v = VADD(T1n, T1u); Chris@82: T1w = VSUB(T1g, T1v); Chris@82: T5Z = VADD(T1g, T1v); Chris@82: } Chris@82: T6p = VSUB(Tv, T10); Chris@82: STM4(&(ro[16]), T6p, ovs, &(ro[0])); Chris@82: T6q = VSUB(T5Z, T60); Chris@82: STM4(&(io[16]), T6q, ovs, &(io[0])); Chris@82: T6r = VADD(Tv, T10); Chris@82: STM4(&(ro[0]), T6r, ovs, &(ro[0])); Chris@82: T6s = VADD(T5Z, T60); Chris@82: STM4(&(io[0]), T6s, ovs, &(io[0])); Chris@82: T6t = VADD(T11, T1w); Chris@82: STM4(&(io[8]), T6t, ovs, &(io[0])); Chris@82: T6u = VADD(T5V, T5Y); Chris@82: STM4(&(ro[8]), T6u, ovs, &(ro[0])); 
Chris@82: T6v = VSUB(T1w, T11); Chris@82: STM4(&(io[24]), T6v, ovs, &(io[0])); Chris@82: T6w = VSUB(T5V, T5Y); Chris@82: STM4(&(ro[24]), T6w, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T6x, T6y, T6z, T6A, T6B, T6C, T6D, T6E, T6F, T6G, T6H, T6I, T6J, T6K, T6L; Chris@82: V T6M; Chris@82: { Chris@82: V T1X, T37, T31, T33, T2o, T35, T2P, T34; Chris@82: { Chris@82: V T1H, T1W, T2X, T30; Chris@82: T1H = VFNMS(LDK(KP707106781), T1G, T1z); Chris@82: T1W = VSUB(T1O, T1V); Chris@82: T1X = VFMA(LDK(KP923879532), T1W, T1H); Chris@82: T37 = VFNMS(LDK(KP923879532), T1W, T1H); Chris@82: T2X = VFNMS(LDK(KP707106781), T2W, T2T); Chris@82: T30 = VSUB(T2Y, T2Z); Chris@82: T31 = VFNMS(LDK(KP923879532), T30, T2X); Chris@82: T33 = VFMA(LDK(KP923879532), T30, T2X); Chris@82: } Chris@82: { Chris@82: V T2e, T2n, T2F, T2O; Chris@82: T2e = VFNMS(LDK(KP707106781), T2d, T22); Chris@82: T2n = VFNMS(LDK(KP707106781), T2m, T2j); Chris@82: T2o = VFMA(LDK(KP668178637), T2n, T2e); Chris@82: T35 = VFNMS(LDK(KP668178637), T2e, T2n); Chris@82: T2F = VFNMS(LDK(KP707106781), T2E, T2t); Chris@82: T2O = VFNMS(LDK(KP707106781), T2N, T2K); Chris@82: T2P = VFNMS(LDK(KP668178637), T2O, T2F); Chris@82: T34 = VFMA(LDK(KP668178637), T2F, T2O); Chris@82: } Chris@82: { Chris@82: V T2Q, T36, T32, T38; Chris@82: T2Q = VSUB(T2o, T2P); Chris@82: T6x = VFNMS(LDK(KP831469612), T2Q, T1X); Chris@82: STM4(&(ro[21]), T6x, ovs, &(ro[1])); Chris@82: T6y = VFMA(LDK(KP831469612), T2Q, T1X); Chris@82: STM4(&(ro[5]), T6y, ovs, &(ro[1])); Chris@82: T36 = VSUB(T34, T35); Chris@82: T6z = VFNMS(LDK(KP831469612), T36, T33); Chris@82: STM4(&(io[21]), T6z, ovs, &(io[1])); Chris@82: T6A = VFMA(LDK(KP831469612), T36, T33); Chris@82: STM4(&(io[5]), T6A, ovs, &(io[1])); Chris@82: T32 = VADD(T2o, T2P); Chris@82: T6B = VFNMS(LDK(KP831469612), T32, T31); Chris@82: STM4(&(io[13]), T6B, ovs, &(io[1])); Chris@82: T6C = VFMA(LDK(KP831469612), T32, T31); Chris@82: STM4(&(io[29]), T6C, ovs, &(io[1])); Chris@82: T38 = VADD(T35, T34); 
Chris@82: T6D = VFNMS(LDK(KP831469612), T38, T37); Chris@82: STM4(&(ro[13]), T6D, ovs, &(ro[1])); Chris@82: T6E = VFMA(LDK(KP831469612), T38, T37); Chris@82: STM4(&(ro[29]), T6E, ovs, &(ro[1])); Chris@82: } Chris@82: } Chris@82: { Chris@82: V T3D, T41, T3Z, T45, T3K, T42, T3R, T43; Chris@82: { Chris@82: V T3v, T3C, T3V, T3Y; Chris@82: T3v = VFMA(LDK(KP707106781), T3u, T3t); Chris@82: T3C = VSUB(T3y, T3B); Chris@82: T3D = VFMA(LDK(KP923879532), T3C, T3v); Chris@82: T41 = VFNMS(LDK(KP923879532), T3C, T3v); Chris@82: T3V = VFMA(LDK(KP707106781), T3U, T3T); Chris@82: T3Y = VSUB(T3W, T3X); Chris@82: T3Z = VFNMS(LDK(KP923879532), T3Y, T3V); Chris@82: T45 = VFMA(LDK(KP923879532), T3Y, T3V); Chris@82: } Chris@82: { Chris@82: V T3G, T3J, T3N, T3Q; Chris@82: T3G = VFNMS(LDK(KP707106781), T3F, T3E); Chris@82: T3J = VFNMS(LDK(KP707106781), T3I, T3H); Chris@82: T3K = VFMA(LDK(KP668178637), T3J, T3G); Chris@82: T42 = VFNMS(LDK(KP668178637), T3G, T3J); Chris@82: T3N = VFNMS(LDK(KP707106781), T3M, T3L); Chris@82: T3Q = VFNMS(LDK(KP707106781), T3P, T3O); Chris@82: T3R = VFNMS(LDK(KP668178637), T3Q, T3N); Chris@82: T43 = VFMA(LDK(KP668178637), T3N, T3Q); Chris@82: } Chris@82: { Chris@82: V T3S, T46, T40, T44; Chris@82: T3S = VADD(T3K, T3R); Chris@82: T6F = VFNMS(LDK(KP831469612), T3S, T3D); Chris@82: STM4(&(ro[19]), T6F, ovs, &(ro[1])); Chris@82: T6G = VFMA(LDK(KP831469612), T3S, T3D); Chris@82: STM4(&(ro[3]), T6G, ovs, &(ro[1])); Chris@82: T46 = VADD(T42, T43); Chris@82: T6H = VFNMS(LDK(KP831469612), T46, T45); Chris@82: STM4(&(io[19]), T6H, ovs, &(io[1])); Chris@82: T6I = VFMA(LDK(KP831469612), T46, T45); Chris@82: STM4(&(io[3]), T6I, ovs, &(io[1])); Chris@82: T40 = VSUB(T3R, T3K); Chris@82: T6J = VFNMS(LDK(KP831469612), T40, T3Z); Chris@82: STM4(&(io[27]), T6J, ovs, &(io[1])); Chris@82: T6K = VFMA(LDK(KP831469612), T40, T3Z); Chris@82: STM4(&(io[11]), T6K, ovs, &(io[1])); Chris@82: T44 = VSUB(T42, T43); Chris@82: T6L = VFNMS(LDK(KP831469612), T44, T41); Chris@82: STM4(&(ro[27]), 
T6L, ovs, &(ro[1])); Chris@82: T6M = VFMA(LDK(KP831469612), T44, T41); Chris@82: STM4(&(ro[11]), T6M, ovs, &(ro[1])); Chris@82: } Chris@82: } Chris@82: { Chris@82: V T49, T4p, T4j, T4l, T4c, T4n, T4f, T4m; Chris@82: { Chris@82: V T47, T48, T4h, T4i; Chris@82: T47 = VFNMS(LDK(KP707106781), T3u, T3t); Chris@82: T48 = VADD(T3X, T3W); Chris@82: T49 = VFNMS(LDK(KP923879532), T48, T47); Chris@82: T4p = VFMA(LDK(KP923879532), T48, T47); Chris@82: T4h = VFNMS(LDK(KP707106781), T3U, T3T); Chris@82: T4i = VADD(T3y, T3B); Chris@82: T4j = VFMA(LDK(KP923879532), T4i, T4h); Chris@82: T4l = VFNMS(LDK(KP923879532), T4i, T4h); Chris@82: } Chris@82: { Chris@82: V T4a, T4b, T4d, T4e; Chris@82: T4a = VFMA(LDK(KP707106781), T3I, T3H); Chris@82: T4b = VFMA(LDK(KP707106781), T3F, T3E); Chris@82: T4c = VFMA(LDK(KP198912367), T4b, T4a); Chris@82: T4n = VFNMS(LDK(KP198912367), T4a, T4b); Chris@82: T4d = VFMA(LDK(KP707106781), T3P, T3O); Chris@82: T4e = VFMA(LDK(KP707106781), T3M, T3L); Chris@82: T4f = VFNMS(LDK(KP198912367), T4e, T4d); Chris@82: T4m = VFMA(LDK(KP198912367), T4d, T4e); Chris@82: } Chris@82: { Chris@82: V T4g, T6N, T6O, T4o, T6P, T6Q; Chris@82: T4g = VSUB(T4c, T4f); Chris@82: T6N = VFNMS(LDK(KP980785280), T4g, T49); Chris@82: STM4(&(ro[23]), T6N, ovs, &(ro[1])); Chris@82: STN4(&(ro[20]), T6h, T6x, T61, T6N, ovs); Chris@82: T6O = VFMA(LDK(KP980785280), T4g, T49); Chris@82: STM4(&(ro[7]), T6O, ovs, &(ro[1])); Chris@82: STN4(&(ro[4]), T6j, T6y, T63, T6O, ovs); Chris@82: T4o = VSUB(T4m, T4n); Chris@82: T6P = VFNMS(LDK(KP980785280), T4o, T4l); Chris@82: STM4(&(io[23]), T6P, ovs, &(io[1])); Chris@82: STN4(&(io[20]), T6i, T6z, T62, T6P, ovs); Chris@82: T6Q = VFMA(LDK(KP980785280), T4o, T4l); Chris@82: STM4(&(io[7]), T6Q, ovs, &(io[1])); Chris@82: STN4(&(io[4]), T6k, T6A, T64, T6Q, ovs); Chris@82: } Chris@82: { Chris@82: V T4k, T6R, T6S, T4q, T6T, T6U; Chris@82: T4k = VADD(T4c, T4f); Chris@82: T6R = VFNMS(LDK(KP980785280), T4k, T4j); Chris@82: STM4(&(io[15]), T6R, ovs, &(io[1])); 
Chris@82: STN4(&(io[12]), T6n, T6B, T65, T6R, ovs); Chris@82: T6S = VFMA(LDK(KP980785280), T4k, T4j); Chris@82: STM4(&(io[31]), T6S, ovs, &(io[1])); Chris@82: STN4(&(io[28]), T6l, T6C, T67, T6S, ovs); Chris@82: T4q = VADD(T4n, T4m); Chris@82: T6T = VFNMS(LDK(KP980785280), T4q, T4p); Chris@82: STM4(&(ro[15]), T6T, ovs, &(ro[1])); Chris@82: STN4(&(ro[12]), T6o, T6D, T66, T6T, ovs); Chris@82: T6U = VFMA(LDK(KP980785280), T4q, T4p); Chris@82: STM4(&(ro[31]), T6U, ovs, &(ro[1])); Chris@82: STN4(&(ro[28]), T6m, T6E, T68, T6U, ovs); Chris@82: } Chris@82: } Chris@82: { Chris@82: V T3b, T3n, T3l, T3r, T3e, T3o, T3h, T3p; Chris@82: { Chris@82: V T39, T3a, T3j, T3k; Chris@82: T39 = VFMA(LDK(KP707106781), T1G, T1z); Chris@82: T3a = VADD(T2Z, T2Y); Chris@82: T3b = VFMA(LDK(KP923879532), T3a, T39); Chris@82: T3n = VFNMS(LDK(KP923879532), T3a, T39); Chris@82: T3j = VFMA(LDK(KP707106781), T2W, T2T); Chris@82: T3k = VADD(T1O, T1V); Chris@82: T3l = VFNMS(LDK(KP923879532), T3k, T3j); Chris@82: T3r = VFMA(LDK(KP923879532), T3k, T3j); Chris@82: } Chris@82: { Chris@82: V T3c, T3d, T3f, T3g; Chris@82: T3c = VFMA(LDK(KP707106781), T2m, T2j); Chris@82: T3d = VFMA(LDK(KP707106781), T2d, T22); Chris@82: T3e = VFMA(LDK(KP198912367), T3d, T3c); Chris@82: T3o = VFNMS(LDK(KP198912367), T3c, T3d); Chris@82: T3f = VFMA(LDK(KP707106781), T2N, T2K); Chris@82: T3g = VFMA(LDK(KP707106781), T2E, T2t); Chris@82: T3h = VFNMS(LDK(KP198912367), T3g, T3f); Chris@82: T3p = VFMA(LDK(KP198912367), T3f, T3g); Chris@82: } Chris@82: { Chris@82: V T3i, T6V, T6W, T3s, T6X, T6Y; Chris@82: T3i = VADD(T3e, T3h); Chris@82: T6V = VFNMS(LDK(KP980785280), T3i, T3b); Chris@82: STM4(&(ro[17]), T6V, ovs, &(ro[1])); Chris@82: STN4(&(ro[16]), T6p, T6V, T69, T6F, ovs); Chris@82: T6W = VFMA(LDK(KP980785280), T3i, T3b); Chris@82: STM4(&(ro[1]), T6W, ovs, &(ro[1])); Chris@82: STN4(&(ro[0]), T6r, T6W, T6b, T6G, ovs); Chris@82: T3s = VADD(T3o, T3p); Chris@82: T6X = VFNMS(LDK(KP980785280), T3s, T3r); Chris@82: STM4(&(io[17]), T6X, 
ovs, &(io[1])); Chris@82: STN4(&(io[16]), T6q, T6X, T6a, T6H, ovs); Chris@82: T6Y = VFMA(LDK(KP980785280), T3s, T3r); Chris@82: STM4(&(io[1]), T6Y, ovs, &(io[1])); Chris@82: STN4(&(io[0]), T6s, T6Y, T6c, T6I, ovs); Chris@82: } Chris@82: { Chris@82: V T3m, T6Z, T70, T3q, T71, T72; Chris@82: T3m = VSUB(T3h, T3e); Chris@82: T6Z = VFNMS(LDK(KP980785280), T3m, T3l); Chris@82: STM4(&(io[25]), T6Z, ovs, &(io[1])); Chris@82: STN4(&(io[24]), T6v, T6Z, T6d, T6J, ovs); Chris@82: T70 = VFMA(LDK(KP980785280), T3m, T3l); Chris@82: STM4(&(io[9]), T70, ovs, &(io[1])); Chris@82: STN4(&(io[8]), T6t, T70, T6f, T6K, ovs); Chris@82: T3q = VSUB(T3o, T3p); Chris@82: T71 = VFNMS(LDK(KP980785280), T3q, T3n); Chris@82: STM4(&(ro[25]), T71, ovs, &(ro[1])); Chris@82: STN4(&(ro[24]), T6w, T71, T6e, T6L, ovs); Chris@82: T72 = VFMA(LDK(KP980785280), T3q, T3n); Chris@82: STM4(&(ro[9]), T72, ovs, &(ro[1])); Chris@82: STN4(&(ro[8]), T6u, T72, T6g, T6M, ovs); Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: VLEAVE(); Chris@82: } Chris@82: Chris@82: static const kdft_desc desc = { 32, XSIMD_STRING("n2sv_32"), {236, 0, 136, 0}, &GENUS, 0, 1, 0, 0 }; Chris@82: Chris@82: void XSIMD(codelet_n2sv_32) (planner *p) { Chris@82: X(kdft_register) (p, n2sv_32, &desc); Chris@82: } Chris@82: Chris@82: #else Chris@82: Chris@82: /* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 32 -name n2sv_32 -with-ostride 1 -include dft/simd/n2s.h -store-multiple 4 */ Chris@82: Chris@82: /* Chris@82: * This function contains 372 FP additions, 84 FP multiplications, Chris@82: * (or, 340 additions, 52 multiplications, 32 fused multiply/add), Chris@82: * 130 stack variables, 7 constants, and 144 memory accesses Chris@82: */ Chris@82: #include "dft/simd/n2s.h" Chris@82: Chris@82: static void n2sv_32(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs) Chris@82: { Chris@82: DVK(KP831469612, 
+0.831469612302545237078788377617905756738560812); Chris@82: DVK(KP555570233, +0.555570233019602224742830813948532874374937191); Chris@82: DVK(KP195090322, +0.195090322016128267848284868477022240927691618); Chris@82: DVK(KP980785280, +0.980785280403230449126182236134239036973933731); Chris@82: DVK(KP923879532, +0.923879532511286756128183189396788286822416626); Chris@82: DVK(KP382683432, +0.382683432365089771728459984030398866761344562); Chris@82: DVK(KP707106781, +0.707106781186547524400844362104849039284835938); Chris@82: { Chris@82: INT i; Chris@82: for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(128, is), MAKE_VOLATILE_STRIDE(128, os)) { Chris@82: V T7, T4r, T4Z, T18, T1z, T3t, T3T, T2T, Te, T1f, T50, T4s, T2W, T3u, T1G; Chris@82: V T3U, Tm, T1n, T1O, T2Z, T3y, T3X, T4w, T53, Tt, T1u, T1V, T2Y, T3B, T3W; Chris@82: V T4z, T52, T2t, T3L, T3O, T2K, TR, TY, T5F, T5G, T5H, T5I, T4R, T5j, T2E; Chris@82: V T3P, T4W, T5k, T2N, T3M, T22, T3E, T3H, T2j, TC, TJ, T5A, T5B, T5C, T5D; Chris@82: V T4G, T5g, T2d, T3F, T4L, T5h, T2m, T3I; Chris@82: { Chris@82: V T3, T1x, T14, T2S, T6, T2R, T17, T1y; Chris@82: { Chris@82: V T1, T2, T12, T13; Chris@82: T1 = LD(&(ri[0]), ivs, &(ri[0])); Chris@82: T2 = LD(&(ri[WS(is, 16)]), ivs, &(ri[0])); Chris@82: T3 = VADD(T1, T2); Chris@82: T1x = VSUB(T1, T2); Chris@82: T12 = LD(&(ii[0]), ivs, &(ii[0])); Chris@82: T13 = LD(&(ii[WS(is, 16)]), ivs, &(ii[0])); Chris@82: T14 = VADD(T12, T13); Chris@82: T2S = VSUB(T12, T13); Chris@82: } Chris@82: { Chris@82: V T4, T5, T15, T16; Chris@82: T4 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0])); Chris@82: T5 = LD(&(ri[WS(is, 24)]), ivs, &(ri[0])); Chris@82: T6 = VADD(T4, T5); Chris@82: T2R = VSUB(T4, T5); Chris@82: T15 = LD(&(ii[WS(is, 8)]), ivs, &(ii[0])); Chris@82: T16 = LD(&(ii[WS(is, 24)]), ivs, &(ii[0])); Chris@82: T17 = VADD(T15, T16); Chris@82: T1y = VSUB(T15, T16); Chris@82: } Chris@82: T7 
= VADD(T3, T6); Chris@82: T4r = VSUB(T3, T6); Chris@82: T4Z = VSUB(T14, T17); Chris@82: T18 = VADD(T14, T17); Chris@82: T1z = VSUB(T1x, T1y); Chris@82: T3t = VADD(T1x, T1y); Chris@82: T3T = VSUB(T2S, T2R); Chris@82: T2T = VADD(T2R, T2S); Chris@82: } Chris@82: { Chris@82: V Ta, T1B, T1b, T1A, Td, T1D, T1e, T1E; Chris@82: { Chris@82: V T8, T9, T19, T1a; Chris@82: T8 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0])); Chris@82: T9 = LD(&(ri[WS(is, 20)]), ivs, &(ri[0])); Chris@82: Ta = VADD(T8, T9); Chris@82: T1B = VSUB(T8, T9); Chris@82: T19 = LD(&(ii[WS(is, 4)]), ivs, &(ii[0])); Chris@82: T1a = LD(&(ii[WS(is, 20)]), ivs, &(ii[0])); Chris@82: T1b = VADD(T19, T1a); Chris@82: T1A = VSUB(T19, T1a); Chris@82: } Chris@82: { Chris@82: V Tb, Tc, T1c, T1d; Chris@82: Tb = LD(&(ri[WS(is, 28)]), ivs, &(ri[0])); Chris@82: Tc = LD(&(ri[WS(is, 12)]), ivs, &(ri[0])); Chris@82: Td = VADD(Tb, Tc); Chris@82: T1D = VSUB(Tb, Tc); Chris@82: T1c = LD(&(ii[WS(is, 28)]), ivs, &(ii[0])); Chris@82: T1d = LD(&(ii[WS(is, 12)]), ivs, &(ii[0])); Chris@82: T1e = VADD(T1c, T1d); Chris@82: T1E = VSUB(T1c, T1d); Chris@82: } Chris@82: Te = VADD(Ta, Td); Chris@82: T1f = VADD(T1b, T1e); Chris@82: T50 = VSUB(Td, Ta); Chris@82: T4s = VSUB(T1b, T1e); Chris@82: { Chris@82: V T2U, T2V, T1C, T1F; Chris@82: T2U = VSUB(T1D, T1E); Chris@82: T2V = VADD(T1B, T1A); Chris@82: T2W = VMUL(LDK(KP707106781), VSUB(T2U, T2V)); Chris@82: T3u = VMUL(LDK(KP707106781), VADD(T2V, T2U)); Chris@82: T1C = VSUB(T1A, T1B); Chris@82: T1F = VADD(T1D, T1E); Chris@82: T1G = VMUL(LDK(KP707106781), VSUB(T1C, T1F)); Chris@82: T3U = VMUL(LDK(KP707106781), VADD(T1C, T1F)); Chris@82: } Chris@82: } Chris@82: { Chris@82: V Ti, T1L, T1j, T1J, Tl, T1I, T1m, T1M, T1K, T1N; Chris@82: { Chris@82: V Tg, Th, T1h, T1i; Chris@82: Tg = LD(&(ri[WS(is, 2)]), ivs, &(ri[0])); Chris@82: Th = LD(&(ri[WS(is, 18)]), ivs, &(ri[0])); Chris@82: Ti = VADD(Tg, Th); Chris@82: T1L = VSUB(Tg, Th); Chris@82: T1h = LD(&(ii[WS(is, 2)]), ivs, &(ii[0])); Chris@82: T1i = LD(&(ii[WS(is, 
18)]), ivs, &(ii[0])); Chris@82: T1j = VADD(T1h, T1i); Chris@82: T1J = VSUB(T1h, T1i); Chris@82: } Chris@82: { Chris@82: V Tj, Tk, T1k, T1l; Chris@82: Tj = LD(&(ri[WS(is, 10)]), ivs, &(ri[0])); Chris@82: Tk = LD(&(ri[WS(is, 26)]), ivs, &(ri[0])); Chris@82: Tl = VADD(Tj, Tk); Chris@82: T1I = VSUB(Tj, Tk); Chris@82: T1k = LD(&(ii[WS(is, 10)]), ivs, &(ii[0])); Chris@82: T1l = LD(&(ii[WS(is, 26)]), ivs, &(ii[0])); Chris@82: T1m = VADD(T1k, T1l); Chris@82: T1M = VSUB(T1k, T1l); Chris@82: } Chris@82: Tm = VADD(Ti, Tl); Chris@82: T1n = VADD(T1j, T1m); Chris@82: T1K = VADD(T1I, T1J); Chris@82: T1N = VSUB(T1L, T1M); Chris@82: T1O = VFNMS(LDK(KP923879532), T1N, VMUL(LDK(KP382683432), T1K)); Chris@82: T2Z = VFMA(LDK(KP923879532), T1K, VMUL(LDK(KP382683432), T1N)); Chris@82: { Chris@82: V T3w, T3x, T4u, T4v; Chris@82: T3w = VSUB(T1J, T1I); Chris@82: T3x = VADD(T1L, T1M); Chris@82: T3y = VFNMS(LDK(KP382683432), T3x, VMUL(LDK(KP923879532), T3w)); Chris@82: T3X = VFMA(LDK(KP382683432), T3w, VMUL(LDK(KP923879532), T3x)); Chris@82: T4u = VSUB(T1j, T1m); Chris@82: T4v = VSUB(Ti, Tl); Chris@82: T4w = VSUB(T4u, T4v); Chris@82: T53 = VADD(T4v, T4u); Chris@82: } Chris@82: } Chris@82: { Chris@82: V Tp, T1S, T1q, T1Q, Ts, T1P, T1t, T1T, T1R, T1U; Chris@82: { Chris@82: V Tn, To, T1o, T1p; Chris@82: Tn = LD(&(ri[WS(is, 30)]), ivs, &(ri[0])); Chris@82: To = LD(&(ri[WS(is, 14)]), ivs, &(ri[0])); Chris@82: Tp = VADD(Tn, To); Chris@82: T1S = VSUB(Tn, To); Chris@82: T1o = LD(&(ii[WS(is, 30)]), ivs, &(ii[0])); Chris@82: T1p = LD(&(ii[WS(is, 14)]), ivs, &(ii[0])); Chris@82: T1q = VADD(T1o, T1p); Chris@82: T1Q = VSUB(T1o, T1p); Chris@82: } Chris@82: { Chris@82: V Tq, Tr, T1r, T1s; Chris@82: Tq = LD(&(ri[WS(is, 6)]), ivs, &(ri[0])); Chris@82: Tr = LD(&(ri[WS(is, 22)]), ivs, &(ri[0])); Chris@82: Ts = VADD(Tq, Tr); Chris@82: T1P = VSUB(Tq, Tr); Chris@82: T1r = LD(&(ii[WS(is, 6)]), ivs, &(ii[0])); Chris@82: T1s = LD(&(ii[WS(is, 22)]), ivs, &(ii[0])); Chris@82: T1t = VADD(T1r, T1s); Chris@82: T1T = 
VSUB(T1r, T1s); Chris@82: } Chris@82: Tt = VADD(Tp, Ts); Chris@82: T1u = VADD(T1q, T1t); Chris@82: T1R = VADD(T1P, T1Q); Chris@82: T1U = VSUB(T1S, T1T); Chris@82: T1V = VFMA(LDK(KP382683432), T1R, VMUL(LDK(KP923879532), T1U)); Chris@82: T2Y = VFNMS(LDK(KP923879532), T1R, VMUL(LDK(KP382683432), T1U)); Chris@82: { Chris@82: V T3z, T3A, T4x, T4y; Chris@82: T3z = VSUB(T1Q, T1P); Chris@82: T3A = VADD(T1S, T1T); Chris@82: T3B = VFMA(LDK(KP923879532), T3z, VMUL(LDK(KP382683432), T3A)); Chris@82: T3W = VFNMS(LDK(KP382683432), T3z, VMUL(LDK(KP923879532), T3A)); Chris@82: T4x = VSUB(Tp, Ts); Chris@82: T4y = VSUB(T1q, T1t); Chris@82: T4z = VADD(T4x, T4y); Chris@82: T52 = VSUB(T4x, T4y); Chris@82: } Chris@82: } Chris@82: { Chris@82: V TN, T2p, T2J, T4S, TQ, T2G, T2s, T4T, TU, T2x, T2w, T4O, TX, T2z, T2C; Chris@82: V T4P; Chris@82: { Chris@82: V TL, TM, T2H, T2I; Chris@82: TL = LD(&(ri[WS(is, 31)]), ivs, &(ri[WS(is, 1)])); Chris@82: TM = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)])); Chris@82: TN = VADD(TL, TM); Chris@82: T2p = VSUB(TL, TM); Chris@82: T2H = LD(&(ii[WS(is, 31)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2I = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2J = VSUB(T2H, T2I); Chris@82: T4S = VADD(T2H, T2I); Chris@82: } Chris@82: { Chris@82: V TO, TP, T2q, T2r; Chris@82: TO = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)])); Chris@82: TP = LD(&(ri[WS(is, 23)]), ivs, &(ri[WS(is, 1)])); Chris@82: TQ = VADD(TO, TP); Chris@82: T2G = VSUB(TO, TP); Chris@82: T2q = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2r = LD(&(ii[WS(is, 23)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2s = VSUB(T2q, T2r); Chris@82: T4T = VADD(T2q, T2r); Chris@82: } Chris@82: { Chris@82: V TS, TT, T2u, T2v; Chris@82: TS = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)])); Chris@82: TT = LD(&(ri[WS(is, 19)]), ivs, &(ri[WS(is, 1)])); Chris@82: TU = VADD(TS, TT); Chris@82: T2x = VSUB(TS, TT); Chris@82: T2u = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2v = LD(&(ii[WS(is, 19)]), ivs, 
&(ii[WS(is, 1)])); Chris@82: T2w = VSUB(T2u, T2v); Chris@82: T4O = VADD(T2u, T2v); Chris@82: } Chris@82: { Chris@82: V TV, TW, T2A, T2B; Chris@82: TV = LD(&(ri[WS(is, 27)]), ivs, &(ri[WS(is, 1)])); Chris@82: TW = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)])); Chris@82: TX = VADD(TV, TW); Chris@82: T2z = VSUB(TV, TW); Chris@82: T2A = LD(&(ii[WS(is, 27)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2B = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2C = VSUB(T2A, T2B); Chris@82: T4P = VADD(T2A, T2B); Chris@82: } Chris@82: T2t = VSUB(T2p, T2s); Chris@82: T3L = VADD(T2p, T2s); Chris@82: T3O = VSUB(T2J, T2G); Chris@82: T2K = VADD(T2G, T2J); Chris@82: TR = VADD(TN, TQ); Chris@82: TY = VADD(TU, TX); Chris@82: T5F = VSUB(TR, TY); Chris@82: { Chris@82: V T4N, T4Q, T2y, T2D; Chris@82: T5G = VADD(T4S, T4T); Chris@82: T5H = VADD(T4O, T4P); Chris@82: T5I = VSUB(T5G, T5H); Chris@82: T4N = VSUB(TN, TQ); Chris@82: T4Q = VSUB(T4O, T4P); Chris@82: T4R = VSUB(T4N, T4Q); Chris@82: T5j = VADD(T4N, T4Q); Chris@82: T2y = VSUB(T2w, T2x); Chris@82: T2D = VADD(T2z, T2C); Chris@82: T2E = VMUL(LDK(KP707106781), VSUB(T2y, T2D)); Chris@82: T3P = VMUL(LDK(KP707106781), VADD(T2y, T2D)); Chris@82: { Chris@82: V T4U, T4V, T2L, T2M; Chris@82: T4U = VSUB(T4S, T4T); Chris@82: T4V = VSUB(TX, TU); Chris@82: T4W = VSUB(T4U, T4V); Chris@82: T5k = VADD(T4V, T4U); Chris@82: T2L = VSUB(T2z, T2C); Chris@82: T2M = VADD(T2x, T2w); Chris@82: T2N = VMUL(LDK(KP707106781), VSUB(T2L, T2M)); Chris@82: T3M = VMUL(LDK(KP707106781), VADD(T2M, T2L)); Chris@82: } Chris@82: } Chris@82: } Chris@82: { Chris@82: V Ty, T2f, T21, T4C, TB, T1Y, T2i, T4D, TF, T28, T2b, T4I, TI, T23, T26; Chris@82: V T4J; Chris@82: { Chris@82: V Tw, Tx, T1Z, T20; Chris@82: Tw = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)])); Chris@82: Tx = LD(&(ri[WS(is, 17)]), ivs, &(ri[WS(is, 1)])); Chris@82: Ty = VADD(Tw, Tx); Chris@82: T2f = VSUB(Tw, Tx); Chris@82: T1Z = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)])); Chris@82: T20 = LD(&(ii[WS(is, 17)]), ivs, 
&(ii[WS(is, 1)])); Chris@82: T21 = VSUB(T1Z, T20); Chris@82: T4C = VADD(T1Z, T20); Chris@82: } Chris@82: { Chris@82: V Tz, TA, T2g, T2h; Chris@82: Tz = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)])); Chris@82: TA = LD(&(ri[WS(is, 25)]), ivs, &(ri[WS(is, 1)])); Chris@82: TB = VADD(Tz, TA); Chris@82: T1Y = VSUB(Tz, TA); Chris@82: T2g = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2h = LD(&(ii[WS(is, 25)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2i = VSUB(T2g, T2h); Chris@82: T4D = VADD(T2g, T2h); Chris@82: } Chris@82: { Chris@82: V TD, TE, T29, T2a; Chris@82: TD = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)])); Chris@82: TE = LD(&(ri[WS(is, 21)]), ivs, &(ri[WS(is, 1)])); Chris@82: TF = VADD(TD, TE); Chris@82: T28 = VSUB(TD, TE); Chris@82: T29 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2a = LD(&(ii[WS(is, 21)]), ivs, &(ii[WS(is, 1)])); Chris@82: T2b = VSUB(T29, T2a); Chris@82: T4I = VADD(T29, T2a); Chris@82: } Chris@82: { Chris@82: V TG, TH, T24, T25; Chris@82: TG = LD(&(ri[WS(is, 29)]), ivs, &(ri[WS(is, 1)])); Chris@82: TH = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)])); Chris@82: TI = VADD(TG, TH); Chris@82: T23 = VSUB(TG, TH); Chris@82: T24 = LD(&(ii[WS(is, 29)]), ivs, &(ii[WS(is, 1)])); Chris@82: T25 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)])); Chris@82: T26 = VSUB(T24, T25); Chris@82: T4J = VADD(T24, T25); Chris@82: } Chris@82: T22 = VADD(T1Y, T21); Chris@82: T3E = VADD(T2f, T2i); Chris@82: T3H = VSUB(T21, T1Y); Chris@82: T2j = VSUB(T2f, T2i); Chris@82: TC = VADD(Ty, TB); Chris@82: TJ = VADD(TF, TI); Chris@82: T5A = VSUB(TC, TJ); Chris@82: { Chris@82: V T4E, T4F, T27, T2c; Chris@82: T5B = VADD(T4C, T4D); Chris@82: T5C = VADD(T4I, T4J); Chris@82: T5D = VSUB(T5B, T5C); Chris@82: T4E = VSUB(T4C, T4D); Chris@82: T4F = VSUB(TI, TF); Chris@82: T4G = VSUB(T4E, T4F); Chris@82: T5g = VADD(T4F, T4E); Chris@82: T27 = VSUB(T23, T26); Chris@82: T2c = VADD(T28, T2b); Chris@82: T2d = VMUL(LDK(KP707106781), VSUB(T27, T2c)); Chris@82: T3F = 
VMUL(LDK(KP707106781), VADD(T2c, T27)); Chris@82: { Chris@82: V T4H, T4K, T2k, T2l; Chris@82: T4H = VSUB(Ty, TB); Chris@82: T4K = VSUB(T4I, T4J); Chris@82: T4L = VSUB(T4H, T4K); Chris@82: T5h = VADD(T4H, T4K); Chris@82: T2k = VSUB(T2b, T28); Chris@82: T2l = VADD(T23, T26); Chris@82: T2m = VMUL(LDK(KP707106781), VSUB(T2k, T2l)); Chris@82: T3I = VMUL(LDK(KP707106781), VADD(T2k, T2l)); Chris@82: } Chris@82: } Chris@82: } Chris@82: { Chris@82: V T61, T62, T63, T64, T65, T66, T67, T68, T69, T6a, T6b, T6c, T6d, T6e, T6f; Chris@82: V T6g, T6h, T6i, T6j, T6k, T6l, T6m, T6n, T6o, T6p, T6q, T6r, T6s, T6t, T6u; Chris@82: V T6v, T6w; Chris@82: { Chris@82: V T4B, T57, T5a, T5c, T4Y, T56, T55, T5b; Chris@82: { Chris@82: V T4t, T4A, T58, T59; Chris@82: T4t = VSUB(T4r, T4s); Chris@82: T4A = VMUL(LDK(KP707106781), VSUB(T4w, T4z)); Chris@82: T4B = VADD(T4t, T4A); Chris@82: T57 = VSUB(T4t, T4A); Chris@82: T58 = VFNMS(LDK(KP923879532), T4L, VMUL(LDK(KP382683432), T4G)); Chris@82: T59 = VFMA(LDK(KP382683432), T4W, VMUL(LDK(KP923879532), T4R)); Chris@82: T5a = VSUB(T58, T59); Chris@82: T5c = VADD(T58, T59); Chris@82: } Chris@82: { Chris@82: V T4M, T4X, T51, T54; Chris@82: T4M = VFMA(LDK(KP923879532), T4G, VMUL(LDK(KP382683432), T4L)); Chris@82: T4X = VFNMS(LDK(KP923879532), T4W, VMUL(LDK(KP382683432), T4R)); Chris@82: T4Y = VADD(T4M, T4X); Chris@82: T56 = VSUB(T4X, T4M); Chris@82: T51 = VSUB(T4Z, T50); Chris@82: T54 = VMUL(LDK(KP707106781), VSUB(T52, T53)); Chris@82: T55 = VSUB(T51, T54); Chris@82: T5b = VADD(T51, T54); Chris@82: } Chris@82: T61 = VSUB(T4B, T4Y); Chris@82: STM4(&(ro[22]), T61, ovs, &(ro[0])); Chris@82: T62 = VSUB(T5b, T5c); Chris@82: STM4(&(io[22]), T62, ovs, &(io[0])); Chris@82: T63 = VADD(T4B, T4Y); Chris@82: STM4(&(ro[6]), T63, ovs, &(ro[0])); Chris@82: T64 = VADD(T5b, T5c); Chris@82: STM4(&(io[6]), T64, ovs, &(io[0])); Chris@82: T65 = VSUB(T55, T56); Chris@82: STM4(&(io[30]), T65, ovs, &(io[0])); Chris@82: T66 = VSUB(T57, T5a); Chris@82: STM4(&(ro[30]), T66, ovs, 
&(ro[0])); Chris@82: T67 = VADD(T55, T56); Chris@82: STM4(&(io[14]), T67, ovs, &(io[0])); Chris@82: T68 = VADD(T57, T5a); Chris@82: STM4(&(ro[14]), T68, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T5f, T5r, T5u, T5w, T5m, T5q, T5p, T5v; Chris@82: { Chris@82: V T5d, T5e, T5s, T5t; Chris@82: T5d = VADD(T4r, T4s); Chris@82: T5e = VMUL(LDK(KP707106781), VADD(T53, T52)); Chris@82: T5f = VADD(T5d, T5e); Chris@82: T5r = VSUB(T5d, T5e); Chris@82: T5s = VFNMS(LDK(KP382683432), T5h, VMUL(LDK(KP923879532), T5g)); Chris@82: T5t = VFMA(LDK(KP923879532), T5k, VMUL(LDK(KP382683432), T5j)); Chris@82: T5u = VSUB(T5s, T5t); Chris@82: T5w = VADD(T5s, T5t); Chris@82: } Chris@82: { Chris@82: V T5i, T5l, T5n, T5o; Chris@82: T5i = VFMA(LDK(KP382683432), T5g, VMUL(LDK(KP923879532), T5h)); Chris@82: T5l = VFNMS(LDK(KP382683432), T5k, VMUL(LDK(KP923879532), T5j)); Chris@82: T5m = VADD(T5i, T5l); Chris@82: T5q = VSUB(T5l, T5i); Chris@82: T5n = VADD(T50, T4Z); Chris@82: T5o = VMUL(LDK(KP707106781), VADD(T4w, T4z)); Chris@82: T5p = VSUB(T5n, T5o); Chris@82: T5v = VADD(T5n, T5o); Chris@82: } Chris@82: T69 = VSUB(T5f, T5m); Chris@82: STM4(&(ro[18]), T69, ovs, &(ro[0])); Chris@82: T6a = VSUB(T5v, T5w); Chris@82: STM4(&(io[18]), T6a, ovs, &(io[0])); Chris@82: T6b = VADD(T5f, T5m); Chris@82: STM4(&(ro[2]), T6b, ovs, &(ro[0])); Chris@82: T6c = VADD(T5v, T5w); Chris@82: STM4(&(io[2]), T6c, ovs, &(io[0])); Chris@82: T6d = VSUB(T5p, T5q); Chris@82: STM4(&(io[26]), T6d, ovs, &(io[0])); Chris@82: T6e = VSUB(T5r, T5u); Chris@82: STM4(&(ro[26]), T6e, ovs, &(ro[0])); Chris@82: T6f = VADD(T5p, T5q); Chris@82: STM4(&(io[10]), T6f, ovs, &(io[0])); Chris@82: T6g = VADD(T5r, T5u); Chris@82: STM4(&(ro[10]), T6g, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T5z, T5P, T5S, T5U, T5K, T5O, T5N, T5T; Chris@82: { Chris@82: V T5x, T5y, T5Q, T5R; Chris@82: T5x = VSUB(T7, Te); Chris@82: T5y = VSUB(T1n, T1u); Chris@82: T5z = VADD(T5x, T5y); Chris@82: T5P = VSUB(T5x, T5y); Chris@82: T5Q = VSUB(T5D, T5A); 
Chris@82: T5R = VADD(T5F, T5I); Chris@82: T5S = VMUL(LDK(KP707106781), VSUB(T5Q, T5R)); Chris@82: T5U = VMUL(LDK(KP707106781), VADD(T5Q, T5R)); Chris@82: } Chris@82: { Chris@82: V T5E, T5J, T5L, T5M; Chris@82: T5E = VADD(T5A, T5D); Chris@82: T5J = VSUB(T5F, T5I); Chris@82: T5K = VMUL(LDK(KP707106781), VADD(T5E, T5J)); Chris@82: T5O = VMUL(LDK(KP707106781), VSUB(T5J, T5E)); Chris@82: T5L = VSUB(T18, T1f); Chris@82: T5M = VSUB(Tt, Tm); Chris@82: T5N = VSUB(T5L, T5M); Chris@82: T5T = VADD(T5M, T5L); Chris@82: } Chris@82: T6h = VSUB(T5z, T5K); Chris@82: STM4(&(ro[20]), T6h, ovs, &(ro[0])); Chris@82: T6i = VSUB(T5T, T5U); Chris@82: STM4(&(io[20]), T6i, ovs, &(io[0])); Chris@82: T6j = VADD(T5z, T5K); Chris@82: STM4(&(ro[4]), T6j, ovs, &(ro[0])); Chris@82: T6k = VADD(T5T, T5U); Chris@82: STM4(&(io[4]), T6k, ovs, &(io[0])); Chris@82: T6l = VSUB(T5N, T5O); Chris@82: STM4(&(io[28]), T6l, ovs, &(io[0])); Chris@82: T6m = VSUB(T5P, T5S); Chris@82: STM4(&(ro[28]), T6m, ovs, &(ro[0])); Chris@82: T6n = VADD(T5N, T5O); Chris@82: STM4(&(io[12]), T6n, ovs, &(io[0])); Chris@82: T6o = VADD(T5P, T5S); Chris@82: STM4(&(ro[12]), T6o, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V Tv, T5V, T5Y, T60, T10, T11, T1w, T5Z; Chris@82: { Chris@82: V Tf, Tu, T5W, T5X; Chris@82: Tf = VADD(T7, Te); Chris@82: Tu = VADD(Tm, Tt); Chris@82: Tv = VADD(Tf, Tu); Chris@82: T5V = VSUB(Tf, Tu); Chris@82: T5W = VADD(T5B, T5C); Chris@82: T5X = VADD(T5G, T5H); Chris@82: T5Y = VSUB(T5W, T5X); Chris@82: T60 = VADD(T5W, T5X); Chris@82: } Chris@82: { Chris@82: V TK, TZ, T1g, T1v; Chris@82: TK = VADD(TC, TJ); Chris@82: TZ = VADD(TR, TY); Chris@82: T10 = VADD(TK, TZ); Chris@82: T11 = VSUB(TZ, TK); Chris@82: T1g = VADD(T18, T1f); Chris@82: T1v = VADD(T1n, T1u); Chris@82: T1w = VSUB(T1g, T1v); Chris@82: T5Z = VADD(T1g, T1v); Chris@82: } Chris@82: T6p = VSUB(Tv, T10); Chris@82: STM4(&(ro[16]), T6p, ovs, &(ro[0])); Chris@82: T6q = VSUB(T5Z, T60); Chris@82: STM4(&(io[16]), T6q, ovs, &(io[0])); Chris@82: T6r = VADD(Tv, 
T10); Chris@82: STM4(&(ro[0]), T6r, ovs, &(ro[0])); Chris@82: T6s = VADD(T5Z, T60); Chris@82: STM4(&(io[0]), T6s, ovs, &(io[0])); Chris@82: T6t = VADD(T11, T1w); Chris@82: STM4(&(io[8]), T6t, ovs, &(io[0])); Chris@82: T6u = VADD(T5V, T5Y); Chris@82: STM4(&(ro[8]), T6u, ovs, &(ro[0])); Chris@82: T6v = VSUB(T1w, T11); Chris@82: STM4(&(io[24]), T6v, ovs, &(io[0])); Chris@82: T6w = VSUB(T5V, T5Y); Chris@82: STM4(&(ro[24]), T6w, ovs, &(ro[0])); Chris@82: } Chris@82: { Chris@82: V T6x, T6y, T6z, T6A, T6B, T6C, T6D, T6E; Chris@82: { Chris@82: V T1X, T33, T31, T37, T2o, T34, T2P, T35; Chris@82: { Chris@82: V T1H, T1W, T2X, T30; Chris@82: T1H = VSUB(T1z, T1G); Chris@82: T1W = VSUB(T1O, T1V); Chris@82: T1X = VADD(T1H, T1W); Chris@82: T33 = VSUB(T1H, T1W); Chris@82: T2X = VSUB(T2T, T2W); Chris@82: T30 = VSUB(T2Y, T2Z); Chris@82: T31 = VSUB(T2X, T30); Chris@82: T37 = VADD(T2X, T30); Chris@82: } Chris@82: { Chris@82: V T2e, T2n, T2F, T2O; Chris@82: T2e = VSUB(T22, T2d); Chris@82: T2n = VSUB(T2j, T2m); Chris@82: T2o = VFMA(LDK(KP980785280), T2e, VMUL(LDK(KP195090322), T2n)); Chris@82: T34 = VFNMS(LDK(KP980785280), T2n, VMUL(LDK(KP195090322), T2e)); Chris@82: T2F = VSUB(T2t, T2E); Chris@82: T2O = VSUB(T2K, T2N); Chris@82: T2P = VFNMS(LDK(KP980785280), T2O, VMUL(LDK(KP195090322), T2F)); Chris@82: T35 = VFMA(LDK(KP195090322), T2O, VMUL(LDK(KP980785280), T2F)); Chris@82: } Chris@82: { Chris@82: V T2Q, T38, T32, T36; Chris@82: T2Q = VADD(T2o, T2P); Chris@82: T6x = VSUB(T1X, T2Q); Chris@82: STM4(&(ro[23]), T6x, ovs, &(ro[1])); Chris@82: T6y = VADD(T1X, T2Q); Chris@82: STM4(&(ro[7]), T6y, ovs, &(ro[1])); Chris@82: T38 = VADD(T34, T35); Chris@82: T6z = VSUB(T37, T38); Chris@82: STM4(&(io[23]), T6z, ovs, &(io[1])); Chris@82: T6A = VADD(T37, T38); Chris@82: STM4(&(io[7]), T6A, ovs, &(io[1])); Chris@82: T32 = VSUB(T2P, T2o); Chris@82: T6B = VSUB(T31, T32); Chris@82: STM4(&(io[31]), T6B, ovs, &(io[1])); Chris@82: T6C = VADD(T31, T32); Chris@82: STM4(&(io[15]), T6C, ovs, &(io[1])); Chris@82: 
T36 = VSUB(T34, T35); Chris@82: T6D = VSUB(T33, T36); Chris@82: STM4(&(ro[31]), T6D, ovs, &(ro[1])); Chris@82: T6E = VADD(T33, T36); Chris@82: STM4(&(ro[15]), T6E, ovs, &(ro[1])); Chris@82: } Chris@82: } Chris@82: { Chris@82: V T3D, T41, T3Z, T45, T3K, T42, T3R, T43; Chris@82: { Chris@82: V T3v, T3C, T3V, T3Y; Chris@82: T3v = VSUB(T3t, T3u); Chris@82: T3C = VSUB(T3y, T3B); Chris@82: T3D = VADD(T3v, T3C); Chris@82: T41 = VSUB(T3v, T3C); Chris@82: T3V = VSUB(T3T, T3U); Chris@82: T3Y = VSUB(T3W, T3X); Chris@82: T3Z = VSUB(T3V, T3Y); Chris@82: T45 = VADD(T3V, T3Y); Chris@82: } Chris@82: { Chris@82: V T3G, T3J, T3N, T3Q; Chris@82: T3G = VSUB(T3E, T3F); Chris@82: T3J = VSUB(T3H, T3I); Chris@82: T3K = VFMA(LDK(KP555570233), T3G, VMUL(LDK(KP831469612), T3J)); Chris@82: T42 = VFNMS(LDK(KP831469612), T3G, VMUL(LDK(KP555570233), T3J)); Chris@82: T3N = VSUB(T3L, T3M); Chris@82: T3Q = VSUB(T3O, T3P); Chris@82: T3R = VFNMS(LDK(KP831469612), T3Q, VMUL(LDK(KP555570233), T3N)); Chris@82: T43 = VFMA(LDK(KP831469612), T3N, VMUL(LDK(KP555570233), T3Q)); Chris@82: } Chris@82: { Chris@82: V T3S, T6F, T6G, T46, T6H, T6I; Chris@82: T3S = VADD(T3K, T3R); Chris@82: T6F = VSUB(T3D, T3S); Chris@82: STM4(&(ro[21]), T6F, ovs, &(ro[1])); Chris@82: STN4(&(ro[20]), T6h, T6F, T61, T6x, ovs); Chris@82: T6G = VADD(T3D, T3S); Chris@82: STM4(&(ro[5]), T6G, ovs, &(ro[1])); Chris@82: STN4(&(ro[4]), T6j, T6G, T63, T6y, ovs); Chris@82: T46 = VADD(T42, T43); Chris@82: T6H = VSUB(T45, T46); Chris@82: STM4(&(io[21]), T6H, ovs, &(io[1])); Chris@82: STN4(&(io[20]), T6i, T6H, T62, T6z, ovs); Chris@82: T6I = VADD(T45, T46); Chris@82: STM4(&(io[5]), T6I, ovs, &(io[1])); Chris@82: STN4(&(io[4]), T6k, T6I, T64, T6A, ovs); Chris@82: } Chris@82: { Chris@82: V T40, T6J, T6K, T44, T6L, T6M; Chris@82: T40 = VSUB(T3R, T3K); Chris@82: T6J = VSUB(T3Z, T40); Chris@82: STM4(&(io[29]), T6J, ovs, &(io[1])); Chris@82: STN4(&(io[28]), T6l, T6J, T65, T6B, ovs); Chris@82: T6K = VADD(T3Z, T40); Chris@82: STM4(&(io[13]), T6K, ovs, 
&(io[1])); Chris@82: STN4(&(io[12]), T6n, T6K, T67, T6C, ovs); Chris@82: T44 = VSUB(T42, T43); Chris@82: T6L = VSUB(T41, T44); Chris@82: STM4(&(ro[29]), T6L, ovs, &(ro[1])); Chris@82: STN4(&(ro[28]), T6m, T6L, T66, T6D, ovs); Chris@82: T6M = VADD(T41, T44); Chris@82: STM4(&(ro[13]), T6M, ovs, &(ro[1])); Chris@82: STN4(&(ro[12]), T6o, T6M, T68, T6E, ovs); Chris@82: } Chris@82: } Chris@82: } Chris@82: { Chris@82: V T6N, T6O, T6P, T6Q, T6R, T6S, T6T, T6U; Chris@82: { Chris@82: V T49, T4l, T4j, T4p, T4c, T4m, T4f, T4n; Chris@82: { Chris@82: V T47, T48, T4h, T4i; Chris@82: T47 = VADD(T3t, T3u); Chris@82: T48 = VADD(T3X, T3W); Chris@82: T49 = VADD(T47, T48); Chris@82: T4l = VSUB(T47, T48); Chris@82: T4h = VADD(T3T, T3U); Chris@82: T4i = VADD(T3y, T3B); Chris@82: T4j = VSUB(T4h, T4i); Chris@82: T4p = VADD(T4h, T4i); Chris@82: } Chris@82: { Chris@82: V T4a, T4b, T4d, T4e; Chris@82: T4a = VADD(T3E, T3F); Chris@82: T4b = VADD(T3H, T3I); Chris@82: T4c = VFMA(LDK(KP980785280), T4a, VMUL(LDK(KP195090322), T4b)); Chris@82: T4m = VFNMS(LDK(KP195090322), T4a, VMUL(LDK(KP980785280), T4b)); Chris@82: T4d = VADD(T3L, T3M); Chris@82: T4e = VADD(T3O, T3P); Chris@82: T4f = VFNMS(LDK(KP195090322), T4e, VMUL(LDK(KP980785280), T4d)); Chris@82: T4n = VFMA(LDK(KP195090322), T4d, VMUL(LDK(KP980785280), T4e)); Chris@82: } Chris@82: { Chris@82: V T4g, T4q, T4k, T4o; Chris@82: T4g = VADD(T4c, T4f); Chris@82: T6N = VSUB(T49, T4g); Chris@82: STM4(&(ro[17]), T6N, ovs, &(ro[1])); Chris@82: T6O = VADD(T49, T4g); Chris@82: STM4(&(ro[1]), T6O, ovs, &(ro[1])); Chris@82: T4q = VADD(T4m, T4n); Chris@82: T6P = VSUB(T4p, T4q); Chris@82: STM4(&(io[17]), T6P, ovs, &(io[1])); Chris@82: T6Q = VADD(T4p, T4q); Chris@82: STM4(&(io[1]), T6Q, ovs, &(io[1])); Chris@82: T4k = VSUB(T4f, T4c); Chris@82: T6R = VSUB(T4j, T4k); Chris@82: STM4(&(io[25]), T6R, ovs, &(io[1])); Chris@82: T6S = VADD(T4j, T4k); Chris@82: STM4(&(io[9]), T6S, ovs, &(io[1])); Chris@82: T4o = VSUB(T4m, T4n); Chris@82: T6T = VSUB(T4l, T4o); Chris@82: 
STM4(&(ro[25]), T6T, ovs, &(ro[1])); Chris@82: T6U = VADD(T4l, T4o); Chris@82: STM4(&(ro[9]), T6U, ovs, &(ro[1])); Chris@82: } Chris@82: } Chris@82: { Chris@82: V T3b, T3n, T3l, T3r, T3e, T3o, T3h, T3p; Chris@82: { Chris@82: V T39, T3a, T3j, T3k; Chris@82: T39 = VADD(T1z, T1G); Chris@82: T3a = VADD(T2Z, T2Y); Chris@82: T3b = VADD(T39, T3a); Chris@82: T3n = VSUB(T39, T3a); Chris@82: T3j = VADD(T2T, T2W); Chris@82: T3k = VADD(T1O, T1V); Chris@82: T3l = VSUB(T3j, T3k); Chris@82: T3r = VADD(T3j, T3k); Chris@82: } Chris@82: { Chris@82: V T3c, T3d, T3f, T3g; Chris@82: T3c = VADD(T22, T2d); Chris@82: T3d = VADD(T2j, T2m); Chris@82: T3e = VFMA(LDK(KP555570233), T3c, VMUL(LDK(KP831469612), T3d)); Chris@82: T3o = VFNMS(LDK(KP555570233), T3d, VMUL(LDK(KP831469612), T3c)); Chris@82: T3f = VADD(T2t, T2E); Chris@82: T3g = VADD(T2K, T2N); Chris@82: T3h = VFNMS(LDK(KP555570233), T3g, VMUL(LDK(KP831469612), T3f)); Chris@82: T3p = VFMA(LDK(KP831469612), T3g, VMUL(LDK(KP555570233), T3f)); Chris@82: } Chris@82: { Chris@82: V T3i, T6V, T6W, T3s, T6X, T6Y; Chris@82: T3i = VADD(T3e, T3h); Chris@82: T6V = VSUB(T3b, T3i); Chris@82: STM4(&(ro[19]), T6V, ovs, &(ro[1])); Chris@82: STN4(&(ro[16]), T6p, T6N, T69, T6V, ovs); Chris@82: T6W = VADD(T3b, T3i); Chris@82: STM4(&(ro[3]), T6W, ovs, &(ro[1])); Chris@82: STN4(&(ro[0]), T6r, T6O, T6b, T6W, ovs); Chris@82: T3s = VADD(T3o, T3p); Chris@82: T6X = VSUB(T3r, T3s); Chris@82: STM4(&(io[19]), T6X, ovs, &(io[1])); Chris@82: STN4(&(io[16]), T6q, T6P, T6a, T6X, ovs); Chris@82: T6Y = VADD(T3r, T3s); Chris@82: STM4(&(io[3]), T6Y, ovs, &(io[1])); Chris@82: STN4(&(io[0]), T6s, T6Q, T6c, T6Y, ovs); Chris@82: } Chris@82: { Chris@82: V T3m, T6Z, T70, T3q, T71, T72; Chris@82: T3m = VSUB(T3h, T3e); Chris@82: T6Z = VSUB(T3l, T3m); Chris@82: STM4(&(io[27]), T6Z, ovs, &(io[1])); Chris@82: STN4(&(io[24]), T6v, T6R, T6d, T6Z, ovs); Chris@82: T70 = VADD(T3l, T3m); Chris@82: STM4(&(io[11]), T70, ovs, &(io[1])); Chris@82: STN4(&(io[8]), T6t, T6S, T6f, T70, ovs); 
Chris@82: T3q = VSUB(T3o, T3p); Chris@82: T71 = VSUB(T3n, T3q); Chris@82: STM4(&(ro[27]), T71, ovs, &(ro[1])); Chris@82: STN4(&(ro[24]), T6w, T6T, T6e, T71, ovs); Chris@82: T72 = VADD(T3n, T3q); Chris@82: STM4(&(ro[11]), T72, ovs, &(ro[1])); Chris@82: STN4(&(ro[8]), T6u, T6U, T6g, T72, ovs); Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: } Chris@82: VLEAVE(); Chris@82: } Chris@82: Chris@82: /* Codelet descriptor: transform size 32, registered under the name "n2sv_32". The {340, 52, 32, 0} entry is the generator's operation-count signature (presumably additions/multiplications/fused-ops/other, matching the genfft cost comment emitted with this variant — TODO confirm against the generator header, which lies outside this chunk). */ static const kdft_desc desc = { 32, XSIMD_STRING("n2sv_32"), {340, 52, 32, 0}, &GENUS, 0, 1, 0, 0 }; Chris@82: Chris@82: /* Registration entry point: hands the n2sv_32 SIMD DFT kernel and its descriptor to the FFTW planner via X(kdft_register). */ void XSIMD(codelet_n2sv_32) (planner *p) { Chris@82: X(kdft_register) (p, n2sv_32, &desc); Chris@82: } Chris@82: Chris@82: #endif