/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sat Jul 30 16:41:24 EDT 2016 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 32 -name n2sv_32 -with-ostride 1 -include n2s.h -store-multiple 4 */

/*
 * This function contains 372 FP additions, 136 FP multiplications,
 * (or, 236 additions, 0 multiplications, 136 fused multiply/add),
 * 194 stack variables, 7 constants, and 144 memory accesses
 */
#include "n2s.h"

static void n2sv_32(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP980785280, +0.980785280403230449126182236134239036973933731);
     DVK(KP198912367, +0.198912367379658006911597622644676228597850501);
     DVK(KP831469612, +0.831469612302545237078788377617905756738560812);
     DVK(KP668178637, +0.668178637919298919997757686523080761552472251);
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     DVK(KP414213562, +0.414213562373095048801688724209698078569671875);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(128, is), MAKE_VOLATILE_STRIDE(128, os)) {
               V T61, T62, T63, T64, T65, T66, T67, T68, T69, T6a, T6b, T6c, T6d, T6e, T6f;
               V T6g, T6h, T6i, T6j, T6k, T6l, T6m, T6n, T6o, T6p, T6q, T6r, T6s, T6t, T6u;
               V T6v, T6w, T3g, T3f, T6x, T6y, T6z, T6A, T6B, T6C, T6D, T6E, T4p, T49, T4l;
               V T4j, T6F, T6G, T6H, T6I, T6J, T6K, T6L, T6M, T3n, T3b, T3r, T3l, T3o, T3e;
               V T4q, T4o, T4k, T4g, T3h, T3p;
               {
                    V T2T, T3T, T4r, T7, T3t, T1z, T18, T4Z, Te, T50, T1f, T4s, T1G, T3U, T2W;
                    V T3u, Tm, T1n, T3X, T3y, T2Z, T1O, T53, T4w, Tt, T1u, T3W, T3B, T2Y, T1V;
                    V T52, T4z, T3O, T2t, T3L, T2K, TZ, T5F, T4R, T5k, T5j, T4W, T5I, T5X, T2E;
                    V T3M, T2N, T3P, T3H, T22, T3E, T2j, T4G, T5h, TK, T5A, T5D, T5W, T2d, T3F;
                    V T4L, T5g, T3I, T2m;
                    {
                         V T1L, T1j, T1k, T1l, T4v, T1K, T3w;
                         {
                              V T1, T2,
T12, T13, T4, T5, T15, T16; Chris@42: T1 = LD(&(ri[0]), ivs, &(ri[0])); Chris@42: T2 = LD(&(ri[WS(is, 16)]), ivs, &(ri[0])); Chris@42: T12 = LD(&(ii[0]), ivs, &(ii[0])); Chris@42: T13 = LD(&(ii[WS(is, 16)]), ivs, &(ii[0])); Chris@42: T4 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0])); Chris@42: T5 = LD(&(ri[WS(is, 24)]), ivs, &(ri[0])); Chris@42: T15 = LD(&(ii[WS(is, 8)]), ivs, &(ii[0])); Chris@42: T16 = LD(&(ii[WS(is, 24)]), ivs, &(ii[0])); Chris@42: { Chris@42: V Tb, T1A, Ta, T1B, T1b, Tc, T1c, T1d; Chris@42: { Chris@42: V T8, T1x, T3, T2R, T14, T2S, T6, T1y, T17, T9, T19, T1a; Chris@42: T8 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0])); Chris@42: T1x = VSUB(T1, T2); Chris@42: T3 = VADD(T1, T2); Chris@42: T2R = VSUB(T12, T13); Chris@42: T14 = VADD(T12, T13); Chris@42: T2S = VSUB(T4, T5); Chris@42: T6 = VADD(T4, T5); Chris@42: T1y = VSUB(T15, T16); Chris@42: T17 = VADD(T15, T16); Chris@42: T9 = LD(&(ri[WS(is, 20)]), ivs, &(ri[0])); Chris@42: T19 = LD(&(ii[WS(is, 4)]), ivs, &(ii[0])); Chris@42: T1a = LD(&(ii[WS(is, 20)]), ivs, &(ii[0])); Chris@42: Tb = LD(&(ri[WS(is, 28)]), ivs, &(ri[0])); Chris@42: T2T = VSUB(T2R, T2S); Chris@42: T3T = VADD(T2S, T2R); Chris@42: T4r = VSUB(T3, T6); Chris@42: T7 = VADD(T3, T6); Chris@42: T3t = VSUB(T1x, T1y); Chris@42: T1z = VADD(T1x, T1y); Chris@42: T18 = VADD(T14, T17); Chris@42: T4Z = VSUB(T14, T17); Chris@42: T1A = VSUB(T8, T9); Chris@42: Ta = VADD(T8, T9); Chris@42: T1B = VSUB(T19, T1a); Chris@42: T1b = VADD(T19, T1a); Chris@42: Tc = LD(&(ri[WS(is, 12)]), ivs, &(ri[0])); Chris@42: T1c = LD(&(ii[WS(is, 28)]), ivs, &(ii[0])); Chris@42: T1d = LD(&(ii[WS(is, 12)]), ivs, &(ii[0])); Chris@42: } Chris@42: { Chris@42: V Ti, T1I, T1J, Tl; Chris@42: { Chris@42: V T1h, T1C, T2U, T1D, Td, T1E, T1e, T1i, Tg, Th; Chris@42: Tg = LD(&(ri[WS(is, 2)]), ivs, &(ri[0])); Chris@42: Th = LD(&(ri[WS(is, 18)]), ivs, &(ri[0])); Chris@42: T1h = LD(&(ii[WS(is, 2)]), ivs, &(ii[0])); Chris@42: T1C = VADD(T1A, T1B); Chris@42: T2U = VSUB(T1B, T1A); Chris@42: T1D = VSUB(Tb, Tc); Chris@42: Td = VADD(Tb, Tc); Chris@42: T1E = VSUB(T1c, T1d); Chris@42: T1e = VADD(T1c, T1d); Chris@42: T1L = VSUB(Tg, Th); Chris@42: Ti = VADD(Tg, Th); Chris@42: T1i = LD(&(ii[WS(is, 18)]), ivs, &(ii[0])); Chris@42: { Chris@42: V T2V, T1F, Tj, Tk; Chris@42: Tj = LD(&(ri[WS(is, 10)]), ivs, &(ri[0])); Chris@42: Tk = LD(&(ri[WS(is, 26)]), ivs, &(ri[0])); Chris@42: Te = VADD(Ta, Td); Chris@42: T50 = VSUB(Td, Ta); Chris@42: T2V = VADD(T1D, T1E); Chris@42: T1F = VSUB(T1D, T1E); Chris@42: T1f = VADD(T1b, T1e); Chris@42: T4s = VSUB(T1b, T1e); Chris@42: T1j = VADD(T1h, T1i); Chris@42: T1I = VSUB(T1h, T1i); Chris@42: T1J = VSUB(Tj, Tk); Chris@42: Tl = VADD(Tj, Tk); Chris@42: T1G = VADD(T1C, T1F); Chris@42: T3U = VSUB(T1F, T1C); Chris@42: T2W = VADD(T2U, T2V); Chris@42: T3u = VSUB(T2U, T2V); Chris@42: T1k = LD(&(ii[WS(is, 10)]), ivs, &(ii[0])); Chris@42: T1l = LD(&(ii[WS(is, 26)]), ivs, &(ii[0])); Chris@42: } Chris@42: } Chris@42: T4v = VSUB(Ti, Tl); Chris@42: Tm = VADD(Ti, Tl); Chris@42: T1K = VSUB(T1I, T1J); Chris@42: T3w = VADD(T1J, T1I); Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T1r, T1S, T1q, T1s, T4x, T1R, T3z; Chris@42: { Chris@42: V Tp, T1P, T1Q, Ts; Chris@42: { Chris@42: V Tn, To, T1o, T1M, T1m, T1p; Chris@42: Tn = LD(&(ri[WS(is, 30)]), ivs, &(ri[0])); Chris@42: To = LD(&(ri[WS(is, 14)]), ivs, &(ri[0])); Chris@42: T1o = LD(&(ii[WS(is, 30)]), ivs, &(ii[0])); Chris@42: T1M = VSUB(T1k, T1l); Chris@42: T1m = VADD(T1k, T1l); Chris@42: T1p = LD(&(ii[WS(is, 14)]), ivs, &(ii[0])); Chris@42: { Chris@42: V Tq, Tr, T3x, 
T1N, T4u; Chris@42: Tq = LD(&(ri[WS(is, 6)]), ivs, &(ri[0])); Chris@42: Tr = LD(&(ri[WS(is, 22)]), ivs, &(ri[0])); Chris@42: T1r = LD(&(ii[WS(is, 6)]), ivs, &(ii[0])); Chris@42: T1S = VSUB(Tn, To); Chris@42: Tp = VADD(Tn, To); Chris@42: T3x = VSUB(T1L, T1M); Chris@42: T1N = VADD(T1L, T1M); Chris@42: T4u = VSUB(T1j, T1m); Chris@42: T1n = VADD(T1j, T1m); Chris@42: T1P = VSUB(T1o, T1p); Chris@42: T1q = VADD(T1o, T1p); Chris@42: T1Q = VSUB(Tq, Tr); Chris@42: Ts = VADD(Tq, Tr); Chris@42: T3X = VFNMS(LDK(KP414213562), T3w, T3x); Chris@42: T3y = VFMA(LDK(KP414213562), T3x, T3w); Chris@42: T2Z = VFMA(LDK(KP414213562), T1K, T1N); Chris@42: T1O = VFNMS(LDK(KP414213562), T1N, T1K); Chris@42: T53 = VADD(T4v, T4u); Chris@42: T4w = VSUB(T4u, T4v); Chris@42: T1s = LD(&(ii[WS(is, 22)]), ivs, &(ii[0])); Chris@42: } Chris@42: } Chris@42: T4x = VSUB(Tp, Ts); Chris@42: Tt = VADD(Tp, Ts); Chris@42: T1R = VSUB(T1P, T1Q); Chris@42: T3z = VADD(T1Q, T1P); Chris@42: } Chris@42: { Chris@42: V T4S, T5G, T2y, T2L, T4V, T5H, T2D, T2M; Chris@42: { Chris@42: V T2G, TN, T4N, T2r, T2s, TQ, T2A, T4O, T2J, T2x, TU, T4T, T2w, T2z, TX; Chris@42: V T2B, T2H, T2I, TR; Chris@42: { Chris@42: V TL, TM, T2p, T1T, T1t, T2q; Chris@42: TL = LD(&(ri[WS(is, 31)]), ivs, &(ri[WS(is, 1)])); Chris@42: TM = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)])); Chris@42: T2p = LD(&(ii[WS(is, 31)]), ivs, &(ii[WS(is, 1)])); Chris@42: T1T = VSUB(T1r, T1s); Chris@42: T1t = VADD(T1r, T1s); Chris@42: T2q = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)])); Chris@42: { Chris@42: V TO, TP, T3A, T1U, T4y; Chris@42: TO = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)])); Chris@42: TP = LD(&(ri[WS(is, 23)]), ivs, &(ri[WS(is, 1)])); Chris@42: T2H = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2G = VSUB(TL, TM); Chris@42: TN = VADD(TL, TM); Chris@42: T3A = VSUB(T1S, T1T); Chris@42: T1U = VADD(T1S, T1T); Chris@42: T4y = VSUB(T1q, T1t); Chris@42: T1u = VADD(T1q, T1t); Chris@42: T4N = VADD(T2p, T2q); Chris@42: T2r = VSUB(T2p, T2q); Chris@42: T2s = VSUB(TO, TP); Chris@42: TQ = VADD(TO, TP); Chris@42: T3W = VFMA(LDK(KP414213562), T3z, T3A); Chris@42: T3B = VFNMS(LDK(KP414213562), T3A, T3z); Chris@42: T2Y = VFNMS(LDK(KP414213562), T1R, T1U); Chris@42: T1V = VFMA(LDK(KP414213562), T1U, T1R); Chris@42: T52 = VSUB(T4x, T4y); Chris@42: T4z = VADD(T4x, T4y); Chris@42: T2I = LD(&(ii[WS(is, 23)]), ivs, &(ii[WS(is, 1)])); Chris@42: } Chris@42: } Chris@42: { Chris@42: V TS, TT, T2u, T2v, TV, TW; Chris@42: TS = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)])); Chris@42: TT = LD(&(ri[WS(is, 19)]), ivs, &(ri[WS(is, 1)])); Chris@42: T2u = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2v = LD(&(ii[WS(is, 19)]), ivs, &(ii[WS(is, 1)])); Chris@42: TV = LD(&(ri[WS(is, 27)]), ivs, &(ri[WS(is, 1)])); Chris@42: TW = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)])); Chris@42: T2A = LD(&(ii[WS(is, 27)]), ivs, &(ii[WS(is, 1)])); Chris@42: T4O = VADD(T2H, T2I); Chris@42: T2J = VSUB(T2H, T2I); Chris@42: T2x = VSUB(TS, TT); Chris@42: TU = VADD(TS, TT); Chris@42: T4T = VADD(T2u, T2v); Chris@42: T2w = VSUB(T2u, T2v); Chris@42: T2z = VSUB(TV, TW); Chris@42: TX = VADD(TV, TW); Chris@42: T2B = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)])); Chris@42: } Chris@42: T3O = VADD(T2s, T2r); Chris@42: T2t = VSUB(T2r, T2s); Chris@42: T3L = VSUB(T2G, T2J); Chris@42: T2K = VADD(T2G, T2J); Chris@42: T4S = VSUB(TN, TQ); Chris@42: TR = VADD(TN, TQ); Chris@42: { Chris@42: V T4P, T4Q, TY, T4U, T2C; Chris@42: T5G = VADD(T4N, T4O); Chris@42: T4P = VSUB(T4N, T4O); Chris@42: T4Q = VSUB(TX, TU); Chris@42: TY = VADD(TU, 
TX); Chris@42: T4U = VADD(T2A, T2B); Chris@42: T2C = VSUB(T2A, T2B); Chris@42: T2y = VSUB(T2w, T2x); Chris@42: T2L = VADD(T2x, T2w); Chris@42: TZ = VADD(TR, TY); Chris@42: T5F = VSUB(TR, TY); Chris@42: T4V = VSUB(T4T, T4U); Chris@42: T5H = VADD(T4T, T4U); Chris@42: T2D = VADD(T2z, T2C); Chris@42: T2M = VSUB(T2z, T2C); Chris@42: T4R = VSUB(T4P, T4Q); Chris@42: T5k = VADD(T4Q, T4P); Chris@42: } Chris@42: } Chris@42: { Chris@42: V T2f, Ty, T23, T4C, T20, T21, TB, T4D, T2i, T26, TF, T24, TG, TH, T29; Chris@42: V T2a; Chris@42: { Chris@42: V T1Y, T1Z, Tz, TA, T2g, T2h, Tw, Tx, TD, TE; Chris@42: Tw = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)])); Chris@42: Tx = LD(&(ri[WS(is, 17)]), ivs, &(ri[WS(is, 1)])); Chris@42: T5j = VADD(T4S, T4V); Chris@42: T4W = VSUB(T4S, T4V); Chris@42: T5I = VSUB(T5G, T5H); Chris@42: T5X = VADD(T5G, T5H); Chris@42: T2E = VADD(T2y, T2D); Chris@42: T3M = VSUB(T2D, T2y); Chris@42: T2N = VADD(T2L, T2M); Chris@42: T3P = VSUB(T2L, T2M); Chris@42: T2f = VSUB(Tw, Tx); Chris@42: Ty = VADD(Tw, Tx); Chris@42: T1Y = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)])); Chris@42: T1Z = LD(&(ii[WS(is, 17)]), ivs, &(ii[WS(is, 1)])); Chris@42: Tz = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)])); Chris@42: TA = LD(&(ri[WS(is, 25)]), ivs, &(ri[WS(is, 1)])); Chris@42: T2g = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2h = LD(&(ii[WS(is, 25)]), ivs, &(ii[WS(is, 1)])); Chris@42: TD = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)])); Chris@42: TE = LD(&(ri[WS(is, 21)]), ivs, &(ri[WS(is, 1)])); Chris@42: T23 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)])); Chris@42: T4C = VADD(T1Y, T1Z); Chris@42: T20 = VSUB(T1Y, T1Z); Chris@42: T21 = VSUB(Tz, TA); Chris@42: TB = VADD(Tz, TA); Chris@42: T4D = VADD(T2g, T2h); Chris@42: T2i = VSUB(T2g, T2h); Chris@42: T26 = VSUB(TD, TE); Chris@42: TF = VADD(TD, TE); Chris@42: T24 = LD(&(ii[WS(is, 21)]), ivs, &(ii[WS(is, 1)])); Chris@42: TG = LD(&(ri[WS(is, 29)]), ivs, &(ri[WS(is, 1)])); Chris@42: TH = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)])); Chris@42: T29 = LD(&(ii[WS(is, 29)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2a = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)])); Chris@42: } Chris@42: { Chris@42: V T4I, T25, T28, TI, T4J, T2b, T4H, TC, T5B, T4E; Chris@42: T3H = VADD(T21, T20); Chris@42: T22 = VSUB(T20, T21); Chris@42: T3E = VSUB(T2f, T2i); Chris@42: T2j = VADD(T2f, T2i); Chris@42: T4I = VADD(T23, T24); Chris@42: T25 = VSUB(T23, T24); Chris@42: T28 = VSUB(TG, TH); Chris@42: TI = VADD(TG, TH); Chris@42: T4J = VADD(T29, T2a); Chris@42: T2b = VSUB(T29, T2a); Chris@42: T4H = VSUB(Ty, TB); Chris@42: TC = VADD(Ty, TB); Chris@42: T5B = VADD(T4C, T4D); Chris@42: T4E = VSUB(T4C, T4D); Chris@42: { Chris@42: V T27, T2k, TJ, T4F, T4K, T5C, T2c, T2l; Chris@42: T27 = VSUB(T25, T26); Chris@42: T2k = VADD(T26, T25); Chris@42: TJ = VADD(TF, TI); Chris@42: T4F = VSUB(TI, TF); Chris@42: T4K = VSUB(T4I, T4J); Chris@42: T5C = VADD(T4I, T4J); Chris@42: T2c = VADD(T28, T2b); Chris@42: T2l = VSUB(T28, T2b); Chris@42: T4G = VSUB(T4E, T4F); Chris@42: T5h = VADD(T4F, T4E); Chris@42: TK = VADD(TC, TJ); Chris@42: T5A = VSUB(TC, TJ); Chris@42: T5D = VSUB(T5B, T5C); Chris@42: T5W = VADD(T5B, T5C); Chris@42: T2d = VADD(T27, T2c); Chris@42: T3F = VSUB(T2c, T27); Chris@42: T4L = VSUB(T4H, T4K); Chris@42: T5g = VADD(T4H, T4K); Chris@42: T3I = VSUB(T2k, T2l); Chris@42: T2m = VADD(T2k, T2l); Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T1v, T1g, T5V, Tv, T60, T5Y, T11, T10; Chris@42: { Chris@42: V T5o, T5n, T5i, T5r, T5f, T5l, T5w, T5u; Chris@42: { 
Chris@42: V T5d, T4t, T4A, T4X, T58, T51, T4M, T59, T54, T5e, T5b, T4B; Chris@42: T5d = VADD(T4r, T4s); Chris@42: T4t = VSUB(T4r, T4s); Chris@42: T4A = VSUB(T4w, T4z); Chris@42: T5o = VADD(T4w, T4z); Chris@42: T4X = VFNMS(LDK(KP414213562), T4W, T4R); Chris@42: T58 = VFMA(LDK(KP414213562), T4R, T4W); Chris@42: T5n = VADD(T50, T4Z); Chris@42: T51 = VSUB(T4Z, T50); Chris@42: T4M = VFMA(LDK(KP414213562), T4L, T4G); Chris@42: T59 = VFNMS(LDK(KP414213562), T4G, T4L); Chris@42: T54 = VSUB(T52, T53); Chris@42: T5e = VADD(T53, T52); Chris@42: T5b = VFNMS(LDK(KP707106781), T4A, T4t); Chris@42: T4B = VFMA(LDK(KP707106781), T4A, T4t); Chris@42: { Chris@42: V T5s, T56, T4Y, T5c, T5a, T57, T55, T5t; Chris@42: T5i = VFMA(LDK(KP414213562), T5h, T5g); Chris@42: T5s = VFNMS(LDK(KP414213562), T5g, T5h); Chris@42: T56 = VADD(T4M, T4X); Chris@42: T4Y = VSUB(T4M, T4X); Chris@42: T5c = VADD(T59, T58); Chris@42: T5a = VSUB(T58, T59); Chris@42: T57 = VFMA(LDK(KP707106781), T54, T51); Chris@42: T55 = VFNMS(LDK(KP707106781), T54, T51); Chris@42: T5r = VFNMS(LDK(KP707106781), T5e, T5d); Chris@42: T5f = VFMA(LDK(KP707106781), T5e, T5d); Chris@42: T5t = VFMA(LDK(KP414213562), T5j, T5k); Chris@42: T5l = VFNMS(LDK(KP414213562), T5k, T5j); Chris@42: T61 = VFMA(LDK(KP923879532), T4Y, T4B); Chris@42: STM4(&(ro[6]), T61, ovs, &(ro[0])); Chris@42: T62 = VFNMS(LDK(KP923879532), T4Y, T4B); Chris@42: STM4(&(ro[22]), T62, ovs, &(ro[0])); Chris@42: T63 = VFMA(LDK(KP923879532), T5c, T5b); Chris@42: STM4(&(ro[30]), T63, ovs, &(ro[0])); Chris@42: T64 = VFNMS(LDK(KP923879532), T5c, T5b); Chris@42: STM4(&(ro[14]), T64, ovs, &(ro[0])); Chris@42: T65 = VFMA(LDK(KP923879532), T5a, T57); Chris@42: STM4(&(io[6]), T65, ovs, &(io[0])); Chris@42: T66 = VFNMS(LDK(KP923879532), T5a, T57); Chris@42: STM4(&(io[22]), T66, ovs, &(io[0])); Chris@42: T67 = VFMA(LDK(KP923879532), T56, T55); Chris@42: STM4(&(io[30]), T67, ovs, &(io[0])); Chris@42: T68 = VFNMS(LDK(KP923879532), T56, T55); Chris@42: STM4(&(io[14]), T68, ovs, &(io[0])); Chris@42: T5w = VADD(T5s, T5t); Chris@42: T5u = VSUB(T5s, T5t); Chris@42: } Chris@42: } Chris@42: { Chris@42: V Tf, T5P, T5z, T5S, T5U, T5O, T5K, T5L, T5M, Tu, T5T, T5N; Chris@42: { Chris@42: V T5E, T5Q, T5q, T5m, T5v, T5p, T5R, T5J, T5x, T5y; Chris@42: Tf = VADD(T7, Te); Chris@42: T5x = VSUB(T7, Te); Chris@42: T5y = VSUB(T1n, T1u); Chris@42: T1v = VADD(T1n, T1u); Chris@42: T69 = VFMA(LDK(KP923879532), T5u, T5r); Chris@42: STM4(&(ro[10]), T69, ovs, &(ro[0])); Chris@42: T6a = VFNMS(LDK(KP923879532), T5u, T5r); Chris@42: STM4(&(ro[26]), T6a, ovs, &(ro[0])); Chris@42: T5E = VADD(T5A, T5D); Chris@42: T5Q = VSUB(T5D, T5A); Chris@42: T5q = VSUB(T5l, T5i); Chris@42: T5m = VADD(T5i, T5l); Chris@42: T5v = VFMA(LDK(KP707106781), T5o, T5n); Chris@42: T5p = VFNMS(LDK(KP707106781), T5o, T5n); Chris@42: T5P = VSUB(T5x, T5y); Chris@42: T5z = VADD(T5x, T5y); Chris@42: T5R = VADD(T5F, T5I); Chris@42: T5J = VSUB(T5F, T5I); Chris@42: T6b = VFMA(LDK(KP923879532), T5m, T5f); Chris@42: STM4(&(ro[2]), T6b, ovs, &(ro[0])); Chris@42: T6c = VFNMS(LDK(KP923879532), T5m, T5f); Chris@42: STM4(&(ro[18]), T6c, ovs, &(ro[0])); Chris@42: T6d = VFMA(LDK(KP923879532), T5w, T5v); Chris@42: STM4(&(io[2]), T6d, ovs, &(io[0])); Chris@42: T6e = VFNMS(LDK(KP923879532), T5w, T5v); Chris@42: STM4(&(io[18]), T6e, ovs, &(io[0])); Chris@42: T6f = VFMA(LDK(KP923879532), T5q, T5p); Chris@42: STM4(&(io[10]), T6f, ovs, &(io[0])); Chris@42: T6g = VFNMS(LDK(KP923879532), T5q, T5p); Chris@42: STM4(&(io[26]), T6g, ovs, &(io[0])); Chris@42: T5S = VSUB(T5Q, T5R); Chris@42: T5U = 
VADD(T5Q, T5R); Chris@42: T5O = VSUB(T5J, T5E); Chris@42: T5K = VADD(T5E, T5J); Chris@42: T1g = VADD(T18, T1f); Chris@42: T5L = VSUB(T18, T1f); Chris@42: T5M = VSUB(Tt, Tm); Chris@42: Tu = VADD(Tm, Tt); Chris@42: } Chris@42: T6h = VFMA(LDK(KP707106781), T5S, T5P); Chris@42: STM4(&(ro[12]), T6h, ovs, &(ro[0])); Chris@42: T6i = VFNMS(LDK(KP707106781), T5S, T5P); Chris@42: STM4(&(ro[28]), T6i, ovs, &(ro[0])); Chris@42: T6j = VFMA(LDK(KP707106781), T5K, T5z); Chris@42: STM4(&(ro[4]), T6j, ovs, &(ro[0])); Chris@42: T6k = VFNMS(LDK(KP707106781), T5K, T5z); Chris@42: STM4(&(ro[20]), T6k, ovs, &(ro[0])); Chris@42: T5T = VADD(T5M, T5L); Chris@42: T5N = VSUB(T5L, T5M); Chris@42: T5V = VSUB(Tf, Tu); Chris@42: Tv = VADD(Tf, Tu); Chris@42: T6l = VFMA(LDK(KP707106781), T5U, T5T); Chris@42: STM4(&(io[4]), T6l, ovs, &(io[0])); Chris@42: T6m = VFNMS(LDK(KP707106781), T5U, T5T); Chris@42: STM4(&(io[20]), T6m, ovs, &(io[0])); Chris@42: T6n = VFMA(LDK(KP707106781), T5O, T5N); Chris@42: STM4(&(io[12]), T6n, ovs, &(io[0])); Chris@42: T6o = VFNMS(LDK(KP707106781), T5O, T5N); Chris@42: STM4(&(io[28]), T6o, ovs, &(io[0])); Chris@42: T60 = VADD(T5W, T5X); Chris@42: T5Y = VSUB(T5W, T5X); Chris@42: T11 = VSUB(TZ, TK); Chris@42: T10 = VADD(TK, TZ); Chris@42: } Chris@42: } Chris@42: { Chris@42: V T39, T3k, T3j, T3a, T1X, T37, T33, T31, T3d, T3c, T47, T4i, T4h, T48, T4b; Chris@42: V T4a, T4e, T3N, T41, T3D, T45, T3Z, T38, T36, T32, T2Q, T42, T3K, T3Q, T4d; Chris@42: { Chris@42: V T2e, T2n, T2F, T2O, T1w, T5Z; Chris@42: { Chris@42: V T1H, T1W, T2X, T30; Chris@42: T39 = VFMA(LDK(KP707106781), T1G, T1z); Chris@42: T1H = VFNMS(LDK(KP707106781), T1G, T1z); Chris@42: T1W = VSUB(T1O, T1V); Chris@42: T3k = VADD(T1O, T1V); Chris@42: T3j = VFMA(LDK(KP707106781), T2W, T2T); Chris@42: T2X = VFNMS(LDK(KP707106781), T2W, T2T); Chris@42: T30 = VSUB(T2Y, T2Z); Chris@42: T3a = VADD(T2Z, T2Y); Chris@42: T6p = VSUB(T5V, T5Y); Chris@42: STM4(&(ro[24]), T6p, ovs, &(ro[0])); Chris@42: T6q = VADD(T5V, T5Y); Chris@42: STM4(&(ro[8]), T6q, ovs, &(ro[0])); Chris@42: T6r = VADD(Tv, T10); Chris@42: STM4(&(ro[0]), T6r, ovs, &(ro[0])); Chris@42: T6s = VSUB(Tv, T10); Chris@42: STM4(&(ro[16]), T6s, ovs, &(ro[0])); Chris@42: T1w = VSUB(T1g, T1v); Chris@42: T5Z = VADD(T1g, T1v); Chris@42: T1X = VFMA(LDK(KP923879532), T1W, T1H); Chris@42: T37 = VFNMS(LDK(KP923879532), T1W, T1H); Chris@42: T33 = VFMA(LDK(KP923879532), T30, T2X); Chris@42: T31 = VFNMS(LDK(KP923879532), T30, T2X); Chris@42: } Chris@42: T3d = VFMA(LDK(KP707106781), T2d, T22); Chris@42: T2e = VFNMS(LDK(KP707106781), T2d, T22); Chris@42: T2n = VFNMS(LDK(KP707106781), T2m, T2j); Chris@42: T3c = VFMA(LDK(KP707106781), T2m, T2j); Chris@42: T6t = VADD(T5Z, T60); Chris@42: STM4(&(io[0]), T6t, ovs, &(io[0])); Chris@42: T6u = VSUB(T5Z, T60); Chris@42: STM4(&(io[16]), T6u, ovs, &(io[0])); Chris@42: T6v = VSUB(T1w, T11); Chris@42: STM4(&(io[24]), T6v, ovs, &(io[0])); Chris@42: T6w = VADD(T11, T1w); Chris@42: STM4(&(io[8]), T6w, ovs, &(io[0])); Chris@42: T3g = VFMA(LDK(KP707106781), T2E, T2t); Chris@42: T2F = VFNMS(LDK(KP707106781), T2E, T2t); Chris@42: T2O = VFNMS(LDK(KP707106781), T2N, T2K); Chris@42: T3f = VFMA(LDK(KP707106781), T2N, T2K); Chris@42: { Chris@42: V T3v, T35, T2o, T3C, T3V, T3Y; Chris@42: T47 = VFNMS(LDK(KP707106781), T3u, T3t); Chris@42: T3v = VFMA(LDK(KP707106781), T3u, T3t); Chris@42: T35 = VFNMS(LDK(KP668178637), T2e, T2n); Chris@42: T2o = VFMA(LDK(KP668178637), T2n, T2e); Chris@42: T3C = VSUB(T3y, T3B); Chris@42: T4i = VADD(T3y, T3B); Chris@42: T4h = VFNMS(LDK(KP707106781), T3U, 
T3T); Chris@42: T3V = VFMA(LDK(KP707106781), T3U, T3T); Chris@42: T3Y = VSUB(T3W, T3X); Chris@42: T48 = VADD(T3X, T3W); Chris@42: { Chris@42: V T3G, T34, T2P, T3J; Chris@42: T4b = VFMA(LDK(KP707106781), T3F, T3E); Chris@42: T3G = VFNMS(LDK(KP707106781), T3F, T3E); Chris@42: T34 = VFMA(LDK(KP668178637), T2F, T2O); Chris@42: T2P = VFNMS(LDK(KP668178637), T2O, T2F); Chris@42: T3J = VFNMS(LDK(KP707106781), T3I, T3H); Chris@42: T4a = VFMA(LDK(KP707106781), T3I, T3H); Chris@42: T4e = VFMA(LDK(KP707106781), T3M, T3L); Chris@42: T3N = VFNMS(LDK(KP707106781), T3M, T3L); Chris@42: T41 = VFNMS(LDK(KP923879532), T3C, T3v); Chris@42: T3D = VFMA(LDK(KP923879532), T3C, T3v); Chris@42: T45 = VFMA(LDK(KP923879532), T3Y, T3V); Chris@42: T3Z = VFNMS(LDK(KP923879532), T3Y, T3V); Chris@42: T38 = VADD(T35, T34); Chris@42: T36 = VSUB(T34, T35); Chris@42: T32 = VADD(T2o, T2P); Chris@42: T2Q = VSUB(T2o, T2P); Chris@42: T42 = VFNMS(LDK(KP668178637), T3G, T3J); Chris@42: T3K = VFMA(LDK(KP668178637), T3J, T3G); Chris@42: T3Q = VFNMS(LDK(KP707106781), T3P, T3O); Chris@42: T4d = VFMA(LDK(KP707106781), T3P, T3O); Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T4n, T4c, T43, T3R, T4m, T4f; Chris@42: T6x = VFMA(LDK(KP831469612), T38, T37); Chris@42: STM4(&(ro[29]), T6x, ovs, &(ro[1])); Chris@42: T6y = VFNMS(LDK(KP831469612), T38, T37); Chris@42: STM4(&(ro[13]), T6y, ovs, &(ro[1])); Chris@42: T6z = VFMA(LDK(KP831469612), T36, T33); Chris@42: STM4(&(io[5]), T6z, ovs, &(io[1])); Chris@42: T6A = VFNMS(LDK(KP831469612), T36, T33); Chris@42: STM4(&(io[21]), T6A, ovs, &(io[1])); Chris@42: T6B = VFMA(LDK(KP831469612), T32, T31); Chris@42: STM4(&(io[29]), T6B, ovs, &(io[1])); Chris@42: T6C = VFNMS(LDK(KP831469612), T32, T31); Chris@42: STM4(&(io[13]), T6C, ovs, &(io[1])); Chris@42: T6D = VFMA(LDK(KP831469612), T2Q, T1X); Chris@42: STM4(&(ro[5]), T6D, ovs, &(ro[1])); Chris@42: T6E = VFNMS(LDK(KP831469612), T2Q, T1X); Chris@42: STM4(&(ro[21]), T6E, ovs, &(ro[1])); Chris@42: T43 = VFMA(LDK(KP668178637), T3N, T3Q); Chris@42: T3R = VFNMS(LDK(KP668178637), T3Q, T3N); Chris@42: { Chris@42: V T44, T46, T40, T3S; Chris@42: T44 = VSUB(T42, T43); Chris@42: T46 = VADD(T42, T43); Chris@42: T40 = VSUB(T3R, T3K); Chris@42: T3S = VADD(T3K, T3R); Chris@42: T4p = VFMA(LDK(KP923879532), T48, T47); Chris@42: T49 = VFNMS(LDK(KP923879532), T48, T47); Chris@42: T4l = VFNMS(LDK(KP923879532), T4i, T4h); Chris@42: T4j = VFMA(LDK(KP923879532), T4i, T4h); Chris@42: T4n = VFNMS(LDK(KP198912367), T4a, T4b); Chris@42: T4c = VFMA(LDK(KP198912367), T4b, T4a); Chris@42: T6F = VFMA(LDK(KP831469612), T44, T41); Chris@42: STM4(&(ro[11]), T6F, ovs, &(ro[1])); Chris@42: T6G = VFNMS(LDK(KP831469612), T44, T41); Chris@42: STM4(&(ro[27]), T6G, ovs, &(ro[1])); Chris@42: T6H = VFMA(LDK(KP831469612), T46, T45); Chris@42: STM4(&(io[3]), T6H, ovs, &(io[1])); Chris@42: T6I = VFNMS(LDK(KP831469612), T46, T45); Chris@42: STM4(&(io[19]), T6I, ovs, &(io[1])); Chris@42: T6J = VFMA(LDK(KP831469612), T40, T3Z); Chris@42: STM4(&(io[11]), T6J, ovs, &(io[1])); Chris@42: T6K = VFNMS(LDK(KP831469612), T40, T3Z); Chris@42: STM4(&(io[27]), T6K, ovs, &(io[1])); Chris@42: T6L = VFMA(LDK(KP831469612), T3S, T3D); Chris@42: STM4(&(ro[3]), T6L, ovs, &(ro[1])); Chris@42: T6M = VFNMS(LDK(KP831469612), T3S, T3D); Chris@42: STM4(&(ro[19]), T6M, ovs, &(ro[1])); Chris@42: } Chris@42: T4m = VFMA(LDK(KP198912367), T4d, T4e); Chris@42: T4f = VFNMS(LDK(KP198912367), T4e, T4d); Chris@42: T3n = VFNMS(LDK(KP923879532), T3a, T39); Chris@42: T3b = VFMA(LDK(KP923879532), T3a, T39); Chris@42: T3r = 
VFMA(LDK(KP923879532), T3k, T3j); Chris@42: T3l = VFNMS(LDK(KP923879532), T3k, T3j); Chris@42: T3o = VFNMS(LDK(KP198912367), T3c, T3d); Chris@42: T3e = VFMA(LDK(KP198912367), T3d, T3c); Chris@42: T4q = VADD(T4n, T4m); Chris@42: T4o = VSUB(T4m, T4n); Chris@42: T4k = VADD(T4c, T4f); Chris@42: T4g = VSUB(T4c, T4f); Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T6N, T6O, T6P, T6Q; Chris@42: T6N = VFMA(LDK(KP980785280), T4q, T4p); Chris@42: STM4(&(ro[31]), T6N, ovs, &(ro[1])); Chris@42: STN4(&(ro[28]), T6i, T6x, T63, T6N, ovs); Chris@42: T6O = VFNMS(LDK(KP980785280), T4q, T4p); Chris@42: STM4(&(ro[15]), T6O, ovs, &(ro[1])); Chris@42: STN4(&(ro[12]), T6h, T6y, T64, T6O, ovs); Chris@42: T6P = VFMA(LDK(KP980785280), T4o, T4l); Chris@42: STM4(&(io[7]), T6P, ovs, &(io[1])); Chris@42: STN4(&(io[4]), T6l, T6z, T65, T6P, ovs); Chris@42: T6Q = VFNMS(LDK(KP980785280), T4o, T4l); Chris@42: STM4(&(io[23]), T6Q, ovs, &(io[1])); Chris@42: STN4(&(io[20]), T6m, T6A, T66, T6Q, ovs); Chris@42: { Chris@42: V T6R, T6S, T6T, T6U; Chris@42: T6R = VFMA(LDK(KP980785280), T4k, T4j); Chris@42: STM4(&(io[31]), T6R, ovs, &(io[1])); Chris@42: STN4(&(io[28]), T6o, T6B, T67, T6R, ovs); Chris@42: T6S = VFNMS(LDK(KP980785280), T4k, T4j); Chris@42: STM4(&(io[15]), T6S, ovs, &(io[1])); Chris@42: STN4(&(io[12]), T6n, T6C, T68, T6S, ovs); Chris@42: T6T = VFMA(LDK(KP980785280), T4g, T49); Chris@42: STM4(&(ro[7]), T6T, ovs, &(ro[1])); Chris@42: STN4(&(ro[4]), T6j, T6D, T61, T6T, ovs); Chris@42: T6U = VFNMS(LDK(KP980785280), T4g, T49); Chris@42: STM4(&(ro[23]), T6U, ovs, &(ro[1])); Chris@42: STN4(&(ro[20]), T6k, T6E, T62, T6U, ovs); Chris@42: T3h = VFNMS(LDK(KP198912367), T3g, T3f); Chris@42: T3p = VFMA(LDK(KP198912367), T3f, T3g); Chris@42: } Chris@42: } Chris@42: { Chris@42: V T3s, T3q, T3i, T3m; Chris@42: T3s = VADD(T3o, T3p); Chris@42: T3q = VSUB(T3o, T3p); Chris@42: T3i = VADD(T3e, T3h); Chris@42: T3m = VSUB(T3h, T3e); Chris@42: { Chris@42: V T6V, T6W, T6X, T6Y; Chris@42: T6V = VFMA(LDK(KP980785280), T3q, T3n); Chris@42: STM4(&(ro[9]), T6V, ovs, &(ro[1])); Chris@42: STN4(&(ro[8]), T6q, T6V, T69, T6F, ovs); Chris@42: T6W = VFNMS(LDK(KP980785280), T3q, T3n); Chris@42: STM4(&(ro[25]), T6W, ovs, &(ro[1])); Chris@42: STN4(&(ro[24]), T6p, T6W, T6a, T6G, ovs); Chris@42: T6X = VFMA(LDK(KP980785280), T3s, T3r); Chris@42: STM4(&(io[1]), T6X, ovs, &(io[1])); Chris@42: STN4(&(io[0]), T6t, T6X, T6d, T6H, ovs); Chris@42: T6Y = VFNMS(LDK(KP980785280), T3s, T3r); Chris@42: STM4(&(io[17]), T6Y, ovs, &(io[1])); Chris@42: STN4(&(io[16]), T6u, T6Y, T6e, T6I, ovs); Chris@42: { Chris@42: V T6Z, T70, T71, T72; Chris@42: T6Z = VFMA(LDK(KP980785280), T3m, T3l); Chris@42: STM4(&(io[9]), T6Z, ovs, &(io[1])); Chris@42: STN4(&(io[8]), T6w, T6Z, T6f, T6J, ovs); Chris@42: T70 = VFNMS(LDK(KP980785280), T3m, T3l); Chris@42: STM4(&(io[25]), T70, ovs, &(io[1])); Chris@42: STN4(&(io[24]), T6v, T70, T6g, T6K, ovs); Chris@42: T71 = VFMA(LDK(KP980785280), T3i, T3b); Chris@42: STM4(&(ro[1]), T71, ovs, &(ro[1])); Chris@42: STN4(&(ro[0]), T6r, T71, T6b, T6L, ovs); Chris@42: T72 = VFNMS(LDK(KP980785280), T3i, T3b); Chris@42: STM4(&(ro[17]), T72, ovs, &(ro[1])); Chris@42: STN4(&(ro[16]), T6s, T72, T6c, T6M, ovs); Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: } Chris@42: VLEAVE(); Chris@42: } Chris@42: Chris@42: static const kdft_desc desc = { 32, XSIMD_STRING("n2sv_32"), {236, 0, 136, 0}, &GENUS, 0, 1, 0, 0 }; Chris@42: Chris@42: void XSIMD(codelet_n2sv_32) (planner *p) { Chris@42: X(kdft_register) (p, n2sv_32, &desc); Chris@42: } 

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 32 -name n2sv_32 -with-ostride 1 -include n2s.h -store-multiple 4 */

/*
 * This function contains 372 FP additions, 84 FP multiplications,
 * (or, 340 additions, 52 multiplications, 32 fused multiply/add),
 * 130 stack variables, 7 constants, and 144 memory accesses
 */
#include "n2s.h"

static void n2sv_32(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP831469612, +0.831469612302545237078788377617905756738560812);
     DVK(KP555570233, +0.555570233019602224742830813948532874374937191);
     DVK(KP195090322, +0.195090322016128267848284868477022240927691618);
     DVK(KP980785280, +0.980785280403230449126182236134239036973933731);
     DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
     DVK(KP382683432, +0.382683432365089771728459984030398866761344562);
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(128, is), MAKE_VOLATILE_STRIDE(128, os)) {
               V T7, T4r, T4Z, T18, T1z, T3t, T3T, T2T, Te, T1f, T50, T4s, T2W, T3u, T1G;
               V T3U, Tm, T1n, T1O, T2Z, T3y, T3X, T4w, T53, Tt, T1u, T1V, T2Y, T3B, T3W;
               V T4z, T52, T2t, T3L, T3O, T2K, TR, TY, T5F, T5G, T5H, T5I, T4R, T5j, T2E;
               V T3P, T4W, T5k, T2N, T3M, T22, T3E, T3H, T2j, TC, TJ, T5A, T5B, T5C, T5D;
               V T4G, T5g, T2d, T3F, T4L, T5h, T2m, T3I;
               {
                    V T3, T1x, T14, T2S, T6, T2R, T17, T1y;
                    {
                         V T1, T2, T12, T13;
                         T1 = LD(&(ri[0]), ivs, &(ri[0]));
                         T2 = LD(&(ri[WS(is, 16)]), ivs, &(ri[0]));
                         T3 = VADD(T1, T2);
                         T1x = VSUB(T1, T2);
                         T12 = LD(&(ii[0]), ivs, &(ii[0]));
                         T13 = LD(&(ii[WS(is, 16)]), ivs, &(ii[0]));
                         T14 = VADD(T12, T13);
                         T2S = VSUB(T12, T13);
                    }
                    {
                         V T4, T5, T15, T16;
                         T4 = LD(&(ri[WS(is, 8)]), ivs, &(ri[0]));
                         T5 = LD(&(ri[WS(is, 24)]), ivs, &(ri[0]));
                         T6 = VADD(T4, T5);
                         T2R = VSUB(T4, T5);
                         T15 = LD(&(ii[WS(is, 8)]), ivs, &(ii[0]));
                         T16 = LD(&(ii[WS(is, 24)]), ivs, &(ii[0]));
                         T17 = VADD(T15, T16);
                         T1y = VSUB(T15, T16);
                    }
                    T7 = VADD(T3, T6);
                    T4r = VSUB(T3, T6);
                    T4Z = VSUB(T14, T17);
                    T18 = VADD(T14, T17);
                    T1z = VSUB(T1x, T1y);
                    T3t = VADD(T1x, T1y);
                    T3T = VSUB(T2S, T2R);
                    T2T = VADD(T2R, T2S);
               }
               {
                    V Ta, T1B, T1b, T1A, Td, T1D, T1e, T1E;
                    {
                         V T8, T9, T19, T1a;
                         T8 = LD(&(ri[WS(is, 4)]), ivs, &(ri[0]));
                         T9 = LD(&(ri[WS(is, 20)]), ivs, &(ri[0]));
                         Ta = VADD(T8, T9);
                         T1B = VSUB(T8, T9);
                         T19 = LD(&(ii[WS(is, 4)]), ivs, &(ii[0]));
                         T1a = LD(&(ii[WS(is, 20)]), ivs, &(ii[0]));
                         T1b = VADD(T19, T1a);
                         T1A = VSUB(T19, T1a);
                    }
                    {
                         V Tb, Tc, T1c, T1d;
                         Tb = LD(&(ri[WS(is, 28)]), ivs, &(ri[0]));
                         Tc = LD(&(ri[WS(is, 12)]), ivs, &(ri[0]));
                         Td = VADD(Tb,
Tc); Chris@42: T1D = VSUB(Tb, Tc); Chris@42: T1c = LD(&(ii[WS(is, 28)]), ivs, &(ii[0])); Chris@42: T1d = LD(&(ii[WS(is, 12)]), ivs, &(ii[0])); Chris@42: T1e = VADD(T1c, T1d); Chris@42: T1E = VSUB(T1c, T1d); Chris@42: } Chris@42: Te = VADD(Ta, Td); Chris@42: T1f = VADD(T1b, T1e); Chris@42: T50 = VSUB(Td, Ta); Chris@42: T4s = VSUB(T1b, T1e); Chris@42: { Chris@42: V T2U, T2V, T1C, T1F; Chris@42: T2U = VSUB(T1D, T1E); Chris@42: T2V = VADD(T1B, T1A); Chris@42: T2W = VMUL(LDK(KP707106781), VSUB(T2U, T2V)); Chris@42: T3u = VMUL(LDK(KP707106781), VADD(T2V, T2U)); Chris@42: T1C = VSUB(T1A, T1B); Chris@42: T1F = VADD(T1D, T1E); Chris@42: T1G = VMUL(LDK(KP707106781), VSUB(T1C, T1F)); Chris@42: T3U = VMUL(LDK(KP707106781), VADD(T1C, T1F)); Chris@42: } Chris@42: } Chris@42: { Chris@42: V Ti, T1L, T1j, T1J, Tl, T1I, T1m, T1M, T1K, T1N; Chris@42: { Chris@42: V Tg, Th, T1h, T1i; Chris@42: Tg = LD(&(ri[WS(is, 2)]), ivs, &(ri[0])); Chris@42: Th = LD(&(ri[WS(is, 18)]), ivs, &(ri[0])); Chris@42: Ti = VADD(Tg, Th); Chris@42: T1L = VSUB(Tg, Th); Chris@42: T1h = LD(&(ii[WS(is, 2)]), ivs, &(ii[0])); Chris@42: T1i = LD(&(ii[WS(is, 18)]), ivs, &(ii[0])); Chris@42: T1j = VADD(T1h, T1i); Chris@42: T1J = VSUB(T1h, T1i); Chris@42: } Chris@42: { Chris@42: V Tj, Tk, T1k, T1l; Chris@42: Tj = LD(&(ri[WS(is, 10)]), ivs, &(ri[0])); Chris@42: Tk = LD(&(ri[WS(is, 26)]), ivs, &(ri[0])); Chris@42: Tl = VADD(Tj, Tk); Chris@42: T1I = VSUB(Tj, Tk); Chris@42: T1k = LD(&(ii[WS(is, 10)]), ivs, &(ii[0])); Chris@42: T1l = LD(&(ii[WS(is, 26)]), ivs, &(ii[0])); Chris@42: T1m = VADD(T1k, T1l); Chris@42: T1M = VSUB(T1k, T1l); Chris@42: } Chris@42: Tm = VADD(Ti, Tl); Chris@42: T1n = VADD(T1j, T1m); Chris@42: T1K = VADD(T1I, T1J); Chris@42: T1N = VSUB(T1L, T1M); Chris@42: T1O = VFNMS(LDK(KP923879532), T1N, VMUL(LDK(KP382683432), T1K)); Chris@42: T2Z = VFMA(LDK(KP923879532), T1K, VMUL(LDK(KP382683432), T1N)); Chris@42: { Chris@42: V T3w, T3x, T4u, T4v; Chris@42: T3w = VSUB(T1J, T1I); Chris@42: T3x = VADD(T1L, T1M); Chris@42: T3y = VFNMS(LDK(KP382683432), T3x, VMUL(LDK(KP923879532), T3w)); Chris@42: T3X = VFMA(LDK(KP382683432), T3w, VMUL(LDK(KP923879532), T3x)); Chris@42: T4u = VSUB(T1j, T1m); Chris@42: T4v = VSUB(Ti, Tl); Chris@42: T4w = VSUB(T4u, T4v); Chris@42: T53 = VADD(T4v, T4u); Chris@42: } Chris@42: } Chris@42: { Chris@42: V Tp, T1S, T1q, T1Q, Ts, T1P, T1t, T1T, T1R, T1U; Chris@42: { Chris@42: V Tn, To, T1o, T1p; Chris@42: Tn = LD(&(ri[WS(is, 30)]), ivs, &(ri[0])); Chris@42: To = LD(&(ri[WS(is, 14)]), ivs, &(ri[0])); Chris@42: Tp = VADD(Tn, To); Chris@42: T1S = VSUB(Tn, To); Chris@42: T1o = LD(&(ii[WS(is, 30)]), ivs, &(ii[0])); Chris@42: T1p = LD(&(ii[WS(is, 14)]), ivs, &(ii[0])); Chris@42: T1q = VADD(T1o, T1p); Chris@42: T1Q = VSUB(T1o, T1p); Chris@42: } Chris@42: { Chris@42: V Tq, Tr, T1r, T1s; Chris@42: Tq = LD(&(ri[WS(is, 6)]), ivs, &(ri[0])); Chris@42: Tr = LD(&(ri[WS(is, 22)]), ivs, &(ri[0])); Chris@42: Ts = VADD(Tq, Tr); Chris@42: T1P = VSUB(Tq, Tr); Chris@42: T1r = LD(&(ii[WS(is, 6)]), ivs, &(ii[0])); Chris@42: T1s = LD(&(ii[WS(is, 22)]), ivs, &(ii[0])); Chris@42: T1t = VADD(T1r, T1s); Chris@42: T1T = VSUB(T1r, T1s); Chris@42: } Chris@42: Tt = VADD(Tp, Ts); Chris@42: T1u = VADD(T1q, T1t); Chris@42: T1R = VADD(T1P, T1Q); Chris@42: T1U = VSUB(T1S, T1T); Chris@42: T1V = VFMA(LDK(KP382683432), T1R, VMUL(LDK(KP923879532), T1U)); Chris@42: T2Y = VFNMS(LDK(KP923879532), T1R, VMUL(LDK(KP382683432), T1U)); Chris@42: { Chris@42: V T3z, T3A, T4x, T4y; Chris@42: T3z = VSUB(T1Q, T1P); Chris@42: T3A = VADD(T1S, T1T); Chris@42: T3B = 
VFMA(LDK(KP923879532), T3z, VMUL(LDK(KP382683432), T3A)); Chris@42: T3W = VFNMS(LDK(KP382683432), T3z, VMUL(LDK(KP923879532), T3A)); Chris@42: T4x = VSUB(Tp, Ts); Chris@42: T4y = VSUB(T1q, T1t); Chris@42: T4z = VADD(T4x, T4y); Chris@42: T52 = VSUB(T4x, T4y); Chris@42: } Chris@42: } Chris@42: { Chris@42: V TN, T2p, T2J, T4S, TQ, T2G, T2s, T4T, TU, T2x, T2w, T4O, TX, T2z, T2C; Chris@42: V T4P; Chris@42: { Chris@42: V TL, TM, T2H, T2I; Chris@42: TL = LD(&(ri[WS(is, 31)]), ivs, &(ri[WS(is, 1)])); Chris@42: TM = LD(&(ri[WS(is, 15)]), ivs, &(ri[WS(is, 1)])); Chris@42: TN = VADD(TL, TM); Chris@42: T2p = VSUB(TL, TM); Chris@42: T2H = LD(&(ii[WS(is, 31)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2I = LD(&(ii[WS(is, 15)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2J = VSUB(T2H, T2I); Chris@42: T4S = VADD(T2H, T2I); Chris@42: } Chris@42: { Chris@42: V TO, TP, T2q, T2r; Chris@42: TO = LD(&(ri[WS(is, 7)]), ivs, &(ri[WS(is, 1)])); Chris@42: TP = LD(&(ri[WS(is, 23)]), ivs, &(ri[WS(is, 1)])); Chris@42: TQ = VADD(TO, TP); Chris@42: T2G = VSUB(TO, TP); Chris@42: T2q = LD(&(ii[WS(is, 7)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2r = LD(&(ii[WS(is, 23)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2s = VSUB(T2q, T2r); Chris@42: T4T = VADD(T2q, T2r); Chris@42: } Chris@42: { Chris@42: V TS, TT, T2u, T2v; Chris@42: TS = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)])); Chris@42: TT = LD(&(ri[WS(is, 19)]), ivs, &(ri[WS(is, 1)])); Chris@42: TU = VADD(TS, TT); Chris@42: T2x = VSUB(TS, TT); Chris@42: T2u = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2v = LD(&(ii[WS(is, 19)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2w = VSUB(T2u, T2v); Chris@42: T4O = VADD(T2u, T2v); Chris@42: } Chris@42: { Chris@42: V TV, TW, T2A, T2B; Chris@42: TV = LD(&(ri[WS(is, 27)]), ivs, &(ri[WS(is, 1)])); Chris@42: TW = LD(&(ri[WS(is, 11)]), ivs, &(ri[WS(is, 1)])); Chris@42: TX = VADD(TV, TW); Chris@42: T2z = VSUB(TV, TW); Chris@42: T2A = LD(&(ii[WS(is, 27)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2B = LD(&(ii[WS(is, 11)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2C = VSUB(T2A, T2B); Chris@42: T4P = VADD(T2A, T2B); Chris@42: } Chris@42: T2t = VSUB(T2p, T2s); Chris@42: T3L = VADD(T2p, T2s); Chris@42: T3O = VSUB(T2J, T2G); Chris@42: T2K = VADD(T2G, T2J); Chris@42: TR = VADD(TN, TQ); Chris@42: TY = VADD(TU, TX); Chris@42: T5F = VSUB(TR, TY); Chris@42: { Chris@42: V T4N, T4Q, T2y, T2D; Chris@42: T5G = VADD(T4S, T4T); Chris@42: T5H = VADD(T4O, T4P); Chris@42: T5I = VSUB(T5G, T5H); Chris@42: T4N = VSUB(TN, TQ); Chris@42: T4Q = VSUB(T4O, T4P); Chris@42: T4R = VSUB(T4N, T4Q); Chris@42: T5j = VADD(T4N, T4Q); Chris@42: T2y = VSUB(T2w, T2x); Chris@42: T2D = VADD(T2z, T2C); Chris@42: T2E = VMUL(LDK(KP707106781), VSUB(T2y, T2D)); Chris@42: T3P = VMUL(LDK(KP707106781), VADD(T2y, T2D)); Chris@42: { Chris@42: V T4U, T4V, T2L, T2M; Chris@42: T4U = VSUB(T4S, T4T); Chris@42: T4V = VSUB(TX, TU); Chris@42: T4W = VSUB(T4U, T4V); Chris@42: T5k = VADD(T4V, T4U); Chris@42: T2L = VSUB(T2z, T2C); Chris@42: T2M = VADD(T2x, T2w); Chris@42: T2N = VMUL(LDK(KP707106781), VSUB(T2L, T2M)); Chris@42: T3M = VMUL(LDK(KP707106781), VADD(T2M, T2L)); Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V Ty, T2f, T21, T4C, TB, T1Y, T2i, T4D, TF, T28, T2b, T4I, TI, T23, T26; Chris@42: V T4J; Chris@42: { Chris@42: V Tw, Tx, T1Z, T20; Chris@42: Tw = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)])); Chris@42: Tx = LD(&(ri[WS(is, 17)]), ivs, &(ri[WS(is, 1)])); Chris@42: Ty = VADD(Tw, Tx); Chris@42: T2f = VSUB(Tw, Tx); Chris@42: T1Z = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)])); Chris@42: T20 = LD(&(ii[WS(is, 
17)]), ivs, &(ii[WS(is, 1)])); Chris@42: T21 = VSUB(T1Z, T20); Chris@42: T4C = VADD(T1Z, T20); Chris@42: } Chris@42: { Chris@42: V Tz, TA, T2g, T2h; Chris@42: Tz = LD(&(ri[WS(is, 9)]), ivs, &(ri[WS(is, 1)])); Chris@42: TA = LD(&(ri[WS(is, 25)]), ivs, &(ri[WS(is, 1)])); Chris@42: TB = VADD(Tz, TA); Chris@42: T1Y = VSUB(Tz, TA); Chris@42: T2g = LD(&(ii[WS(is, 9)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2h = LD(&(ii[WS(is, 25)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2i = VSUB(T2g, T2h); Chris@42: T4D = VADD(T2g, T2h); Chris@42: } Chris@42: { Chris@42: V TD, TE, T29, T2a; Chris@42: TD = LD(&(ri[WS(is, 5)]), ivs, &(ri[WS(is, 1)])); Chris@42: TE = LD(&(ri[WS(is, 21)]), ivs, &(ri[WS(is, 1)])); Chris@42: TF = VADD(TD, TE); Chris@42: T28 = VSUB(TD, TE); Chris@42: T29 = LD(&(ii[WS(is, 5)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2a = LD(&(ii[WS(is, 21)]), ivs, &(ii[WS(is, 1)])); Chris@42: T2b = VSUB(T29, T2a); Chris@42: T4I = VADD(T29, T2a); Chris@42: } Chris@42: { Chris@42: V TG, TH, T24, T25; Chris@42: TG = LD(&(ri[WS(is, 29)]), ivs, &(ri[WS(is, 1)])); Chris@42: TH = LD(&(ri[WS(is, 13)]), ivs, &(ri[WS(is, 1)])); Chris@42: TI = VADD(TG, TH); Chris@42: T23 = VSUB(TG, TH); Chris@42: T24 = LD(&(ii[WS(is, 29)]), ivs, &(ii[WS(is, 1)])); Chris@42: T25 = LD(&(ii[WS(is, 13)]), ivs, &(ii[WS(is, 1)])); Chris@42: T26 = VSUB(T24, T25); Chris@42: T4J = VADD(T24, T25); Chris@42: } Chris@42: T22 = VADD(T1Y, T21); Chris@42: T3E = VADD(T2f, T2i); Chris@42: T3H = VSUB(T21, T1Y); Chris@42: T2j = VSUB(T2f, T2i); Chris@42: TC = VADD(Ty, TB); Chris@42: TJ = VADD(TF, TI); Chris@42: T5A = VSUB(TC, TJ); Chris@42: { Chris@42: V T4E, T4F, T27, T2c; Chris@42: T5B = VADD(T4C, T4D); Chris@42: T5C = VADD(T4I, T4J); Chris@42: T5D = VSUB(T5B, T5C); Chris@42: T4E = VSUB(T4C, T4D); Chris@42: T4F = VSUB(TI, TF); Chris@42: T4G = VSUB(T4E, T4F); Chris@42: T5g = VADD(T4F, T4E); Chris@42: T27 = VSUB(T23, T26); Chris@42: T2c = VADD(T28, T2b); Chris@42: T2d = VMUL(LDK(KP707106781), VSUB(T27, T2c)); Chris@42: T3F = VMUL(LDK(KP707106781), VADD(T2c, T27)); Chris@42: { Chris@42: V T4H, T4K, T2k, T2l; Chris@42: T4H = VSUB(Ty, TB); Chris@42: T4K = VSUB(T4I, T4J); Chris@42: T4L = VSUB(T4H, T4K); Chris@42: T5h = VADD(T4H, T4K); Chris@42: T2k = VSUB(T2b, T28); Chris@42: T2l = VADD(T23, T26); Chris@42: T2m = VMUL(LDK(KP707106781), VSUB(T2k, T2l)); Chris@42: T3I = VMUL(LDK(KP707106781), VADD(T2k, T2l)); Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T61, T62, T63, T64, T65, T66, T67, T68, T69, T6a, T6b, T6c, T6d, T6e, T6f; Chris@42: V T6g, T6h, T6i, T6j, T6k, T6l, T6m, T6n, T6o, T6p, T6q, T6r, T6s, T6t, T6u; Chris@42: V T6v, T6w; Chris@42: { Chris@42: V T4B, T57, T5a, T5c, T4Y, T56, T55, T5b; Chris@42: { Chris@42: V T4t, T4A, T58, T59; Chris@42: T4t = VSUB(T4r, T4s); Chris@42: T4A = VMUL(LDK(KP707106781), VSUB(T4w, T4z)); Chris@42: T4B = VADD(T4t, T4A); Chris@42: T57 = VSUB(T4t, T4A); Chris@42: T58 = VFNMS(LDK(KP923879532), T4L, VMUL(LDK(KP382683432), T4G)); Chris@42: T59 = VFMA(LDK(KP382683432), T4W, VMUL(LDK(KP923879532), T4R)); Chris@42: T5a = VSUB(T58, T59); Chris@42: T5c = VADD(T58, T59); Chris@42: } Chris@42: { Chris@42: V T4M, T4X, T51, T54; Chris@42: T4M = VFMA(LDK(KP923879532), T4G, VMUL(LDK(KP382683432), T4L)); Chris@42: T4X = VFNMS(LDK(KP923879532), T4W, VMUL(LDK(KP382683432), T4R)); Chris@42: T4Y = VADD(T4M, T4X); Chris@42: T56 = VSUB(T4X, T4M); Chris@42: T51 = VSUB(T4Z, T50); Chris@42: T54 = VMUL(LDK(KP707106781), VSUB(T52, T53)); Chris@42: T55 = VSUB(T51, T54); Chris@42: T5b = VADD(T51, T54); Chris@42: } Chris@42: T61 = VSUB(T4B, 
T4Y); Chris@42: STM4(&(ro[22]), T61, ovs, &(ro[0])); Chris@42: T62 = VSUB(T5b, T5c); Chris@42: STM4(&(io[22]), T62, ovs, &(io[0])); Chris@42: T63 = VADD(T4B, T4Y); Chris@42: STM4(&(ro[6]), T63, ovs, &(ro[0])); Chris@42: T64 = VADD(T5b, T5c); Chris@42: STM4(&(io[6]), T64, ovs, &(io[0])); Chris@42: T65 = VSUB(T55, T56); Chris@42: STM4(&(io[30]), T65, ovs, &(io[0])); Chris@42: T66 = VSUB(T57, T5a); Chris@42: STM4(&(ro[30]), T66, ovs, &(ro[0])); Chris@42: T67 = VADD(T55, T56); Chris@42: STM4(&(io[14]), T67, ovs, &(io[0])); Chris@42: T68 = VADD(T57, T5a); Chris@42: STM4(&(ro[14]), T68, ovs, &(ro[0])); Chris@42: } Chris@42: { Chris@42: V T5f, T5r, T5u, T5w, T5m, T5q, T5p, T5v; Chris@42: { Chris@42: V T5d, T5e, T5s, T5t; Chris@42: T5d = VADD(T4r, T4s); Chris@42: T5e = VMUL(LDK(KP707106781), VADD(T53, T52)); Chris@42: T5f = VADD(T5d, T5e); Chris@42: T5r = VSUB(T5d, T5e); Chris@42: T5s = VFNMS(LDK(KP382683432), T5h, VMUL(LDK(KP923879532), T5g)); Chris@42: T5t = VFMA(LDK(KP923879532), T5k, VMUL(LDK(KP382683432), T5j)); Chris@42: T5u = VSUB(T5s, T5t); Chris@42: T5w = VADD(T5s, T5t); Chris@42: } Chris@42: { Chris@42: V T5i, T5l, T5n, T5o; Chris@42: T5i = VFMA(LDK(KP382683432), T5g, VMUL(LDK(KP923879532), T5h)); Chris@42: T5l = VFNMS(LDK(KP382683432), T5k, VMUL(LDK(KP923879532), T5j)); Chris@42: T5m = VADD(T5i, T5l); Chris@42: T5q = VSUB(T5l, T5i); Chris@42: T5n = VADD(T50, T4Z); Chris@42: T5o = VMUL(LDK(KP707106781), VADD(T4w, T4z)); Chris@42: T5p = VSUB(T5n, T5o); Chris@42: T5v = VADD(T5n, T5o); Chris@42: } Chris@42: T69 = VSUB(T5f, T5m); Chris@42: STM4(&(ro[18]), T69, ovs, &(ro[0])); Chris@42: T6a = VSUB(T5v, T5w); Chris@42: STM4(&(io[18]), T6a, ovs, &(io[0])); Chris@42: T6b = VADD(T5f, T5m); Chris@42: STM4(&(ro[2]), T6b, ovs, &(ro[0])); Chris@42: T6c = VADD(T5v, T5w); Chris@42: STM4(&(io[2]), T6c, ovs, &(io[0])); Chris@42: T6d = VSUB(T5p, T5q); Chris@42: STM4(&(io[26]), T6d, ovs, &(io[0])); Chris@42: T6e = VSUB(T5r, T5u); Chris@42: STM4(&(ro[26]), T6e, ovs, &(ro[0])); Chris@42: T6f = VADD(T5p, T5q); Chris@42: STM4(&(io[10]), T6f, ovs, &(io[0])); Chris@42: T6g = VADD(T5r, T5u); Chris@42: STM4(&(ro[10]), T6g, ovs, &(ro[0])); Chris@42: } Chris@42: { Chris@42: V T5z, T5P, T5S, T5U, T5K, T5O, T5N, T5T; Chris@42: { Chris@42: V T5x, T5y, T5Q, T5R; Chris@42: T5x = VSUB(T7, Te); Chris@42: T5y = VSUB(T1n, T1u); Chris@42: T5z = VADD(T5x, T5y); Chris@42: T5P = VSUB(T5x, T5y); Chris@42: T5Q = VSUB(T5D, T5A); Chris@42: T5R = VADD(T5F, T5I); Chris@42: T5S = VMUL(LDK(KP707106781), VSUB(T5Q, T5R)); Chris@42: T5U = VMUL(LDK(KP707106781), VADD(T5Q, T5R)); Chris@42: } Chris@42: { Chris@42: V T5E, T5J, T5L, T5M; Chris@42: T5E = VADD(T5A, T5D); Chris@42: T5J = VSUB(T5F, T5I); Chris@42: T5K = VMUL(LDK(KP707106781), VADD(T5E, T5J)); Chris@42: T5O = VMUL(LDK(KP707106781), VSUB(T5J, T5E)); Chris@42: T5L = VSUB(T18, T1f); Chris@42: T5M = VSUB(Tt, Tm); Chris@42: T5N = VSUB(T5L, T5M); Chris@42: T5T = VADD(T5M, T5L); Chris@42: } Chris@42: T6h = VSUB(T5z, T5K); Chris@42: STM4(&(ro[20]), T6h, ovs, &(ro[0])); Chris@42: T6i = VSUB(T5T, T5U); Chris@42: STM4(&(io[20]), T6i, ovs, &(io[0])); Chris@42: T6j = VADD(T5z, T5K); Chris@42: STM4(&(ro[4]), T6j, ovs, &(ro[0])); Chris@42: T6k = VADD(T5T, T5U); Chris@42: STM4(&(io[4]), T6k, ovs, &(io[0])); Chris@42: T6l = VSUB(T5N, T5O); Chris@42: STM4(&(io[28]), T6l, ovs, &(io[0])); Chris@42: T6m = VSUB(T5P, T5S); Chris@42: STM4(&(ro[28]), T6m, ovs, &(ro[0])); Chris@42: T6n = VADD(T5N, T5O); Chris@42: STM4(&(io[12]), T6n, ovs, &(io[0])); Chris@42: T6o = VADD(T5P, T5S); Chris@42: STM4(&(ro[12]), 
T6o, ovs, &(ro[0])); Chris@42: } Chris@42: { Chris@42: V Tv, T5V, T5Y, T60, T10, T11, T1w, T5Z; Chris@42: { Chris@42: V Tf, Tu, T5W, T5X; Chris@42: Tf = VADD(T7, Te); Chris@42: Tu = VADD(Tm, Tt); Chris@42: Tv = VADD(Tf, Tu); Chris@42: T5V = VSUB(Tf, Tu); Chris@42: T5W = VADD(T5B, T5C); Chris@42: T5X = VADD(T5G, T5H); Chris@42: T5Y = VSUB(T5W, T5X); Chris@42: T60 = VADD(T5W, T5X); Chris@42: } Chris@42: { Chris@42: V TK, TZ, T1g, T1v; Chris@42: TK = VADD(TC, TJ); Chris@42: TZ = VADD(TR, TY); Chris@42: T10 = VADD(TK, TZ); Chris@42: T11 = VSUB(TZ, TK); Chris@42: T1g = VADD(T18, T1f); Chris@42: T1v = VADD(T1n, T1u); Chris@42: T1w = VSUB(T1g, T1v); Chris@42: T5Z = VADD(T1g, T1v); Chris@42: } Chris@42: T6p = VSUB(Tv, T10); Chris@42: STM4(&(ro[16]), T6p, ovs, &(ro[0])); Chris@42: T6q = VSUB(T5Z, T60); Chris@42: STM4(&(io[16]), T6q, ovs, &(io[0])); Chris@42: T6r = VADD(Tv, T10); Chris@42: STM4(&(ro[0]), T6r, ovs, &(ro[0])); Chris@42: T6s = VADD(T5Z, T60); Chris@42: STM4(&(io[0]), T6s, ovs, &(io[0])); Chris@42: T6t = VADD(T11, T1w); Chris@42: STM4(&(io[8]), T6t, ovs, &(io[0])); Chris@42: T6u = VADD(T5V, T5Y); Chris@42: STM4(&(ro[8]), T6u, ovs, &(ro[0])); Chris@42: T6v = VSUB(T1w, T11); Chris@42: STM4(&(io[24]), T6v, ovs, &(io[0])); Chris@42: T6w = VSUB(T5V, T5Y); Chris@42: STM4(&(ro[24]), T6w, ovs, &(ro[0])); Chris@42: } Chris@42: { Chris@42: V T6x, T6y, T6z, T6A, T6B, T6C, T6D, T6E; Chris@42: { Chris@42: V T1X, T33, T31, T37, T2o, T34, T2P, T35; Chris@42: { Chris@42: V T1H, T1W, T2X, T30; Chris@42: T1H = VSUB(T1z, T1G); Chris@42: T1W = VSUB(T1O, T1V); Chris@42: T1X = VADD(T1H, T1W); Chris@42: T33 = VSUB(T1H, T1W); Chris@42: T2X = VSUB(T2T, T2W); Chris@42: T30 = VSUB(T2Y, T2Z); Chris@42: T31 = VSUB(T2X, T30); Chris@42: T37 = VADD(T2X, T30); Chris@42: } Chris@42: { Chris@42: V T2e, T2n, T2F, T2O; Chris@42: T2e = VSUB(T22, T2d); Chris@42: T2n = VSUB(T2j, T2m); Chris@42: T2o = VFMA(LDK(KP980785280), T2e, VMUL(LDK(KP195090322), T2n)); Chris@42: T34 = VFNMS(LDK(KP980785280), T2n, VMUL(LDK(KP195090322), T2e)); Chris@42: T2F = VSUB(T2t, T2E); Chris@42: T2O = VSUB(T2K, T2N); Chris@42: T2P = VFNMS(LDK(KP980785280), T2O, VMUL(LDK(KP195090322), T2F)); Chris@42: T35 = VFMA(LDK(KP195090322), T2O, VMUL(LDK(KP980785280), T2F)); Chris@42: } Chris@42: { Chris@42: V T2Q, T38, T32, T36; Chris@42: T2Q = VADD(T2o, T2P); Chris@42: T6x = VSUB(T1X, T2Q); Chris@42: STM4(&(ro[23]), T6x, ovs, &(ro[1])); Chris@42: T6y = VADD(T1X, T2Q); Chris@42: STM4(&(ro[7]), T6y, ovs, &(ro[1])); Chris@42: T38 = VADD(T34, T35); Chris@42: T6z = VSUB(T37, T38); Chris@42: STM4(&(io[23]), T6z, ovs, &(io[1])); Chris@42: T6A = VADD(T37, T38); Chris@42: STM4(&(io[7]), T6A, ovs, &(io[1])); Chris@42: T32 = VSUB(T2P, T2o); Chris@42: T6B = VSUB(T31, T32); Chris@42: STM4(&(io[31]), T6B, ovs, &(io[1])); Chris@42: T6C = VADD(T31, T32); Chris@42: STM4(&(io[15]), T6C, ovs, &(io[1])); Chris@42: T36 = VSUB(T34, T35); Chris@42: T6D = VSUB(T33, T36); Chris@42: STM4(&(ro[31]), T6D, ovs, &(ro[1])); Chris@42: T6E = VADD(T33, T36); Chris@42: STM4(&(ro[15]), T6E, ovs, &(ro[1])); Chris@42: } Chris@42: } Chris@42: { Chris@42: V T3D, T41, T3Z, T45, T3K, T42, T3R, T43; Chris@42: { Chris@42: V T3v, T3C, T3V, T3Y; Chris@42: T3v = VSUB(T3t, T3u); Chris@42: T3C = VSUB(T3y, T3B); Chris@42: T3D = VADD(T3v, T3C); Chris@42: T41 = VSUB(T3v, T3C); Chris@42: T3V = VSUB(T3T, T3U); Chris@42: T3Y = VSUB(T3W, T3X); Chris@42: T3Z = VSUB(T3V, T3Y); Chris@42: T45 = VADD(T3V, T3Y); Chris@42: } Chris@42: { Chris@42: V T3G, T3J, T3N, T3Q; Chris@42: T3G = VSUB(T3E, T3F); Chris@42: T3J = 
VSUB(T3H, T3I); Chris@42: T3K = VFMA(LDK(KP555570233), T3G, VMUL(LDK(KP831469612), T3J)); Chris@42: T42 = VFNMS(LDK(KP831469612), T3G, VMUL(LDK(KP555570233), T3J)); Chris@42: T3N = VSUB(T3L, T3M); Chris@42: T3Q = VSUB(T3O, T3P); Chris@42: T3R = VFNMS(LDK(KP831469612), T3Q, VMUL(LDK(KP555570233), T3N)); Chris@42: T43 = VFMA(LDK(KP831469612), T3N, VMUL(LDK(KP555570233), T3Q)); Chris@42: } Chris@42: { Chris@42: V T3S, T6F, T6G, T46, T6H, T6I; Chris@42: T3S = VADD(T3K, T3R); Chris@42: T6F = VSUB(T3D, T3S); Chris@42: STM4(&(ro[21]), T6F, ovs, &(ro[1])); Chris@42: STN4(&(ro[20]), T6h, T6F, T61, T6x, ovs); Chris@42: T6G = VADD(T3D, T3S); Chris@42: STM4(&(ro[5]), T6G, ovs, &(ro[1])); Chris@42: STN4(&(ro[4]), T6j, T6G, T63, T6y, ovs); Chris@42: T46 = VADD(T42, T43); Chris@42: T6H = VSUB(T45, T46); Chris@42: STM4(&(io[21]), T6H, ovs, &(io[1])); Chris@42: STN4(&(io[20]), T6i, T6H, T62, T6z, ovs); Chris@42: T6I = VADD(T45, T46); Chris@42: STM4(&(io[5]), T6I, ovs, &(io[1])); Chris@42: STN4(&(io[4]), T6k, T6I, T64, T6A, ovs); Chris@42: } Chris@42: { Chris@42: V T40, T6J, T6K, T44, T6L, T6M; Chris@42: T40 = VSUB(T3R, T3K); Chris@42: T6J = VSUB(T3Z, T40); Chris@42: STM4(&(io[29]), T6J, ovs, &(io[1])); Chris@42: STN4(&(io[28]), T6l, T6J, T65, T6B, ovs); Chris@42: T6K = VADD(T3Z, T40); Chris@42: STM4(&(io[13]), T6K, ovs, &(io[1])); Chris@42: STN4(&(io[12]), T6n, T6K, T67, T6C, ovs); Chris@42: T44 = VSUB(T42, T43); Chris@42: T6L = VSUB(T41, T44); Chris@42: STM4(&(ro[29]), T6L, ovs, &(ro[1])); Chris@42: STN4(&(ro[28]), T6m, T6L, T66, T6D, ovs); Chris@42: T6M = VADD(T41, T44); Chris@42: STM4(&(ro[13]), T6M, ovs, &(ro[1])); Chris@42: STN4(&(ro[12]), T6o, T6M, T68, T6E, ovs); Chris@42: } Chris@42: } Chris@42: } Chris@42: { Chris@42: V T6N, T6O, T6P, T6Q, T6R, T6S, T6T, T6U; Chris@42: { Chris@42: V T49, T4l, T4j, T4p, T4c, T4m, T4f, T4n; Chris@42: { Chris@42: V T47, T48, T4h, T4i; Chris@42: T47 = VADD(T3t, T3u); Chris@42: T48 = VADD(T3X, T3W); Chris@42: T49 = VADD(T47, T48); Chris@42: T4l = VSUB(T47, T48); Chris@42: T4h = VADD(T3T, T3U); Chris@42: T4i = VADD(T3y, T3B); Chris@42: T4j = VSUB(T4h, T4i); Chris@42: T4p = VADD(T4h, T4i); Chris@42: } Chris@42: { Chris@42: V T4a, T4b, T4d, T4e; Chris@42: T4a = VADD(T3E, T3F); Chris@42: T4b = VADD(T3H, T3I); Chris@42: T4c = VFMA(LDK(KP980785280), T4a, VMUL(LDK(KP195090322), T4b)); Chris@42: T4m = VFNMS(LDK(KP195090322), T4a, VMUL(LDK(KP980785280), T4b)); Chris@42: T4d = VADD(T3L, T3M); Chris@42: T4e = VADD(T3O, T3P); Chris@42: T4f = VFNMS(LDK(KP195090322), T4e, VMUL(LDK(KP980785280), T4d)); Chris@42: T4n = VFMA(LDK(KP195090322), T4d, VMUL(LDK(KP980785280), T4e)); Chris@42: } Chris@42: { Chris@42: V T4g, T4q, T4k, T4o; Chris@42: T4g = VADD(T4c, T4f); Chris@42: T6N = VSUB(T49, T4g); Chris@42: STM4(&(ro[17]), T6N, ovs, &(ro[1])); Chris@42: T6O = VADD(T49, T4g); Chris@42: STM4(&(ro[1]), T6O, ovs, &(ro[1])); Chris@42: T4q = VADD(T4m, T4n); Chris@42: T6P = VSUB(T4p, T4q); Chris@42: STM4(&(io[17]), T6P, ovs, &(io[1])); Chris@42: T6Q = VADD(T4p, T4q); Chris@42: STM4(&(io[1]), T6Q, ovs, &(io[1])); Chris@42: T4k = VSUB(T4f, T4c); Chris@42: T6R = VSUB(T4j, T4k); Chris@42: STM4(&(io[25]), T6R, ovs, &(io[1])); Chris@42: T6S = VADD(T4j, T4k); Chris@42: STM4(&(io[9]), T6S, ovs, &(io[1])); Chris@42: T4o = VSUB(T4m, T4n); Chris@42: T6T = VSUB(T4l, T4o); Chris@42: STM4(&(ro[25]), T6T, ovs, &(ro[1])); Chris@42: T6U = VADD(T4l, T4o); Chris@42: STM4(&(ro[9]), T6U, ovs, &(ro[1])); Chris@42: } Chris@42: } Chris@42: { Chris@42: V T3b, T3n, T3l, T3r, T3e, T3o, T3h, T3p; Chris@42: { Chris@42: V 
                                   T39, T3a, T3j, T3k;
                                   T39 = VADD(T1z, T1G);
                                   T3a = VADD(T2Z, T2Y);
                                   T3b = VADD(T39, T3a);
                                   T3n = VSUB(T39, T3a);
                                   T3j = VADD(T2T, T2W);
                                   T3k = VADD(T1O, T1V);
                                   T3l = VSUB(T3j, T3k);
                                   T3r = VADD(T3j, T3k);
                              }
                              {
                                   V T3c, T3d, T3f, T3g;
                                   T3c = VADD(T22, T2d);
                                   T3d = VADD(T2j, T2m);
                                   T3e = VFMA(LDK(KP555570233), T3c, VMUL(LDK(KP831469612), T3d));
                                   T3o = VFNMS(LDK(KP555570233), T3d, VMUL(LDK(KP831469612), T3c));
                                   T3f = VADD(T2t, T2E);
                                   T3g = VADD(T2K, T2N);
                                   T3h = VFNMS(LDK(KP555570233), T3g, VMUL(LDK(KP831469612), T3f));
                                   T3p = VFMA(LDK(KP831469612), T3g, VMUL(LDK(KP555570233), T3f));
                              }
                              {
                                   V T3i, T6V, T6W, T3s, T6X, T6Y;
                                   T3i = VADD(T3e, T3h);
                                   T6V = VSUB(T3b, T3i);
                                   STM4(&(ro[19]), T6V, ovs, &(ro[1]));
                                   STN4(&(ro[16]), T6p, T6N, T69, T6V, ovs);
                                   T6W = VADD(T3b, T3i);
                                   STM4(&(ro[3]), T6W, ovs, &(ro[1]));
                                   STN4(&(ro[0]), T6r, T6O, T6b, T6W, ovs);
                                   T3s = VADD(T3o, T3p);
                                   T6X = VSUB(T3r, T3s);
                                   STM4(&(io[19]), T6X, ovs, &(io[1]));
                                   STN4(&(io[16]), T6q, T6P, T6a, T6X, ovs);
                                   T6Y = VADD(T3r, T3s);
                                   STM4(&(io[3]), T6Y, ovs, &(io[1]));
                                   STN4(&(io[0]), T6s, T6Q, T6c, T6Y, ovs);
                              }
                              {
                                   V T3m, T6Z, T70, T3q, T71, T72;
                                   T3m = VSUB(T3h, T3e);
                                   T6Z = VSUB(T3l, T3m);
                                   STM4(&(io[27]), T6Z, ovs, &(io[1]));
                                   STN4(&(io[24]), T6v, T6R, T6d, T6Z, ovs);
                                   T70 = VADD(T3l, T3m);
                                   STM4(&(io[11]), T70, ovs, &(io[1]));
                                   STN4(&(io[8]), T6t, T6S, T6f, T70, ovs);
                                   T3q = VSUB(T3o, T3p);
                                   T71 = VSUB(T3n, T3q);
                                   STM4(&(ro[27]), T71, ovs, &(ro[1]));
                                   STN4(&(ro[24]), T6w, T6T, T6e, T71, ovs);
                                   T72 = VADD(T3n, T3q);
                                   STM4(&(ro[11]), T72, ovs, &(ro[1]));
                                   STN4(&(ro[8]), T6u, T6U, T6g, T72, ovs);
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 32, XSIMD_STRING("n2sv_32"), {340, 52, 32, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_32) (planner *p) {
     X(kdft_register) (p, n2sv_32, &desc);
}

#endif /* HAVE_FMA */
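
/*
 * Illustrative usage sketch (editorial addition, not part of the generated
 * codelet): this kernel is normally reached indirectly -- XSIMD(codelet_n2sv_32)
 * above registers n2sv_32 with the planner, which may then select it when
 * planning size-32 DFTs.  The guarded example below only shows the public
 * FFTW3 API path to a size-32 transform; the N2SV_32_USAGE_EXAMPLE guard and
 * the variable names are illustrative assumptions, and whether this particular
 * codelet is chosen depends on the build and on the planner's decisions.
 */
#ifdef N2SV_32_USAGE_EXAMPLE
#include <fftw3.h>

int main(void)
{
     fftw_complex *in = fftw_malloc(sizeof(fftw_complex) * 32);
     fftw_complex *out = fftw_malloc(sizeof(fftw_complex) * 32);
     int i;

     /* Plan first (recommended by the FFTW manual), then fill the input. */
     fftw_plan p = fftw_plan_dft_1d(32, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
     for (i = 0; i < 32; ++i) {
          in[i][0] = (double) i; /* real part */
          in[i][1] = 0.0;        /* imaginary part */
     }

     /* Execute the size-32 forward DFT; the planner dispatches to one of the
        registered codelets (such as n2sv_32) for the actual computation. */
     fftw_execute(p);

     fftw_destroy_plan(p);
     fftw_free(in);
     fftw_free(out);
     return 0;
}
#endif /* N2SV_32_USAGE_EXAMPLE */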