Mercurial > hg > batch-feature-extraction-tool
diff Lib/fftw-3.2.1/cell/spu/spu_t1fv_15.spuc @ 0:25bf17994ef1
First commit. VS2013, Codeblocks and Mac OSX configuration
author | Geogaddi\David <d.m.ronan@qmul.ac.uk> |
---|---|
date | Thu, 09 Jul 2015 01:12:16 +0100 |
parents | |
children |
line wrap: on
line diff
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/fftw-3.2.1/cell/spu/spu_t1fv_15.spuc Thu Jul 09 01:12:16 2015 +0100 @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2003, 2007-8 Matteo Frigo + * Copyright (c) 2003, 2007-8 Massachusetts Institute of Technology + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +/* Generated by: ../../genfft/gen_twiddle_c -standalone -fma -reorder-insns -simd -compact -variables 100000 -include fftw-spu.h -trivial-stores -n 15 -name X(spu_t1fv_15) */ + +/* + * This function contains 92 FP additions, 77 FP multiplications, + * (or, 50 additions, 35 multiplications, 42 fused multiply/add), + * 117 stack variables, 8 constants, and 30 memory accesses + */ +#include "fftw-spu.h" /* Supplies the SIMD/vector macros used below: V, DVK, LDK, LD, ST, BYTWJ, VADD, VSUB, VMUL, VFMA, VFNMS, VFMAI, VFNMSI, WS, TWVL, VL, MAKE_VOLATILE_STRIDE. This file is MACHINE-GENERATED (see banner above): do not hand-edit the arithmetic -- rerun genfft to change it. */ + +void X(spu_t1fv_15) (R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms) { /* Size-15 twiddle DFT codelet for the Cell SPU. ri: data array actually read/written (x = ri below); ii: second data pointer, never dereferenced here -- NOTE(review): the SIMD vector layout presumably packs real/imag together so only ri is needed; confirm against fftw-spu.h. W: twiddle-factor table; rs: stride between samples of one transform; [mb, me): range of transforms to process; ms: stride between successive transforms. The DVK() lines below bind the 8 floating-point constants the generated DAG needs (halves, quarters, and sines/cosines related to 2*pi/15 -- e.g. 0.8660... = sqrt(3)/2, 0.9510... = sin(72 deg), 0.6180... = 1/golden ratio). */ + DVK(KP823639103, +0.823639103546331925877420039278190003029660514); + DVK(KP910592997, +0.910592997310029334643087372129977886038870291); + DVK(KP559016994, +0.559016994374947424102293417182819058860154590); + DVK(KP618033988, +0.618033988749894848204586834365638117720309180); + DVK(KP951056516, +0.951056516295153572116439333379382143405698634); + DVK(KP866025403, +0.866025403784438646763723170752936183471402627); + DVK(KP250000000, +0.250000000000000000000000000000000000000000000); + DVK(KP500000000, 
+0.500000000000000000000000000000000000000000000); + INT m; + R *x; + x = ri; + for (m = mb, W = W + (mb * ((TWVL / VL) * 28)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 28), MAKE_VOLATILE_STRIDE(rs)) { /* Each iteration processes VL transforms at once and advances W by TWVL * 28 (the 14 complex twiddles a size-15 twiddle codelet consumes per transform). BYTWJ applies a twiddle factor from W to a loaded value (presumably a complex multiply -- see fftw-spu.h). The body is a straight-line, FMA-scheduled DAG: the index pattern (offsets 0/5/10 grouped first, then five groups of three) reflects the 3x5 decomposition of n = 15. Statement order was chosen by genfft's -reorder-insns scheduler; do not reorder by hand. */ + V T1g, T7, TU, T17, T1a, To, TL, TK, TF, T1j, T1l, T1d, T1e, T11, T13; + V T1, T5, T3, T4, T2, T6, T9, Tq, Ty, Th, Te, T15, Tv, T18, TD; + V T19, Tm, T16, T8, Tp, Tx, Tg, Tb, Td, Ta, Tc, Ts, Tu, Tr, Tt; + V TA, TC, Tz, TB, Tj, Tl, Ti, Tk, T1h, T1i, TV, TW, Tf, Tn, TY; + V TZ, Tw, TE, TX, T10, T12, T1k, T1J, T1I, T1G, T1H, TQ, TM, TT, TJ; + V TP, TI, TH, TG, TR, TS, TO, TN, T1r, T1n, T1D, T1z, T1q, T1c, T1C; + V T1w, T1f, T1x, T1y, T1m, T1v, T1b, T1u, T14, T1p, T1F, T1o, T1E, T1t, T1B; + V T1s, T1A; + T1 = LD(&(x[0]), ms, &(x[0])); + T4 = LD(&(x[WS(rs, 10)]), ms, &(x[0])); + T5 = BYTWJ(&(W[TWVL * 18]), T4); + T2 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)])); + T3 = BYTWJ(&(W[TWVL * 8]), T2); + T1g = VSUB(T5, T3); + T6 = VADD(T3, T5); + T7 = VADD(T1, T6); + TU = VFNMS(LDK(KP500000000), T6, T1); + T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)])); + T9 = BYTWJ(&(W[TWVL * 4]), T8); + Tp = LD(&(x[WS(rs, 6)]), ms, &(x[0])); + Tq = BYTWJ(&(W[TWVL * 10]), Tp); + Tx = LD(&(x[WS(rs, 9)]), ms, &(x[WS(rs, 1)])); + Ty = BYTWJ(&(W[TWVL * 16]), Tx); + Tg = LD(&(x[WS(rs, 12)]), ms, &(x[0])); + Th = BYTWJ(&(W[TWVL * 22]), Tg); + Ta = LD(&(x[WS(rs, 8)]), ms, &(x[0])); + Tb = BYTWJ(&(W[TWVL * 14]), Ta); + Tc = LD(&(x[WS(rs, 13)]), ms, &(x[WS(rs, 1)])); + Td = BYTWJ(&(W[TWVL * 24]), Tc); + Te = VADD(Tb, Td); + T15 = VSUB(Td, Tb); + Tr = LD(&(x[WS(rs, 11)]), ms, &(x[WS(rs, 1)])); + Ts = BYTWJ(&(W[TWVL * 20]), Tr); + Tt = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)])); + Tu = BYTWJ(&(W[0]), Tt); + Tv = VADD(Ts, Tu); + T18 = VSUB(Tu, Ts); + Tz = LD(&(x[WS(rs, 14)]), ms, &(x[0])); + TA = BYTWJ(&(W[TWVL * 26]), Tz); + TB = LD(&(x[WS(rs, 4)]), ms, &(x[0])); + TC = BYTWJ(&(W[TWVL * 6]), TB); + TD = VADD(TA, TC); + T19 = VSUB(TC, TA); + Ti = LD(&(x[WS(rs, 2)]), ms, &(x[0])); + Tj = 
BYTWJ(&(W[TWVL * 2]), Ti); + Tk = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)])); + Tl = BYTWJ(&(W[TWVL * 12]), Tk); + Tm = VADD(Tj, Tl); + T16 = VSUB(Tl, Tj); + T17 = VSUB(T15, T16); + T1h = VADD(T15, T16); + T1i = VADD(T18, T19); + T1a = VSUB(T18, T19); + Tf = VADD(T9, Te); + TV = VFNMS(LDK(KP500000000), Te, T9); + TW = VFNMS(LDK(KP500000000), Tm, Th); + Tn = VADD(Th, Tm); + To = VADD(Tf, Tn); + TL = VSUB(Tf, Tn); + TY = VFNMS(LDK(KP500000000), Tv, Tq); + Tw = VADD(Tq, Tv); + TE = VADD(Ty, TD); + TZ = VFNMS(LDK(KP500000000), TD, Ty); + TK = VSUB(Tw, TE); + TF = VADD(Tw, TE); + T1j = VADD(T1h, T1i); + T1l = VSUB(T1h, T1i); + TX = VADD(TV, TW); + T1d = VSUB(TV, TW); + T1e = VSUB(TY, TZ); + T10 = VADD(TY, TZ); + T11 = VADD(TX, T10); + T13 = VSUB(TX, T10); + T12 = VFNMS(LDK(KP250000000), T11, TU); + T1G = VADD(TU, T11); + T1H = VMUL(LDK(KP866025403), VADD(T1g, T1j)); + T1k = VFNMS(LDK(KP250000000), T1j, T1g); + T1J = VFMAI(T1H, T1G); + T1I = VFNMSI(T1H, T1G); + ST(&(x[WS(rs, 5)]), T1I, ms, &(x[WS(rs, 1)])); + ST(&(x[WS(rs, 10)]), T1J, ms, &(x[0])); + TQ = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), TK, TL)); + TM = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), TL, TK)); + TG = VADD(To, TF); + TI = VSUB(To, TF); + TT = VADD(T7, TG); + TH = VFNMS(LDK(KP250000000), TG, T7); + TJ = VFNMS(LDK(KP559016994), TI, TH); + TP = VFMA(LDK(KP559016994), TI, TH); + ST(&(x[0]), TT, ms, &(x[0])); + TS = VFMAI(TQ, TP); + TR = VFNMSI(TQ, TP); + ST(&(x[WS(rs, 9)]), TS, ms, &(x[WS(rs, 1)])); + TN = VFNMSI(TM, TJ); + TO = VFMAI(TM, TJ); + ST(&(x[WS(rs, 3)]), TN, ms, &(x[WS(rs, 1)])); + ST(&(x[WS(rs, 12)]), TO, ms, &(x[0])); + ST(&(x[WS(rs, 6)]), TR, ms, &(x[0])); + T1f = VFMA(LDK(KP618033988), T1e, T1d); + T1x = VFNMS(LDK(KP618033988), T1d, T1e); + T1y = VFNMS(LDK(KP559016994), T1l, T1k); + T1m = VFMA(LDK(KP559016994), T1l, T1k); + T1r = VMUL(LDK(KP951056516), VFMA(LDK(KP910592997), T1m, T1f)); + T1n = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T1m, T1f)); + T1D = VMUL(LDK(KP951056516), 
VFMA(LDK(KP910592997), T1y, T1x)); + T1z = VMUL(LDK(KP951056516), VFNMS(LDK(KP910592997), T1y, T1x)); + T1v = VFNMS(LDK(KP618033988), T17, T1a); + T1b = VFMA(LDK(KP618033988), T1a, T17); + T1u = VFNMS(LDK(KP559016994), T13, T12); + T14 = VFMA(LDK(KP559016994), T13, T12); + T1q = VFNMS(LDK(KP823639103), T1b, T14); + T1c = VFMA(LDK(KP823639103), T1b, T14); + T1C = VFNMS(LDK(KP823639103), T1v, T1u); + T1w = VFMA(LDK(KP823639103), T1v, T1u); + T1p = VFMAI(T1n, T1c); + T1o = VFNMSI(T1n, T1c); + ST(&(x[WS(rs, 1)]), T1o, ms, &(x[WS(rs, 1)])); + T1F = VFMAI(T1D, T1C); + T1E = VFNMSI(T1D, T1C); + ST(&(x[WS(rs, 8)]), T1E, ms, &(x[0])); + ST(&(x[WS(rs, 7)]), T1F, ms, &(x[WS(rs, 1)])); + ST(&(x[WS(rs, 14)]), T1p, ms, &(x[0])); + T1t = VFMAI(T1r, T1q); + T1s = VFNMSI(T1r, T1q); + ST(&(x[WS(rs, 11)]), T1s, ms, &(x[WS(rs, 1)])); + T1B = VFMAI(T1z, T1w); + T1A = VFNMSI(T1z, T1w); + ST(&(x[WS(rs, 13)]), T1A, ms, &(x[WS(rs, 1)])); + ST(&(x[WS(rs, 2)]), T1B, ms, &(x[0])); + ST(&(x[WS(rs, 4)]), T1t, ms, &(x[0])); + } +}