diff src/fftw-3.3.8/rdft/simd/common/hc2cbdftv_12.c @ 82:d0c2a83c1364
Add FFTW 3.3.8 source, and a Linux build
author | Chris Cannam
---|---
date | Tue, 19 Nov 2019 14:52:55 +0000
parents |
children |
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/fftw-3.3.8/rdft/simd/common/hc2cbdftv_12.c	Tue Nov 19 14:52:55 2019 +0000
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2003, 2007-14 Matteo Frigo
+ * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/* This file was automatically generated --- DO NOT EDIT */
+/* Generated on Thu May 24 08:08:11 EDT 2018 */
+
+#include "rdft/codelet-rdft.h"
+
+#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)
+
+/* Generated by: ../../../genfft/gen_hc2cdft_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 12 -dif -sign 1 -name hc2cbdftv_12 -include rdft/simd/hc2cbv.h */
+
+/*
+ * This function contains 71 FP additions, 51 FP multiplications,
+ * (or, 45 additions, 25 multiplications, 26 fused multiply/add),
+ * 56 stack variables, 2 constants, and 24 memory accesses
+ */
+#include "rdft/simd/hc2cbv.h"
+
+static void hc2cbdftv_12(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
+{
+     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
+     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
+     {
+          INT m;
+          for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 22)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 22), MAKE_VOLATILE_STRIDE(48, rs)) {
+               V Tk, Tw, Td, TA, T11, T1f, TF, TP, Tt, TB, TY, T1e;
+               {
+                    V T2, Tm, T7, T8, Tp, Tq, T5, Tu, Tg, Tr, Tj, Tn, Tb, Tv, T3;
+                    V T4, Te, Tf, Th, Ti, T9, Ta, T6, Tc, TZ, T10, TD, TE, To, Ts;
+                    V TW, TX;
+                    T2 = LD(&(Rp[0]), ms, &(Rp[0]));
+                    Tm = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
+                    T7 = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
+                    T8 = VCONJ(T7);
+                    Tp = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
+                    Tq = VCONJ(Tp);
+                    T3 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
+                    T4 = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
+                    T5 = VFMACONJ(T4, T3);
+                    Tu = VFNMSCONJ(T4, T3);
+                    Te = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
+                    Tf = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
+                    Tg = VSUB(Te, Tf);
+                    Tr = VADD(Te, Tf);
+                    Th = LD(&(Rm[0]), -ms, &(Rm[0]));
+                    Ti = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
+                    Tj = VSUB(Th, Ti);
+                    Tn = VADD(Ti, Th);
+                    T9 = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
+                    Ta = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
+                    Tb = VFMACONJ(Ta, T9);
+                    Tv = VFMSCONJ(Ta, T9);
+                    Tk = VFMACONJ(Tj, Tg);
+                    Tw = VSUB(Tu, Tv);
+                    T6 = VFNMS(LDK(KP500000000), T5, T2);
+                    Tc = VFNMS(LDK(KP500000000), Tb, T8);
+                    Td = VSUB(T6, Tc);
+                    TA = VADD(T6, Tc);
+                    TZ = VFMACONJ(Tn, Tm);
+                    T10 = VFMACONJ(Tp, Tr);
+                    T11 = VSUB(TZ, T10);
+                    T1f = VADD(TZ, T10);
+                    TD = VFNMSCONJ(Tj, Tg);
+                    TE = VADD(Tu, Tv);
+                    TF = VMUL(LDK(KP866025403), VSUB(TD, TE));
+                    TP = VMUL(LDK(KP866025403), VADD(TE, TD));
+                    To = VFNMS(LDK(KP500000000), VCONJ(Tn), Tm);
+                    Ts = VFNMS(LDK(KP500000000), Tr, Tq);
+                    Tt = VSUB(To, Ts);
+                    TB = VADD(To, Ts);
+                    TW = VADD(T2, T5);
+                    TX = VFMACONJ(T7, Tb);
+                    TY = VSUB(TW, TX);
+                    T1e = VADD(TW, TX);
+               }
+               {
+                    V T1l, T12, TG, TU, Ty, T1k, TV, TC, Tz, TT, Tl, Tx, T1, T1j, TH;
+                    V TI, T1n, T1m, T14, T13, T18, T1g, TQ, T16, TM, T1c, T17, T1d, TO, TN;
+                    V T15, TK, TL, TJ, T1b, TR, TS, T1i, T1h, T1a, T19;
+                    T1l = VADD(T1e, T1f);
+                    TV = LDW(&(W[TWVL * 4]));
+                    T12 = VZMULI(TV, VFNMSI(T11, TY));
+                    TC = VSUB(TA, TB);
+                    Tz = LDW(&(W[TWVL * 18]));
+                    TG = VZMUL(Tz, VFNMSI(TF, TC));
+                    TT = LDW(&(W[TWVL * 2]));
+                    TU = VZMUL(TT, VFMAI(TF, TC));
+                    Tl = VFMA(LDK(KP866025403), Tk, Td);
+                    Tx = VFMA(LDK(KP866025403), Tw, Tt);
+                    T1 = LDW(&(W[TWVL * 20]));
+                    Ty = VZMULI(T1, VFNMSI(Tx, Tl));
+                    T1j = LDW(&(W[0]));
+                    T1k = VZMULI(T1j, VFMAI(Tx, Tl));
+                    TH = VADD(Ty, TG);
+                    ST(&(Rp[WS(rs, 5)]), TH, ms, &(Rp[WS(rs, 1)]));
+                    TI = VCONJ(VSUB(TG, Ty));
+                    ST(&(Rm[WS(rs, 5)]), TI, -ms, &(Rm[WS(rs, 1)]));
+                    T1n = VCONJ(VSUB(T1l, T1k));
+                    ST(&(Rm[0]), T1n, -ms, &(Rm[0]));
+                    T1m = VADD(T1k, T1l);
+                    ST(&(Rp[0]), T1m, ms, &(Rp[0]));
+                    T14 = VADD(TU, T12);
+                    ST(&(Rp[WS(rs, 1)]), T14, ms, &(Rp[WS(rs, 1)]));
+                    T13 = VCONJ(VSUB(TU, T12));
+                    ST(&(Rm[WS(rs, 1)]), T13, -ms, &(Rm[WS(rs, 1)]));
+                    T17 = LDW(&(W[TWVL * 16]));
+                    T18 = VZMULI(T17, VFMAI(T11, TY));
+                    T1d = LDW(&(W[TWVL * 10]));
+                    T1g = VZMUL(T1d, VSUB(T1e, T1f));
+                    TO = VADD(TA, TB);
+                    TN = LDW(&(W[TWVL * 6]));
+                    TQ = VZMUL(TN, VFMAI(TP, TO));
+                    T15 = LDW(&(W[TWVL * 14]));
+                    T16 = VZMUL(T15, VFNMSI(TP, TO));
+                    TK = VFNMS(LDK(KP866025403), Tk, Td);
+                    TL = VFNMS(LDK(KP866025403), Tw, Tt);
+                    TJ = LDW(&(W[TWVL * 8]));
+                    TM = VZMULI(TJ, VFMAI(TL, TK));
+                    T1b = LDW(&(W[TWVL * 12]));
+                    T1c = VZMULI(T1b, VFNMSI(TL, TK));
+                    TR = VADD(TM, TQ);
+                    ST(&(Rp[WS(rs, 2)]), TR, ms, &(Rp[0]));
+                    TS = VCONJ(VSUB(TQ, TM));
+                    ST(&(Rm[WS(rs, 2)]), TS, -ms, &(Rm[0]));
+                    T1i = VCONJ(VSUB(T1g, T1c));
+                    ST(&(Rm[WS(rs, 3)]), T1i, -ms, &(Rm[WS(rs, 1)]));
+                    T1h = VADD(T1c, T1g);
+                    ST(&(Rp[WS(rs, 3)]), T1h, ms, &(Rp[WS(rs, 1)]));
+                    T1a = VADD(T16, T18);
+                    ST(&(Rp[WS(rs, 4)]), T1a, ms, &(Rp[0]));
+                    T19 = VCONJ(VSUB(T16, T18));
+                    ST(&(Rm[WS(rs, 4)]), T19, -ms, &(Rm[0]));
+               }
+          }
+     }
+     VLEAVE();
+}
+
+static const tw_instr twinstr[] = {
+     VTW(1, 1),
+     VTW(1, 2),
+     VTW(1, 3),
+     VTW(1, 4),
+     VTW(1, 5),
+     VTW(1, 6),
+     VTW(1, 7),
+     VTW(1, 8),
+     VTW(1, 9),
+     VTW(1, 10),
+     VTW(1, 11),
+     {TW_NEXT, VL, 0}
+};
+
+static const hc2c_desc desc = { 12, XSIMD_STRING("hc2cbdftv_12"), twinstr, &GENUS, {45, 25, 26, 0} };
+
+void XSIMD(codelet_hc2cbdftv_12) (planner *p) {
+     X(khc2c_register) (p, hc2cbdftv_12, &desc, HC2C_VIA_DFT);
+}
+#else
+
+/* Generated by: ../../../genfft/gen_hc2cdft_c.native -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 12 -dif -sign 1 -name hc2cbdftv_12 -include rdft/simd/hc2cbv.h */
+
+/*
+ * This function contains 71 FP additions, 30 FP multiplications,
+ * (or, 67 additions, 26 multiplications, 4 fused multiply/add),
+ * 90 stack variables, 2 constants, and 24 memory accesses
+ */
+#include "rdft/simd/hc2cbv.h"
+
+static void hc2cbdftv_12(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
+{
+     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
+     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
+     {
+          INT m;
+          for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 22)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 22), MAKE_VOLATILE_STRIDE(48, rs)) {
+               V TY, TZ, Tf, TC, Tq, TG, Tm, TF, Ty, TD, T13, T1h, T2, T9, T3;
+               V T5, T6, Tc, Tb, Td, T8, T4, Ta, T7, Te, To, Tp, Tr, Tv, Ti;
+               V Ts, Tl, Tw, Tu, Tg, Th, Tj, Tk, Tt, Tx, T11, T12;
+               T2 = LD(&(Rp[0]), ms, &(Rp[0]));
+               T8 = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
+               T9 = VCONJ(T8);
+               T3 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
+               T4 = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
+               T5 = VCONJ(T4);
+               T6 = VADD(T3, T5);
+               Tc = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
+               Ta = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
+               Tb = VCONJ(Ta);
+               Td = VADD(Tb, Tc);
+               TY = VADD(T2, T6);
+               TZ = VADD(T9, Td);
+               T7 = VFNMS(LDK(KP500000000), T6, T2);
+               Te = VFNMS(LDK(KP500000000), Td, T9);
+               Tf = VSUB(T7, Te);
+               TC = VADD(T7, Te);
+               To = VSUB(T3, T5);
+               Tp = VSUB(Tb, Tc);
+               Tq = VMUL(LDK(KP866025403), VSUB(To, Tp));
+               TG = VADD(To, Tp);
+               Tr = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
+               Tu = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
+               Tv = VCONJ(Tu);
+               Tg = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
+               Th = LD(&(Rm[0]), -ms, &(Rm[0]));
+               Ti = VCONJ(VSUB(Tg, Th));
+               Ts = VCONJ(VADD(Tg, Th));
+               Tj = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
+               Tk = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
+               Tl = VSUB(Tj, Tk);
+               Tw = VADD(Tj, Tk);
+               Tm = VMUL(LDK(KP866025403), VSUB(Ti, Tl));
+               TF = VADD(Ti, Tl);
+               Tt = VFNMS(LDK(KP500000000), Ts, Tr);
+               Tx = VFNMS(LDK(KP500000000), Tw, Tv);
+               Ty = VSUB(Tt, Tx);
+               TD = VADD(Tt, Tx);
+               T11 = VADD(Tr, Ts);
+               T12 = VADD(Tv, Tw);
+               T13 = VBYI(VSUB(T11, T12));
+               T1h = VADD(T11, T12);
+               {
+                    V T1n, T1i, T14, T1a, TA, T1m, TS, T18, TO, T1e, TI, TW, T1g, T1f, T10;
+                    V TX, T19, Tn, Tz, T1, T1l, TQ, TR, TP, T17, TM, TN, TL, T1d, TE;
+                    V TH, TB, TV, TJ, T1p, T1k, TT, T1o, TK, TU, T1j, T1b, T16, T1c, T15;
+                    T1g = VADD(TY, TZ);
+                    T1n = VADD(T1g, T1h);
+                    T1f = LDW(&(W[TWVL * 10]));
+                    T1i = VZMUL(T1f, VSUB(T1g, T1h));
+                    T10 = VSUB(TY, TZ);
+                    TX = LDW(&(W[TWVL * 4]));
+                    T14 = VZMULI(TX, VSUB(T10, T13));
+                    T19 = LDW(&(W[TWVL * 16]));
+                    T1a = VZMULI(T19, VADD(T10, T13));
+                    Tn = VSUB(Tf, Tm);
+                    Tz = VBYI(VADD(Tq, Ty));
+                    T1 = LDW(&(W[TWVL * 20]));
+                    TA = VZMULI(T1, VSUB(Tn, Tz));
+                    T1l = LDW(&(W[0]));
+                    T1m = VZMULI(T1l, VADD(Tn, Tz));
+                    TQ = VBYI(VMUL(LDK(KP866025403), VADD(TG, TF)));
+                    TR = VADD(TC, TD);
+                    TP = LDW(&(W[TWVL * 6]));
+                    TS = VZMUL(TP, VADD(TQ, TR));
+                    T17 = LDW(&(W[TWVL * 14]));
+                    T18 = VZMUL(T17, VSUB(TR, TQ));
+                    TM = VADD(Tf, Tm);
+                    TN = VBYI(VSUB(Ty, Tq));
+                    TL = LDW(&(W[TWVL * 8]));
+                    TO = VZMULI(TL, VADD(TM, TN));
+                    T1d = LDW(&(W[TWVL * 12]));
+                    T1e = VZMULI(T1d, VSUB(TM, TN));
+                    TE = VSUB(TC, TD);
+                    TH = VBYI(VMUL(LDK(KP866025403), VSUB(TF, TG)));
+                    TB = LDW(&(W[TWVL * 18]));
+                    TI = VZMUL(TB, VSUB(TE, TH));
+                    TV = LDW(&(W[TWVL * 2]));
+                    TW = VZMUL(TV, VADD(TH, TE));
+                    TJ = VADD(TA, TI);
+                    ST(&(Rp[WS(rs, 5)]), TJ, ms, &(Rp[WS(rs, 1)]));
+                    T1p = VCONJ(VSUB(T1n, T1m));
+                    ST(&(Rm[0]), T1p, -ms, &(Rm[0]));
+                    T1k = VCONJ(VSUB(T1i, T1e));
+                    ST(&(Rm[WS(rs, 3)]), T1k, -ms, &(Rm[WS(rs, 1)]));
+                    TT = VADD(TO, TS);
+                    ST(&(Rp[WS(rs, 2)]), TT, ms, &(Rp[0]));
+                    T1o = VADD(T1m, T1n);
+                    ST(&(Rp[0]), T1o, ms, &(Rp[0]));
+                    TK = VCONJ(VSUB(TI, TA));
+                    ST(&(Rm[WS(rs, 5)]), TK, -ms, &(Rm[WS(rs, 1)]));
+                    TU = VCONJ(VSUB(TS, TO));
+                    ST(&(Rm[WS(rs, 2)]), TU, -ms, &(Rm[0]));
+                    T1j = VADD(T1e, T1i);
+                    ST(&(Rp[WS(rs, 3)]), T1j, ms, &(Rp[WS(rs, 1)]));
+                    T1b = VCONJ(VSUB(T18, T1a));
+                    ST(&(Rm[WS(rs, 4)]), T1b, -ms, &(Rm[0]));
+                    T16 = VADD(TW, T14);
+                    ST(&(Rp[WS(rs, 1)]), T16, ms, &(Rp[WS(rs, 1)]));
+                    T1c = VADD(T18, T1a);
+                    ST(&(Rp[WS(rs, 4)]), T1c, ms, &(Rp[0]));
+                    T15 = VCONJ(VSUB(TW, T14));
+                    ST(&(Rm[WS(rs, 1)]), T15, -ms, &(Rm[WS(rs, 1)]));
+               }
+          }
+     }
+     VLEAVE();
+}
+
+static const tw_instr twinstr[] = {
+     VTW(1, 1),
+     VTW(1, 2),
+     VTW(1, 3),
+     VTW(1, 4),
+     VTW(1, 5),
+     VTW(1, 6),
+     VTW(1, 7),
+     VTW(1, 8),
+     VTW(1, 9),
+     VTW(1, 10),
+     VTW(1, 11),
+     {TW_NEXT, VL, 0}
+};
+
+static const hc2c_desc desc = { 12, XSIMD_STRING("hc2cbdftv_12"), twinstr, &GENUS, {67, 26, 4, 0} };
+
+void XSIMD(codelet_hc2cbdftv_12) (planner *p) {
+     X(khc2c_register) (p, hc2cbdftv_12, &desc, HC2C_VIA_DFT);
+}
+#endif