annotate src/fftw-3.3.3/rdft/simd/common/hc2cbdftv_20.c @ 23:619f715526df sv_v2.1

Update Vamp plugin SDK to 2.5
author Chris Cannam
date Thu, 09 May 2013 10:52:46 +0100
parents 37bf6b4a2645
children
rev   line source
Chris@10 1 /*
Chris@10 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
Chris@10 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
Chris@10 4 *
Chris@10 5 * This program is free software; you can redistribute it and/or modify
Chris@10 6 * it under the terms of the GNU General Public License as published by
Chris@10 7 * the Free Software Foundation; either version 2 of the License, or
Chris@10 8 * (at your option) any later version.
Chris@10 9 *
Chris@10 10 * This program is distributed in the hope that it will be useful,
Chris@10 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@10 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@10 13 * GNU General Public License for more details.
Chris@10 14 *
Chris@10 15 * You should have received a copy of the GNU General Public License
Chris@10 16 * along with this program; if not, write to the Free Software
Chris@10 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@10 18 *
Chris@10 19 */
Chris@10 20
Chris@10 21 /* This file was automatically generated --- DO NOT EDIT */
Chris@10 22 /* Generated on Sun Nov 25 07:42:30 EST 2012 */
Chris@10 23
Chris@10 24 #include "codelet-rdft.h"
Chris@10 25
Chris@10 26 #ifdef HAVE_FMA
Chris@10 27
Chris@10 28 /* Generated by: ../../../genfft/gen_hc2cdft_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 20 -dif -sign 1 -name hc2cbdftv_20 -include hc2cbv.h */
Chris@10 29
Chris@10 30 /*
Chris@10 31 * This function contains 143 FP additions, 108 FP multiplications,
Chris@10 32 * (or, 77 additions, 42 multiplications, 66 fused multiply/add),
Chris@10 33 * 134 stack variables, 4 constants, and 40 memory accesses
Chris@10 34 */
Chris@10 35 #include "hc2cbv.h"
Chris@10 36
/*
 * FMA variant of the size-20 half-complex-to-complex backward DIF codelet.
 * Machine-generated by genfft (see header comment) -- the arithmetic and its
 * statement order are scheduler output; do not hand-edit.
 *
 * Rp/Ip, Rm/Im: the two conjugate-symmetric halves of the input/output array.
 * W: twiddle-factor array (38 values per m step, see the loop header).
 * rs: stride between transform elements; mb..me: range of m; ms: m stride.
 */
static void hc2cbdftv_20(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
{
     /* Constants used by the radix-5 sub-butterflies of the size-20
        transform (0.618..., 0.951..., 0.559... -- presumably the usual
        2*cos/sin(2*pi/5)-derived values emitted by genfft). */
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
     {
          INT m;
          /* One vectorized iteration handles VL transforms; W advances by
             TWVL * 38 twiddle scalars per step. */
          for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 38)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 38), MAKE_VOLATILE_STRIDE(80, rs)) {
               V T1M, T1T, T4, TF, T12, Te, T16, Ts, Tb, TN, TA, TG, TU, T1Y, T11;
               V T1e, T29, T21, T15, Th, T13, Tp;
               {
                    /* Load the 20 inputs from the Rp/Rm halves and form the
                       conjugate sums/differences (VFMACONJ / VFNMSCONJ). */
                    V TS, TT, Tf, T10, T20, T1Z, TX, Tg, Tn, To, T2, T3, TD, TE, T8;
                    V TV, T7, TZ, Tz, T9, Tu, Tv, T5, T6, Tx, Ty, Tc, Td, Tq, Tr;
                    V TY, Ta, TW, Tw;
                    T2 = LD(&(Rp[0]), ms, &(Rp[0]));
                    T3 = LD(&(Rm[WS(rs, 9)]), -ms, &(Rm[WS(rs, 1)]));
                    TD = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
                    TE = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
                    T5 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
                    T6 = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
                    Tx = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
                    Ty = LD(&(Rm[WS(rs, 8)]), -ms, &(Rm[0]));
                    T8 = LD(&(Rp[WS(rs, 6)]), ms, &(Rp[0]));
                    TS = VFMACONJ(T3, T2);
                    T4 = VFNMSCONJ(T3, T2);
                    TT = VFMACONJ(TE, TD);
                    TF = VFNMSCONJ(TE, TD);
                    TV = VFMACONJ(T6, T5);
                    T7 = VFNMSCONJ(T6, T5);
                    TZ = VFMACONJ(Ty, Tx);
                    Tz = VFNMSCONJ(Ty, Tx);
                    T9 = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
                    Tu = LD(&(Rp[WS(rs, 9)]), ms, &(Rp[WS(rs, 1)]));
                    Tv = LD(&(Rm[0]), -ms, &(Rm[0]));
                    Tc = LD(&(Rp[WS(rs, 8)]), ms, &(Rp[0]));
                    Td = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
                    Tq = LD(&(Rp[WS(rs, 7)]), ms, &(Rp[WS(rs, 1)]));
                    Tr = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
                    Tf = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
                    TY = VFMACONJ(T9, T8);
                    Ta = VFMSCONJ(T9, T8);
                    TW = VFMACONJ(Tv, Tu);
                    Tw = VFNMSCONJ(Tv, Tu);
                    T12 = VFMACONJ(Td, Tc);
                    Te = VFNMSCONJ(Td, Tc);
                    T16 = VFMACONJ(Tr, Tq);
                    Ts = VFMSCONJ(Tr, Tq);
                    T10 = VSUB(TY, TZ);
                    T20 = VADD(TY, TZ);
                    Tb = VADD(T7, Ta);
                    TN = VSUB(T7, Ta);
                    T1Z = VADD(TV, TW);
                    TX = VSUB(TV, TW);
                    TA = VSUB(Tw, Tz);
                    TG = VADD(Tw, Tz);
                    Tg = LD(&(Rm[WS(rs, 7)]), -ms, &(Rm[WS(rs, 1)]));
                    Tn = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
                    To = LD(&(Rm[WS(rs, 6)]), -ms, &(Rm[0]));
                    TU = VSUB(TS, TT);
                    T1Y = VADD(TS, TT);
                    T11 = VADD(TX, T10);
                    T1e = VSUB(TX, T10);
                    T29 = VSUB(T1Z, T20);
                    T21 = VADD(T1Z, T20);
                    T15 = VFMACONJ(Tg, Tf);
                    Th = VFMSCONJ(Tg, Tf);
                    T13 = VFMACONJ(To, Tn);
                    Tp = VFMSCONJ(To, Tn);
               }
               {
                    /* Butterfly recombination; twiddles are fetched with LDW
                       and applied with VZMUL/VZMULI, interleaved with the
                       arithmetic by the pipeline scheduler. */
                    V T1S, T2B, T1W, T1I, T2q, T2w, T2i, T2c, T1C, T1K, T1s, T1g, T1, T2t, T1v;
                    V T1Q, T2A, T1q, T2m, TC, T1w, TP, T1x, T2f, T2r, T2g, T1E, T1D, T2y, T2x;
                    V T1i, T1h, T2D, T2C, T2s, T1t, T1u, T1y, T2u, TQ, T2d, T2e, T1U, T1L, T2j;
                    V T2k;
                    {
                         V T1R, T1F, T1V, T1o, TO, Tl, T1d, T2a, T1l, TB, TK, T1G, Tk, T1b, T19;
                         V T27, T25, T1H, TJ, T17, T23, TM, Ti, T14, T22, Tt, TH, Tj, T18, T24;
                         V TI, T2b, T2p, T1X, T2v, T2h, T2n, T1B, T1f, T28, T2o, T1a, TR, T1J, T1r;
                         V T1z, T26, Tm, TL, T1O, T1m, T1j, T2z, T1N, T1p, T1P, T2l, T1c, T1A, T1n;
                         V T1k;
                         T1R = LDW(&(W[TWVL * 18]));
                         T17 = VSUB(T15, T16);
                         T23 = VADD(T15, T16);
                         TM = VSUB(Te, Th);
                         Ti = VADD(Te, Th);
                         T14 = VSUB(T12, T13);
                         T22 = VADD(T12, T13);
                         Tt = VSUB(Tp, Ts);
                         TH = VADD(Tp, Ts);
                         T1F = LDW(&(W[TWVL * 28]));
                         T1V = LDW(&(W[TWVL * 8]));
                         T1o = VFMA(LDK(KP618033988), TM, TN);
                         TO = VFNMS(LDK(KP618033988), TN, TM);
                         Tj = VADD(Tb, Ti);
                         Tl = VSUB(Tb, Ti);
                         T18 = VADD(T14, T17);
                         T1d = VSUB(T14, T17);
                         T24 = VADD(T22, T23);
                         T2a = VSUB(T22, T23);
                         T1l = VFMA(LDK(KP618033988), Tt, TA);
                         TB = VFNMS(LDK(KP618033988), TA, Tt);
                         TI = VADD(TG, TH);
                         TK = VSUB(TG, TH);
                         T1G = VADD(T4, Tj);
                         Tk = VFNMS(LDK(KP250000000), Tj, T4);
                         T1b = VSUB(T11, T18);
                         T19 = VADD(T11, T18);
                         T27 = VSUB(T21, T24);
                         T25 = VADD(T21, T24);
                         T1H = VADD(TF, TI);
                         TJ = VFNMS(LDK(KP250000000), TI, TF);
                         T2b = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), T2a, T29));
                         T2p = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), T29, T2a));
                         T1X = LDW(&(W[TWVL * 6]));
                         T1S = VZMUL(T1R, VADD(TU, T19));
                         T2v = LDW(&(W[TWVL * 22]));
                         T2B = VADD(T1Y, T25);
                         T26 = VFNMS(LDK(KP250000000), T25, T1Y);
                         T1W = VZMULI(T1V, VFMAI(T1H, T1G));
                         T1I = VZMULI(T1F, VFNMSI(T1H, T1G));
                         T2h = LDW(&(W[TWVL * 30]));
                         T2n = LDW(&(W[TWVL * 14]));
                         T1B = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), T1d, T1e));
                         T1f = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), T1e, T1d));
                         T28 = VFMA(LDK(KP559016994), T27, T26);
                         T2o = VFNMS(LDK(KP559016994), T27, T26);
                         T1a = VFNMS(LDK(KP250000000), T19, TU);
                         TR = LDW(&(W[TWVL * 2]));
                         T1J = LDW(&(W[TWVL * 26]));
                         T1r = LDW(&(W[TWVL * 34]));
                         T1z = LDW(&(W[TWVL * 10]));
                         T1k = VFMA(LDK(KP559016994), Tl, Tk);
                         Tm = VFNMS(LDK(KP559016994), Tl, Tk);
                         T2q = VZMUL(T2n, VFMAI(T2p, T2o));
                         T2w = VZMUL(T2v, VFNMSI(T2p, T2o));
                         T2i = VZMUL(T2h, VFMAI(T2b, T28));
                         T2c = VZMUL(T1X, VFNMSI(T2b, T28));
                         T1c = VFNMS(LDK(KP559016994), T1b, T1a);
                         T1A = VFMA(LDK(KP559016994), T1b, T1a);
                         TL = VFNMS(LDK(KP559016994), TK, TJ);
                         T1n = VFMA(LDK(KP559016994), TK, TJ);
                         T1O = VFMA(LDK(KP951056516), T1l, T1k);
                         T1m = VFNMS(LDK(KP951056516), T1l, T1k);
                         T1j = LDW(&(W[TWVL * 36]));
                         T2z = LDW(&(W[0]));
                         T1N = LDW(&(W[TWVL * 20]));
                         T1C = VZMUL(T1z, VFMAI(T1B, T1A));
                         T1K = VZMUL(T1J, VFNMSI(T1B, T1A));
                         T1s = VZMUL(T1r, VFMAI(T1f, T1c));
                         T1g = VZMUL(TR, VFNMSI(T1f, T1c));
                         T1p = VFMA(LDK(KP951056516), T1o, T1n);
                         T1P = VFNMS(LDK(KP951056516), T1o, T1n);
                         T2l = LDW(&(W[TWVL * 16]));
                         T1 = LDW(&(W[TWVL * 4]));
                         T2t = LDW(&(W[TWVL * 24]));
                         T1v = LDW(&(W[TWVL * 12]));
                         T1Q = VZMULI(T1N, VFNMSI(T1P, T1O));
                         T2A = VZMULI(T2z, VFMAI(T1p, T1m));
                         T1q = VZMULI(T1j, VFNMSI(T1p, T1m));
                         T2m = VZMULI(T2l, VFMAI(T1P, T1O));
                         TC = VFMA(LDK(KP951056516), TB, Tm);
                         T1w = VFNMS(LDK(KP951056516), TB, Tm);
                         TP = VFNMS(LDK(KP951056516), TO, TL);
                         T1x = VFMA(LDK(KP951056516), TO, TL);
                         T2f = LDW(&(W[TWVL * 32]));
                    }
                    /* Combine twiddled outputs and store both halves:
                       Rp[k] gets the sum, Rm[k] the conjugated difference. */
                    T2D = VCONJ(VSUB(T2B, T2A));
                    T2C = VADD(T2A, T2B);
                    T2s = VCONJ(VSUB(T2q, T2m));
                    T2r = VADD(T2m, T2q);
                    T1t = VADD(T1q, T1s);
                    T1u = VCONJ(VSUB(T1s, T1q));
                    T1y = VZMULI(T1v, VFNMSI(T1x, T1w));
                    T2u = VZMULI(T2t, VFMAI(T1x, T1w));
                    TQ = VZMULI(T1, VFNMSI(TP, TC));
                    T2g = VZMULI(T2f, VFMAI(TP, TC));
                    ST(&(Rm[0]), T2D, -ms, &(Rm[0]));
                    ST(&(Rp[0]), T2C, ms, &(Rp[0]));
                    ST(&(Rm[WS(rs, 4)]), T2s, -ms, &(Rm[0]));
                    ST(&(Rm[WS(rs, 9)]), T1u, -ms, &(Rm[WS(rs, 1)]));
                    T1E = VCONJ(VSUB(T1C, T1y));
                    T1D = VADD(T1y, T1C);
                    T2y = VCONJ(VSUB(T2w, T2u));
                    T2x = VADD(T2u, T2w);
                    T1i = VCONJ(VSUB(T1g, TQ));
                    T1h = VADD(TQ, T1g);
                    ST(&(Rp[WS(rs, 9)]), T1t, ms, &(Rp[WS(rs, 1)]));
                    T1L = VADD(T1I, T1K);
                    T1M = VCONJ(VSUB(T1K, T1I));
                    ST(&(Rp[WS(rs, 3)]), T1D, ms, &(Rp[WS(rs, 1)]));
                    ST(&(Rm[WS(rs, 6)]), T2y, -ms, &(Rm[0]));
                    ST(&(Rp[WS(rs, 6)]), T2x, ms, &(Rp[0]));
                    ST(&(Rm[WS(rs, 1)]), T1i, -ms, &(Rm[WS(rs, 1)]));
                    ST(&(Rp[WS(rs, 1)]), T1h, ms, &(Rp[WS(rs, 1)]));
                    T2d = VADD(T1W, T2c);
                    T2e = VCONJ(VSUB(T2c, T1W));
                    ST(&(Rm[WS(rs, 3)]), T1E, -ms, &(Rm[WS(rs, 1)]));
                    ST(&(Rp[WS(rs, 7)]), T1L, ms, &(Rp[WS(rs, 1)]));
                    T1U = VCONJ(VSUB(T1S, T1Q));
                    T1T = VADD(T1Q, T1S);
                    T2j = VADD(T2g, T2i);
                    T2k = VCONJ(VSUB(T2i, T2g));
                    ST(&(Rp[WS(rs, 2)]), T2d, ms, &(Rp[0]));
                    ST(&(Rp[WS(rs, 4)]), T2r, ms, &(Rp[0]));
                    ST(&(Rm[WS(rs, 5)]), T1U, -ms, &(Rm[WS(rs, 1)]));
                    ST(&(Rm[WS(rs, 2)]), T2e, -ms, &(Rm[0]));
                    ST(&(Rp[WS(rs, 8)]), T2j, ms, &(Rp[0]));
                    ST(&(Rm[WS(rs, 8)]), T2k, -ms, &(Rm[0]));
               }
               ST(&(Rp[WS(rs, 5)]), T1T, ms, &(Rp[WS(rs, 1)]));
               ST(&(Rm[WS(rs, 7)]), T1M, -ms, &(Rm[WS(rs, 1)]));
          }
     }
     VLEAVE();
}
Chris@10 253
/* Twiddle instruction table: one full complex twiddle (VTW) for each of the
   19 non-trivial output lines k = 1..19 of the size-20 transform, terminated
   by a TW_NEXT record advancing by VL.  Generated data -- do not edit. */
static const tw_instr twinstr[] = {
     VTW(1, 1),
     VTW(1, 2),
     VTW(1, 3),
     VTW(1, 4),
     VTW(1, 5),
     VTW(1, 6),
     VTW(1, 7),
     VTW(1, 8),
     VTW(1, 9),
     VTW(1, 10),
     VTW(1, 11),
     VTW(1, 12),
     VTW(1, 13),
     VTW(1, 14),
     VTW(1, 15),
     VTW(1, 16),
     VTW(1, 17),
     VTW(1, 18),
     VTW(1, 19),
     {TW_NEXT, VL, 0}
};
Chris@10 276
/* Codelet descriptor: radix 20, twiddle table above, and operation counts
   {77 adds, 42 muls, 66 fused multiply-adds, 0 other} as stated in the
   generated header comment for the FMA variant. */
static const hc2c_desc desc = { 20, XSIMD_STRING("hc2cbdftv_20"), twinstr, &GENUS, {77, 42, 66, 0} };
Chris@10 278
/* Register this codelet with the planner as an hc2c solver applied via DFT. */
void XSIMD(codelet_hc2cbdftv_20) (planner *p) {
     X(khc2c_register) (p, hc2cbdftv_20, &desc, HC2C_VIA_DFT);
}
Chris@10 282 #else /* HAVE_FMA */
Chris@10 283
Chris@10 284 /* Generated by: ../../../genfft/gen_hc2cdft_c.native -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 20 -dif -sign 1 -name hc2cbdftv_20 -include hc2cbv.h */
Chris@10 285
Chris@10 286 /*
Chris@10 287 * This function contains 143 FP additions, 62 FP multiplications,
Chris@10 288 * (or, 131 additions, 50 multiplications, 12 fused multiply/add),
Chris@10 289 * 114 stack variables, 4 constants, and 40 memory accesses
Chris@10 290 */
Chris@10 291 #include "hc2cbv.h"
Chris@10 292
/*
 * Non-FMA variant of the size-20 half-complex-to-complex backward DIF
 * codelet (selected when HAVE_FMA is not defined).  Machine-generated by
 * genfft; the arithmetic and its statement order are generator output --
 * do not hand-edit.
 *
 * Rp/Ip, Rm/Im: the two conjugate-symmetric halves of the input/output array.
 * W: twiddle-factor array (38 values per m step); rs: element stride;
 * mb..me: range of m; ms: m stride.
 */
static void hc2cbdftv_20(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
{
     /* Constants for the radix-5 sub-butterflies (presumably the standard
        sin/cos(2*pi/5)-derived values; note this variant uses explicit
        VMUL with KP587785252 instead of FMA-fused forms). */
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
     {
          INT m;
          /* One vectorized iteration handles VL transforms; W advances by
             TWVL * 38 twiddle scalars per step. */
          for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 38)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 38), MAKE_VOLATILE_STRIDE(80, rs)) {
               V TK, T1v, TY, T1x, T1j, T2f, TS, TT, TO, TU, T5, To, Tp, Tq, T2a;
               V T2d, T2g, T2k, T2j, T1k, T1l, T18, T1m, T1f;
               {
                    /* Load the 20 inputs; Rm-side loads are explicitly
                       conjugated with VCONJ before the sum/difference
                       butterflies. */
                    V T2, TP, T4, TR, TI, T1d, T9, T12, Td, T15, TE, T1a, Tv, T13, Tm;
                    V T1c, Tz, T16, Ti, T19, T3, TQ, TH, TG, TF, T6, T8, T7, Tc, Tb;
                    V Ta, TD, TC, TB, Ts, Tu, Tt, Tl, Tk, Tj, Tw, Ty, Tx, Tf, Th;
                    V Tg, TA, TJ, TW, TX, T1h, T1i, TM, TN, Te, Tn, T28, T29, T2b, T2c;
                    V T14, T17, T1b, T1e;
                    T2 = LD(&(Rp[0]), ms, &(Rp[0]));
                    TP = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
                    T3 = LD(&(Rm[WS(rs, 9)]), -ms, &(Rm[WS(rs, 1)]));
                    T4 = VCONJ(T3);
                    TQ = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
                    TR = VCONJ(TQ);
                    TH = LD(&(Rp[WS(rs, 7)]), ms, &(Rp[WS(rs, 1)]));
                    TF = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
                    TG = VCONJ(TF);
                    TI = VSUB(TG, TH);
                    T1d = VADD(TG, TH);
                    T6 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
                    T7 = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
                    T8 = VCONJ(T7);
                    T9 = VSUB(T6, T8);
                    T12 = VADD(T6, T8);
                    Tc = LD(&(Rp[WS(rs, 6)]), ms, &(Rp[0]));
                    Ta = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
                    Tb = VCONJ(Ta);
                    Td = VSUB(Tb, Tc);
                    T15 = VADD(Tb, Tc);
                    TD = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
                    TB = LD(&(Rm[WS(rs, 6)]), -ms, &(Rm[0]));
                    TC = VCONJ(TB);
                    TE = VSUB(TC, TD);
                    T1a = VADD(TC, TD);
                    Ts = LD(&(Rp[WS(rs, 9)]), ms, &(Rp[WS(rs, 1)]));
                    Tt = LD(&(Rm[0]), -ms, &(Rm[0]));
                    Tu = VCONJ(Tt);
                    Tv = VSUB(Ts, Tu);
                    T13 = VADD(Ts, Tu);
                    Tl = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
                    Tj = LD(&(Rm[WS(rs, 7)]), -ms, &(Rm[WS(rs, 1)]));
                    Tk = VCONJ(Tj);
                    Tm = VSUB(Tk, Tl);
                    T1c = VADD(Tk, Tl);
                    Tw = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
                    Tx = LD(&(Rm[WS(rs, 8)]), -ms, &(Rm[0]));
                    Ty = VCONJ(Tx);
                    Tz = VSUB(Tw, Ty);
                    T16 = VADD(Tw, Ty);
                    Tf = LD(&(Rp[WS(rs, 8)]), ms, &(Rp[0]));
                    Tg = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
                    Th = VCONJ(Tg);
                    Ti = VSUB(Tf, Th);
                    T19 = VADD(Tf, Th);
                    TA = VSUB(Tv, Tz);
                    TJ = VSUB(TE, TI);
                    TK = VFNMS(LDK(KP951056516), TJ, VMUL(LDK(KP587785252), TA));
                    T1v = VFMA(LDK(KP951056516), TA, VMUL(LDK(KP587785252), TJ));
                    TW = VSUB(T9, Td);
                    TX = VSUB(Ti, Tm);
                    TY = VFNMS(LDK(KP951056516), TX, VMUL(LDK(KP587785252), TW));
                    T1x = VFMA(LDK(KP951056516), TW, VMUL(LDK(KP587785252), TX));
                    T1h = VADD(T2, T4);
                    T1i = VADD(TP, TR);
                    T1j = VSUB(T1h, T1i);
                    T2f = VADD(T1h, T1i);
                    TS = VSUB(TP, TR);
                    TM = VADD(Tv, Tz);
                    TN = VADD(TE, TI);
                    TT = VADD(TM, TN);
                    TO = VMUL(LDK(KP559016994), VSUB(TM, TN));
                    TU = VFNMS(LDK(KP250000000), TT, TS);
                    T5 = VSUB(T2, T4);
                    Te = VADD(T9, Td);
                    Tn = VADD(Ti, Tm);
                    To = VADD(Te, Tn);
                    Tp = VFNMS(LDK(KP250000000), To, T5);
                    Tq = VMUL(LDK(KP559016994), VSUB(Te, Tn));
                    T28 = VADD(T12, T13);
                    T29 = VADD(T15, T16);
                    T2a = VADD(T28, T29);
                    T2b = VADD(T19, T1a);
                    T2c = VADD(T1c, T1d);
                    T2d = VADD(T2b, T2c);
                    T2g = VADD(T2a, T2d);
                    T2k = VSUB(T2b, T2c);
                    T2j = VSUB(T28, T29);
                    T14 = VSUB(T12, T13);
                    T17 = VSUB(T15, T16);
                    T1k = VADD(T14, T17);
                    T1b = VSUB(T19, T1a);
                    T1e = VSUB(T1c, T1d);
                    T1l = VADD(T1b, T1e);
                    T18 = VSUB(T14, T17);
                    T1m = VADD(T1k, T1l);
                    T1f = VSUB(T1b, T1e);
               }
               {
                    /* Butterfly recombination; twiddles are fetched with LDW
                       and applied with VZMUL/VZMULI, with imaginary rotation
                       done explicitly through VBYI in this variant. */
                    V T2L, T22, T1S, T26, T2m, T2G, T2s, T2A, T1q, T1U, T1C, T1M, T10, T2E, T1I;
                    V T2q, T1A, T2K, T20, T2w, T21, T1Q, T1R, T1P, T25, T1r, T1s, T2C, T2N, T1N;
                    V T2H, T2I, T2M, T1E, T1D, T1O, T1V, T2n, T2B, T24, T2o, T2t, T2u, T23, T1W;
                    T2L = VADD(T2f, T2g);
                    T21 = LDW(&(W[TWVL * 18]));
                    T22 = VZMUL(T21, VADD(T1j, T1m));
                    T1Q = VADD(T5, To);
                    T1R = VBYI(VADD(TS, TT));
                    T1P = LDW(&(W[TWVL * 28]));
                    T1S = VZMULI(T1P, VSUB(T1Q, T1R));
                    T25 = LDW(&(W[TWVL * 8]));
                    T26 = VZMULI(T25, VADD(T1Q, T1R));
                    {
                         V T2l, T2z, T2i, T2y, T2e, T2h, T27, T2F, T2r, T2x, T1g, T1K, T1p, T1L, T1n;
                         V T1o, T11, T1T, T1B, T1J, TL, T1G, TZ, T1H, Tr, TV, T1, T2D, T1F, T2p;
                         V T1w, T1Y, T1z, T1Z, T1u, T1y, T1t, T2J, T1X, T2v;
                         T2l = VBYI(VFMA(LDK(KP951056516), T2j, VMUL(LDK(KP587785252), T2k)));
                         T2z = VBYI(VFNMS(LDK(KP951056516), T2k, VMUL(LDK(KP587785252), T2j)));
                         T2e = VMUL(LDK(KP559016994), VSUB(T2a, T2d));
                         T2h = VFNMS(LDK(KP250000000), T2g, T2f);
                         T2i = VADD(T2e, T2h);
                         T2y = VSUB(T2h, T2e);
                         T27 = LDW(&(W[TWVL * 6]));
                         T2m = VZMUL(T27, VSUB(T2i, T2l));
                         T2F = LDW(&(W[TWVL * 22]));
                         T2G = VZMUL(T2F, VADD(T2z, T2y));
                         T2r = LDW(&(W[TWVL * 30]));
                         T2s = VZMUL(T2r, VADD(T2l, T2i));
                         T2x = LDW(&(W[TWVL * 14]));
                         T2A = VZMUL(T2x, VSUB(T2y, T2z));
                         T1g = VBYI(VFNMS(LDK(KP951056516), T1f, VMUL(LDK(KP587785252), T18)));
                         T1K = VBYI(VFMA(LDK(KP951056516), T18, VMUL(LDK(KP587785252), T1f)));
                         T1n = VFNMS(LDK(KP250000000), T1m, T1j);
                         T1o = VMUL(LDK(KP559016994), VSUB(T1k, T1l));
                         T1p = VSUB(T1n, T1o);
                         T1L = VADD(T1o, T1n);
                         T11 = LDW(&(W[TWVL * 2]));
                         T1q = VZMUL(T11, VADD(T1g, T1p));
                         T1T = LDW(&(W[TWVL * 26]));
                         T1U = VZMUL(T1T, VSUB(T1L, T1K));
                         T1B = LDW(&(W[TWVL * 34]));
                         T1C = VZMUL(T1B, VSUB(T1p, T1g));
                         T1J = LDW(&(W[TWVL * 10]));
                         T1M = VZMUL(T1J, VADD(T1K, T1L));
                         Tr = VSUB(Tp, Tq);
                         TL = VSUB(Tr, TK);
                         T1G = VADD(Tr, TK);
                         TV = VSUB(TO, TU);
                         TZ = VBYI(VSUB(TV, TY));
                         T1H = VBYI(VADD(TY, TV));
                         T1 = LDW(&(W[TWVL * 4]));
                         T10 = VZMULI(T1, VADD(TL, TZ));
                         T2D = LDW(&(W[TWVL * 24]));
                         T2E = VZMULI(T2D, VSUB(T1G, T1H));
                         T1F = LDW(&(W[TWVL * 12]));
                         T1I = VZMULI(T1F, VADD(T1G, T1H));
                         T2p = LDW(&(W[TWVL * 32]));
                         T2q = VZMULI(T2p, VSUB(TL, TZ));
                         T1u = VADD(Tq, Tp);
                         T1w = VSUB(T1u, T1v);
                         T1Y = VADD(T1u, T1v);
                         T1y = VADD(TO, TU);
                         T1z = VBYI(VADD(T1x, T1y));
                         T1Z = VBYI(VSUB(T1y, T1x));
                         T1t = LDW(&(W[TWVL * 36]));
                         T1A = VZMULI(T1t, VSUB(T1w, T1z));
                         T2J = LDW(&(W[0]));
                         T2K = VZMULI(T2J, VADD(T1w, T1z));
                         T1X = LDW(&(W[TWVL * 20]));
                         T20 = VZMULI(T1X, VSUB(T1Y, T1Z));
                         T2v = LDW(&(W[TWVL * 16]));
                         T2w = VZMULI(T2v, VADD(T1Y, T1Z));
                    }
                    /* Combine twiddled outputs and store both halves:
                       Rp[k] gets the sum, Rm[k] the conjugated difference. */
                    T1r = VADD(T10, T1q);
                    ST(&(Rp[WS(rs, 1)]), T1r, ms, &(Rp[WS(rs, 1)]));
                    T1s = VCONJ(VSUB(T1q, T10));
                    ST(&(Rm[WS(rs, 1)]), T1s, -ms, &(Rm[WS(rs, 1)]));
                    T2C = VCONJ(VSUB(T2A, T2w));
                    ST(&(Rm[WS(rs, 4)]), T2C, -ms, &(Rm[0]));
                    T2N = VCONJ(VSUB(T2L, T2K));
                    ST(&(Rm[0]), T2N, -ms, &(Rm[0]));
                    T1N = VADD(T1I, T1M);
                    ST(&(Rp[WS(rs, 3)]), T1N, ms, &(Rp[WS(rs, 1)]));
                    T2H = VADD(T2E, T2G);
                    ST(&(Rp[WS(rs, 6)]), T2H, ms, &(Rp[0]));
                    T2I = VCONJ(VSUB(T2G, T2E));
                    ST(&(Rm[WS(rs, 6)]), T2I, -ms, &(Rm[0]));
                    T2M = VADD(T2K, T2L);
                    ST(&(Rp[0]), T2M, ms, &(Rp[0]));
                    T1E = VCONJ(VSUB(T1C, T1A));
                    ST(&(Rm[WS(rs, 9)]), T1E, -ms, &(Rm[WS(rs, 1)]));
                    T1D = VADD(T1A, T1C);
                    ST(&(Rp[WS(rs, 9)]), T1D, ms, &(Rp[WS(rs, 1)]));
                    T1O = VCONJ(VSUB(T1M, T1I));
                    ST(&(Rm[WS(rs, 3)]), T1O, -ms, &(Rm[WS(rs, 1)]));
                    T1V = VADD(T1S, T1U);
                    ST(&(Rp[WS(rs, 7)]), T1V, ms, &(Rp[WS(rs, 1)]));
                    T2n = VADD(T26, T2m);
                    ST(&(Rp[WS(rs, 2)]), T2n, ms, &(Rp[0]));
                    T2B = VADD(T2w, T2A);
                    ST(&(Rp[WS(rs, 4)]), T2B, ms, &(Rp[0]));
                    T24 = VCONJ(VSUB(T22, T20));
                    ST(&(Rm[WS(rs, 5)]), T24, -ms, &(Rm[WS(rs, 1)]));
                    T2o = VCONJ(VSUB(T2m, T26));
                    ST(&(Rm[WS(rs, 2)]), T2o, -ms, &(Rm[0]));
                    T2t = VADD(T2q, T2s);
                    ST(&(Rp[WS(rs, 8)]), T2t, ms, &(Rp[0]));
                    T2u = VCONJ(VSUB(T2s, T2q));
                    ST(&(Rm[WS(rs, 8)]), T2u, -ms, &(Rm[0]));
                    T23 = VADD(T20, T22);
                    ST(&(Rp[WS(rs, 5)]), T23, ms, &(Rp[WS(rs, 1)]));
                    T1W = VCONJ(VSUB(T1U, T1S));
                    ST(&(Rm[WS(rs, 7)]), T1W, -ms, &(Rm[WS(rs, 1)]));
               }
          }
     }
     VLEAVE();
}
Chris@10 518
/* Twiddle instruction table (non-FMA variant, identical to the FMA one):
   one full complex twiddle (VTW) per output line k = 1..19, terminated by a
   TW_NEXT record advancing by VL.  Generated data -- do not edit. */
static const tw_instr twinstr[] = {
     VTW(1, 1),
     VTW(1, 2),
     VTW(1, 3),
     VTW(1, 4),
     VTW(1, 5),
     VTW(1, 6),
     VTW(1, 7),
     VTW(1, 8),
     VTW(1, 9),
     VTW(1, 10),
     VTW(1, 11),
     VTW(1, 12),
     VTW(1, 13),
     VTW(1, 14),
     VTW(1, 15),
     VTW(1, 16),
     VTW(1, 17),
     VTW(1, 18),
     VTW(1, 19),
     {TW_NEXT, VL, 0}
};
Chris@10 541
/* Codelet descriptor: radix 20, twiddle table above, and operation counts
   {131 adds, 50 muls, 12 fused multiply-adds, 0 other} as stated in the
   generated header comment for the non-FMA variant. */
static const hc2c_desc desc = { 20, XSIMD_STRING("hc2cbdftv_20"), twinstr, &GENUS, {131, 50, 12, 0} };
Chris@10 543
/* Register this codelet with the planner as an hc2c solver applied via DFT. */
void XSIMD(codelet_hc2cbdftv_20) (planner *p) {
     X(khc2c_register) (p, hc2cbdftv_20, &desc, HC2C_VIA_DFT);
}
Chris@10 547 #endif /* HAVE_FMA */