/* src/fftw-3.3.5/rdft/simd/common/hc2cbdftv_32.c -- annotated at changeset 56:af97cad61ff0
 * ("Add updated build of PortAudio for OSX", Chris Cannam <cannam@all-day-breakfast.com>,
 * Tue, 03 Jan 2017 15:10:52 +0000; parent 2cd0e3b3e1fd) */
/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sat Jul 30 16:52:45 EDT 2016 */

#include "codelet-rdft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_hc2cdft_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 32 -dif -sign 1 -name hc2cbdftv_32 -include hc2cbv.h */

/*
 * This function contains 249 FP additions, 192 FP multiplications,
 * (or, 119 additions, 62 multiplications, 130 fused multiply/add),
 * 166 stack variables, 7 constants, and 64 memory accesses
 */
#include "hc2cbv.h"

static void hc2cbdftv_32(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
{
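/* The constants below appear to be cos/sin/tan of multiples of pi/16 used as
   fixed coefficients of the size-32 split: KP707106781 ~ cos(pi/4),
   KP923879532 ~ cos(pi/8), KP980785280 ~ cos(pi/16), KP831469612 ~ cos(3pi/16),
   and KP414213562 / KP198912367 / KP668178637 ~ tan(pi/8) / tan(pi/16) /
   tan(3pi/16), tangent forms as commonly emitted for FMA scheduling. */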
DVK(KP980785280, +0.980785280403230449126182236134239036973933731);
DVK(KP831469612, +0.831469612302545237078788377617905756738560812);
DVK(KP668178637, +0.668178637919298919997757686523080761552472251);
DVK(KP198912367, +0.198912367379658006911597622644676228597850501);
DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
DVK(KP414213562, +0.414213562373095048801688724209698078569671875);
DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
{
INT m;
for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 62)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 62), MAKE_VOLATILE_STRIDE(128, rs)) {
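/* Each iteration appears to handle VL transform strips at once: Rp/Ip advance
   and Rm/Im retreat by VL * ms, while W skips the TWVL * 62 twiddle values
   (31 complex factors, matching the twinstr table below) consumed per strip. */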
V T3a, T3N;
{
V T2G, T1o, T2o, T2Y, T1b, T1V, Ts, T1S, T3A, T48, T3p, T45, T31, T2z, T2H;
V T1L, Tv, TG, TM, T3q, T1r, TX, TN, T1s, Ty, T1t, TB, TO, TQ, T1y;
V T3t, TR, T1H, T1K, TV, T1p, T1q, T1w, TW, Tt, Tu, TE, TF, TK, TL;
V Tw, Tx, Tz, TA, T1x;
{
V T1i, T4, T1j, T15, T1l, T1m, Tb, T16, Tf, T1G, Ti, T1F, Tm, T1J, T1I;
V Tp, T2, T3, T13, T14, T5, T6, T8, T9, Td, T7, Ta, Te, Tg, Th;
V Tk, Tl, Tn, To, T2m, Tc, T3l, T1k, T3m, T18, Tj, T3y, T1n, Tq, T19;
V T3n, T17, T2x, T1a, T2n, T2y, Tr, T3z, T3o;
T2 = LD(&(Rp[0]), ms, &(Rp[0]));
T3 = LD(&(Rm[WS(rs, 15)]), -ms, &(Rm[WS(rs, 1)]));
T13 = LD(&(Rp[WS(rs, 8)]), ms, &(Rp[0]));
T14 = LD(&(Rm[WS(rs, 7)]), -ms, &(Rm[WS(rs, 1)]));
T5 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
T6 = LD(&(Rm[WS(rs, 11)]), -ms, &(Rm[WS(rs, 1)]));
T8 = LD(&(Rp[WS(rs, 12)]), ms, &(Rp[0]));
T9 = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
Td = LD(&(Rp[WS(rs, 10)]), ms, &(Rp[0]));
T1i = VFMACONJ(T3, T2);
T4 = VFNMSCONJ(T3, T2);
T1j = VFMACONJ(T14, T13);
T15 = VFNMSCONJ(T14, T13);
T1l = VFMACONJ(T6, T5);
T7 = VFNMSCONJ(T6, T5);
T1m = VFMACONJ(T9, T8);
Ta = VFMSCONJ(T9, T8);
Te = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
Tg = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
Th = LD(&(Rm[WS(rs, 13)]), -ms, &(Rm[WS(rs, 1)]));
Tk = LD(&(Rp[WS(rs, 6)]), ms, &(Rp[0]));
Tl = LD(&(Rm[WS(rs, 9)]), -ms, &(Rm[WS(rs, 1)]));
Tn = LD(&(Rp[WS(rs, 14)]), ms, &(Rp[0]));
To = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
Tb = VADD(T7, Ta);
T16 = VSUB(T7, Ta);
Tf = VFNMSCONJ(Te, Td);
T1G = VFMACONJ(Te, Td);
Ti = VFNMSCONJ(Th, Tg);
T1F = VFMACONJ(Th, Tg);
Tm = VFNMSCONJ(Tl, Tk);
T1J = VFMACONJ(Tl, Tk);
T1I = VFMACONJ(To, Tn);
Tp = VFMSCONJ(To, Tn);
T2m = VFMA(LDK(KP707106781), Tb, T4);
Tc = VFNMS(LDK(KP707106781), Tb, T4);
T3l = VSUB(T1i, T1j);
T1k = VADD(T1i, T1j);
T1H = VADD(T1F, T1G);
T3m = VSUB(T1F, T1G);
T18 = VFNMS(LDK(KP414213562), Tf, Ti);
Tj = VFMA(LDK(KP414213562), Ti, Tf);
T3y = VSUB(T1l, T1m);
T1n = VADD(T1l, T1m);
Tq = VFNMS(LDK(KP414213562), Tp, Tm);
T19 = VFMA(LDK(KP414213562), Tm, Tp);
T1K = VADD(T1I, T1J);
T3n = VSUB(T1I, T1J);
T17 = VFNMS(LDK(KP707106781), T16, T15);
T2x = VFMA(LDK(KP707106781), T16, T15);
T1a = VSUB(T18, T19);
T2n = VADD(T18, T19);
T2y = VADD(Tj, Tq);
Tr = VSUB(Tj, Tq);
T3z = VSUB(T3m, T3n);
T3o = VADD(T3m, T3n);
T2G = VADD(T1k, T1n);
T1o = VSUB(T1k, T1n);
T2o = VFNMS(LDK(KP923879532), T2n, T2m);
T2Y = VFMA(LDK(KP923879532), T2n, T2m);
T1b = VFNMS(LDK(KP923879532), T1a, T17);
T1V = VFMA(LDK(KP923879532), T1a, T17);
Ts = VFMA(LDK(KP923879532), Tr, Tc);
T1S = VFNMS(LDK(KP923879532), Tr, Tc);
T3A = VFMA(LDK(KP707106781), T3z, T3y);
T48 = VFNMS(LDK(KP707106781), T3z, T3y);
T3p = VFMA(LDK(KP707106781), T3o, T3l);
T45 = VFNMS(LDK(KP707106781), T3o, T3l);
T31 = VFMA(LDK(KP923879532), T2y, T2x);
T2z = VFNMS(LDK(KP923879532), T2y, T2x);
}
Tt = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
Tu = LD(&(Rm[WS(rs, 14)]), -ms, &(Rm[0]));
TE = LD(&(Rp[WS(rs, 9)]), ms, &(Rp[WS(rs, 1)]));
TF = LD(&(Rm[WS(rs, 6)]), -ms, &(Rm[0]));
TK = LD(&(Rp[WS(rs, 15)]), ms, &(Rp[WS(rs, 1)]));
TL = LD(&(Rm[0]), -ms, &(Rm[0]));
TV = LD(&(Rp[WS(rs, 7)]), ms, &(Rp[WS(rs, 1)]));
T2H = VADD(T1H, T1K);
T1L = VSUB(T1H, T1K);
Tv = VFNMSCONJ(Tu, Tt);
T1p = VFMACONJ(Tu, Tt);
TG = VFNMSCONJ(TF, TE);
T1q = VFMACONJ(TF, TE);
T1w = VFMACONJ(TL, TK);
TM = VFMSCONJ(TL, TK);
TW = LD(&(Rm[WS(rs, 8)]), -ms, &(Rm[0]));
Tw = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
Tx = LD(&(Rm[WS(rs, 10)]), -ms, &(Rm[0]));
Tz = LD(&(Rp[WS(rs, 13)]), ms, &(Rp[WS(rs, 1)]));
TA = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
T3q = VSUB(T1p, T1q);
T1r = VADD(T1p, T1q);
T1x = VFMACONJ(TW, TV);
TX = VFNMSCONJ(TW, TV);
TN = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
T1s = VFMACONJ(Tx, Tw);
Ty = VFNMSCONJ(Tx, Tw);
T1t = VFMACONJ(TA, Tz);
TB = VFMSCONJ(TA, Tz);
TO = LD(&(Rm[WS(rs, 12)]), -ms, &(Rm[0]));
TQ = LD(&(Rp[WS(rs, 11)]), ms, &(Rp[WS(rs, 1)]));
T1y = VADD(T1w, T1x);
T3t = VSUB(T1w, T1x);
TR = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
{
V T38, T3f, T4p, T4v, T3T, T3Z, T2a, T2i, T4b, T4h, T1O, T20, T2M, T2U, T3F;
V T3L, T2g, T3X, T3J, T1g, T4f, T2S, T4l, T2E, T2X, T3O, T3b, T3i, T26, T4t;
V T43, T1Y, T3c, T30, T3d, T33;
{
V T2I, T2A, T2r, T1c, TJ, T2L, T2u, T2B, T10, T1d, T3x, T3E, T1E, T1N, T1h;
V T1Z, T4m, T1M, T1D, T4a, T4o, T4n, T47, T4u, T3R, T3S, T3Q, T3Y, T28, T29;
V T27, T2h, T44, T4g;
{
V T36, T1v, T2J, T3s, T3B, T2p, TI, T2q, TD, T1B, T3u, TY, TT, T35, T1u;
V T3r, TH, TC, T1z, TP, T1A, TS, T3w, T3D, T1C, T2K, T3v, T3C, T2s, TZ;
V T2t, TU, T37, T49, T46;
T2I = VSUB(T2G, T2H);
T36 = VADD(T2G, T2H);
T1u = VADD(T1s, T1t);
T3r = VSUB(T1s, T1t);
TH = VSUB(Ty, TB);
TC = VADD(Ty, TB);
T1z = VFMACONJ(TO, TN);
TP = VFNMSCONJ(TO, TN);
T1A = VFMACONJ(TR, TQ);
TS = VFMSCONJ(TR, TQ);
T1v = VSUB(T1r, T1u);
T2J = VADD(T1r, T1u);
T3s = VFNMS(LDK(KP414213562), T3r, T3q);
T3B = VFMA(LDK(KP414213562), T3q, T3r);
T2p = VFMA(LDK(KP707106781), TH, TG);
TI = VFNMS(LDK(KP707106781), TH, TG);
T2q = VFMA(LDK(KP707106781), TC, Tv);
TD = VFNMS(LDK(KP707106781), TC, Tv);
T1B = VADD(T1z, T1A);
T3u = VSUB(T1A, T1z);
TY = VSUB(TS, TP);
TT = VADD(TP, TS);
T35 = LDW(&(W[TWVL * 30]));
T4m = LDW(&(W[TWVL * 10]));
T2A = VFNMS(LDK(KP198912367), T2p, T2q);
T2r = VFMA(LDK(KP198912367), T2q, T2p);
T1c = VFNMS(LDK(KP668178637), TD, TI);
TJ = VFMA(LDK(KP668178637), TI, TD);
T1C = VSUB(T1y, T1B);
T2K = VADD(T1y, T1B);
T3v = VFNMS(LDK(KP414213562), T3u, T3t);
T3C = VFMA(LDK(KP414213562), T3t, T3u);
T2s = VFNMS(LDK(KP707106781), TY, TX);
TZ = VFMA(LDK(KP707106781), TY, TX);
T2t = VFMA(LDK(KP707106781), TT, TM);
TU = VFNMS(LDK(KP707106781), TT, TM);
T1M = VSUB(T1v, T1C);
T1D = VADD(T1v, T1C);
T37 = VADD(T2J, T2K);
T2L = VSUB(T2J, T2K);
T3w = VADD(T3s, T3v);
T49 = VSUB(T3s, T3v);
T3D = VSUB(T3B, T3C);
T46 = VADD(T3B, T3C);
T2u = VFNMS(LDK(KP198912367), T2t, T2s);
T2B = VFMA(LDK(KP198912367), T2s, T2t);
T10 = VFNMS(LDK(KP668178637), TZ, TU);
T1d = VFMA(LDK(KP668178637), TU, TZ);
T38 = VZMUL(T35, VSUB(T36, T37));
T3f = VADD(T36, T37);
T4a = VFMA(LDK(KP923879532), T49, T48);
T4o = VFNMS(LDK(KP923879532), T49, T48);
T4n = VFMA(LDK(KP923879532), T46, T45);
T47 = VFNMS(LDK(KP923879532), T46, T45);
T4u = LDW(&(W[TWVL * 50]));
T3R = VFMA(LDK(KP923879532), T3w, T3p);
T3x = VFNMS(LDK(KP923879532), T3w, T3p);
T3E = VFNMS(LDK(KP923879532), T3D, T3A);
T3S = VFMA(LDK(KP923879532), T3D, T3A);
T3Q = LDW(&(W[TWVL * 58]));
T3Y = LDW(&(W[TWVL * 2]));
}
T28 = VFMA(LDK(KP707106781), T1D, T1o);
T1E = VFNMS(LDK(KP707106781), T1D, T1o);
T1N = VFNMS(LDK(KP707106781), T1M, T1L);
T29 = VFMA(LDK(KP707106781), T1M, T1L);
T4p = VZMUL(T4m, VFNMSI(T4o, T4n));
T4v = VZMUL(T4u, VFMAI(T4o, T4n));
T27 = LDW(&(W[TWVL * 6]));
T2h = LDW(&(W[TWVL * 54]));
T3T = VZMUL(T3Q, VFNMSI(T3S, T3R));
T3Z = VZMUL(T3Y, VFMAI(T3S, T3R));
T44 = LDW(&(W[TWVL * 18]));
T4g = LDW(&(W[TWVL * 42]));
T2a = VZMUL(T27, VFMAI(T29, T28));
T2i = VZMUL(T2h, VFNMSI(T29, T28));
T1h = LDW(&(W[TWVL * 22]));
T1Z = LDW(&(W[TWVL * 38]));
T4b = VZMUL(T44, VFMAI(T4a, T47));
T4h = VZMUL(T4g, VFNMSI(T4a, T47));
{
V T1W, T1T, T1, T3W, T2d, T3I, T2e, T12, T2f, T1f, T2F, T2T, T3k, T3K, T11;
V T1e, T32, T2Z, T2l, T4k, T2P, T4e, T2Q, T2w, T2R, T2D, T2v, T2C, T1R, T4s;
V T23, T42, T24, T1U, T25, T1X;
T2F = LDW(&(W[TWVL * 46]));
T2T = LDW(&(W[TWVL * 14]));
T1O = VZMUL(T1h, VFNMSI(T1N, T1E));
T20 = VZMUL(T1Z, VFMAI(T1N, T1E));
T3k = LDW(&(W[TWVL * 26]));
T3K = LDW(&(W[TWVL * 34]));
T2M = VZMUL(T2F, VFNMSI(T2L, T2I));
T2U = VZMUL(T2T, VFMAI(T2L, T2I));
T11 = VADD(TJ, T10);
T1W = VSUB(TJ, T10);
T1T = VSUB(T1d, T1c);
T1e = VADD(T1c, T1d);
T1 = LDW(&(W[TWVL * 24]));
T3W = LDW(&(W[TWVL * 4]));
T3F = VZMUL(T3k, VFNMSI(T3E, T3x));
T3L = VZMUL(T3K, VFMAI(T3E, T3x));
T2d = LDW(&(W[TWVL * 56]));
T3I = LDW(&(W[TWVL * 36]));
T2e = VFMA(LDK(KP831469612), T11, Ts);
T12 = VFNMS(LDK(KP831469612), T11, Ts);
T2f = VFMA(LDK(KP831469612), T1e, T1b);
T1f = VFNMS(LDK(KP831469612), T1e, T1b);
T2v = VSUB(T2r, T2u);
T32 = VADD(T2r, T2u);
T2Z = VADD(T2A, T2B);
T2C = VSUB(T2A, T2B);
T2l = LDW(&(W[TWVL * 48]));
T4k = LDW(&(W[TWVL * 12]));
T2P = LDW(&(W[TWVL * 16]));
T4e = LDW(&(W[TWVL * 44]));
T2g = VZMULI(T2d, VFMAI(T2f, T2e));
T3X = VZMULI(T3W, VFNMSI(T2f, T2e));
T3J = VZMULI(T3I, VFNMSI(T1f, T12));
T1g = VZMULI(T1, VFMAI(T1f, T12));
T2Q = VFNMS(LDK(KP980785280), T2v, T2o);
T2w = VFMA(LDK(KP980785280), T2v, T2o);
T2R = VFMA(LDK(KP980785280), T2C, T2z);
T2D = VFNMS(LDK(KP980785280), T2C, T2z);
T1R = LDW(&(W[TWVL * 40]));
T4s = LDW(&(W[TWVL * 52]));
T23 = LDW(&(W[TWVL * 8]));
T42 = LDW(&(W[TWVL * 20]));
T4f = VZMULI(T4e, VFNMSI(T2R, T2Q));
T2S = VZMULI(T2P, VFMAI(T2R, T2Q));
T4l = VZMULI(T4k, VFNMSI(T2D, T2w));
T2E = VZMULI(T2l, VFMAI(T2D, T2w));
T24 = VFMA(LDK(KP831469612), T1T, T1S);
T1U = VFNMS(LDK(KP831469612), T1T, T1S);
T25 = VFMA(LDK(KP831469612), T1W, T1V);
T1X = VFNMS(LDK(KP831469612), T1W, T1V);
T2X = LDW(&(W[TWVL * 32]));
T3O = LDW(&(W[TWVL * 60]));
T3b = LDW(&(W[0]));
T3i = LDW(&(W[TWVL * 28]));
T26 = VZMULI(T23, VFMAI(T25, T24));
T4t = VZMULI(T4s, VFNMSI(T25, T24));
T43 = VZMULI(T42, VFNMSI(T1X, T1U));
T1Y = VZMULI(T1R, VFMAI(T1X, T1U));
T3c = VFMA(LDK(KP980785280), T2Z, T2Y);
T30 = VFNMS(LDK(KP980785280), T2Z, T2Y);
T3d = VFMA(LDK(KP980785280), T32, T31);
T33 = VFNMS(LDK(KP980785280), T32, T31);
}
}
{
V T3e, T3P, T3j, T34, T2c, T4j, T2k, T4d, T1P, T1Q, T4x, T4w, T2j, T4c, T21;
V T22, T4r, T4q, T2b, T4i, T3h, T3H, T2N, T2O, T41, T40, T3g, T3G, T2V, T2W;
V T3V, T3U, T39, T3M;
T1P = VADD(T1g, T1O);
T1Q = VCONJ(VSUB(T1O, T1g));
T4x = VCONJ(VSUB(T4v, T4t));
T4w = VADD(T4t, T4v);
T2j = VADD(T2g, T2i);
T2k = VCONJ(VSUB(T2i, T2g));
T4d = VCONJ(VSUB(T4b, T43));
T4c = VADD(T43, T4b);
T3e = VZMULI(T3b, VFMAI(T3d, T3c));
T3P = VZMULI(T3O, VFNMSI(T3d, T3c));
T3j = VZMULI(T3i, VFNMSI(T33, T30));
T34 = VZMULI(T2X, VFMAI(T33, T30));
ST(&(Rp[WS(rs, 6)]), T1P, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 13)]), T4w, ms, &(Rp[WS(rs, 1)]));
ST(&(Rp[WS(rs, 14)]), T2j, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 5)]), T4c, ms, &(Rp[WS(rs, 1)]));
ST(&(Rm[WS(rs, 13)]), T4x, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 6)]), T1Q, -ms, &(Rm[0]));
T21 = VADD(T1Y, T20);
T22 = VCONJ(VSUB(T20, T1Y));
T4r = VCONJ(VSUB(T4p, T4l));
T4q = VADD(T4l, T4p);
T2b = VADD(T26, T2a);
T2c = VCONJ(VSUB(T2a, T26));
T4j = VCONJ(VSUB(T4h, T4f));
T4i = VADD(T4f, T4h);
ST(&(Rm[WS(rs, 5)]), T4d, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 14)]), T2k, -ms, &(Rm[0]));
ST(&(Rp[WS(rs, 10)]), T21, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 3)]), T4q, ms, &(Rp[WS(rs, 1)]));
ST(&(Rp[WS(rs, 2)]), T2b, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 11)]), T4i, ms, &(Rp[WS(rs, 1)]));
ST(&(Rm[WS(rs, 3)]), T4r, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 10)]), T22, -ms, &(Rm[0]));
T2N = VADD(T2E, T2M);
T2O = VCONJ(VSUB(T2M, T2E));
T41 = VCONJ(VSUB(T3Z, T3X));
T40 = VADD(T3X, T3Z);
T3g = VADD(T3e, T3f);
T3h = VCONJ(VSUB(T3f, T3e));
T3H = VCONJ(VSUB(T3F, T3j));
T3G = VADD(T3j, T3F);
ST(&(Rm[WS(rs, 11)]), T4j, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 2)]), T2c, -ms, &(Rm[0]));
ST(&(Rp[WS(rs, 12)]), T2N, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 1)]), T40, ms, &(Rp[WS(rs, 1)]));
ST(&(Rp[0]), T3g, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 7)]), T3G, ms, &(Rp[WS(rs, 1)]));
ST(&(Rm[WS(rs, 1)]), T41, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 12)]), T2O, -ms, &(Rm[0]));
T2V = VADD(T2S, T2U);
T2W = VCONJ(VSUB(T2U, T2S));
T3V = VCONJ(VSUB(T3T, T3P));
T3U = VADD(T3P, T3T);
T39 = VADD(T34, T38);
T3a = VCONJ(VSUB(T38, T34));
T3N = VCONJ(VSUB(T3L, T3J));
T3M = VADD(T3J, T3L);
ST(&(Rm[WS(rs, 7)]), T3H, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[0]), T3h, -ms, &(Rm[0]));
ST(&(Rp[WS(rs, 4)]), T2V, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 15)]), T3U, ms, &(Rp[WS(rs, 1)]));
ST(&(Rp[WS(rs, 8)]), T39, ms, &(Rp[0]));
ST(&(Rp[WS(rs, 9)]), T3M, ms, &(Rp[WS(rs, 1)]));
ST(&(Rm[WS(rs, 15)]), T3V, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 4)]), T2W, -ms, &(Rm[0]));
}
}
}
ST(&(Rm[WS(rs, 9)]), T3N, -ms, &(Rm[WS(rs, 1)]));
ST(&(Rm[WS(rs, 8)]), T3a, -ms, &(Rm[0]));
}
}
VLEAVE();
}

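/* The twiddle descriptor below seems to request one full complex twiddle
   factor for each non-trivial output index k = 1..31 of the size-32 step,
   with {TW_NEXT, VL, 0} terminating the list. */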
static const tw_instr twinstr[] = {
VTW(1, 1),
VTW(1, 2),
VTW(1, 3),
VTW(1, 4),
VTW(1, 5),
VTW(1, 6),
VTW(1, 7),
VTW(1, 8),
VTW(1, 9),
VTW(1, 10),
VTW(1, 11),
VTW(1, 12),
VTW(1, 13),
VTW(1, 14),
VTW(1, 15),
VTW(1, 16),
VTW(1, 17),
VTW(1, 18),
VTW(1, 19),
VTW(1, 20),
VTW(1, 21),
VTW(1, 22),
VTW(1, 23),
VTW(1, 24),
VTW(1, 25),
VTW(1, 26),
VTW(1, 27),
VTW(1, 28),
VTW(1, 29),
VTW(1, 30),
VTW(1, 31),
{TW_NEXT, VL, 0}
};

static const hc2c_desc desc = { 32, XSIMD_STRING("hc2cbdftv_32"), twinstr, &GENUS, {119, 62, 130, 0} };

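/* The operation counts in the descriptor ({119, 62, 130, 0}) echo the header
   comment above; HC2C_VIA_DFT presumably tells the planner to apply this
   half-complex codelet by way of a complex DFT. */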
void XSIMD(codelet_hc2cbdftv_32) (planner *p) {
X(khc2c_register) (p, hc2cbdftv_32, &desc, HC2C_VIA_DFT);
}
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_hc2cdft_c.native -simd -compact -variables 4 -pipeline-latency 8 -trivial-stores -variables 32 -no-generate-bytw -n 32 -dif -sign 1 -name hc2cbdftv_32 -include hc2cbv.h */

/*
 * This function contains 249 FP additions, 104 FP multiplications,
 * (or, 233 additions, 88 multiplications, 16 fused multiply/add),
 * 161 stack variables, 7 constants, and 64 memory accesses
 */
#include "hc2cbv.h"

static void hc2cbdftv_32(R *Rp, R *Ip, R *Rm, R *Im, const R *W, stride rs, INT mb, INT me, INT ms)
{
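/* As in the FMA branch, these constants appear to be sines and cosines of
   multiples of pi/16 (KP195090322 ~ sin(pi/16), KP555570233 ~ sin(3pi/16),
   KP382683432 ~ sin(pi/8)); this variant keeps explicit sin/cos pairs rather
   than the tangent-form constants used above. */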
DVK(KP195090322, +0.195090322016128267848284868477022240927691618);
DVK(KP980785280, +0.980785280403230449126182236134239036973933731);
DVK(KP555570233, +0.555570233019602224742830813948532874374937191);
DVK(KP831469612, +0.831469612302545237078788377617905756738560812);
DVK(KP923879532, +0.923879532511286756128183189396788286822416626);
DVK(KP382683432, +0.382683432365089771728459984030398866761344562);
DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
{
INT m;
for (m = mb, W = W + ((mb - 1) * ((TWVL / VL) * 62)); m < me; m = m + VL, Rp = Rp + (VL * ms), Ip = Ip + (VL * ms), Rm = Rm - (VL * ms), Im = Im - (VL * ms), W = W + (TWVL * 62), MAKE_VOLATILE_STRIDE(128, rs)) {
V T1W, T21, Tf, T2c, T1t, T2r, T3T, T4m, Ty, T2q, T3P, T4n, T1n, T2d, T1T;
V T22, T1E, T24, T3I, T4p, TU, T2n, T1i, T2h, T1L, T25, T3L, T4q, T1f, T2o;
V T1j, T2k;
{
V T2, T4, T1Z, T1p, T1r, T20, T9, T1U, Td, T1V, T3, T1q, T6, T8, T7;
V Tc, Tb, Ta, T5, Te, T1o, T1s, T3R, T3S, Tj, T1N, Tw, T1Q, Tn, T1O;
V Ts, T1R, Tg, Ti, Th, Tv, Tu, Tt, Tk, Tm, Tl, Tp, Tr, Tq, To;
V Tx, T3N, T3O, T1l, T1m, T1P, T1S;
T2 = LD(&(Rp[0]), ms, &(Rp[0]));
T3 = LD(&(Rm[WS(rs, 15)]), -ms, &(Rm[WS(rs, 1)]));
T4 = VCONJ(T3);
T1Z = VADD(T2, T4);
T1p = LD(&(Rp[WS(rs, 8)]), ms, &(Rp[0]));
T1q = LD(&(Rm[WS(rs, 7)]), -ms, &(Rm[WS(rs, 1)]));
T1r = VCONJ(T1q);
T20 = VADD(T1p, T1r);
T6 = LD(&(Rp[WS(rs, 4)]), ms, &(Rp[0]));
T7 = LD(&(Rm[WS(rs, 11)]), -ms, &(Rm[WS(rs, 1)]));
T8 = VCONJ(T7);
T9 = VSUB(T6, T8);
T1U = VADD(T6, T8);
Tc = LD(&(Rp[WS(rs, 12)]), ms, &(Rp[0]));
Ta = LD(&(Rm[WS(rs, 3)]), -ms, &(Rm[WS(rs, 1)]));
Tb = VCONJ(Ta);
Td = VSUB(Tb, Tc);
T1V = VADD(Tb, Tc);
T1W = VSUB(T1U, T1V);
T21 = VSUB(T1Z, T20);
T5 = VSUB(T2, T4);
Te = VMUL(LDK(KP707106781), VADD(T9, Td));
Tf = VSUB(T5, Te);
T2c = VADD(T5, Te);
T1o = VMUL(LDK(KP707106781), VSUB(T9, Td));
T1s = VSUB(T1p, T1r);
T1t = VSUB(T1o, T1s);
T2r = VADD(T1s, T1o);
T3R = VADD(T1Z, T20);
T3S = VADD(T1U, T1V);
T3T = VSUB(T3R, T3S);
T4m = VADD(T3R, T3S);
Tg = LD(&(Rp[WS(rs, 2)]), ms, &(Rp[0]));
Th = LD(&(Rm[WS(rs, 13)]), -ms, &(Rm[WS(rs, 1)]));
Ti = VCONJ(Th);
Tj = VSUB(Tg, Ti);
T1N = VADD(Tg, Ti);
Tv = LD(&(Rp[WS(rs, 14)]), ms, &(Rp[0]));
Tt = LD(&(Rm[WS(rs, 1)]), -ms, &(Rm[WS(rs, 1)]));
Tu = VCONJ(Tt);
Tw = VSUB(Tu, Tv);
T1Q = VADD(Tu, Tv);
Tk = LD(&(Rp[WS(rs, 10)]), ms, &(Rp[0]));
Tl = LD(&(Rm[WS(rs, 5)]), -ms, &(Rm[WS(rs, 1)]));
Tm = VCONJ(Tl);
Tn = VSUB(Tk, Tm);
T1O = VADD(Tk, Tm);
Tp = LD(&(Rp[WS(rs, 6)]), ms, &(Rp[0]));
Tq = LD(&(Rm[WS(rs, 9)]), -ms, &(Rm[WS(rs, 1)]));
Tr = VCONJ(Tq);
Ts = VSUB(Tp, Tr);
T1R = VADD(Tp, Tr);
To = VFMA(LDK(KP382683432), Tj, VMUL(LDK(KP923879532), Tn));
Tx = VFNMS(LDK(KP382683432), Tw, VMUL(LDK(KP923879532), Ts));
Ty = VSUB(To, Tx);
T2q = VADD(To, Tx);
T3N = VADD(T1N, T1O);
T3O = VADD(T1Q, T1R);
T3P = VSUB(T3N, T3O);
T4n = VADD(T3N, T3O);
T1l = VFNMS(LDK(KP382683432), Tn, VMUL(LDK(KP923879532), Tj));
T1m = VFMA(LDK(KP923879532), Tw, VMUL(LDK(KP382683432), Ts));
T1n = VSUB(T1l, T1m);
T2d = VADD(T1l, T1m);
T1P = VSUB(T1N, T1O);
T1S = VSUB(T1Q, T1R);
T1T = VMUL(LDK(KP707106781), VSUB(T1P, T1S));
T22 = VMUL(LDK(KP707106781), VADD(T1P, T1S));
}
{
V TD, T1B, TR, T1y, TH, T1C, TM, T1z, TA, TC, TB, TO, TQ, TP, TG;
V TF, TE, TJ, TL, TK, T1A, T1D, T3G, T3H, TN, T2f, TT, T2g, TI, TS;
V TY, T1I, T1c, T1F, T12, T1J, T17, T1G, TV, TX, TW, T1b, T1a, T19, T11;
V T10, TZ, T14, T16, T15, T1H, T1K, T3J, T3K, T18, T2i, T1e, T2j, T13, T1d;
TA = LD(&(Rp[WS(rs, 5)]), ms, &(Rp[WS(rs, 1)]));
TB = LD(&(Rm[WS(rs, 10)]), -ms, &(Rm[0]));
TC = VCONJ(TB);
TD = VSUB(TA, TC);
T1B = VADD(TA, TC);
TO = LD(&(Rp[WS(rs, 1)]), ms, &(Rp[WS(rs, 1)]));
TP = LD(&(Rm[WS(rs, 14)]), -ms, &(Rm[0]));
TQ = VCONJ(TP);
TR = VSUB(TO, TQ);
T1y = VADD(TO, TQ);
TG = LD(&(Rp[WS(rs, 13)]), ms, &(Rp[WS(rs, 1)]));
TE = LD(&(Rm[WS(rs, 2)]), -ms, &(Rm[0]));
TF = VCONJ(TE);
TH = VSUB(TF, TG);
T1C = VADD(TF, TG);
TJ = LD(&(Rp[WS(rs, 9)]), ms, &(Rp[WS(rs, 1)]));
TK = LD(&(Rm[WS(rs, 6)]), -ms, &(Rm[0]));
TL = VCONJ(TK);
TM = VSUB(TJ, TL);
T1z = VADD(TJ, TL);
T1A = VSUB(T1y, T1z);
T1D = VSUB(T1B, T1C);
T1E = VFNMS(LDK(KP382683432), T1D, VMUL(LDK(KP923879532), T1A));
T24 = VFMA(LDK(KP382683432), T1A, VMUL(LDK(KP923879532), T1D));
T3G = VADD(T1y, T1z);
T3H = VADD(T1B, T1C);
T3I = VSUB(T3G, T3H);
T4p = VADD(T3G, T3H);
TI = VMUL(LDK(KP707106781), VSUB(TD, TH));
TN = VSUB(TI, TM);
T2f = VADD(TM, TI);
TS = VMUL(LDK(KP707106781), VADD(TD, TH));
TT = VSUB(TR, TS);
T2g = VADD(TR, TS);
TU = VFMA(LDK(KP831469612), TN, VMUL(LDK(KP555570233), TT));
T2n = VFNMS(LDK(KP195090322), T2f, VMUL(LDK(KP980785280), T2g));
T1i = VFNMS(LDK(KP555570233), TN, VMUL(LDK(KP831469612), TT));
T2h = VFMA(LDK(KP980785280), T2f, VMUL(LDK(KP195090322), T2g));
TV = LD(&(Rp[WS(rs, 3)]), ms, &(Rp[WS(rs, 1)]));
TW = LD(&(Rm[WS(rs, 12)]), -ms, &(Rm[0]));
TX = VCONJ(TW);
TY = VSUB(TV, TX);
T1I = VADD(TV, TX);
T1b = LD(&(Rp[WS(rs, 15)]), ms, &(Rp[WS(rs, 1)]));
T19 = LD(&(Rm[0]), -ms, &(Rm[0]));
T1a = VCONJ(T19);
T1c = VSUB(T1a, T1b);
T1F = VADD(T1a, T1b);
T11 = LD(&(Rp[WS(rs, 11)]), ms, &(Rp[WS(rs, 1)]));
TZ = LD(&(Rm[WS(rs, 4)]), -ms, &(Rm[0]));
T10 = VCONJ(TZ);
T12 = VSUB(T10, T11);
T1J = VADD(T10, T11);
T14 = LD(&(Rp[WS(rs, 7)]), ms, &(Rp[WS(rs, 1)]));
T15 = LD(&(Rm[WS(rs, 8)]), -ms, &(Rm[0]));
T16 = VCONJ(T15);
T17 = VSUB(T14, T16);
T1G = VADD(T14, T16);
T1H = VSUB(T1F, T1G);
T1K = VSUB(T1I, T1J);
T1L = VFMA(LDK(KP923879532), T1H, VMUL(LDK(KP382683432), T1K));
T25 = VFNMS(LDK(KP382683432), T1H, VMUL(LDK(KP923879532), T1K));
T3J = VADD(T1F, T1G);
T3K = VADD(T1I, T1J);
T3L = VSUB(T3J, T3K);
T4q = VADD(T3J, T3K);
T13 = VMUL(LDK(KP707106781), VSUB(TY, T12));
T18 = VSUB(T13, T17);
T2i = VADD(T17, T13);
T1d = VMUL(LDK(KP707106781), VADD(TY, T12));
T1e = VSUB(T1c, T1d);
T2j = VADD(T1c, T1d);
T1f = VFNMS(LDK(KP555570233), T1e, VMUL(LDK(KP831469612), T18));
T2o = VFMA(LDK(KP195090322), T2i, VMUL(LDK(KP980785280), T2j));
T1j = VFMA(LDK(KP555570233), T18, VMUL(LDK(KP831469612), T1e));
T2k = VFNMS(LDK(KP195090322), T2j, VMUL(LDK(KP980785280), T2i));
}
{
V T4L, T4G, T4s, T4y, T3W, T4g, T42, T4a, T3g, T4e, T3o, T3E, T1w, T46, T2M;
V T40, T2u, T4w, T2C, T4k, T36, T3A, T3i, T3s, T28, T2O, T2w, T2G, T2Y, T4K;
V T3y, T4C;
{
V T4E, T4F, T4D, T4o, T4r, T4l, T4x, T3Q, T48, T3V, T49, T3M, T3U, T3F, T4f;
V T41, T47, T3c, T3n, T3f, T3m, T3a, T3b, T3d, T3e, T39, T4d, T3l, T3D, T1h;
V T2K, T1v, T2L, Tz, T1g, T1k, T1u, T1, T45, T2J, T3Z, T2m, T2A, T2t, T2B;
V T2e, T2l, T2p, T2s, T2b, T4v, T2z, T4j;
T4E = VADD(T4m, T4n);
T4F = VADD(T4p, T4q);
T4L = VADD(T4E, T4F);
T4D = LDW(&(W[TWVL * 30]));
T4G = VZMUL(T4D, VSUB(T4E, T4F));
T4o = VSUB(T4m, T4n);
T4r = VBYI(VSUB(T4p, T4q));
T4l = LDW(&(W[TWVL * 46]));
T4s = VZMUL(T4l, VSUB(T4o, T4r));
T4x = LDW(&(W[TWVL * 14]));
T4y = VZMUL(T4x, VADD(T4o, T4r));
T3M = VMUL(LDK(KP707106781), VSUB(T3I, T3L));
T3Q = VBYI(VSUB(T3M, T3P));
T48 = VBYI(VADD(T3P, T3M));
T3U = VMUL(LDK(KP707106781), VADD(T3I, T3L));
T3V = VSUB(T3T, T3U);
T49 = VADD(T3T, T3U);
T3F = LDW(&(W[TWVL * 22]));
T3W = VZMUL(T3F, VADD(T3Q, T3V));
T4f = LDW(&(W[TWVL * 54]));
T4g = VZMUL(T4f, VSUB(T49, T48));
T41 = LDW(&(W[TWVL * 38]));
T42 = VZMUL(T41, VSUB(T3V, T3Q));
T47 = LDW(&(W[TWVL * 6]));
T4a = VZMUL(T47, VADD(T48, T49));
T3a = VADD(T1t, T1n);
T3b = VADD(TU, T1f);
T3c = VBYI(VADD(T3a, T3b));
T3n = VBYI(VSUB(T3b, T3a));
T3d = VADD(Tf, Ty);
T3e = VADD(T1i, T1j);
T3f = VADD(T3d, T3e);
T3m = VSUB(T3d, T3e);
T39 = LDW(&(W[TWVL * 4]));
T3g = VZMULI(T39, VADD(T3c, T3f));
T4d = LDW(&(W[TWVL * 56]));
T4e = VZMULI(T4d, VSUB(T3f, T3c));
T3l = LDW(&(W[TWVL * 36]));
T3o = VZMULI(T3l, VSUB(T3m, T3n));
T3D = LDW(&(W[TWVL * 24]));
T3E = VZMULI(T3D, VADD(T3n, T3m));
Tz = VSUB(Tf, Ty);
T1g = VSUB(TU, T1f);
T1h = VSUB(Tz, T1g);
T2K = VADD(Tz, T1g);
T1k = VSUB(T1i, T1j);
T1u = VSUB(T1n, T1t);
T1v = VBYI(VSUB(T1k, T1u));
T2L = VBYI(VADD(T1u, T1k));
T1 = LDW(&(W[TWVL * 20]));
T1w = VZMULI(T1, VADD(T1h, T1v));
T45 = LDW(&(W[TWVL * 8]));
T46 = VZMULI(T45, VADD(T2K, T2L));
T2J = LDW(&(W[TWVL * 52]));
T2M = VZMULI(T2J, VSUB(T2K, T2L));
T3Z = LDW(&(W[TWVL * 40]));
T40 = VZMULI(T3Z, VSUB(T1h, T1v));
T2e = VSUB(T2c, T2d);
T2l = VSUB(T2h, T2k);
T2m = VSUB(T2e, T2l);
T2A = VADD(T2e, T2l);
T2p = VSUB(T2n, T2o);
T2s = VSUB(T2q, T2r);
T2t = VBYI(VSUB(T2p, T2s));
T2B = VBYI(VADD(T2s, T2p));
T2b = LDW(&(W[TWVL * 44]));
T2u = VZMULI(T2b, VSUB(T2m, T2t));
T4v = LDW(&(W[TWVL * 16]));
T4w = VZMULI(T4v, VADD(T2m, T2t));
T2z = LDW(&(W[TWVL * 12]));
T2C = VZMULI(T2z, VADD(T2A, T2B));
T4j = LDW(&(W[TWVL * 48]));
T4k = VZMULI(T4j, VSUB(T2A, T2B));
{
V T32, T3q, T35, T3r, T30, T31, T33, T34, T2Z, T3z, T3h, T3p, T1Y, T2E, T27;
V T2F, T1M, T1X, T23, T26, T1x, T2N, T2v, T2D, T2U, T3x, T2X, T3w, T2S, T2T;
V T2V, T2W, T2R, T4J, T3v, T4B;
T30 = VADD(T21, T22);
T31 = VADD(T1E, T1L);
T32 = VADD(T30, T31);
T3q = VSUB(T30, T31);
T33 = VADD(T1W, T1T);
T34 = VADD(T24, T25);
T35 = VBYI(VADD(T33, T34));
T3r = VBYI(VSUB(T34, T33));
T2Z = LDW(&(W[TWVL * 58]));
T36 = VZMUL(T2Z, VSUB(T32, T35));
T3z = LDW(&(W[TWVL * 26]));
T3A = VZMUL(T3z, VADD(T3q, T3r));
T3h = LDW(&(W[TWVL * 2]));
T3i = VZMUL(T3h, VADD(T32, T35));
T3p = LDW(&(W[TWVL * 34]));
T3s = VZMUL(T3p, VSUB(T3q, T3r));
T1M = VSUB(T1E, T1L);
T1X = VSUB(T1T, T1W);
T1Y = VBYI(VSUB(T1M, T1X));
T2E = VBYI(VADD(T1X, T1M));
T23 = VSUB(T21, T22);
T26 = VSUB(T24, T25);
T27 = VSUB(T23, T26);
T2F = VADD(T23, T26);
T1x = LDW(&(W[TWVL * 18]));
T28 = VZMUL(T1x, VADD(T1Y, T27));
T2N = LDW(&(W[TWVL * 50]));
T2O = VZMUL(T2N, VSUB(T2F, T2E));
T2v = LDW(&(W[TWVL * 42]));
T2w = VZMUL(T2v, VSUB(T27, T1Y));
T2D = LDW(&(W[TWVL * 10]));
T2G = VZMUL(T2D, VADD(T2E, T2F));
T2S = VADD(T2c, T2d);
T2T = VADD(T2n, T2o);
T2U = VADD(T2S, T2T);
T3x = VSUB(T2S, T2T);
T2V = VADD(T2r, T2q);
T2W = VADD(T2h, T2k);
T2X = VBYI(VADD(T2V, T2W));
T3w = VBYI(VSUB(T2W, T2V));
T2R = LDW(&(W[TWVL * 60]));
T2Y = VZMULI(T2R, VSUB(T2U, T2X));
T4J = LDW(&(W[0]));
T4K = VZMULI(T4J, VADD(T2X, T2U));
T3v = LDW(&(W[TWVL * 28]));
T3y = VZMULI(T3v, VADD(T3w, T3x));
T4B = LDW(&(W[TWVL * 32]));
T4C = VZMULI(T4B, VSUB(T3x, T3w));
}
}
{
V T29, T4M, T2P, T4t, T4N, T2a, T4u, T2Q, T2x, T4H, T2H, T4z, T4I, T2y, T4A;
V T2I, T37, T4h, T3B, T3X, T4i, T38, T3Y, T3C, T3j, T4b, T3t, T43, T4c, T3k;
V T44, T3u;
T29 = VADD(T1w, T28);
ST(&(Rp[WS(rs, 5)]), T29, ms, &(Rp[WS(rs, 1)]));
T4M = VADD(T4K, T4L);
ST(&(Rp[0]), T4M, ms, &(Rp[0]));
T2P = VADD(T2M, T2O);
ST(&(Rp[WS(rs, 13)]), T2P, ms, &(Rp[WS(rs, 1)]));
T4t = VADD(T4k, T4s);
ST(&(Rp[WS(rs, 12)]), T4t, ms, &(Rp[0]));
T4N = VCONJ(VSUB(T4L, T4K));
ST(&(Rm[0]), T4N, -ms, &(Rm[0]));
T2a = VCONJ(VSUB(T28, T1w));
ST(&(Rm[WS(rs, 5)]), T2a, -ms, &(Rm[WS(rs, 1)]));
T4u = VCONJ(VSUB(T4s, T4k));
ST(&(Rm[WS(rs, 12)]), T4u, -ms, &(Rm[0]));
T2Q = VCONJ(VSUB(T2O, T2M));
ST(&(Rm[WS(rs, 13)]), T2Q, -ms, &(Rm[WS(rs, 1)]));
T2x = VADD(T2u, T2w);
ST(&(Rp[WS(rs, 11)]), T2x, ms, &(Rp[WS(rs, 1)]));
T4H = VADD(T4C, T4G);
ST(&(Rp[WS(rs, 8)]), T4H, ms, &(Rp[0]));
T2H = VADD(T2C, T2G);
ST(&(Rp[WS(rs, 3)]), T2H, ms, &(Rp[WS(rs, 1)]));
T4z = VADD(T4w, T4y);
ST(&(Rp[WS(rs, 4)]), T4z, ms, &(Rp[0]));
T4I = VCONJ(VSUB(T4G, T4C));
ST(&(Rm[WS(rs, 8)]), T4I, -ms, &(Rm[0]));
T2y = VCONJ(VSUB(T2w, T2u));
ST(&(Rm[WS(rs, 11)]), T2y, -ms, &(Rm[WS(rs, 1)]));
T4A = VCONJ(VSUB(T4y, T4w));
ST(&(Rm[WS(rs, 4)]), T4A, -ms, &(Rm[0]));
T2I = VCONJ(VSUB(T2G, T2C));
ST(&(Rm[WS(rs, 3)]), T2I, -ms, &(Rm[WS(rs, 1)]));
T37 = VADD(T2Y, T36);
ST(&(Rp[WS(rs, 15)]), T37, ms, &(Rp[WS(rs, 1)]));
T4h = VADD(T4e, T4g);
ST(&(Rp[WS(rs, 14)]), T4h, ms, &(Rp[0]));
T3B = VADD(T3y, T3A);
ST(&(Rp[WS(rs, 7)]), T3B, ms, &(Rp[WS(rs, 1)]));
T3X = VADD(T3E, T3W);
ST(&(Rp[WS(rs, 6)]), T3X, ms, &(Rp[0]));
T4i = VCONJ(VSUB(T4g, T4e));
ST(&(Rm[WS(rs, 14)]), T4i, -ms, &(Rm[0]));
T38 = VCONJ(VSUB(T36, T2Y));
ST(&(Rm[WS(rs, 15)]), T38, -ms, &(Rm[WS(rs, 1)]));
T3Y = VCONJ(VSUB(T3W, T3E));
ST(&(Rm[WS(rs, 6)]), T3Y, -ms, &(Rm[0]));
T3C = VCONJ(VSUB(T3A, T3y));
ST(&(Rm[WS(rs, 7)]), T3C, -ms, &(Rm[WS(rs, 1)]));
T3j = VADD(T3g, T3i);
ST(&(Rp[WS(rs, 1)]), T3j, ms, &(Rp[WS(rs, 1)]));
T4b = VADD(T46, T4a);
ST(&(Rp[WS(rs, 2)]), T4b, ms, &(Rp[0]));
T3t = VADD(T3o, T3s);
ST(&(Rp[WS(rs, 9)]), T3t, ms, &(Rp[WS(rs, 1)]));
T43 = VADD(T40, T42);
ST(&(Rp[WS(rs, 10)]), T43, ms, &(Rp[0]));
T4c = VCONJ(VSUB(T4a, T46));
ST(&(Rm[WS(rs, 2)]), T4c, -ms, &(Rm[0]));
T3k = VCONJ(VSUB(T3i, T3g));
ST(&(Rm[WS(rs, 1)]), T3k, -ms, &(Rm[WS(rs, 1)]));
T44 = VCONJ(VSUB(T42, T40));
ST(&(Rm[WS(rs, 10)]), T44, -ms, &(Rm[0]));
T3u = VCONJ(VSUB(T3s, T3o));
ST(&(Rm[WS(rs, 9)]), T3u, -ms, &(Rm[WS(rs, 1)]));
}
}
}
}
VLEAVE();
}

static const tw_instr twinstr[] = {
VTW(1, 1),
VTW(1, 2),
VTW(1, 3),
VTW(1, 4),
VTW(1, 5),
VTW(1, 6),
VTW(1, 7),
VTW(1, 8),
VTW(1, 9),
VTW(1, 10),
VTW(1, 11),
VTW(1, 12),
VTW(1, 13),
VTW(1, 14),
VTW(1, 15),
VTW(1, 16),
VTW(1, 17),
VTW(1, 18),
VTW(1, 19),
VTW(1, 20),
VTW(1, 21),
VTW(1, 22),
VTW(1, 23),
VTW(1, 24),
VTW(1, 25),
VTW(1, 26),
VTW(1, 27),
VTW(1, 28),
VTW(1, 29),
VTW(1, 30),
VTW(1, 31),
{TW_NEXT, VL, 0}
};

static const hc2c_desc desc = { 32, XSIMD_STRING("hc2cbdftv_32"), twinstr, &GENUS, {233, 88, 16, 0} };

void XSIMD(codelet_hc2cbdftv_32) (planner *p) {
X(khc2c_register) (p, hc2cbdftv_32, &desc, HC2C_VIA_DFT);
}
#endif /* HAVE_FMA */