annotate src/fftw-3.3.3/dft/simd/common/t3bv_10.c @ 23:619f715526df sv_v2.1

Update Vamp plugin SDK to 2.5
author Chris Cannam
date Thu, 09 May 2013 10:52:46 +0100
parents 37bf6b4a2645
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:39:22 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_twiddle_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -twiddle-log3 -precompute-twiddles -no-generate-bytw -n 10 -name t3bv_10 -include t3b.h -sign 1 */

/*
 * This function contains 57 FP additions, 52 FP multiplications,
 * (or, 39 additions, 34 multiplications, 18 fused multiply/add),
 * 57 stack variables, 4 constants, and 20 memory accesses
 */
#include "t3b.h"

static void t3bv_10(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
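     /* Vector constants for the radix-10 butterfly: sqrt(5)/4, 1/4, (sqrt(5)-1)/2, and sin(2*pi/5). */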
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     {
          INT m;
          R *x;
          x = ii;
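          /* Each pass handles VL transforms at once: x advances by VL * ms and the twiddle pointer W by TWVL * 6 per iteration. */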
          for (m = mb, W = W + (mb * ((TWVL / VL) * 6)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 6), MAKE_VOLATILE_STRIDE(10, rs)) {
               V T1, T7, Th, Tx, Tr, Td, Tp, T6, Tv, Tc, Te, Ti, Tl, T2, T3;
               V T5;
               T2 = LDW(&(W[0]));
               T3 = LDW(&(W[TWVL * 2]));
               T5 = LDW(&(W[TWVL * 4]));
               T1 = LD(&(x[0]), ms, &(x[0]));
               T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
               {
                    V To, Tw, Tq, Tu, Ta, T4, Tt, Tk, Tb;
                    To = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                    Tw = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                    Tq = LD(&(x[WS(rs, 9)]), ms, &(x[WS(rs, 1)]));
                    Tu = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                    Ta = VZMULJ(T2, T3);
                    T4 = VZMUL(T2, T3);
                    Th = VZMULJ(T2, T5);
                    Tt = VZMULJ(T3, T5);
                    Tb = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                    Tx = VZMUL(T2, Tw);
                    Tr = VZMUL(T5, Tq);
                    Tk = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                    Td = VZMULJ(Ta, T5);
                    Tp = VZMUL(T4, To);
                    T6 = VZMULJ(T4, T5);
                    Tv = VZMUL(Tt, Tu);
                    Tc = VZMUL(Ta, Tb);
                    Te = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                    Ti = LD(&(x[WS(rs, 8)]), ms, &(x[0]));
                    Tl = VZMUL(T3, Tk);
               }
               {
                    V TN, Ts, T8, Ty, TO, Tf, Tj;
                    TN = VADD(Tp, Tr);
                    Ts = VSUB(Tp, Tr);
                    T8 = VZMUL(T6, T7);
                    Ty = VSUB(Tv, Tx);
                    TO = VADD(Tv, Tx);
                    Tf = VZMUL(Td, Te);
                    Tj = VZMUL(Th, Ti);
                    {
                         V T9, TJ, TP, TU, Tz, TF, Tg, TK, Tm, TL;
                         T9 = VSUB(T1, T8);
                         TJ = VADD(T1, T8);
                         TP = VADD(TN, TO);
                         TU = VSUB(TN, TO);
                         Tz = VADD(Ts, Ty);
                         TF = VSUB(Ts, Ty);
                         Tg = VSUB(Tc, Tf);
                         TK = VADD(Tc, Tf);
                         Tm = VSUB(Tj, Tl);
                         TL = VADD(Tj, Tl);
                         {
                              V TM, TV, Tn, TE;
                              TM = VADD(TK, TL);
                              TV = VSUB(TK, TL);
                              Tn = VADD(Tg, Tm);
                              TE = VSUB(Tg, Tm);
                              {
                                   V TW, TY, TS, TQ, TG, TI, TC, TA, TR, TB;
                                   TW = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), TV, TU));
                                   TY = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), TU, TV));
                                   TS = VSUB(TM, TP);
                                   TQ = VADD(TM, TP);
                                   TG = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), TF, TE));
                                   TI = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), TE, TF));
                                   TC = VSUB(Tn, Tz);
                                   TA = VADD(Tn, Tz);
                                   ST(&(x[0]), VADD(TJ, TQ), ms, &(x[0]));
                                   TR = VFNMS(LDK(KP250000000), TQ, TJ);
                                   ST(&(x[WS(rs, 5)]), VADD(T9, TA), ms, &(x[WS(rs, 1)]));
                                   TB = VFNMS(LDK(KP250000000), TA, T9);
                                   {
                                        V TX, TT, TH, TD;
                                        TX = VFMA(LDK(KP559016994), TS, TR);
                                        TT = VFNMS(LDK(KP559016994), TS, TR);
                                        TH = VFNMS(LDK(KP559016994), TC, TB);
                                        TD = VFMA(LDK(KP559016994), TC, TB);
                                        ST(&(x[WS(rs, 8)]), VFMAI(TW, TT), ms, &(x[0]));
                                        ST(&(x[WS(rs, 2)]), VFNMSI(TW, TT), ms, &(x[0]));
                                        ST(&(x[WS(rs, 6)]), VFMAI(TY, TX), ms, &(x[0]));
                                        ST(&(x[WS(rs, 4)]), VFNMSI(TY, TX), ms, &(x[0]));
                                        ST(&(x[WS(rs, 9)]), VFNMSI(TG, TD), ms, &(x[WS(rs, 1)]));
                                        ST(&(x[WS(rs, 1)]), VFMAI(TG, TD), ms, &(x[WS(rs, 1)]));
                                        ST(&(x[WS(rs, 7)]), VFNMSI(TI, TH), ms, &(x[WS(rs, 1)]));
                                        ST(&(x[WS(rs, 3)]), VFMAI(TI, TH), ms, &(x[WS(rs, 1)]));
                                   }
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

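/* Twiddle-log3 layout: only w^1, w^3 and w^9 are kept in the twiddle table (see the VTW entries below); the remaining powers are derived in the codelet with VZMUL/VZMULJ. */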
static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 3),
     VTW(0, 9),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 10, XSIMD_STRING("t3bv_10"), twinstr, &GENUS, {39, 34, 18, 0}, 0, 0, 0 };

void XSIMD(codelet_t3bv_10) (planner *p) {
     X(kdft_dit_register) (p, t3bv_10, &desc);
}
#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_twiddle_c.native -simd -compact -variables 4 -pipeline-latency 8 -twiddle-log3 -precompute-twiddles -no-generate-bytw -n 10 -name t3bv_10 -include t3b.h -sign 1 */

/*
 * This function contains 57 FP additions, 42 FP multiplications,
 * (or, 51 additions, 36 multiplications, 6 fused multiply/add),
 * 41 stack variables, 4 constants, and 20 memory accesses
 */
#include "t3b.h"

static void t3bv_10(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
     DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
     DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
     DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
     DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
     {
          INT m;
          R *x;
          x = ii;
          for (m = mb, W = W + (mb * ((TWVL / VL) * 6)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 6), MAKE_VOLATILE_STRIDE(10, rs)) {
               V T1, T2, T3, Ti, T6, T7, TA, Tb, To;
               T1 = LDW(&(W[0]));
               T2 = LDW(&(W[TWVL * 2]));
               T3 = VZMULJ(T1, T2);
               Ti = VZMUL(T1, T2);
               T6 = LDW(&(W[TWVL * 4]));
               T7 = VZMULJ(T3, T6);
               TA = VZMULJ(Ti, T6);
               Tb = VZMULJ(T1, T6);
               To = VZMULJ(T2, T6);
               {
                    V TD, TQ, Tn, Tt, Tx, TM, TN, TS, Ta, Tg, Tw, TJ, TK, TR, Tz;
                    V TC, TB;
                    Tz = LD(&(x[0]), ms, &(x[0]));
                    TB = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
                    TC = VZMUL(TA, TB);
                    TD = VSUB(Tz, TC);
                    TQ = VADD(Tz, TC);
                    {
                         V Tk, Ts, Tm, Tq;
                         {
                              V Tj, Tr, Tl, Tp;
                              Tj = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
                              Tk = VZMUL(Ti, Tj);
                              Tr = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
                              Ts = VZMUL(T1, Tr);
                              Tl = LD(&(x[WS(rs, 9)]), ms, &(x[WS(rs, 1)]));
                              Tm = VZMUL(T6, Tl);
                              Tp = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
                              Tq = VZMUL(To, Tp);
                         }
                         Tn = VSUB(Tk, Tm);
                         Tt = VSUB(Tq, Ts);
                         Tx = VADD(Tn, Tt);
                         TM = VADD(Tk, Tm);
                         TN = VADD(Tq, Ts);
                         TS = VADD(TM, TN);
                    }
                    {
                         V T5, Tf, T9, Td;
                         {
                              V T4, Te, T8, Tc;
                              T4 = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
                              T5 = VZMUL(T3, T4);
                              Te = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
                              Tf = VZMUL(T2, Te);
                              T8 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
                              T9 = VZMUL(T7, T8);
                              Tc = LD(&(x[WS(rs, 8)]), ms, &(x[0]));
                              Td = VZMUL(Tb, Tc);
                         }
                         Ta = VSUB(T5, T9);
                         Tg = VSUB(Td, Tf);
                         Tw = VADD(Ta, Tg);
                         TJ = VADD(T5, T9);
                         TK = VADD(Td, Tf);
                         TR = VADD(TJ, TK);
                    }
                    {
                         V Ty, TE, TF, Tv, TI, Th, Tu, TH, TG;
                         Ty = VMUL(LDK(KP559016994), VSUB(Tw, Tx));
                         TE = VADD(Tw, Tx);
                         TF = VFNMS(LDK(KP250000000), TE, TD);
                         Th = VSUB(Ta, Tg);
                         Tu = VSUB(Tn, Tt);
                         Tv = VBYI(VFMA(LDK(KP951056516), Th, VMUL(LDK(KP587785252), Tu)));
                         TI = VBYI(VFNMS(LDK(KP951056516), Tu, VMUL(LDK(KP587785252), Th)));
                         ST(&(x[WS(rs, 5)]), VADD(TD, TE), ms, &(x[WS(rs, 1)]));
                         TH = VSUB(TF, Ty);
                         ST(&(x[WS(rs, 3)]), VSUB(TH, TI), ms, &(x[WS(rs, 1)]));
                         ST(&(x[WS(rs, 7)]), VADD(TI, TH), ms, &(x[WS(rs, 1)]));
                         TG = VADD(Ty, TF);
                         ST(&(x[WS(rs, 1)]), VADD(Tv, TG), ms, &(x[WS(rs, 1)]));
                         ST(&(x[WS(rs, 9)]), VSUB(TG, Tv), ms, &(x[WS(rs, 1)]));
                    }
                    {
                         V TV, TT, TU, TP, TY, TL, TO, TX, TW;
                         TV = VMUL(LDK(KP559016994), VSUB(TR, TS));
                         TT = VADD(TR, TS);
                         TU = VFNMS(LDK(KP250000000), TT, TQ);
                         TL = VSUB(TJ, TK);
                         TO = VSUB(TM, TN);
                         TP = VBYI(VFNMS(LDK(KP951056516), TO, VMUL(LDK(KP587785252), TL)));
                         TY = VBYI(VFMA(LDK(KP951056516), TL, VMUL(LDK(KP587785252), TO)));
                         ST(&(x[0]), VADD(TQ, TT), ms, &(x[0]));
                         TX = VADD(TV, TU);
                         ST(&(x[WS(rs, 4)]), VSUB(TX, TY), ms, &(x[0]));
                         ST(&(x[WS(rs, 6)]), VADD(TY, TX), ms, &(x[0]));
                         TW = VSUB(TU, TV);
                         ST(&(x[WS(rs, 2)]), VADD(TP, TW), ms, &(x[0]));
                         ST(&(x[WS(rs, 8)]), VSUB(TW, TP), ms, &(x[0]));
                    }
               }
          }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 3),
     VTW(0, 9),
     {TW_NEXT, VL, 0}
};

static const ct_desc desc = { 10, XSIMD_STRING("t3bv_10"), twinstr, &GENUS, {51, 36, 6, 0}, 0, 0, 0 };

void XSIMD(codelet_t3bv_10) (planner *p) {
     X(kdft_dit_register) (p, t3bv_10, &desc);
}
#endif /* HAVE_FMA */