annotate src/fftw-3.3.3/dft/simd/common/n2fv_10.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 37bf6b4a2645
children
rev   line source
Chris@10 1 /*
Chris@10 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
Chris@10 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
Chris@10 4 *
Chris@10 5 * This program is free software; you can redistribute it and/or modify
Chris@10 6 * it under the terms of the GNU General Public License as published by
Chris@10 7 * the Free Software Foundation; either version 2 of the License, or
Chris@10 8 * (at your option) any later version.
Chris@10 9 *
Chris@10 10 * This program is distributed in the hope that it will be useful,
Chris@10 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@10 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@10 13 * GNU General Public License for more details.
Chris@10 14 *
Chris@10 15 * You should have received a copy of the GNU General Public License
Chris@10 16 * along with this program; if not, write to the Free Software
Chris@10 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@10 18 *
Chris@10 19 */
Chris@10 20
Chris@10 21 /* This file was automatically generated --- DO NOT EDIT */
Chris@10 22 /* Generated on Sun Nov 25 07:37:22 EST 2012 */
Chris@10 23
Chris@10 24 #include "codelet-dft.h"
Chris@10 25
Chris@10 26 #ifdef HAVE_FMA
Chris@10 27
Chris@10 28 /* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 10 -name n2fv_10 -with-ostride 2 -include n2f.h -store-multiple 2 */
Chris@10 29
Chris@10 30 /*
Chris@10 31 * This function contains 42 FP additions, 22 FP multiplications,
Chris@10 32 * (or, 24 additions, 4 multiplications, 18 fused multiply/add),
Chris@10 33 * 53 stack variables, 4 constants, and 25 memory accesses
Chris@10 34 */
Chris@10 35 #include "n2f.h"
Chris@10 36
Chris@10 37 static void n2fv_10(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@10 38 {
Chris@10 39 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@10 40 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@10 41 DVK(KP618033988, +0.618033988749894848204586834365638117720309180);
Chris@10 42 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
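/* Editorial note (added comment, not part of the generated source): these
 * vector constants are the standard size-5/size-10 DFT quantities.
 * KP951056516 is sin(2*pi/5), KP618033988 is 2*cos(2*pi/5) = (sqrt(5)-1)/2,
 * KP559016994 is sqrt(5)/4, and KP250000000 is 1/4; in this FMA variant most
 * of them are applied through the VFMA/VFNMS fused multiply-add macros below. */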
Chris@10 43 {
Chris@10 44 INT i;
Chris@10 45 const R *xi;
Chris@10 46 R *xo;
Chris@10 47 xi = ri;
Chris@10 48 xo = ro;
Chris@10 49 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(20, is), MAKE_VOLATILE_STRIDE(20, os)) {
Chris@10 50 V Tb, Tr, T3, Ts, T6, Tw, Tg, Tt, T9, Tc, T1, T2;
Chris@10 51 T1 = LD(&(xi[0]), ivs, &(xi[0]));
Chris@10 52 T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@10 53 {
Chris@10 54 V T4, T5, Te, Tf, T7, T8;
Chris@10 55 T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@10 56 T5 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@10 57 Te = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@10 58 Tf = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@10 59 T7 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@10 60 T8 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@10 61 Tb = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@10 62 Tr = VADD(T1, T2);
Chris@10 63 T3 = VSUB(T1, T2);
Chris@10 64 Ts = VADD(T4, T5);
Chris@10 65 T6 = VSUB(T4, T5);
Chris@10 66 Tw = VADD(Te, Tf);
Chris@10 67 Tg = VSUB(Te, Tf);
Chris@10 68 Tt = VADD(T7, T8);
Chris@10 69 T9 = VSUB(T7, T8);
Chris@10 70 Tc = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@10 71 }
Chris@10 72 {
Chris@10 73 V TD, Tu, Tm, Ta, Td, Tv;
Chris@10 74 TD = VSUB(Ts, Tt);
Chris@10 75 Tu = VADD(Ts, Tt);
Chris@10 76 Tm = VSUB(T6, T9);
Chris@10 77 Ta = VADD(T6, T9);
Chris@10 78 Td = VSUB(Tb, Tc);
Chris@10 79 Tv = VADD(Tb, Tc);
Chris@10 80 {
Chris@10 81 V TC, Tx, Tn, Th;
Chris@10 82 TC = VSUB(Tv, Tw);
Chris@10 83 Tx = VADD(Tv, Tw);
Chris@10 84 Tn = VSUB(Td, Tg);
Chris@10 85 Th = VADD(Td, Tg);
Chris@10 86 {
Chris@10 87 V Ty, TA, TE, TG, Ti, Tk, To, Tq;
Chris@10 88 Ty = VADD(Tu, Tx);
Chris@10 89 TA = VSUB(Tu, Tx);
Chris@10 90 TE = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), TD, TC));
Chris@10 91 TG = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), TC, TD));
Chris@10 92 Ti = VADD(Ta, Th);
Chris@10 93 Tk = VSUB(Ta, Th);
Chris@10 94 To = VMUL(LDK(KP951056516), VFMA(LDK(KP618033988), Tn, Tm));
Chris@10 95 Tq = VMUL(LDK(KP951056516), VFNMS(LDK(KP618033988), Tm, Tn));
Chris@10 96 {
Chris@10 97 V Tz, TH, Tj, TI;
Chris@10 98 Tz = VFNMS(LDK(KP250000000), Ty, Tr);
Chris@10 99 TH = VADD(Tr, Ty);
Chris@10 100 STM2(&(xo[0]), TH, ovs, &(xo[0]));
Chris@10 101 Tj = VFNMS(LDK(KP250000000), Ti, T3);
Chris@10 102 TI = VADD(T3, Ti);
Chris@10 103 STM2(&(xo[10]), TI, ovs, &(xo[2]));
Chris@10 104 {
Chris@10 105 V TB, TF, Tl, Tp;
Chris@10 106 TB = VFNMS(LDK(KP559016994), TA, Tz);
Chris@10 107 TF = VFMA(LDK(KP559016994), TA, Tz);
Chris@10 108 Tl = VFMA(LDK(KP559016994), Tk, Tj);
Chris@10 109 Tp = VFNMS(LDK(KP559016994), Tk, Tj);
Chris@10 110 {
Chris@10 111 V TJ, TK, TL, TM;
Chris@10 112 TJ = VFMAI(TG, TF);
Chris@10 113 STM2(&(xo[8]), TJ, ovs, &(xo[0]));
Chris@10 114 STN2(&(xo[8]), TJ, TI, ovs);
Chris@10 115 TK = VFNMSI(TG, TF);
Chris@10 116 STM2(&(xo[12]), TK, ovs, &(xo[0]));
Chris@10 117 TL = VFNMSI(TE, TB);
Chris@10 118 STM2(&(xo[16]), TL, ovs, &(xo[0]));
Chris@10 119 TM = VFMAI(TE, TB);
Chris@10 120 STM2(&(xo[4]), TM, ovs, &(xo[0]));
Chris@10 121 {
Chris@10 122 V TN, TO, TP, TQ;
Chris@10 123 TN = VFNMSI(Tq, Tp);
Chris@10 124 STM2(&(xo[6]), TN, ovs, &(xo[2]));
Chris@10 125 STN2(&(xo[4]), TM, TN, ovs);
Chris@10 126 TO = VFMAI(Tq, Tp);
Chris@10 127 STM2(&(xo[14]), TO, ovs, &(xo[2]));
Chris@10 128 STN2(&(xo[12]), TK, TO, ovs);
Chris@10 129 TP = VFMAI(To, Tl);
Chris@10 130 STM2(&(xo[18]), TP, ovs, &(xo[2]));
Chris@10 131 STN2(&(xo[16]), TL, TP, ovs);
Chris@10 132 TQ = VFNMSI(To, Tl);
Chris@10 133 STM2(&(xo[2]), TQ, ovs, &(xo[2]));
Chris@10 134 STN2(&(xo[0]), TH, TQ, ovs);
Chris@10 135 }
Chris@10 136 }
Chris@10 137 }
Chris@10 138 }
Chris@10 139 }
Chris@10 140 }
Chris@10 141 }
Chris@10 142 }
Chris@10 143 }
Chris@10 144 VLEAVE();
Chris@10 145 }
Chris@10 146
Chris@10 147 static const kdft_desc desc = { 10, XSIMD_STRING("n2fv_10"), {24, 4, 18, 0}, &GENUS, 0, 2, 0, 0 };
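/* Editorial note (added comment): the {24, 4, 18, 0} entry records the same
 * 24 additions, 4 multiplications, and 18 fused multiply-adds advertised in
 * the operation-count comment at the top of this variant. */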
Chris@10 148
Chris@10 149 void XSIMD(codelet_n2fv_10) (planner *p) {
Chris@10 150 X(kdft_register) (p, n2fv_10, &desc);
Chris@10 151 }
Chris@10 152
Chris@10 153 #else /* HAVE_FMA */
Chris@10 154
Chris@10 155 /* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 10 -name n2fv_10 -with-ostride 2 -include n2f.h -store-multiple 2 */
Chris@10 156
Chris@10 157 /*
Chris@10 158 * This function contains 42 FP additions, 12 FP multiplications,
Chris@10 159 * (or, 36 additions, 6 multiplications, 6 fused multiply/add),
Chris@10 160 * 36 stack variables, 4 constants, and 25 memory accesses
Chris@10 161 */
Chris@10 162 #include "n2f.h"
Chris@10 163
Chris@10 164 static void n2fv_10(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
Chris@10 165 {
Chris@10 166 DVK(KP250000000, +0.250000000000000000000000000000000000000000000);
Chris@10 167 DVK(KP559016994, +0.559016994374947424102293417182819058860154590);
Chris@10 168 DVK(KP587785252, +0.587785252292473129168705954639072768597652438);
Chris@10 169 DVK(KP951056516, +0.951056516295153572116439333379382143405698634);
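/* Editorial note (added comment, not part of the generated source): in this
 * non-FMA variant the sine pair appears explicitly, KP951056516 = sin(2*pi/5)
 * and KP587785252 = sin(pi/5), and VBYI() supplies the multiplication by the
 * imaginary unit where the FMA variant used the VFMAI/VFNMSI macros. */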
Chris@10 170 {
Chris@10 171 INT i;
Chris@10 172 const R *xi;
Chris@10 173 R *xo;
Chris@10 174 xi = ri;
Chris@10 175 xo = ro;
Chris@10 176 for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(20, is), MAKE_VOLATILE_STRIDE(20, os)) {
Chris@10 177 V Ti, Ty, Tm, Tn, Tw, Tt, Tz, TA, TB, T7, Te, Tj, Tg, Th;
Chris@10 178 Tg = LD(&(xi[0]), ivs, &(xi[0]));
Chris@10 179 Th = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
Chris@10 180 Ti = VSUB(Tg, Th);
Chris@10 181 Ty = VADD(Tg, Th);
Chris@10 182 {
Chris@10 183 V T3, Tu, Td, Ts, T6, Tv, Ta, Tr;
Chris@10 184 {
Chris@10 185 V T1, T2, Tb, Tc;
Chris@10 186 T1 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
Chris@10 187 T2 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
Chris@10 188 T3 = VSUB(T1, T2);
Chris@10 189 Tu = VADD(T1, T2);
Chris@10 190 Tb = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
Chris@10 191 Tc = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
Chris@10 192 Td = VSUB(Tb, Tc);
Chris@10 193 Ts = VADD(Tb, Tc);
Chris@10 194 }
Chris@10 195 {
Chris@10 196 V T4, T5, T8, T9;
Chris@10 197 T4 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
Chris@10 198 T5 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
Chris@10 199 T6 = VSUB(T4, T5);
Chris@10 200 Tv = VADD(T4, T5);
Chris@10 201 T8 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
Chris@10 202 T9 = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
Chris@10 203 Ta = VSUB(T8, T9);
Chris@10 204 Tr = VADD(T8, T9);
Chris@10 205 }
Chris@10 206 Tm = VSUB(T3, T6);
Chris@10 207 Tn = VSUB(Ta, Td);
Chris@10 208 Tw = VSUB(Tu, Tv);
Chris@10 209 Tt = VSUB(Tr, Ts);
Chris@10 210 Tz = VADD(Tu, Tv);
Chris@10 211 TA = VADD(Tr, Ts);
Chris@10 212 TB = VADD(Tz, TA);
Chris@10 213 T7 = VADD(T3, T6);
Chris@10 214 Te = VADD(Ta, Td);
Chris@10 215 Tj = VADD(T7, Te);
Chris@10 216 }
Chris@10 217 {
Chris@10 218 V TH, TI, TK, TL, TM;
Chris@10 219 TH = VADD(Ti, Tj);
Chris@10 220 STM2(&(xo[10]), TH, ovs, &(xo[2]));
Chris@10 221 TI = VADD(Ty, TB);
Chris@10 222 STM2(&(xo[0]), TI, ovs, &(xo[0]));
Chris@10 223 {
Chris@10 224 V To, Tq, Tl, Tp, Tf, Tk, TJ;
Chris@10 225 To = VBYI(VFMA(LDK(KP951056516), Tm, VMUL(LDK(KP587785252), Tn)));
Chris@10 226 Tq = VBYI(VFNMS(LDK(KP587785252), Tm, VMUL(LDK(KP951056516), Tn)));
Chris@10 227 Tf = VMUL(LDK(KP559016994), VSUB(T7, Te));
Chris@10 228 Tk = VFNMS(LDK(KP250000000), Tj, Ti);
Chris@10 229 Tl = VADD(Tf, Tk);
Chris@10 230 Tp = VSUB(Tk, Tf);
Chris@10 231 TJ = VSUB(Tl, To);
Chris@10 232 STM2(&(xo[2]), TJ, ovs, &(xo[2]));
Chris@10 233 STN2(&(xo[0]), TI, TJ, ovs);
Chris@10 234 TK = VADD(Tq, Tp);
Chris@10 235 STM2(&(xo[14]), TK, ovs, &(xo[2]));
Chris@10 236 TL = VADD(To, Tl);
Chris@10 237 STM2(&(xo[18]), TL, ovs, &(xo[2]));
Chris@10 238 TM = VSUB(Tp, Tq);
Chris@10 239 STM2(&(xo[6]), TM, ovs, &(xo[2]));
Chris@10 240 }
Chris@10 241 {
Chris@10 242 V Tx, TF, TE, TG, TC, TD;
Chris@10 243 Tx = VBYI(VFNMS(LDK(KP587785252), Tw, VMUL(LDK(KP951056516), Tt)));
Chris@10 244 TF = VBYI(VFMA(LDK(KP951056516), Tw, VMUL(LDK(KP587785252), Tt)));
Chris@10 245 TC = VFNMS(LDK(KP250000000), TB, Ty);
Chris@10 246 TD = VMUL(LDK(KP559016994), VSUB(Tz, TA));
Chris@10 247 TE = VSUB(TC, TD);
Chris@10 248 TG = VADD(TD, TC);
Chris@10 249 {
Chris@10 250 V TN, TO, TP, TQ;
Chris@10 251 TN = VADD(Tx, TE);
Chris@10 252 STM2(&(xo[4]), TN, ovs, &(xo[0]));
Chris@10 253 STN2(&(xo[4]), TN, TM, ovs);
Chris@10 254 TO = VSUB(TG, TF);
Chris@10 255 STM2(&(xo[12]), TO, ovs, &(xo[0]));
Chris@10 256 STN2(&(xo[12]), TO, TK, ovs);
Chris@10 257 TP = VSUB(TE, Tx);
Chris@10 258 STM2(&(xo[16]), TP, ovs, &(xo[0]));
Chris@10 259 STN2(&(xo[16]), TP, TL, ovs);
Chris@10 260 TQ = VADD(TF, TG);
Chris@10 261 STM2(&(xo[8]), TQ, ovs, &(xo[0]));
Chris@10 262 STN2(&(xo[8]), TQ, TH, ovs);
Chris@10 263 }
Chris@10 264 }
Chris@10 265 }
Chris@10 266 }
Chris@10 267 }
Chris@10 268 VLEAVE();
Chris@10 269 }
Chris@10 270
Chris@10 271 static const kdft_desc desc = { 10, XSIMD_STRING("n2fv_10"), {36, 6, 6, 0}, &GENUS, 0, 2, 0, 0 };
Chris@10 272
Chris@10 273 void XSIMD(codelet_n2fv_10) (planner *p) {
Chris@10 274 X(kdft_register) (p, n2fv_10, &desc);
Chris@10 275 }
Chris@10 276
Chris@10 277 #endif /* HAVE_FMA */
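
For reference, this codelet computes forward complex DFTs of length 10; the SIMD macros (LD, STM2/STN2, VFMA, and friends) process VL independent transforms per loop iteration. The following scalar sketch is an editorial illustration rather than FFTW code (the function name dft10_reference and the array layout are hypothetical): it shows the transform the codelet implements, using the forward sign convention exp(-2*pi*i*k*n/10).

#include <complex.h>
#include <math.h>

/* Hypothetical reference routine: direct O(n^2) forward DFT of length 10. */
static void dft10_reference(const double complex in[10], double complex out[10])
{
    const double twopi = 8.0 * atan(1.0);   /* 2*pi without relying on M_PI */
    for (int k = 0; k < 10; ++k) {
        double complex acc = 0.0;
        for (int n = 0; n < 10; ++n)
            acc += in[n] * cexp(-I * twopi * (double) (k * n) / 10.0);
        out[k] = acc;
    }
}

A direct loop of this kind is a convenient cross-check when verifying the output ordering produced by a generated codelet.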