annotate src/fftw-3.3.3/dft/simd/common/n2fv_6.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 37bf6b4a2645
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:37:21 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw_c.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 6 -name n2fv_6 -with-ostride 2 -include n2f.h -store-multiple 2 */

/*
 * This function contains 18 FP additions, 8 FP multiplications,
 * (or, 12 additions, 2 multiplications, 6 fused multiply/add),
 * 29 stack variables, 2 constants, and 15 memory accesses
 */
#include "n2f.h"

static void n2fv_6(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
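     /* Editor's note (not in the generated source): the two vector constants
        are cos(pi/3) = 1/2 and sin(pi/3) = sqrt(3)/2, the butterfly
        coefficients used by the size-6 transform below. */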
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(12, is), MAKE_VOLATILE_STRIDE(12, os)) {
               V T1, T2, T4, T5, T7, T8;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               T2 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
               T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
               T5 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
               T7 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
               T8 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
               {
                    V T3, Td, T6, Te, T9, Tf;
                    T3 = VSUB(T1, T2);
                    Td = VADD(T1, T2);
                    T6 = VSUB(T4, T5);
                    Te = VADD(T4, T5);
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
                    {
                         V Tg, Ti, Ta, Tc;
                         Tg = VADD(Te, Tf);
                         Ti = VMUL(LDK(KP866025403), VSUB(Tf, Te));
                         Ta = VADD(T6, T9);
                         Tc = VMUL(LDK(KP866025403), VSUB(T9, T6));
                         {
                              V Th, Tj, Tb, Tk;
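                              /* Editor's note (not in the generated source): in
                                 FFTW's SIMD macro conventions, VFNMS(a, b, c)
                                 evaluates c - a * b, and VFMAI(b, c) / VFNMSI(b, c)
                                 give c + i*b / c - i*b -- compare the explicit
                                 VBYI plus VADD/VSUB form in the non-FMA branch
                                 further down. */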
                              Th = VFNMS(LDK(KP500000000), Tg, Td);
                              Tj = VADD(Td, Tg);
                              STM2(&(xo[0]), Tj, ovs, &(xo[0]));
                              Tb = VFNMS(LDK(KP500000000), Ta, T3);
                              Tk = VADD(T3, Ta);
                              STM2(&(xo[6]), Tk, ovs, &(xo[2]));
                              {
                                   V Tl, Tm, Tn, To;
                                   Tl = VFMAI(Ti, Th);
                                   STM2(&(xo[8]), Tl, ovs, &(xo[0]));
                                   Tm = VFNMSI(Ti, Th);
                                   STM2(&(xo[4]), Tm, ovs, &(xo[0]));
                                   STN2(&(xo[4]), Tm, Tk, ovs);
                                   Tn = VFMAI(Tc, Tb);
                                   STM2(&(xo[2]), Tn, ovs, &(xo[2]));
                                   STN2(&(xo[0]), Tj, Tn, ovs);
                                   To = VFNMSI(Tc, Tb);
                                   STM2(&(xo[10]), To, ovs, &(xo[2]));
                                   STN2(&(xo[8]), Tl, To, ovs);
                              }
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 6, XSIMD_STRING("n2fv_6"), {12, 2, 6, 0}, &GENUS, 0, 2, 0, 0 };
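/* Editor's note (not in the generated source): the {12, 2, 6, 0} initializer
   is the operation count advertised to the planner (additions,
   multiplications, fused multiply/adds, other), matching the counts in the
   comment above this codelet; the exact field layout of kdft_desc is defined
   in codelet-dft.h. */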

void XSIMD(codelet_n2fv_6) (planner *p) {
     X(kdft_register) (p, n2fv_6, &desc);
}

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 6 -name n2fv_6 -with-ostride 2 -include n2f.h -store-multiple 2 */

/*
 * This function contains 18 FP additions, 4 FP multiplications,
 * (or, 16 additions, 2 multiplications, 2 fused multiply/add),
 * 25 stack variables, 2 constants, and 15 memory accesses
 */
#include "n2f.h"

static void n2fv_6(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(12, is), MAKE_VOLATILE_STRIDE(12, os)) {
               V T3, Td, T6, Te, T9, Tf, Ta, Tg, T1, T2, Tj, Tk;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               T2 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
               T3 = VSUB(T1, T2);
               Td = VADD(T1, T2);
               {
                    V T4, T5, T7, T8;
                    T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                    T5 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                    T6 = VSUB(T4, T5);
                    Te = VADD(T4, T5);
                    T7 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                    T8 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
               }
               Ta = VADD(T6, T9);
               Tg = VADD(Te, Tf);
               Tj = VADD(T3, Ta);
               STM2(&(xo[6]), Tj, ovs, &(xo[2]));
               Tk = VADD(Td, Tg);
               STM2(&(xo[0]), Tk, ovs, &(xo[0]));
               {
                    V Tl, Tb, Tc, Tm;
                    Tb = VFNMS(LDK(KP500000000), Ta, T3);
                    Tc = VBYI(VMUL(LDK(KP866025403), VSUB(T9, T6)));
                    Tl = VSUB(Tb, Tc);
                    STM2(&(xo[10]), Tl, ovs, &(xo[2]));
                    Tm = VADD(Tb, Tc);
                    STM2(&(xo[2]), Tm, ovs, &(xo[2]));
                    STN2(&(xo[0]), Tk, Tm, ovs);
                    {
                         V Th, Ti, Tn, To;
                         Th = VFNMS(LDK(KP500000000), Tg, Td);
                         Ti = VBYI(VMUL(LDK(KP866025403), VSUB(Tf, Te)));
                         Tn = VSUB(Th, Ti);
                         STM2(&(xo[4]), Tn, ovs, &(xo[0]));
                         STN2(&(xo[4]), Tn, Tj, ovs);
                         To = VADD(Th, Ti);
                         STM2(&(xo[8]), To, ovs, &(xo[0]));
                         STN2(&(xo[8]), To, Tl, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 6, XSIMD_STRING("n2fv_6"), {16, 2, 2, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2fv_6) (planner *p) {
     X(kdft_register) (p, n2fv_6, &desc);
}

#endif /* HAVE_FMA */
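
Editor's note: each pass of the vector loop above computes a 6-point forward DFT, X[k] = sum_{n=0..5} x[n] * exp(-2*pi*i*n*k/6), roughly decomposed into three size-2 butterflies (the VADD/VSUB pairs on inputs 0/3, 2/5 and 4/1) followed by two size-3 combinations built from the 1/2 and sqrt(3)/2 constants. The plain-C sketch below is not part of FFTW; ref_dft6 is an illustrative name, and it uses C99 complex doubles rather than FFTW's split ri/ii layout and SIMD strides. It evaluates the same transform directly and can serve as a rough cross-check of a codelet's arithmetic.

#include <complex.h>
#include <stdio.h>

/* Direct O(n^2) 6-point forward DFT, for comparison against the codelet. */
static void ref_dft6(const double complex in[6], double complex out[6])
{
     const double PI = 3.14159265358979323846;
     for (int k = 0; k < 6; ++k) {
          double complex acc = 0.0;
          for (int n = 0; n < 6; ++n)
               acc += in[n] * cexp(-2.0 * PI * I * (double)(n * k) / 6.0);
          out[k] = acc;
     }
}

int main(void)
{
     double complex x[6] = { 1, 2, 3, 4, 5, 6 }, X[6];
     ref_dft6(x, X);
     for (int k = 0; k < 6; ++k)
          printf("X[%d] = %+.6f %+.6fi\n", k, creal(X[k]), cimag(X[k]));
     return 0;
}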