annotate src/fftw-3.3.8/dft/simd/common/n2bv_8.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents d0c2a83c1364
children
/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu May 24 08:05:11 EDT 2018 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 8 -name n2bv_8 -with-ostride 2 -include dft/simd/n2b.h -store-multiple 2 */

/*
 * This function contains 26 FP additions, 10 FP multiplications,
 * (or, 16 additions, 0 multiplications, 10 fused multiply/add),
 * 24 stack variables, 1 constants, and 20 memory accesses
 */
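/*
 * Note on the single constant below: KP707106781 is 1/sqrt(2)
 * (= cos(pi/4) = sin(pi/4)).  The nontrivial eighth roots of unity are
 * (+-1 +- i)/sqrt(2), so an 8-point DFT needs no other irrational
 * constant.  In this FMA variant the 1/sqrt(2) scalings are fused into
 * the VFMA/VFNMS operations rather than issued as separate multiplies.
 */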
#include "dft/simd/n2b.h"

static void n2bv_8(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ii;
          xo = io;
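          /* Each pass of this loop computes VL independent size-8
             transforms in SIMD fashion (VL being the vector length in
             complex elements): xi and xo advance by VL input/output
             vector strides (ivs/ovs) per iteration. */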
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
               V T3, Tj, Te, Tk, Ta, Tn, Tf, Tm, Tr, Tu;
               {
                    V T1, T2, Tc, Td;
                    T1 = LD(&(xi[0]), ivs, &(xi[0]));
                    T2 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                    T3 = VSUB(T1, T2);
                    Tj = VADD(T1, T2);
                    Tc = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                    Td = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                    Te = VSUB(Tc, Td);
                    Tk = VADD(Tc, Td);
                    {
                         V T4, T5, T6, T7, T8, T9;
                         T4 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         T5 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         T6 = VSUB(T4, T5);
                         T7 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         T8 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         T9 = VSUB(T7, T8);
                         Ta = VADD(T6, T9);
                         Tn = VADD(T7, T8);
                         Tf = VSUB(T6, T9);
                         Tm = VADD(T4, T5);
                    }
               }
               {
                    V Ts, Tb, Tg, Tp, Tq, Tt;
                    Tb = VFNMS(LDK(KP707106781), Ta, T3);
                    Tg = VFNMS(LDK(KP707106781), Tf, Te);
                    Tr = VFNMSI(Tg, Tb);
                    STM2(&(xo[6]), Tr, ovs, &(xo[2]));
                    Ts = VFMAI(Tg, Tb);
                    STM2(&(xo[10]), Ts, ovs, &(xo[2]));
                    Tp = VADD(Tj, Tk);
                    Tq = VADD(Tm, Tn);
                    Tt = VSUB(Tp, Tq);
                    STM2(&(xo[8]), Tt, ovs, &(xo[0]));
                    STN2(&(xo[8]), Tt, Ts, ovs);
                    Tu = VADD(Tp, Tq);
                    STM2(&(xo[0]), Tu, ovs, &(xo[0]));
               }
               {
                    V Tw, Th, Ti, Tv;
                    Th = VFMA(LDK(KP707106781), Ta, T3);
                    Ti = VFMA(LDK(KP707106781), Tf, Te);
                    Tv = VFMAI(Ti, Th);
                    STM2(&(xo[2]), Tv, ovs, &(xo[2]));
                    STN2(&(xo[0]), Tu, Tv, ovs);
                    Tw = VFNMSI(Ti, Th);
                    STM2(&(xo[14]), Tw, ovs, &(xo[2]));
                    {
                         V Tl, To, Tx, Ty;
                         Tl = VSUB(Tj, Tk);
                         To = VSUB(Tm, Tn);
                         Tx = VFNMSI(To, Tl);
                         STM2(&(xo[12]), Tx, ovs, &(xo[0]));
                         STN2(&(xo[12]), Tx, Tw, ovs);
                         Ty = VFMAI(To, Tl);
                         STM2(&(xo[4]), Ty, ovs, &(xo[0]));
                         STN2(&(xo[4]), Ty, Tr, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 8, XSIMD_STRING("n2bv_8"), {16, 0, 10, 0}, &GENUS, 0, 2, 0, 0 };
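/*
 * The {16, 0, 10, 0} entry in the descriptor above mirrors the operation
 * count quoted in the comment before the codelet (16 additions,
 * 0 multiplications, 10 fused multiply/adds, plus a fourth "other" slot),
 * figures the planner can consult when estimating the cost of this codelet.
 */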

void XSIMD(codelet_n2bv_8) (planner *p) {
     X(kdft_register) (p, n2bv_8, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 8 -name n2bv_8 -with-ostride 2 -include dft/simd/n2b.h -store-multiple 2 */

/*
 * This function contains 26 FP additions, 2 FP multiplications,
 * (or, 26 additions, 2 multiplications, 0 fused multiply/add),
 * 24 stack variables, 1 constants, and 20 memory accesses
 */
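/*
 * This fallback variant is compiled when neither ARCH_PREFERS_FMA nor
 * ISA_EXTENSION_PREFERS_FMA is defined: the 1/sqrt(2) factors are applied
 * with explicit VMUL operations and the rotations by i with VBYI, which is
 * why the count above shows 26 additions and 2 multiplications instead of
 * the 16-add/10-FMA mix of the branch above.
 */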
#include "dft/simd/n2b.h"

static void n2bv_8(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ii;
          xo = io;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
               V Ta, Tk, Te, Tj, T7, Tn, Tf, Tm, Tr, Tu;
               {
                    V T8, T9, Tc, Td;
                    T8 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                    T9 = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                    Ta = VSUB(T8, T9);
                    Tk = VADD(T8, T9);
                    Tc = LD(&(xi[0]), ivs, &(xi[0]));
                    Td = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                    Te = VSUB(Tc, Td);
                    Tj = VADD(Tc, Td);
                    {
                         V T1, T2, T3, T4, T5, T6;
                         T1 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         T3 = VSUB(T1, T2);
                         T4 = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         T5 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                         T6 = VSUB(T4, T5);
                         T7 = VMUL(LDK(KP707106781), VSUB(T3, T6));
                         Tn = VADD(T4, T5);
                         Tf = VMUL(LDK(KP707106781), VADD(T3, T6));
                         Tm = VADD(T1, T2);
                    }
               }
               {
                    V Ts, Tb, Tg, Tp, Tq, Tt;
                    Tb = VBYI(VSUB(T7, Ta));
                    Tg = VSUB(Te, Tf);
                    Tr = VADD(Tb, Tg);
                    STM2(&(xo[6]), Tr, ovs, &(xo[2]));
                    Ts = VSUB(Tg, Tb);
                    STM2(&(xo[10]), Ts, ovs, &(xo[2]));
                    Tp = VADD(Tj, Tk);
                    Tq = VADD(Tm, Tn);
                    Tt = VSUB(Tp, Tq);
                    STM2(&(xo[8]), Tt, ovs, &(xo[0]));
                    STN2(&(xo[8]), Tt, Ts, ovs);
                    Tu = VADD(Tp, Tq);
                    STM2(&(xo[0]), Tu, ovs, &(xo[0]));
               }
               {
                    V Tw, Th, Ti, Tv;
                    Th = VBYI(VADD(Ta, T7));
                    Ti = VADD(Te, Tf);
                    Tv = VADD(Th, Ti);
                    STM2(&(xo[2]), Tv, ovs, &(xo[2]));
                    STN2(&(xo[0]), Tu, Tv, ovs);
                    Tw = VSUB(Ti, Th);
                    STM2(&(xo[14]), Tw, ovs, &(xo[2]));
                    {
                         V Tl, To, Tx, Ty;
                         Tl = VSUB(Tj, Tk);
                         To = VBYI(VSUB(Tm, Tn));
                         Tx = VSUB(Tl, To);
                         STM2(&(xo[12]), Tx, ovs, &(xo[0]));
                         STN2(&(xo[12]), Tx, Tw, ovs);
                         Ty = VADD(Tl, To);
                         STM2(&(xo[4]), Ty, ovs, &(xo[0]));
                         STN2(&(xo[4]), Ty, Tr, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 8, XSIMD_STRING("n2bv_8"), {26, 2, 0, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2bv_8) (planner *p) {
     X(kdft_register) (p, n2bv_8, &desc);
}

#endif