annotate src/fftw-3.3.8/dft/simd/common/n2bv_6.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents d0c2a83c1364

/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu May 24 08:05:11 EDT 2018 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 6 -name n2bv_6 -with-ostride 2 -include dft/simd/n2b.h -store-multiple 2 */

/*
 * This function contains 18 FP additions, 8 FP multiplications,
 * (or, 12 additions, 2 multiplications, 6 fused multiply/add),
 * 25 stack variables, 2 constants, and 15 memory accesses
 */
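
/*
 * Annotation (editorial, not part of the generated output): the codelet
 * below computes one size-6 transform per SIMD vector element. Its only
 * constants are cos(2*pi/6) = 0.5 and sin(2*pi/6) = sqrt(3)/2 ~= 0.866025403.
 * The inputs are loaded in the index pairs {0,3}, {2,5}, {4,1}, reduced to
 * sums and differences, and recombined; the FMA-oriented VFMAI/VFNMSI
 * primitives appear to fold the +/- i * sqrt(3)/2 contributions into
 * single fused operations.
 */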
#include "dft/simd/n2b.h"

static void n2bv_6(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ii;
          xo = io;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(12, is), MAKE_VOLATILE_STRIDE(12, os)) {
               V T3, Td, T6, Te, T9, Tf, Ta, Tg, T1, T2, Tj, Tk;
               T1 = LD(&(xi[0]), ivs, &(xi[0]));
               T2 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
               T3 = VSUB(T1, T2);
               Td = VADD(T1, T2);
               {
                    V T4, T5, T7, T8;
                    T4 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                    T5 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                    T6 = VSUB(T4, T5);
                    Te = VADD(T4, T5);
                    T7 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                    T8 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
               }
               Ta = VADD(T6, T9);
               Tg = VADD(Te, Tf);
               Tj = VADD(T3, Ta);
               STM2(&(xo[6]), Tj, ovs, &(xo[2]));
               Tk = VADD(Td, Tg);
               STM2(&(xo[0]), Tk, ovs, &(xo[0]));
               {
                    V Tm, Tb, Tc, Tl;
                    Tb = VFNMS(LDK(KP500000000), Ta, T3);
                    Tc = VMUL(LDK(KP866025403), VSUB(T6, T9));
                    Tl = VFMAI(Tc, Tb);
                    STM2(&(xo[2]), Tl, ovs, &(xo[2]));
                    STN2(&(xo[0]), Tk, Tl, ovs);
                    Tm = VFNMSI(Tc, Tb);
                    STM2(&(xo[10]), Tm, ovs, &(xo[2]));
                    {
                         V Th, Ti, Tn, To;
                         Th = VFNMS(LDK(KP500000000), Tg, Td);
                         Ti = VMUL(LDK(KP866025403), VSUB(Te, Tf));
                         Tn = VFNMSI(Ti, Th);
                         STM2(&(xo[4]), Tn, ovs, &(xo[0]));
                         STN2(&(xo[4]), Tn, Tj, ovs);
                         To = VFMAI(Ti, Th);
                         STM2(&(xo[8]), To, ovs, &(xo[0]));
                         STN2(&(xo[8]), To, Tm, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 6, XSIMD_STRING("n2bv_6"), {12, 2, 6, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2bv_6) (planner *p) {
     X(kdft_register) (p, n2bv_6, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -sign 1 -n 6 -name n2bv_6 -with-ostride 2 -include dft/simd/n2b.h -store-multiple 2 */

/*
 * This function contains 18 FP additions, 4 FP multiplications,
 * (or, 16 additions, 2 multiplications, 2 fused multiply/add),
 * 25 stack variables, 2 constants, and 15 memory accesses
 */
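
/*
 * Annotation (editorial, not part of the generated output): this fallback
 * variant performs the same computation as the FMA branch above, but
 * expresses the +/- i * sqrt(3)/2 combinations with VBYI (multiply by i)
 * followed by plain VADD/VSUB rather than fused VFMAI/VFNMSI, hence the
 * different addition/multiplication split reported just above.
 */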
#include "dft/simd/n2b.h"

static void n2bv_6(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ii;
          xo = io;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(12, is), MAKE_VOLATILE_STRIDE(12, os)) {
               V Ta, Td, T3, Te, T6, Tf, Tb, Tg, T8, T9, Tj, Tk;
               T8 = LD(&(xi[0]), ivs, &(xi[0]));
               T9 = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
               Ta = VSUB(T8, T9);
               Td = VADD(T8, T9);
               {
                    V T1, T2, T4, T5;
                    T1 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                    T2 = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                    T3 = VSUB(T1, T2);
                    Te = VADD(T1, T2);
                    T4 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                    T5 = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                    T6 = VSUB(T4, T5);
                    Tf = VADD(T4, T5);
               }
               Tb = VADD(T3, T6);
               Tg = VADD(Te, Tf);
               Tj = VADD(Ta, Tb);
               STM2(&(xo[6]), Tj, ovs, &(xo[2]));
               Tk = VADD(Td, Tg);
               STM2(&(xo[0]), Tk, ovs, &(xo[0]));
               {
                    V Tm, T7, Tc, Tl;
                    T7 = VBYI(VMUL(LDK(KP866025403), VSUB(T3, T6)));
                    Tc = VFNMS(LDK(KP500000000), Tb, Ta);
                    Tl = VADD(T7, Tc);
                    STM2(&(xo[2]), Tl, ovs, &(xo[2]));
                    STN2(&(xo[0]), Tk, Tl, ovs);
                    Tm = VSUB(Tc, T7);
                    STM2(&(xo[10]), Tm, ovs, &(xo[2]));
                    {
                         V Th, Ti, Tn, To;
                         Th = VFNMS(LDK(KP500000000), Tg, Td);
                         Ti = VBYI(VMUL(LDK(KP866025403), VSUB(Te, Tf)));
                         Tn = VSUB(Th, Ti);
                         STM2(&(xo[4]), Tn, ovs, &(xo[0]));
                         STN2(&(xo[4]), Tn, Tj, ovs);
                         To = VADD(Ti, Th);
                         STM2(&(xo[8]), To, ovs, &(xo[0]));
                         STN2(&(xo[8]), To, Tm, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 6, XSIMD_STRING("n2bv_6"), {16, 2, 2, 0}, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2bv_6) (planner *p) {
     X(kdft_register) (p, n2bv_6, &desc);
}

#endif
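
For reference, the transform this codelet family evaluates can also be written out naively. The sketch below is an illustrative, standalone program and not part of FFTW: an unnormalized size-6 DFT using the +i sign convention suggested by the -sign 1 flag in the generator command lines above; the function and file names (naive_dft6, naive_dft6.c) are invented for the example.

/* naive_dft6.c -- illustrative only; names are invented, not FFTW API */
#include <complex.h>
#include <stdio.h>

/* Unnormalized size-6 DFT with the +i sign convention:
   out[k] = sum_j in[j] * exp(+2*pi*i*j*k/6) */
static void naive_dft6(const double complex *in, double complex *out)
{
    const double pi = 3.14159265358979323846;
    for (int k = 0; k < 6; ++k) {
        double complex acc = 0.0;
        for (int j = 0; j < 6; ++j)
            acc += in[j] * cexp(2.0 * pi * I * (double) (j * k) / 6.0);
        out[k] = acc;
    }
}

int main(void)
{
    double complex x[6] = { 1, 2, 3, 4, 5, 6 }, X[6];
    naive_dft6(x, X);
    for (int k = 0; k < 6; ++k)
        printf("X[%d] = % .6f %+.6fi\n", k, creal(X[k]), cimag(X[k]));
    return 0;
}

Compiled with a C99 compiler (linking against the math library for cexp), this prints the same six complex outputs the codelet would produce for one transform, whereas the generated code above computes them vectorized across VL transforms at a time with the operation counts listed in its header comments.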