annotate src/fftw-3.3.3/dft/simd/common/n2sv_4.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 37bf6b4a2645
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Sun Nov 25 07:37:47 EST 2012 */

#include "codelet-dft.h"

#ifdef HAVE_FMA

/* Generated by: ../../../genfft/gen_notw.native -fma -reorder-insns -schedule-for-pipeline -simd -compact -variables 4 -pipeline-latency 8 -n 4 -name n2sv_4 -with-ostride 1 -include n2s.h -store-multiple 4 */
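/*
 * A rough reading of the generator flags above, inferred from the code
 * below rather than from genfft's documentation: -n 4 sets the
 * transform size, -simd emits vector (V-type) arithmetic, -fma together
 * with -reorder-insns and -schedule-for-pipeline schedules instructions
 * for fused multiply-add hardware, -with-ostride 1 hard-codes the
 * output stride so ro/io are indexed directly, and -store-multiple 4
 * batches stores through the STM4/STN4 macros.
 */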

/*
 * This function contains 16 FP additions, 0 FP multiplications,
 * (or, 16 additions, 0 multiplications, 0 fused multiply/add),
 * 25 stack variables, 0 constants, and 18 memory accesses
 */
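/*
 * For reference: the size-4 DFT needs no multiplications because its
 * twiddle factors are only +1, -1, +i, and -i. Writing r0..r3 and
 * i0..i3 for the real and imaginary inputs, the stores below compute
 *
 *   ro[0] = (r0+r2) + (r1+r3)    io[0] = (i0+i2) + (i1+i3)
 *   ro[1] = (r0-r2) + (i1-i3)    io[1] = (i0-i2) - (r1-r3)
 *   ro[2] = (r0+r2) - (r1+r3)    io[2] = (i0+i2) - (i1+i3)
 *   ro[3] = (r0-r2) - (i1-i3)    io[3] = (i0-i2) + (r1-r3)
 *
 * which accounts for all 16 additions counted above.
 */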
#include "n2s.h"

static void n2sv_4(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
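               /* Each pass consumes 2 * VL of the v transforms in the
                  batch (VL being the SIMD vector length); all eight
                  loads are issued before any arithmetic, consistent
                  with the -pipeline-latency 8 setting. */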
               V T1, T2, T7, T8, T4, T5, Tc, Td;
               T1 = LD(&(ri[0]), ivs, &(ri[0]));
               T2 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
               T7 = LD(&(ii[0]), ivs, &(ii[0]));
               T8 = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
               T4 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
               T5 = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
               Tc = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
               Td = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
               {
                    V T3, Tb, T9, Tf, T6, Ta, Te, Tg;
                    T3 = VADD(T1, T2);
                    Tb = VSUB(T1, T2);
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
                    T6 = VADD(T4, T5);
                    Ta = VSUB(T4, T5);
                    Te = VSUB(Tc, Td);
                    Tg = VADD(Tc, Td);
                    {
                         V Th, Ti, Tj, Tk;
                         Th = VADD(Ta, T9);
                         STM4(&(io[3]), Th, ovs, &(io[1]));
                         Ti = VSUB(T9, Ta);
                         STM4(&(io[1]), Ti, ovs, &(io[1]));
                         Tj = VADD(T3, T6);
                         STM4(&(ro[0]), Tj, ovs, &(ro[0]));
                         Tk = VSUB(T3, T6);
                         STM4(&(ro[2]), Tk, ovs, &(ro[0]));
                         {
                              V Tl, Tm, Tn, To;
                              Tl = VADD(Tf, Tg);
                              STM4(&(io[0]), Tl, ovs, &(io[0]));
                              Tm = VSUB(Tf, Tg);
                              STM4(&(io[2]), Tm, ovs, &(io[0]));
                              STN4(&(io[0]), Tl, Ti, Tm, Th, ovs);
                              Tn = VSUB(Tb, Te);
                              STM4(&(ro[3]), Tn, ovs, &(ro[1]));
                              To = VADD(Tb, Te);
                              STM4(&(ro[1]), To, ovs, &(ro[1]));
                              STN4(&(ro[0]), Tj, To, Tk, Tn, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}

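/*
 * A field-by-field reading of the descriptor below, based on the
 * kdft_desc layout in codelet-dft.h (informed guesswork, not asserted
 * by this file): transform size 4, codelet name, operation counts
 * {adds, muls, fmas, other} = {16, 0, 0, 0}, the SIMD genus, and the
 * input/output stride constraints, where the 1 reflects the output
 * stride hard-coded by -with-ostride 1.
 */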
static const kdft_desc desc = { 4, XSIMD_STRING("n2sv_4"), {16, 0, 0, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_4) (planner *p) {
     X(kdft_register) (p, n2sv_4, &desc);
}

#else /* HAVE_FMA */

/* Generated by: ../../../genfft/gen_notw.native -simd -compact -variables 4 -pipeline-latency 8 -n 4 -name n2sv_4 -with-ostride 1 -include n2s.h -store-multiple 4 */
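/*
 * Same generator invocation as the HAVE_FMA branch above minus -fma,
 * -reorder-insns, and -schedule-for-pipeline: the arithmetic and the
 * operation counts are identical (16 additions), but the simpler
 * schedule needs only 17 stack variables instead of 25.
 */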

/*
 * This function contains 16 FP additions, 0 FP multiplications,
 * (or, 16 additions, 0 multiplications, 0 fused multiply/add),
 * 17 stack variables, 0 constants, and 18 memory accesses
 */
#include "n2s.h"

static void n2sv_4(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     {
          INT i;
          for (i = v; i > 0; i = i - (2 * VL), ri = ri + ((2 * VL) * ivs), ii = ii + ((2 * VL) * ivs), ro = ro + ((2 * VL) * ovs), io = io + ((2 * VL) * ovs), MAKE_VOLATILE_STRIDE(16, is), MAKE_VOLATILE_STRIDE(16, os)) {
               V T3, Tb, T9, Tf, T6, Ta, Te, Tg;
               {
                    V T1, T2, T7, T8;
                    T1 = LD(&(ri[0]), ivs, &(ri[0]));
                    T2 = LD(&(ri[WS(is, 2)]), ivs, &(ri[0]));
                    T3 = VADD(T1, T2);
                    Tb = VSUB(T1, T2);
                    T7 = LD(&(ii[0]), ivs, &(ii[0]));
                    T8 = LD(&(ii[WS(is, 2)]), ivs, &(ii[0]));
                    T9 = VSUB(T7, T8);
                    Tf = VADD(T7, T8);
               }
               {
                    V T4, T5, Tc, Td;
                    T4 = LD(&(ri[WS(is, 1)]), ivs, &(ri[WS(is, 1)]));
                    T5 = LD(&(ri[WS(is, 3)]), ivs, &(ri[WS(is, 1)]));
                    T6 = VADD(T4, T5);
                    Ta = VSUB(T4, T5);
                    Tc = LD(&(ii[WS(is, 1)]), ivs, &(ii[WS(is, 1)]));
                    Td = LD(&(ii[WS(is, 3)]), ivs, &(ii[WS(is, 1)]));
                    Te = VSUB(Tc, Td);
                    Tg = VADD(Tc, Td);
               }
               {
                    V Th, Ti, Tj, Tk;
                    Th = VSUB(T3, T6);
                    STM4(&(ro[2]), Th, ovs, &(ro[0]));
                    Ti = VSUB(Tf, Tg);
                    STM4(&(io[2]), Ti, ovs, &(io[0]));
                    Tj = VADD(T3, T6);
                    STM4(&(ro[0]), Tj, ovs, &(ro[0]));
                    Tk = VADD(Tf, Tg);
                    STM4(&(io[0]), Tk, ovs, &(io[0]));
                    {
                         V Tl, Tm, Tn, To;
                         Tl = VSUB(T9, Ta);
                         STM4(&(io[1]), Tl, ovs, &(io[1]));
                         Tm = VADD(Tb, Te);
                         STM4(&(ro[1]), Tm, ovs, &(ro[1]));
                         Tn = VADD(Ta, T9);
                         STM4(&(io[3]), Tn, ovs, &(io[1]));
                         STN4(&(io[0]), Tk, Tl, Ti, Tn, ovs);
                         To = VSUB(Tb, Te);
                         STM4(&(ro[3]), To, ovs, &(ro[1]));
                         STN4(&(ro[0]), Tj, Tm, Th, To, ovs);
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 4, XSIMD_STRING("n2sv_4"), {16, 0, 0, 0}, &GENUS, 0, 1, 0, 0 };

void XSIMD(codelet_n2sv_4) (planner *p) {
     X(kdft_register) (p, n2sv_4, &desc);
}

#endif /* HAVE_FMA */