annotate fft/fftw/fftw-3.3.4/kernel/tensor7.c @ 40:223f770b5341 kissfft-double tip

Try a double-precision kissfft
author Chris Cannam
date Wed, 07 Sep 2016 10:40:32 +0100
parents 26056e866c29
children
rev   line source
Chris@19 1 /*
Chris@19 2 * Copyright (c) 2003, 2007-14 Matteo Frigo
Chris@19 3 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
Chris@19 4 *
Chris@19 5 * This program is free software; you can redistribute it and/or modify
Chris@19 6 * it under the terms of the GNU General Public License as published by
Chris@19 7 * the Free Software Foundation; either version 2 of the License, or
Chris@19 8 * (at your option) any later version.
Chris@19 9 *
Chris@19 10 * This program is distributed in the hope that it will be useful,
Chris@19 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Chris@19 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Chris@19 13 * GNU General Public License for more details.
Chris@19 14 *
Chris@19 15 * You should have received a copy of the GNU General Public License
Chris@19 16 * along with this program; if not, write to the Free Software
Chris@19 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Chris@19 18 *
Chris@19 19 */
Chris@19 20
Chris@19 21
Chris@19 22 #include "ifftw.h"
Chris@19 23
Chris@19 24 static int signof(INT x)
Chris@19 25 {
Chris@19 26 if (x < 0) return -1;
Chris@19 27 if (x == 0) return 0;
Chris@19 28 /* if (x > 0) */ return 1;
Chris@19 29 }
Chris@19 30
Chris@19 31 /* total order among iodim's */
Chris@19 32 int X(dimcmp)(const iodim *a, const iodim *b)
Chris@19 33 {
Chris@19 34 INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);
Chris@19 35 INT sao = X(iabs)(a->os), sbo = X(iabs)(b->os);
Chris@19 36 INT sam = X(imin)(sai, sao), sbm = X(imin)(sbi, sbo);
Chris@19 37
Chris@19 38 /* in descending order of min{istride, ostride} */
Chris@19 39 if (sam != sbm)
Chris@19 40 return signof(sbm - sam);
Chris@19 41
Chris@19 42 /* in case of a tie, in descending order of istride */
Chris@19 43 if (sbi != sai)
Chris@19 44 return signof(sbi - sai);
Chris@19 45
Chris@19 46 /* in case of a tie, in descending order of ostride */
Chris@19 47 if (sbo != sao)
Chris@19 48 return signof(sbo - sao);
Chris@19 49
Chris@19 50 /* in case of a tie, in ascending order of n */
Chris@19 51 return signof(a->n - b->n);
Chris@19 52 }
Chris@19 53
Chris@19 54 static void canonicalize(tensor *x)
Chris@19 55 {
Chris@19 56 if (x->rnk > 1) {
Chris@19 57 qsort(x->dims, (size_t)x->rnk, sizeof(iodim),
Chris@19 58 (int (*)(const void *, const void *))X(dimcmp));
Chris@19 59 }
Chris@19 60 }
Chris@19 61
Chris@19 62 static int compare_by_istride(const iodim *a, const iodim *b)
Chris@19 63 {
Chris@19 64 INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);
Chris@19 65
Chris@19 66 /* in descending order of istride */
Chris@19 67 return signof(sbi - sai);
Chris@19 68 }
Chris@19 69
Chris@19 70 static tensor *really_compress(const tensor *sz)
Chris@19 71 {
Chris@19 72 int i, rnk;
Chris@19 73 tensor *x;
Chris@19 74
Chris@19 75 A(FINITE_RNK(sz->rnk));
Chris@19 76 for (i = rnk = 0; i < sz->rnk; ++i) {
Chris@19 77 A(sz->dims[i].n > 0);
Chris@19 78 if (sz->dims[i].n != 1)
Chris@19 79 ++rnk;
Chris@19 80 }
Chris@19 81
Chris@19 82 x = X(mktensor)(rnk);
Chris@19 83 for (i = rnk = 0; i < sz->rnk; ++i) {
Chris@19 84 if (sz->dims[i].n != 1)
Chris@19 85 x->dims[rnk++] = sz->dims[i];
Chris@19 86 }
Chris@19 87 return x;
Chris@19 88 }
Chris@19 89
Chris@19 90 /* Like tensor_copy, but eliminate n == 1 dimensions, which
Chris@19 91 never affect any transform or transform vector.
Chris@19 92
Chris@19 93 Also, we sort the tensor into a canonical order of decreasing
Chris@19 94 strides (see X(dimcmp) for an exact definition). In general,
Chris@19 95 processing a loop/array in order of decreasing stride will improve
Chris@19 96 locality. Both forward and backwards traversal of the tensor are
Chris@19 97 considered e.g. by vrank-geq1, so sorting in increasing
Chris@19 98 vs. decreasing order is not really important. */
Chris@19 99 tensor *X(tensor_compress)(const tensor *sz)
Chris@19 100 {
Chris@19 101 tensor *x = really_compress(sz);
Chris@19 102 canonicalize(x);
Chris@19 103 return x;
Chris@19 104 }
Chris@19 105
Chris@19 106 /* Return whether the strides of a and b are such that they form an
Chris@19 107 effective contiguous 1d array. Assumes that a.is >= b.is. */
Chris@19 108 static int strides_contig(iodim *a, iodim *b)
Chris@19 109 {
Chris@19 110 return (a->is == b->is * b->n && a->os == b->os * b->n);
Chris@19 111 }
Chris@19 112
/* Like tensor_compress, but also compress into one dimension any
   group of dimensions that form a contiguous block of indices with
   some stride. (This can safely be done for transform vector sizes.) */
tensor *X(tensor_compress_contiguous)(const tensor *sz)
{
     int i, rnk;
     tensor *sz2, *x;

     /* a tensor of total size zero compresses to rank -infinity */
     if (X(tensor_sz)(sz) == 0)
	  return X(mktensor)(RNK_MINFTY);

     /* drop n == 1 dimensions first */
     sz2 = really_compress(sz);
     A(FINITE_RNK(sz2->rnk));

     if (sz2->rnk <= 1) { /* nothing to compress. */
	  if (0) {
	       /* this call is redundant, because "sz->rnk <= 1" implies
		  that the tensor is already canonical, but I am writing
		  it explicitly because "logically" we need to canonicalize
		  the tensor before returning. */
	       canonicalize(sz2);
	  }
	  return sz2;
     }

     /* sort in descending order of |istride|, so that compressible
	dimensions appear contiguously */
     qsort(sz2->dims, (size_t)sz2->rnk, sizeof(iodim),
	   (int (*)(const void *, const void *))compare_by_istride);

     /* compute what the rank will be after compression, so the output
	tensor can be allocated exactly */
     for (i = rnk = 1; i < sz2->rnk; ++i)
	  if (!strides_contig(sz2->dims + i - 1, sz2->dims + i))
	       ++rnk;

     /* merge adjacent dimensions whenever possible: the merged count
	is the product of the counts, and the merged strides are those
	of the inner (smaller-stride) dimension i */
     x = X(mktensor)(rnk);
     x->dims[0] = sz2->dims[0];
     for (i = rnk = 1; i < sz2->rnk; ++i) {
	  if (strides_contig(sz2->dims + i - 1, sz2->dims + i)) {
	       x->dims[rnk - 1].n *= sz2->dims[i].n;
	       x->dims[rnk - 1].is = sz2->dims[i].is;
	       x->dims[rnk - 1].os = sz2->dims[i].os;
	  } else {
	       A(rnk < x->rnk);
	       x->dims[rnk++] = sz2->dims[i];
	  }
     }

     X(tensor_destroy)(sz2);

     /* reduce to canonical form */
     canonicalize(x);
     return x;
}
Chris@19 168
Chris@19 169 /* The inverse of X(tensor_append): splits the sz tensor into
Chris@19 170 tensor a followed by tensor b, where a's rank is arnk. */
Chris@19 171 void X(tensor_split)(const tensor *sz, tensor **a, int arnk, tensor **b)
Chris@19 172 {
Chris@19 173 A(FINITE_RNK(sz->rnk) && FINITE_RNK(arnk));
Chris@19 174
Chris@19 175 *a = X(tensor_copy_sub)(sz, 0, arnk);
Chris@19 176 *b = X(tensor_copy_sub)(sz, arnk, sz->rnk - arnk);
Chris@19 177 }
Chris@19 178
Chris@19 179 /* TRUE if the two tensors are equal */
Chris@19 180 int X(tensor_equal)(const tensor *a, const tensor *b)
Chris@19 181 {
Chris@19 182 if (a->rnk != b->rnk)
Chris@19 183 return 0;
Chris@19 184
Chris@19 185 if (FINITE_RNK(a->rnk)) {
Chris@19 186 int i;
Chris@19 187 for (i = 0; i < a->rnk; ++i)
Chris@19 188 if (0
Chris@19 189 || a->dims[i].n != b->dims[i].n
Chris@19 190 || a->dims[i].is != b->dims[i].is
Chris@19 191 || a->dims[i].os != b->dims[i].os
Chris@19 192 )
Chris@19 193 return 0;
Chris@19 194 }
Chris@19 195
Chris@19 196 return 1;
Chris@19 197 }
Chris@19 198
Chris@19 199 /* TRUE if the sets of input and output locations described by
Chris@19 200 (append sz vecsz) are the same */
Chris@19 201 int X(tensor_inplace_locations)(const tensor *sz, const tensor *vecsz)
Chris@19 202 {
Chris@19 203 tensor *t = X(tensor_append)(sz, vecsz);
Chris@19 204 tensor *ti = X(tensor_copy_inplace)(t, INPLACE_IS);
Chris@19 205 tensor *to = X(tensor_copy_inplace)(t, INPLACE_OS);
Chris@19 206 tensor *tic = X(tensor_compress_contiguous)(ti);
Chris@19 207 tensor *toc = X(tensor_compress_contiguous)(to);
Chris@19 208
Chris@19 209 int retval = X(tensor_equal)(tic, toc);
Chris@19 210
Chris@19 211 X(tensor_destroy)(t);
Chris@19 212 X(tensor_destroy4)(ti, to, tic, toc);
Chris@19 213
Chris@19 214 return retval;
Chris@19 215 }