annotate src/fftw-3.3.3/kernel/tensor7.c @ 155:54abead6ecce

Opus for Windows (MSVC)
author Chris Cannam <cannam@all-day-breakfast.com>
date Fri, 25 Jan 2019 12:15:58 +0000
parents 89f5e221ed7b
children
rev   line source
cannam@95 1 /*
cannam@95 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
cannam@95 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
cannam@95 4 *
cannam@95 5 * This program is free software; you can redistribute it and/or modify
cannam@95 6 * it under the terms of the GNU General Public License as published by
cannam@95 7 * the Free Software Foundation; either version 2 of the License, or
cannam@95 8 * (at your option) any later version.
cannam@95 9 *
cannam@95 10 * This program is distributed in the hope that it will be useful,
cannam@95 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
cannam@95 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
cannam@95 13 * GNU General Public License for more details.
cannam@95 14 *
cannam@95 15 * You should have received a copy of the GNU General Public License
cannam@95 16 * along with this program; if not, write to the Free Software
cannam@95 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
cannam@95 18 *
cannam@95 19 */
cannam@95 20
cannam@95 21
cannam@95 22 #include "ifftw.h"
cannam@95 23
cannam@95 24 static int signof(INT x)
cannam@95 25 {
cannam@95 26 if (x < 0) return -1;
cannam@95 27 if (x == 0) return 0;
cannam@95 28 /* if (x > 0) */ return 1;
cannam@95 29 }
cannam@95 30
cannam@95 31 /* total order among iodim's */
cannam@95 32 int X(dimcmp)(const iodim *a, const iodim *b)
cannam@95 33 {
cannam@95 34 INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);
cannam@95 35 INT sao = X(iabs)(a->os), sbo = X(iabs)(b->os);
cannam@95 36 INT sam = X(imin)(sai, sao), sbm = X(imin)(sbi, sbo);
cannam@95 37
cannam@95 38 /* in descending order of min{istride, ostride} */
cannam@95 39 if (sam != sbm)
cannam@95 40 return signof(sbm - sam);
cannam@95 41
cannam@95 42 /* in case of a tie, in descending order of istride */
cannam@95 43 if (sbi != sai)
cannam@95 44 return signof(sbi - sai);
cannam@95 45
cannam@95 46 /* in case of a tie, in descending order of ostride */
cannam@95 47 if (sbo != sao)
cannam@95 48 return signof(sbo - sao);
cannam@95 49
cannam@95 50 /* in case of a tie, in ascending order of n */
cannam@95 51 return signof(a->n - b->n);
cannam@95 52 }
cannam@95 53
cannam@95 54 static void canonicalize(tensor *x)
cannam@95 55 {
cannam@95 56 if (x->rnk > 1) {
cannam@95 57 qsort(x->dims, (size_t)x->rnk, sizeof(iodim),
cannam@95 58 (int (*)(const void *, const void *))X(dimcmp));
cannam@95 59 }
cannam@95 60 }
cannam@95 61
cannam@95 62 static int compare_by_istride(const iodim *a, const iodim *b)
cannam@95 63 {
cannam@95 64 INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);
cannam@95 65
cannam@95 66 /* in descending order of istride */
cannam@95 67 return signof(sbi - sai);
cannam@95 68 }
cannam@95 69
cannam@95 70 static tensor *really_compress(const tensor *sz)
cannam@95 71 {
cannam@95 72 int i, rnk;
cannam@95 73 tensor *x;
cannam@95 74
cannam@95 75 A(FINITE_RNK(sz->rnk));
cannam@95 76 for (i = rnk = 0; i < sz->rnk; ++i) {
cannam@95 77 A(sz->dims[i].n > 0);
cannam@95 78 if (sz->dims[i].n != 1)
cannam@95 79 ++rnk;
cannam@95 80 }
cannam@95 81
cannam@95 82 x = X(mktensor)(rnk);
cannam@95 83 for (i = rnk = 0; i < sz->rnk; ++i) {
cannam@95 84 if (sz->dims[i].n != 1)
cannam@95 85 x->dims[rnk++] = sz->dims[i];
cannam@95 86 }
cannam@95 87 return x;
cannam@95 88 }
cannam@95 89
cannam@95 90 /* Like tensor_copy, but eliminate n == 1 dimensions, which
cannam@95 91 never affect any transform or transform vector.
cannam@95 92
cannam@95 93 Also, we sort the tensor into a canonical order of decreasing
cannam@95 94 strides (see X(dimcmp) for an exact definition). In general,
cannam@95 95 processing a loop/array in order of decreasing stride will improve
cannam@95 96 locality. Both forward and backwards traversal of the tensor are
cannam@95 97 considered e.g. by vrank-geq1, so sorting in increasing
cannam@95 98 vs. decreasing order is not really important. */
cannam@95 99 tensor *X(tensor_compress)(const tensor *sz)
cannam@95 100 {
cannam@95 101 tensor *x = really_compress(sz);
cannam@95 102 canonicalize(x);
cannam@95 103 return x;
cannam@95 104 }
cannam@95 105
cannam@95 106 /* Return whether the strides of a and b are such that they form an
cannam@95 107 effective contiguous 1d array. Assumes that a.is >= b.is. */
cannam@95 108 static int strides_contig(iodim *a, iodim *b)
cannam@95 109 {
cannam@95 110 return (a->is == b->is * b->n && a->os == b->os * b->n);
cannam@95 111 }
cannam@95 112
/* Like tensor_compress, but also compress into one dimension any
   group of dimensions that form a contiguous block of indices with
   some stride.  (This can safely be done for transform vector sizes.) */
tensor *X(tensor_compress_contiguous)(const tensor *sz)
{
     int i, rnk;
     tensor *sz2, *x;

     /* an empty tensor (total size 0) compresses to the minus-infinity
	rank sentinel */
     if (X(tensor_sz)(sz) == 0)
	  return X(mktensor)(RNK_MINFTY);

     sz2 = really_compress(sz);
     A(FINITE_RNK(sz2->rnk));

     if (sz2->rnk <= 1) { /* nothing to compress. */
	  if (0) {
	       /* this call is redundant, because "sz->rnk <= 1" implies
		  that the tensor is already canonical, but I am writing
		  it explicitly because "logically" we need to canonicalize
		  the tensor before returning. */
	       canonicalize(sz2);
	  }
	  return sz2;
     }

     /* sort in descending order of |istride|, so that compressible
	dimensions appear contiguously */
     qsort(sz2->dims, (size_t)sz2->rnk, sizeof(iodim),
	   (int (*)(const void *, const void *))compare_by_istride);

     /* compute what the rank will be after compression, so that the
	result tensor can be allocated at its final size up front */
     for (i = rnk = 1; i < sz2->rnk; ++i)
	  if (!strides_contig(sz2->dims + i - 1, sz2->dims + i))
	       ++rnk;

     /* merge adjacent dimensions whenever possible: the merged
	dimension takes the product of the n's and the (smaller)
	strides of the inner dimension */
     x = X(mktensor)(rnk);
     x->dims[0] = sz2->dims[0];
     for (i = rnk = 1; i < sz2->rnk; ++i) {
	  if (strides_contig(sz2->dims + i - 1, sz2->dims + i)) {
	       x->dims[rnk - 1].n *= sz2->dims[i].n;
	       x->dims[rnk - 1].is = sz2->dims[i].is;
	       x->dims[rnk - 1].os = sz2->dims[i].os;
	  } else {
	       A(rnk < x->rnk);
	       x->dims[rnk++] = sz2->dims[i];
	  }
     }

     /* the intermediate compressed tensor is no longer needed */
     X(tensor_destroy)(sz2);

     /* reduce to canonical form */
     canonicalize(x);
     return x;
}
cannam@95 168
/* The inverse of X(tensor_append): splits the sz tensor into
   tensor a followed by tensor b, where a's rank is arnk.
   Both *a and *b are freshly allocated copies owned by the caller
   (to be released with X(tensor_destroy)). */
void X(tensor_split)(const tensor *sz, tensor **a, int arnk, tensor **b)
{
     /* splitting is only meaningful for finite ranks */
     A(FINITE_RNK(sz->rnk) && FINITE_RNK(arnk));

     *a = X(tensor_copy_sub)(sz, 0, arnk);
     *b = X(tensor_copy_sub)(sz, arnk, sz->rnk - arnk);
}
cannam@95 178
cannam@95 179 /* TRUE if the two tensors are equal */
cannam@95 180 int X(tensor_equal)(const tensor *a, const tensor *b)
cannam@95 181 {
cannam@95 182 if (a->rnk != b->rnk)
cannam@95 183 return 0;
cannam@95 184
cannam@95 185 if (FINITE_RNK(a->rnk)) {
cannam@95 186 int i;
cannam@95 187 for (i = 0; i < a->rnk; ++i)
cannam@95 188 if (0
cannam@95 189 || a->dims[i].n != b->dims[i].n
cannam@95 190 || a->dims[i].is != b->dims[i].is
cannam@95 191 || a->dims[i].os != b->dims[i].os
cannam@95 192 )
cannam@95 193 return 0;
cannam@95 194 }
cannam@95 195
cannam@95 196 return 1;
cannam@95 197 }
cannam@95 198
/* TRUE if the sets of input and output locations described by
   (append sz vecsz) are the same.

   Strategy: build the combined tensor, derive two variants whose
   strides are forced to the input (INPLACE_IS) and output
   (INPLACE_OS) sides respectively, compress both to canonical form,
   and compare.  Equal canonical forms mean the same set of memory
   locations is addressed on both sides. */
int X(tensor_inplace_locations)(const tensor *sz, const tensor *vecsz)
{
     tensor *t = X(tensor_append)(sz, vecsz);
     tensor *ti = X(tensor_copy_inplace)(t, INPLACE_IS);
     tensor *to = X(tensor_copy_inplace)(t, INPLACE_OS);
     tensor *tic = X(tensor_compress_contiguous)(ti);
     tensor *toc = X(tensor_compress_contiguous)(to);

     int retval = X(tensor_equal)(tic, toc);

     /* release all five temporaries before returning */
     X(tensor_destroy)(t);
     X(tensor_destroy4)(ti, to, tic, toc);

     return retval;
}