/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */


#include "ifftw.h"

static int signof(INT x)
{
     if (x < 0) return -1;
     if (x == 0) return 0;
     /* if (x > 0) */ return 1;
}

/* total order among iodim's */
int X(dimcmp)(const iodim *a, const iodim *b)
{
     INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);
     INT sao = X(iabs)(a->os), sbo = X(iabs)(b->os);
     INT sam = X(imin)(sai, sao), sbm = X(imin)(sbi, sbo);

     /* in descending order of min{istride, ostride} */
     if (sam != sbm)
          return signof(sbm - sam);

     /* in case of a tie, in descending order of istride */
     if (sbi != sai)
          return signof(sbi - sai);

     /* in case of a tie, in descending order of ostride */
     if (sbo != sao)
          return signof(sbo - sao);

     /* in case of a tie, in ascending order of n */
     return signof(a->n - b->n);
}

static void canonicalize(tensor *x)
{
     if (x->rnk > 1) {
          qsort(x->dims, (size_t)x->rnk, sizeof(iodim),
                (int (*)(const void *, const void *))X(dimcmp));
     }
}

static int compare_by_istride(const iodim *a, const iodim *b)
{
     INT sai = X(iabs)(a->is), sbi = X(iabs)(b->is);

     /* in descending order of istride */
     return signof(sbi - sai);
}

static tensor *really_compress(const tensor *sz)
{
     int i, rnk;
     tensor *x;

     A(FINITE_RNK(sz->rnk));
     for (i = rnk = 0; i < sz->rnk; ++i) {
          A(sz->dims[i].n > 0);
          if (sz->dims[i].n != 1)
               ++rnk;
     }

     x = X(mktensor)(rnk);
     for (i = rnk = 0; i < sz->rnk; ++i) {
          if (sz->dims[i].n != 1)
               x->dims[rnk++] = sz->dims[i];
     }
     return x;
}
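/* Illustrative sketch of the two helpers above (the example_* name is
   made up for illustration, and the block is guarded by #if 0 so it is
   never compiled): really_compress() drops the n == 1 dimension, and
   canonicalize() then sorts the remaining dimensions in descending
   order of min{|istride|, |ostride|}, per X(dimcmp). */
#if 0
static void example_compress(void)
{
     tensor *sz = X(mktensor)(3);
     tensor *x;

     sz->dims[0].n = 1; sz->dims[0].is = 100; sz->dims[0].os = 100;
     sz->dims[1].n = 4; sz->dims[1].is = 1;   sz->dims[1].os = 1;
     sz->dims[2].n = 3; sz->dims[2].is = 4;   sz->dims[2].os = 4;

     x = really_compress(sz);   /* rank 2: the n == 1 dimension is gone */
     canonicalize(x);           /* sorted as (n=3, is=4), (n=4, is=1) */
     A(x->rnk == 2 && x->dims[0].n == 3 && x->dims[1].n == 4);

     X(tensor_destroy)(sz);
     X(tensor_destroy)(x);
}
#endif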
/* Like tensor_copy, but eliminate n == 1 dimensions, which
   never affect any transform or transform vector.

   Also, we sort the tensor into a canonical order of decreasing
   strides (see X(dimcmp) for an exact definition).  In general,
   processing a loop/array in order of decreasing stride will improve
   locality.  Both forward and backward traversal of the tensor are
   considered, e.g., by vrank-geq1, so sorting in increasing
   vs. decreasing order is not really important. */
tensor *X(tensor_compress)(const tensor *sz)
{
     tensor *x = really_compress(sz);
     canonicalize(x);
     return x;
}

/* Return whether the strides of a and b are such that they form an
   effective contiguous 1d array.  Assumes that a.is >= b.is. */
static int strides_contig(iodim *a, iodim *b)
{
     return (a->is == b->is * b->n && a->os == b->os * b->n);
}

/* Like tensor_compress, but also compress into one dimension any
   group of dimensions that form a contiguous block of indices with
   some stride.  (This can safely be done for transform vector sizes.) */
tensor *X(tensor_compress_contiguous)(const tensor *sz)
{
     int i, rnk;
     tensor *sz2, *x;

     if (X(tensor_sz)(sz) == 0)
          return X(mktensor)(RNK_MINFTY);

     sz2 = really_compress(sz);
     A(FINITE_RNK(sz2->rnk));

     if (sz2->rnk <= 1) { /* nothing to compress. */
          if (0) {
               /* this call is redundant, because "sz->rnk <= 1" implies
                  that the tensor is already canonical, but I am writing
                  it explicitly because "logically" we need to canonicalize
                  the tensor before returning. */
               canonicalize(sz2);
          }
          return sz2;
     }

     /* sort in descending order of |istride|, so that compressible
        dimensions appear contiguously */
     qsort(sz2->dims, (size_t)sz2->rnk, sizeof(iodim),
           (int (*)(const void *, const void *))compare_by_istride);

     /* compute what the rank will be after compression */
     for (i = rnk = 1; i < sz2->rnk; ++i)
          if (!strides_contig(sz2->dims + i - 1, sz2->dims + i))
               ++rnk;

     /* merge adjacent dimensions whenever possible */
     x = X(mktensor)(rnk);
     x->dims[0] = sz2->dims[0];
     for (i = rnk = 1; i < sz2->rnk; ++i) {
          if (strides_contig(sz2->dims + i - 1, sz2->dims + i)) {
               x->dims[rnk - 1].n *= sz2->dims[i].n;
               x->dims[rnk - 1].is = sz2->dims[i].is;
               x->dims[rnk - 1].os = sz2->dims[i].os;
          } else {
               A(rnk < x->rnk);
               x->dims[rnk++] = sz2->dims[i];
          }
     }

     X(tensor_destroy)(sz2);

     /* reduce to canonical form */
     canonicalize(x);
     return x;
}
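/* Illustrative sketch of the merging above (hypothetical example_* name;
   guarded by #if 0 so it is never compiled): a row-major 3 x 4 array has
   dimensions (n=3, is=4, os=4) and (n=4, is=1, os=1).  Because
   4 == 1 * 4 for both strides, strides_contig() holds, the pair indexes
   one contiguous block of 12 elements, and
   X(tensor_compress_contiguous) merges it into the single dimension
   (n=12, is=1, os=1). */
#if 0
static void example_compress_contiguous(void)
{
     tensor *sz = X(mktensor)(2);
     tensor *c;

     sz->dims[0].n = 3; sz->dims[0].is = 4; sz->dims[0].os = 4;
     sz->dims[1].n = 4; sz->dims[1].is = 1; sz->dims[1].os = 1;

     c = X(tensor_compress_contiguous)(sz);
     A(c->rnk == 1 && c->dims[0].n == 12 && c->dims[0].is == 1);

     X(tensor_destroy)(sz);
     X(tensor_destroy)(c);
}
#endif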
/* The inverse of X(tensor_append): splits the sz tensor into
   tensor a followed by tensor b, where a's rank is arnk. */
void X(tensor_split)(const tensor *sz, tensor **a, int arnk, tensor **b)
{
     A(FINITE_RNK(sz->rnk) && FINITE_RNK(arnk));

     *a = X(tensor_copy_sub)(sz, 0, arnk);
     *b = X(tensor_copy_sub)(sz, arnk, sz->rnk - arnk);
}

/* TRUE if the two tensors are equal */
int X(tensor_equal)(const tensor *a, const tensor *b)
{
     if (a->rnk != b->rnk)
          return 0;

     if (FINITE_RNK(a->rnk)) {
          int i;
          for (i = 0; i < a->rnk; ++i)
               if (0
                   || a->dims[i].n != b->dims[i].n
                   || a->dims[i].is != b->dims[i].is
                   || a->dims[i].os != b->dims[i].os
                    )
                    return 0;
     }

     return 1;
}

/* TRUE if the sets of input and output locations described by
   (append sz vecsz) are the same */
int X(tensor_inplace_locations)(const tensor *sz, const tensor *vecsz)
{
     tensor *t = X(tensor_append)(sz, vecsz);
     tensor *ti = X(tensor_copy_inplace)(t, INPLACE_IS);
     tensor *to = X(tensor_copy_inplace)(t, INPLACE_OS);
     tensor *tic = X(tensor_compress_contiguous)(ti);
     tensor *toc = X(tensor_compress_contiguous)(to);

     int retval = X(tensor_equal)(tic, toc);

     X(tensor_destroy)(t);
     X(tensor_destroy4)(ti, to, tic, toc);

     return retval;
}
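/* Illustrative sketch of the locations test (hypothetical example_* name;
   guarded by #if 0 so it is never compiled; assumes X(mktensor)(0)
   yields a rank-0 vector tensor): an in-place 2 x 2 transpose reads and
   writes the same set of locations {0, 1, 2, 3} even though its input
   strides (2, 1) and output strides (1, 2) differ, so both sides
   compress to the single dimension (n=4, stride 1) and the test
   returns TRUE. */
#if 0
static void example_inplace_locations(void)
{
     tensor *sz = X(mktensor)(2);
     tensor *vecsz = X(mktensor)(0);

     sz->dims[0].n = 2; sz->dims[0].is = 2; sz->dims[0].os = 1;
     sz->dims[1].n = 2; sz->dims[1].is = 1; sz->dims[1].os = 2;

     A(X(tensor_inplace_locations)(sz, vecsz));

     X(tensor_destroy)(sz);
     X(tensor_destroy)(vecsz);
}
#endif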