annotate src/fftw-3.3.3/mpi/block.c @ 83:ae30d91d2ffe

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam
date Fri, 07 Feb 2020 11:51:13 +0000
parents 37bf6b4a2645
children
/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "ifftw-mpi.h"

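/* Number of blocks of size block needed to cover a dimension of size n
   (ceiling division).  For example (illustrative values), n = 10 with
   block = 3 gives XM(num_blocks)(10, 3) == 4: three full blocks plus a
   remainder block of size 1. */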
INT XM(num_blocks)(INT n, INT block)
{
     return (n + block - 1) / block;
}

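/* Check that the communicator has enough processes to hold all blocks.
   Continuing the illustrative example above, n = 10 with block = 3 needs
   4 blocks, so this returns nonzero only when comm has at least 4
   processes. */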
int XM(num_blocks_ok)(INT n, INT block, MPI_Comm comm)
{
     int n_pes;
     MPI_Comm_size(comm, &n_pes);
     return n_pes >= XM(num_blocks)(n, block);
}

/* Pick a default block size for dividing a problem of size n among
   n_pes processes.  Divide as equally as possible, while minimizing
   the maximum block size among the processes as well as the number of
   processes with nonzero blocks. */
INT XM(default_block)(INT n, int n_pes)
{
     return ((n + n_pes - 1) / n_pes);
}
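
/* Example (illustrative values): n = 10 on n_pes = 4 processes gives
   block = ceil(10/4) = 3, so XM(block) below assigns 3, 3, 3, 1 elements
   to processes 0..3.  A block of 2 would not fit in 4 processes, while a
   block of 4 would raise the maximum per-process block from 3 to 4. */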

/* For a given block size and dimension n, compute the block size
   on the given process. */
INT XM(block)(INT n, INT block, int which_block)
{
     INT d = n - which_block * block;
     return d <= 0 ? 0 : (d > block ? block : d);
}
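
/* Illustrative sketch (hypothetical caller, not used elsewhere in this
   file; the names example_local_range, local_n, and local_start are
   invented here): how an MPI rank might derive its share of a dimension
   of size n from the helpers above.  With n = 10 on 4 processes,
   XM(default_block) gives b = 3 and ranks 0..3 receive 3, 3, 3, 1
   elements starting at offsets 0, 3, 6, 9. */
#if 0
static void example_local_range(INT n, MPI_Comm comm,
                                INT *local_n, INT *local_start)
{
     int rank, n_pes;
     INT b;
     MPI_Comm_size(comm, &n_pes);
     MPI_Comm_rank(comm, &rank);
     b = XM(default_block)(n, n_pes);
     *local_n = XM(block)(n, b, rank);   /* 0 on idle ranks */
     *local_start = rank * b;            /* may exceed n on idle ranks */
}
#endif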

static INT num_blocks_kind(const ddim *dim, block_kind k)
{
     return XM(num_blocks)(dim->n, dim->b[k]);
}

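/* Total number of blocks (hence of non-idle processes) for distribution
   kind k: the product over all dimensions of the per-dimension block
   counts.  For example (illustrative values), a rank-2 tensor with
   n = {10, 6} and block sizes {3, 3} splits into a 4 x 2 grid of blocks,
   so XM(num_blocks_total) returns 8; a non-finite rank yields 0. */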
INT XM(num_blocks_total)(const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk)) {
          int i;
          INT ntot = 1;
          for (i = 0; i < sz->rnk; ++i)
               ntot *= num_blocks_kind(sz->dims + i, k);
          return ntot;
     }
     else
          return 0;
}

int XM(idle_process)(const dtensor *sz, block_kind k, int which_pe)
{
     return (which_pe >= XM(num_blocks_total)(sz, k));
}

/* Given a non-idle process which_pe, computes the coordinate
   vector coords[rnk] giving the coordinates of a block in the
   matrix of blocks.  k specifies whether we are talking about
   the input or output data distribution. */
void XM(block_coords)(const dtensor *sz, block_kind k, int which_pe,
                      INT *coords)
{
     int i;
     A(!XM(idle_process)(sz, k, which_pe) && FINITE_RNK(sz->rnk));
     for (i = sz->rnk - 1; i >= 0; --i) {
          INT nb = num_blocks_kind(sz->dims + i, k);
          coords[i] = which_pe % nb;
          which_pe /= nb;
     }
}
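
/* Example (illustrative values): for the 4 x 2 block grid above
   (n = {10, 6}, blocks {3, 3}), which_pe = 5 yields coords = {2, 1}:
   blocks are numbered in row-major order with the last dimension varying
   fastest, so coords[1] = 5 % 2 = 1 and coords[0] = (5 / 2) % 4 = 2. */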

INT XM(total_block)(const dtensor *sz, block_kind k, int which_pe)
{
     if (XM(idle_process)(sz, k, which_pe))
          return 0;
     else {
          int i;
          INT N = 1, *coords;
          STACK_MALLOC(INT*, coords, sizeof(INT) * sz->rnk);
          XM(block_coords)(sz, k, which_pe, coords);
          for (i = 0; i < sz->rnk; ++i)
               N *= XM(block)(sz->dims[i].n, sz->dims[i].b[k], coords[i]);
          STACK_FREE(coords);
          return N;
     }
}
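
/* Example (illustrative values): with the 4 x 2 grid above (n = {10, 6},
   blocks {3, 3}), process 5 has coords {2, 1} and so holds
   XM(block)(10, 3, 2) * XM(block)(6, 3, 1) = 3 * 3 = 9 elements;
   process 6 (coords {3, 0}) holds 1 * 3 = 3, and processes 8 and above
   are idle and hold 0. */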

/* Returns whether sz is local for all dims >= dim, i.e. whether every
   dimension from dim onwards fits into a single block for kind k. */
int XM(is_local_after)(int dim, const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk))
          for (; dim < sz->rnk; ++dim)
               if (XM(num_blocks)(sz->dims[dim].n, sz->dims[dim].b[k]) > 1)
                    return 0;
     return 1;
}

int XM(is_local)(const dtensor *sz, block_kind k)
{
     return XM(is_local_after)(0, sz, k);
}
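
/* Example (illustrative values): a rank-2 tensor with n = {10, 6} is
   local for kind k when each block size is at least the corresponding n
   (one block per dimension); with blocks {3, 3} it is not, since
   dimension 0 alone splits into 4 blocks. */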

/* Return whether sz is distributed for k according to a simple
   1d block distribution in the first or second dimensions */
int XM(is_block1d)(const dtensor *sz, block_kind k)
{
     int i;
     if (!FINITE_RNK(sz->rnk)) return 0;
     for (i = 0; i < sz->rnk && num_blocks_kind(sz->dims + i, k) == 1; ++i) ;
     return(i < sz->rnk && i < 2 && XM(is_local_after)(i + 1, sz, k));
}
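
/* Example (illustrative values): a rank-3 tensor whose per-dimension
   block counts for kind k are {1, 4, 1} is block1d (only dims[1] is
   split, and everything after it is local); {4, 2, 1} is not, because
   dims[1] is still split after the first distributed dimension; and
   {1, 1, 1} is not either, because nothing is distributed at all. */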