annotate src/fftw-3.3.8/mpi/block.c @ 82:d0c2a83c1364

Add FFTW 3.3.8 source, and a Linux build
author Chris Cannam
date Tue, 19 Nov 2019 14:52:55 +0000
/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "ifftw-mpi.h"

/* Number of blocks of size `block' needed to cover a dimension of
   length n (i.e. ceil(n / block)). */
INT XM(num_blocks)(INT n, INT block)
{
     return (n + block - 1) / block;
}

/* Whether the communicator has at least as many processes as blocks,
   so that every block can be assigned to a distinct process. */
int XM(num_blocks_ok)(INT n, INT block, MPI_Comm comm)
{
     int n_pes;
     MPI_Comm_size(comm, &n_pes);
     return n_pes >= XM(num_blocks)(n, block);
}

/* Pick a default block size for dividing a problem of size n among
   n_pes processes.  Divide as equally as possible, while minimizing
   the maximum block size among the processes as well as the number of
   processes with nonzero blocks. */
INT XM(default_block)(INT n, int n_pes)
{
     return ((n + n_pes - 1) / n_pes);
}
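
/* For example (illustrative values only): with n = 10 and n_pes = 3 this
   returns ceil(10/3) = 4, so the processes get blocks of 4, 4, and 2
   elements; with n = 10 and n_pes = 4 it returns 3, giving blocks of
   3, 3, 3, and 1. */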

/* For a given block size and dimension n, compute the block size
   on the given process. */
INT XM(block)(INT n, INT block, int which_block)
{
     INT d = n - which_block * block;
     return d <= 0 ? 0 : (d > block ? block : d);
}
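
/* For example (illustrative values only): with n = 10 and block = 4,
   which_block = 0 and 1 get full blocks of 4 elements, which_block = 2
   gets the remaining 2 elements, and which_block >= 3 gets 0. */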

/* Number of blocks along a single dimension, for distribution kind k
   (input or output). */
static INT num_blocks_kind(const ddim *dim, block_kind k)
{
     return XM(num_blocks)(dim->n, dim->b[k]);
}

/* Total number of blocks over all dimensions, i.e. the number of
   processes that hold a nonzero portion of the data. */
INT XM(num_blocks_total)(const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk)) {
          int i;
          INT ntot = 1;
          for (i = 0; i < sz->rnk; ++i)
               ntot *= num_blocks_kind(sz->dims + i, k);
          return ntot;
     }
     else
          return 0;
}

/* Whether process which_pe holds no data at all under this distribution
   (its index is past the last block). */
int XM(idle_process)(const dtensor *sz, block_kind k, int which_pe)
{
     return (which_pe >= XM(num_blocks_total)(sz, k));
}

/* Given a non-idle process which_pe, computes the coordinate
   vector coords[rnk] giving the coordinates of a block in the
   matrix of blocks.  k specifies whether we are talking about
   the input or output data distribution. */
void XM(block_coords)(const dtensor *sz, block_kind k, int which_pe,
                      INT *coords)
{
     int i;
     A(!XM(idle_process)(sz, k, which_pe) && FINITE_RNK(sz->rnk));
     for (i = sz->rnk - 1; i >= 0; --i) {
          INT nb = num_blocks_kind(sz->dims + i, k);
          coords[i] = which_pe % nb;
          which_pe /= nb;
     }
}
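
/* For example (illustrative values only): for a rank-2 dtensor whose first
   dimension is split into 3 blocks and whose second is split into 2, the 6
   non-idle processes are numbered in row-major order, so which_pe = 5 yields
   coords = {2, 1}, i.e. the last block in each dimension. */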

/* Total number of data elements stored on process which_pe: the product,
   over all dimensions, of that process's block size. */
INT XM(total_block)(const dtensor *sz, block_kind k, int which_pe)
{
     if (XM(idle_process)(sz, k, which_pe))
          return 0;
     else {
          int i;
          INT N = 1, *coords;
          STACK_MALLOC(INT*, coords, sizeof(INT) * sz->rnk);
          XM(block_coords)(sz, k, which_pe, coords);
          for (i = 0; i < sz->rnk; ++i)
               N *= XM(block)(sz->dims[i].n, sz->dims[i].b[k], coords[i]);
          STACK_FREE(coords);
          return N;
     }
}
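
/* For example (illustrative values only): with dims {n = 5, block = 2} and
   {n = 4, block = 2}, process 5 has block coordinates {2, 1} and therefore
   holds XM(block)(5, 2, 2) * XM(block)(4, 2, 1) = 1 * 2 = 2 elements. */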

/* returns whether sz is local for dims >= dim */
int XM(is_local_after)(int dim, const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk))
          for (; dim < sz->rnk; ++dim)
               if (XM(num_blocks)(sz->dims[dim].n, sz->dims[dim].b[k]) > 1)
                    return 0;
     return 1;
}
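
/* For example (illustrative values only): a dimension is "local" when it
   fits in a single block, so a rank-2 dtensor with dims {n = 8, block = 8}
   and {n = 4, block = 4} is local after dimension 0 (and local overall). */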

/* Whether sz is entirely local, i.e. no dimension is split across
   processes. */
int XM(is_local)(const dtensor *sz, block_kind k)
{
     return XM(is_local_after)(0, sz, k);
}

/* Return whether sz is distributed for k according to a simple
   1d block distribution in the first or second dimensions */
int XM(is_block1d)(const dtensor *sz, block_kind k)
{
     int i;
     if (!FINITE_RNK(sz->rnk)) return 0;
     for (i = 0; i < sz->rnk && num_blocks_kind(sz->dims + i, k) == 1; ++i) ;
     return(i < sz->rnk && i < 2 && XM(is_local_after)(i + 1, sz, k));
}
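
/* For example (illustrative values only): a rank-3 dtensor whose first
   dimension is split into 4 blocks while the second and third each fit in
   a single block is a 1d block distribution (the loop above stops at i = 0);
   if instead only the second dimension were split, the loop would stop at
   i = 1 and the result would still be 1. */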