/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "ifftw-mpi.h"

INT XM(num_blocks)(INT n, INT block)
{
     return (n + block - 1) / block;
}

int XM(num_blocks_ok)(INT n, INT block, MPI_Comm comm)
{
     int n_pes;
     MPI_Comm_size(comm, &n_pes);
     return n_pes >= XM(num_blocks)(n, block);
}

/* Pick a default block size for dividing a problem of size n among
   n_pes processes.  Divide as equally as possible, while minimizing
   the maximum block size among the processes as well as the number of
   processes with nonzero blocks. */
INT XM(default_block)(INT n, int n_pes)
{
     return ((n + n_pes - 1) / n_pes);
}

/* For a given block size and dimension n, compute the block size
   on the given process. */
INT XM(block)(INT n, INT block, int which_block)
{
     INT d = n - which_block * block;
     return d <= 0 ? 0 : (d > block ? block : d);
}

static INT num_blocks_kind(const ddim *dim, block_kind k)
{
     return XM(num_blocks)(dim->n, dim->b[k]);
}

INT XM(num_blocks_total)(const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk)) {
          int i;
          INT ntot = 1;
          for (i = 0; i < sz->rnk; ++i)
               ntot *= num_blocks_kind(sz->dims + i, k);
          return ntot;
     }
     else
          return 0;
}

int XM(idle_process)(const dtensor *sz, block_kind k, int which_pe)
{
     return (which_pe >= XM(num_blocks_total)(sz, k));
}

/* Given a non-idle process which_pe, computes the coordinate
   vector coords[rnk] giving the coordinates of a block in the
   matrix of blocks.  k specifies whether we are talking about
   the input or output data distribution. */
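/* Illustrative example (not in the original source): for a rank-2
   dtensor with n = {4, 6} and blocks b[k] = {2, 3}, there are
   2 * 2 = 4 blocks; which_pe = 3 yields coords = {1, 1}, i.e. the
   block covering rows 2..3 and columns 3..5 (0-based). */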
void XM(block_coords)(const dtensor *sz, block_kind k, int which_pe,
                      INT *coords)
{
     int i;
     A(!XM(idle_process)(sz, k, which_pe) && FINITE_RNK(sz->rnk));
     for (i = sz->rnk - 1; i >= 0; --i) {
          INT nb = num_blocks_kind(sz->dims + i, k);
          coords[i] = which_pe % nb;
          which_pe /= nb;
     }
}

INT XM(total_block)(const dtensor *sz, block_kind k, int which_pe)
{
     if (XM(idle_process)(sz, k, which_pe))
          return 0;
     else {
          int i;
          INT N = 1, *coords;
          STACK_MALLOC(INT*, coords, sizeof(INT) * sz->rnk);
          XM(block_coords)(sz, k, which_pe, coords);
          for (i = 0; i < sz->rnk; ++i)
               N *= XM(block)(sz->dims[i].n, sz->dims[i].b[k], coords[i]);
          STACK_FREE(coords);
          return N;
     }
}

/* returns whether sz is local for dims >= dim */
int XM(is_local_after)(int dim, const dtensor *sz, block_kind k)
{
     if (FINITE_RNK(sz->rnk))
          for (; dim < sz->rnk; ++dim)
               if (XM(num_blocks)(sz->dims[dim].n, sz->dims[dim].b[k]) > 1)
                    return 0;
     return 1;
}

int XM(is_local)(const dtensor *sz, block_kind k)
{
     return XM(is_local_after)(0, sz, k);
}

/* Return whether sz is distributed for k according to a simple
   1d block distribution in the first or second dimensions */
int XM(is_block1d)(const dtensor *sz, block_kind k)
{
     int i;
     if (!FINITE_RNK(sz->rnk)) return 0;
     for (i = 0; i < sz->rnk && num_blocks_kind(sz->dims + i, k) == 1; ++i) ;
     return (i < sz->rnk && i < 2 && XM(is_local_after)(i + 1, sz, k));
}
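/* Hypothetical usage sketch (not part of the original source), kept
   compiled out: it shows how the scalar helpers above combine to give
   this process's local block length for a 1d distribution of length n0.
   For n0 = 10 on 4 processes, XM(default_block) gives b = 3 and the
   local lengths on ranks 0..3 are 3, 3, 3, 1, with XM(num_blocks)
   returning 4 (no idle process). */
#if 0
static INT sketch_local_size_1d(INT n0, MPI_Comm comm)
{
     INT b;
     int my_pe, n_pes;
     MPI_Comm_size(comm, &n_pes);
     MPI_Comm_rank(comm, &my_pe);
     b = XM(default_block)(n0, n_pes);
     return XM(block)(n0, b, my_pe);
}
#endif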