/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* FFTW-MPI internal header file */
#ifndef __IFFTW_MPI_H__
#define __IFFTW_MPI_H__

#include "ifftw.h"
#include "rdft.h"

#include <mpi.h>

/* mpi problem flags: problem-dependent meaning, but in general
   SCRAMBLED means some reordering *within* the dimensions, while
   TRANSPOSED means some reordering *of* the dimensions */
#define SCRAMBLED_IN (1 << 0)
#define SCRAMBLED_OUT (1 << 1)
#define TRANSPOSED_IN (1 << 2)
#define TRANSPOSED_OUT (1 << 3)
#define RANK1_BIGVEC_ONLY (1 << 4) /* for rank=1, allow only bigvec solver */

#define ONLY_SCRAMBLEDP(flags) (!((flags) & ~(SCRAMBLED_IN|SCRAMBLED_OUT)))
#define ONLY_TRANSPOSEDP(flags) (!((flags) & ~(TRANSPOSED_IN|TRANSPOSED_OUT)))

#if defined(FFTW_SINGLE)
#  define FFTW_MPI_TYPE MPI_FLOAT
#elif defined(FFTW_LDOUBLE)
#  define FFTW_MPI_TYPE MPI_LONG_DOUBLE
#elif defined(FFTW_QUAD)
#  error MPI quad-precision type is unknown
#else
#  define FFTW_MPI_TYPE MPI_DOUBLE
#endif

/* all fftw-mpi identifiers start with fftw_mpi (or fftwf_mpi etc.) */
#define XM(name) X(CONCAT(mpi_, name))

/***********************************************************************/
/* block distributions */

/* a distributed dimension of length n with input and output block
   sizes ib and ob, respectively. */
typedef enum { IB = 0, OB } block_kind;
typedef struct {
     INT n;
     INT b[2]; /* b[IB], b[OB] */
} ddim;

/* Loop over k in {IB, OB}.  Note: need explicit casts for C++. */
#define FORALL_BLOCK_KIND(k) for (k = IB; k <= OB; k = (block_kind) (((int) k) + 1))
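/* Illustrative sketch (not part of the original header): how a single
   distributed dimension is typically described.  Assuming the usual FFTW
   convention that the default block size is roughly ceil(n / n_pes) and
   that process p owns indices [p*b, min(n, (p+1)*b)), a dimension of
   length n = 107 spread over 4 processes with block b = 27 gives local
   chunks of 27, 27, 27, and 26 on processes 0..3:

       ddim d;
       d.n = 107;
       d.b[IB] = d.b[OB] = 27;   // same block size on input and output

   The per-dimension helpers declared below (XM(default_block), XM(block),
   XM(num_blocks), ...) are assumed to encode exactly this arithmetic. */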
/* unlike tensors in the serial FFTW, the ordering of the dtensor
   dimensions matters - both the array and the block layout are
   row-major order. */
typedef struct {
     int rnk;
#if defined(STRUCT_HACK_KR)
     ddim dims[1];
#elif defined(STRUCT_HACK_C99)
     ddim dims[];
#else
     ddim *dims;
#endif
} dtensor;


/* dtensor.c: */
dtensor *XM(mkdtensor)(int rnk);
void XM(dtensor_destroy)(dtensor *sz);
dtensor *XM(dtensor_copy)(const dtensor *sz);
dtensor *XM(dtensor_canonical)(const dtensor *sz, int compress);
int XM(dtensor_validp)(const dtensor *sz);
void XM(dtensor_md5)(md5 *p, const dtensor *t);
void XM(dtensor_print)(const dtensor *t, printer *p);

/* block.c: */

/* for a single distributed dimension: */
INT XM(num_blocks)(INT n, INT block);
int XM(num_blocks_ok)(INT n, INT block, MPI_Comm comm);
INT XM(default_block)(INT n, int n_pes);
INT XM(block)(INT n, INT block, int which_block);

/* for multiple distributed dimensions: */
INT XM(num_blocks_total)(const dtensor *sz, block_kind k);
int XM(idle_process)(const dtensor *sz, block_kind k, int which_pe);
void XM(block_coords)(const dtensor *sz, block_kind k, int which_pe,
                      INT *coords);
INT XM(total_block)(const dtensor *sz, block_kind k, int which_pe);
int XM(is_local_after)(int dim, const dtensor *sz, block_kind k);
int XM(is_local)(const dtensor *sz, block_kind k);
int XM(is_block1d)(const dtensor *sz, block_kind k);
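/* Illustrative sketch (hypothetical usage, not part of the original header):
   querying where the calling process falls in a multi-dimensional block
   distribution.  Assumes "comm" is whatever communicator the problem uses,
   and that block_coords fills in one block index per distributed dimension,
   in the same row-major order as sz->dims:

       int my_pe;
       INT coords[2];              // e.g. for a rank-2 dtensor sz
       MPI_Comm_rank(comm, &my_pe);
       if (!XM(idle_process)(sz, IB, my_pe))
            XM(block_coords)(sz, IB, my_pe, coords);
*/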
/* choose-radix.c */
INT XM(choose_radix)(ddim d, int n_pes, unsigned flags, int sign,
                     INT rblock[2], INT mblock[2]);

/***********************************************************************/
/* any_true.c */
int XM(any_true)(int condition, MPI_Comm comm);
int XM(md5_equal)(md5 m, MPI_Comm comm);

/* conf.c */
void XM(conf_standard)(planner *p);

/***********************************************************************/
/* rearrange.c */

/* Different ways to rearrange the vector dimension vn during transposition,
   reflecting different tradeoffs between ease of transposition and
   contiguity during the subsequent DFTs.

   TODO: can we pare this down to CONTIG and DISCONTIG, at least
   in MEASURE mode?  SQUARE_MIDDLE is also used for 1d destroy-input DFTs. */
typedef enum {
     CONTIG = 0,    /* vn x 1: make subsequent DFTs contiguous */
     DISCONTIG,     /* P x (vn/P) for P processes */
     SQUARE_BEFORE, /* try to get square transpose at beginning */
     SQUARE_MIDDLE, /* try to get square transpose in the middle */
     SQUARE_AFTER   /* try to get square transpose at end */
} rearrangement;

/* skipping SQUARE_AFTER since it doesn't seem to offer any advantage
   over SQUARE_BEFORE */
#define FORALL_REARRANGE(rearrange) for (rearrange = CONTIG; rearrange <= SQUARE_MIDDLE; rearrange = (rearrangement) (((int) rearrange) + 1))

int XM(rearrange_applicable)(rearrangement rearrange,
                             ddim dim0, INT vn, int n_pes);
INT XM(rearrange_ny)(rearrangement rearrange, ddim dim0, INT vn, int n_pes);

/***********************************************************************/

#endif /* __IFFTW_MPI_H__ */