/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* Distributed transposes using a sequence of carefully scheduled
   pairwise exchanges.  This has the advantage that it can be done
   in-place, or out-of-place while preserving the input, using buffer
   space proportional to the local size divided by the number of
   processes (i.e. to the total array size divided by the number of
   processes squared). */

#include "mpi-transpose.h"
#include <string.h>

typedef struct {
     solver super;
     int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
} S;

typedef struct {
     plan_mpi_transpose super;

     plan *cld1, *cld2, *cld2rest, *cld3;
     INT rest_Ioff, rest_Ooff;

     int n_pes, my_pe, *sched;
     INT *send_block_sizes, *send_block_offsets;
     INT *recv_block_sizes, *recv_block_offsets;
     MPI_Comm comm;
     int preserve_input;
} P;

static void transpose_chunks(int *sched, int n_pes, int my_pe,
                             INT *sbs, INT *sbo, INT *rbs, INT *rbo,
                             MPI_Comm comm,
                             R *I, R *O)
{
     if (sched) {
          int i;
          MPI_Status status;

          /* TODO: explore non-synchronous send/recv? */
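
          /* In-place case below: each pairwise step first copies the
             outgoing chunk into a bounce buffer and then exchanges it
             with the partner via MPI_Sendrecv, so that the incoming
             chunk can overwrite the same region of O.  The buffer is
             sized by sbs[0], the send-block size for process 0, which
             should be at least as large as any other send block. */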

          if (I == O) {
               R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);

               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe) {
                         if (rbo[pe] != sbo[pe])
                              memmove(O + rbo[pe], O + sbo[pe],
                                      sbs[pe] * sizeof(R));
                    }
                    else {
                         memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
                         MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
                    }
               }

               X(ifree)(buf);
          }
          else { /* I != O */
               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe)
                         memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
                    else
                         MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
               }
          }
     }
}

static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
          cld1->apply(ego->cld1, I, O);

          if (ego->preserve_input) I = O;

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, O, I);
     }
     else if (ego->preserve_input) {
          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, O);

          I = O;
     }
     else {
          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, I);
     }

     /* transpose locally, again, to get ordinary row-major;
        this may take two transposes if the block sizes are unequal
        (3 subplans, two of which operate on disjoint data) */
     cld2 = (plan_rdft *) ego->cld2;
     cld2->apply(ego->cld2, I, O);
     cld2rest = (plan_rdft *) ego->cld2rest;
     if (cld2rest) {
          cld2rest->apply(ego->cld2rest,
                          I + ego->rest_Ioff, O + ego->rest_Ooff);
          cld3 = (plan_rdft *) ego->cld3;
          if (cld3)
               cld3->apply(ego->cld3, O, O);
          /* else TRANSPOSED_OUT is true and user wants O transposed */
     }
}

static int applicable(const S *ego, const problem *p_,
                      const planner *plnr)
{
     const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
     /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
        the planner often prefers transpose-pairwise to transpose-alltoall,
        at least with LAM MPI on my machine. */
     return (1
             && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
                                          && p->I != p->O))
             && ONLY_TRANSPOSEDP(p->flags));
}

static void awake(plan *ego_, enum wakefulness wakefulness)
{
     P *ego = (P *) ego_;
     X(plan_awake)(ego->cld1, wakefulness);
     X(plan_awake)(ego->cld2, wakefulness);
     X(plan_awake)(ego->cld2rest, wakefulness);
     X(plan_awake)(ego->cld3, wakefulness);
}

static void destroy(plan *ego_)
{
     P *ego = (P *) ego_;
     X(ifree0)(ego->sched);
     X(ifree0)(ego->send_block_sizes);
     MPI_Comm_free(&ego->comm);
     X(plan_destroy_internal)(ego->cld3);
     X(plan_destroy_internal)(ego->cld2rest);
     X(plan_destroy_internal)(ego->cld2);
     X(plan_destroy_internal)(ego->cld1);
}

static void print(const plan *ego_, printer *p)
{
     const P *ego = (const P *) ego_;
     p->print(p, "(mpi-transpose-pairwise%s%(%p%)%(%p%)%(%p%)%(%p%))",
              ego->preserve_input==2 ?"/p":"",
              ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
}

/* Given a process which_pe and a number of processes npes, fills
   the array sched[npes] with a sequence of processes to communicate
   with for a deadlock-free, optimum-overlap all-to-all communication.
   (All processes must call this routine to get their own schedules.)
   The schedule can be re-ordered arbitrarily as long as all processes
   apply the same permutation to their schedules.

   The algorithm here is based upon the one described in:
       J. A. M. Schreuder, "Constructing timetables for sport
       competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
   In a sport competition, you have N teams and want every team to
   play every other team in as short a time as possible (maximum overlap
   between games).  This timetabling problem is therefore identical
   to that of an all-to-all communications problem.  In our case, there
   is one wrinkle: as part of the schedule, the process must do
   some data transfer with itself (local data movement), analogous
   to a requirement that each team "play itself" in addition to other
   teams.  With this wrinkle, it turns out that an optimal timetable
   (N parallel games) can be constructed for any N, not just for even
   N as in the original problem described by Schreuder.
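
   For example, with npes = 4 this routine produces the schedules

        pe 0:  0 3 2 1
        pe 1:  1 2 3 0
        pe 2:  2 1 0 3
        pe 3:  3 0 1 2

   so that step 0 is purely local data movement on every process, and
   each later step pairs the processes off into disjoint exchanges.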
*/
static void fill1_comm_sched(int *sched, int which_pe, int npes)
{
     int pe, i, n, s = 0;
     A(which_pe >= 0 && which_pe < npes);
     if (npes % 2 == 0) {
          n = npes;
          sched[s++] = which_pe;
     }
     else
          n = npes + 1;
     for (pe = 0; pe < n - 1; ++pe) {
          if (npes % 2 == 0) {
               if (pe == which_pe) sched[s++] = npes - 1;
               else if (npes - 1 == which_pe) sched[s++] = pe;
          }
          else if (pe == which_pe) sched[s++] = pe;

          if (pe != which_pe && which_pe < n - 1) {
               i = (pe - which_pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe + i) % (n - 1);

               i = (which_pe - pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe - i + (n - 1)) % (n - 1);
          }
     }
     A(s == npes);
}

/* Sort the communication schedule sched for npes so that the schedule
   on process sortpe is ascending or descending (!ascending).  This is
   necessary to allow in-place transposes when the problem does not
   divide equally among the processes.  In this case there is one
   process where the incoming blocks are bigger/smaller than the
   outgoing blocks and thus have to be received in
   descending/ascending order, respectively, to avoid overwriting data
   before it is sent. */
static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
{
     int *sortsched, i;
     sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
     fill1_comm_sched(sortsched, sortpe, npes);
     if (ascending)
          for (i = 0; i < npes; ++i)
               sortsched[npes + sortsched[i]] = sched[i];
     else
          for (i = 0; i < npes; ++i)
               sortsched[2*npes - 1 - sortsched[i]] = sched[i];
     for (i = 0; i < npes; ++i)
          sched[i] = sortsched[npes + i];
     X(ifree)(sortsched);
}

/* make the plans to do the post-MPI transpositions (shared with
   transpose-alltoall) */
int XM(mkplans_posttranspose)(const problem_mpi_transpose *p, planner *plnr,
                              R *I, R *O, int my_pe,
                              plan **cld2, plan **cld2rest, plan **cld3,
                              INT *rest_Ioff, INT *rest_Ooff)
{
     INT vn = p->vn;
     INT b = p->block;
     INT bt = XM(block)(p->ny, p->tblock, my_pe);
     INT nxb = p->nx / b; /* number of equal-sized blocks */
     INT nxr = p->nx - nxb * b; /* leftover rows after equal blocks */

     *cld2 = *cld2rest = *cld3 = NULL;
     *rest_Ioff = *rest_Ooff = 0;

     if (!(p->flags & TRANSPOSED_OUT) && (nxr == 0 || I != O)) {
          INT nx = p->nx * vn;
          b *= vn;
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                      (nxb, bt * b, b,
                                                       bt, b, nx,
                                                       b, 1, 1),
                                                      I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          if (nxr > 0) {
               *rest_Ioff = nxb * bt * b;
               *rest_Ooff = nxb * b;
               b = nxr * vn;
               *cld2rest = X(mkplan_f_d)(plnr,
                                         X(mkproblem_rdft_0_d)(X(mktensor_2d)
                                                               (bt, b, nx,
                                                                b, 1, 1),
                                                               I + *rest_Ioff,
                                                               O + *rest_Ooff),
                                         0, 0, NO_SLOW);
               if (!*cld2rest) goto nada;
          }
     }
     else {
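          /* TRANSPOSED_OUT, or an in-place transpose with a leftover
             (smaller) block: transpose the equal-sized blocks and the
             remainder separately (cld2, cld2rest); if non-transposed
             output was requested, cld3 then fixes up the ordering. */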
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(
                                     X(mktensor_4d)
                                     (nxb, bt * b * vn, bt * b * vn,
                                      bt, b * vn, vn,
                                      b, vn, bt * vn,
                                      vn, 1, 1),
                                     I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          *rest_Ioff = *rest_Ooff = nxb * bt * b * vn;
          *cld2rest = X(mkplan_f_d)(plnr,
                                    X(mkproblem_rdft_0_d)(
                                         X(mktensor_3d)
                                         (bt, nxr * vn, vn,
                                          nxr, vn, bt * vn,
                                          vn, 1, 1),
                                         I + *rest_Ioff, O + *rest_Ooff),
                                    0, 0, NO_SLOW);
          if (!*cld2rest) goto nada;

          if (!(p->flags & TRANSPOSED_OUT)) {
               *cld3 = X(mkplan_f_d)(plnr,
                                     X(mkproblem_rdft_0_d)(
                                          X(mktensor_3d)
                                          (p->nx, bt * vn, vn,
                                           bt, vn, p->nx * vn,
                                           vn, 1, 1),
                                          O, O),
                                     0, 0, NO_SLOW);
               if (!*cld3) goto nada;
          }
     }

     return 1;

 nada:
     X(plan_destroy_internal)(*cld3);
     X(plan_destroy_internal)(*cld2rest);
     X(plan_destroy_internal)(*cld2);
     return 0;
}

static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_mpi_transpose *p;
     P *pln;
     plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
     INT b, bt, vn, rest_Ioff, rest_Ooff;
     INT *sbs, *sbo, *rbs, *rbo;
     int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
     R *I, *O;
     static const plan_adt padt = {
          XM(transpose_solve), awake, print, destroy
     };

     UNUSED(ego);

     if (!applicable(ego, p_, plnr))
          return (plan *) 0;

     p = (const problem_mpi_transpose *) p_;
     vn = p->vn;
     I = p->I; O = p->O;

     MPI_Comm_rank(p->comm, &my_pe);
     MPI_Comm_size(p->comm, &n_pes);

     b = XM(block)(p->nx, p->block, my_pe);

     if (!(p->flags & TRANSPOSED_IN)) { /* b x ny x vn -> ny x b x vn */
          cld1 = X(mkplan_f_d)(plnr,
                               X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                     (b, p->ny * vn, vn,
                                                      p->ny, vn, b * vn,
                                                      vn, 1, 1),
                                                     I, O),
                               0, 0, NO_SLOW);
          if (XM(any_true)(!cld1, p->comm)) goto nada;
     }
     if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = O;

     if (XM(any_true)(!XM(mkplans_posttranspose)(p, plnr, I, O, my_pe,
                                                  &cld2, &cld2rest, &cld3,
                                                  &rest_Ioff, &rest_Ooff),
                      p->comm)) goto nada;

     pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);

     pln->cld1 = cld1;
     pln->cld2 = cld2;
     pln->cld2rest = cld2rest;
     pln->rest_Ioff = rest_Ioff;
     pln->rest_Ooff = rest_Ooff;
     pln->cld3 = cld3;
     pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);

     MPI_Comm_dup(p->comm, &pln->comm);

     n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
                           XM(num_blocks)(p->ny, p->tblock));

     /* Compute sizes/offsets of blocks to exchange between processors */
     sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
     sbo = sbs + n_pes;
     rbs = sbo + n_pes;
     rbo = rbs + n_pes;
     b = XM(block)(p->nx, p->block, my_pe);
     bt = XM(block)(p->ny, p->tblock, my_pe);
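
     /* In chunked form, the local data consists of n_pes contiguous send
        chunks spaced b * tblock * vn reals apart; chunk pe holds the
        b x dbt x vn sub-block destined for process pe.  Receive chunks
        are laid out analogously with a spacing of block * bt * vn.  When
        the dimensions do not divide evenly, at most one destination
        block has a mismatched shape, and that process's schedule must be
        sorted (see sort1_comm_sched). */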
     for (pe = 0; pe < n_pes; ++pe) {
          INT db, dbt; /* destination block sizes */
          db = XM(block)(p->nx, p->block, pe);
          dbt = XM(block)(p->ny, p->tblock, pe);

          sbs[pe] = b * dbt * vn;
          sbo[pe] = pe * (b * p->tblock) * vn;
          rbs[pe] = db * bt * vn;
          rbo[pe] = pe * (p->block * bt) * vn;

          if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
               A(sort_pe == -1); /* only one process should need sorting */
               sort_pe = pe;
               ascending = db * p->tblock > p->block * dbt;
          }
     }
     pln->n_pes = n_pes;
     pln->my_pe = my_pe;
     pln->send_block_sizes = sbs;
     pln->send_block_offsets = sbo;
     pln->recv_block_sizes = rbs;
     pln->recv_block_offsets = rbo;

     if (my_pe >= n_pes) {
          pln->sched = 0; /* this process is not doing anything */
     }
     else {
          pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
          fill1_comm_sched(pln->sched, my_pe, n_pes);
          if (sort_pe >= 0)
               sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
     }

     X(ops_zero)(&pln->super.super.ops);
     if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
     if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
     if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
     if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
     /* FIXME: should MPI operations be counted in "other" somehow? */

     return &(pln->super.super);

 nada:
     X(plan_destroy_internal)(cld3);
     X(plan_destroy_internal)(cld2rest);
     X(plan_destroy_internal)(cld2);
     X(plan_destroy_internal)(cld1);
     return (plan *) 0;
}

static solver *mksolver(int preserve_input)
{
     static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
     S *slv = MKSOLVER(S, &sadt);
     slv->preserve_input = preserve_input;
     return &(slv->super);
}

void XM(transpose_pairwise_register)(planner *p)
{
     int preserve_input;
     for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
          REGISTER_SOLVER(p, mksolver(preserve_input));
}