annotate src/fftw-3.3.3/mpi/transpose-pairwise.c @ 169:223a55898ab9 tip default

Add null config files
author Chris Cannam <cannam@all-day-breakfast.com>
date Mon, 02 Mar 2020 14:03:47 +0000
parents 89f5e221ed7b
children
rev   line source
cannam@95 1 /*
cannam@95 2 * Copyright (c) 2003, 2007-11 Matteo Frigo
cannam@95 3 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
cannam@95 4 *
cannam@95 5 * This program is free software; you can redistribute it and/or modify
cannam@95 6 * it under the terms of the GNU General Public License as published by
cannam@95 7 * the Free Software Foundation; either version 2 of the License, or
cannam@95 8 * (at your option) any later version.
cannam@95 9 *
cannam@95 10 * This program is distributed in the hope that it will be useful,
cannam@95 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
cannam@95 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
cannam@95 13 * GNU General Public License for more details.
cannam@95 14 *
cannam@95 15 * You should have received a copy of the GNU General Public License
cannam@95 16 * along with this program; if not, write to the Free Software
cannam@95 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
cannam@95 18 *
cannam@95 19 */
cannam@95 20
cannam@95 21 /* Distributed transposes using a sequence of carefully scheduled
cannam@95 22 pairwise exchanges. This has the advantage that it can be done
cannam@95 23 in-place, or out-of-place while preserving the input, using buffer
cannam@95 24 space proportional to the local size divided by the number of
cannam@95 25 processes (i.e. to the total array size divided by the number of
cannam@95 26 processes squared). */
cannam@95 27
cannam@95 28 #include "mpi-transpose.h"
cannam@95 29 #include <string.h>
cannam@95 30
/* Solver record: one instance per variant.  preserve_input selects the
   variant that keeps the input array intact even when the planner
   would otherwise permit destroying it. */
typedef struct {
     solver super;
     int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
} S;
cannam@95 35
/* Plan record for the pairwise-exchange distributed transpose. */
typedef struct {
     plan_mpi_transpose super;

     /* Subplans: cld1 = pre-exchange local transpose (NULL when input
        is already TRANSPOSED_IN); cld2 + cld2rest = post-exchange local
        transposes (cld2rest handles leftover rows when the problem does
        not divide evenly; may be NULL); cld3 = extra transpose needed in
        the unequal-block case unless TRANSPOSED_OUT (may be NULL). */
     plan *cld1, *cld2, *cld2rest, *cld3;
     INT rest_Ioff, rest_Ooff; /* array offsets at which cld2rest operates */

     /* sched is the pairwise communication schedule (NULL if this
        process takes no part in the exchange). */
     int n_pes, my_pe, *sched;
     /* per-peer block sizes/offsets, in units of R; all four arrays
        live in a single allocation owned by send_block_sizes */
     INT *send_block_sizes, *send_block_offsets;
     INT *recv_block_sizes, *recv_block_offsets;
     MPI_Comm comm; /* private dup of the problem's communicator */
     int preserve_input; /* 2 if solver forced it, else NO_DESTROY_INPUTP
                            value at planning time */
} P;
cannam@95 48
/* Global (inter-process) step of the transpose: exchange one contiguous
   block with every peer, in the order given by the deadlock-free
   schedule sched[0..n_pes-1].  sbs/sbo (rbs/rbo) give the size and
   offset, in units of R, of the block sent to (received from) each
   peer.  sched == NULL means this process does not participate.
   Works in-place (I == O) via a bounce buffer, or out-of-place
   (I != O) by sending directly from I. */
static void transpose_chunks(int *sched, int n_pes, int my_pe,
                             INT *sbs, INT *sbo, INT *rbs, INT *rbo,
                             MPI_Comm comm,
                             R *I, R *O)
{
     if (sched) {
          int i;
          MPI_Status status;

          /* TODO: explore non-synchronous send/recv? */

          if (I == O) {
               /* In-place: copy each outgoing block into buf first so the
                  incoming block can be received straight into its final
                  position.  buf is sized by sbs[0], the block exchanged
                  with process 0 — presumably the largest send block
                  (process 0 always holds a full-size block); TODO confirm
                  against the block-size setup in mkplan. */
               R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);

               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe) {
                         /* self-"exchange": just slide the local block;
                            memmove because source/dest may overlap */
                         if (rbo[pe] != sbo[pe])
                              memmove(O + rbo[pe], O + sbo[pe],
                                      sbs[pe] * sizeof(R));
                    }
                    else {
                         memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
                         /* tags are masked to 16 bits, presumably to stay
                            within MPI_TAG_UB on all implementations */
                         MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
                    }
               }

               X(ifree)(buf);
          }
          else { /* I != O: no bounce buffer needed, send from I */
               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe)
                         memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
                    else
                         MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
               }
          }
     }
}
cannam@95 100
/* Execute the transpose: optional local pre-transpose (cld1), then the
   global pairwise exchange, then local post-transposes (cld2, and
   cld2rest/cld3 for the unequal-block case).  Note the careful choice
   of which array the exchange reads/writes so that, when the plan must
   preserve its input, the original I is never overwritten. */
static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
          cld1->apply(ego->cld1, I, O);

          /* from here on, work only in O if input must survive */
          if (ego->preserve_input) I = O;

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, O, I);
     }
     else if (ego->preserve_input) {
          /* no local pre-transpose: exchange reads I, writes O,
             leaving I intact */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, O);

          I = O;
     }
     else {
          /* input may be destroyed: exchange in-place in I */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, I);
     }

     /* transpose locally, again, to get ordinary row-major;
        this may take two transposes if the block sizes are unequal
        (3 subplans, two of which operate on disjoint data) */
     cld2 = (plan_rdft *) ego->cld2;
     cld2->apply(ego->cld2, I, O);
     cld2rest = (plan_rdft *) ego->cld2rest;
     if (cld2rest) {
          cld2rest->apply(ego->cld2rest,
                          I + ego->rest_Ioff, O + ego->rest_Ooff);
          cld3 = (plan_rdft *) ego->cld3;
          if (cld3)
               cld3->apply(ego->cld3, O, O);
          /* else TRANSPOSED_OUT is true and user wants O transposed */
     }
}
cannam@95 151
/* Can this solver handle problem p_?  The preserve_input variant is
   usable only out-of-place (I != O) and only when the planner permits
   destroying the input copy; in all cases the problem may carry no
   flags beyond the transposed-in/out bits (ONLY_TRANSPOSEDP). */
static int applicable(const S *ego, const problem *p_,
                      const planner *plnr)
{
     const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
     /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
        the planner often prefers transpose-pairwise to transpose-alltoall,
        at least with LAM MPI on my machine. */
     return (1
             && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
                                          && p->I != p->O))
             && ONLY_TRANSPOSEDP(p->flags));
}
cannam@95 164
cannam@95 165 static void awake(plan *ego_, enum wakefulness wakefulness)
cannam@95 166 {
cannam@95 167 P *ego = (P *) ego_;
cannam@95 168 X(plan_awake)(ego->cld1, wakefulness);
cannam@95 169 X(plan_awake)(ego->cld2, wakefulness);
cannam@95 170 X(plan_awake)(ego->cld2rest, wakefulness);
cannam@95 171 X(plan_awake)(ego->cld3, wakefulness);
cannam@95 172 }
cannam@95 173
/* Release all plan resources.  Note: send_block_offsets and the two
   recv arrays live inside the single 4*n_pes allocation owned by
   send_block_sizes (see mkplan), so freeing send_block_sizes frees all
   four.  ifree0 and plan_destroy_internal are used on possibly-NULL
   pointers (sched is NULL for idle processes; cld1/cld3 may be NULL). */
static void destroy(plan *ego_)
{
     P *ego = (P *) ego_;
     X(ifree0)(ego->sched);
     X(ifree0)(ego->send_block_sizes);
     MPI_Comm_free(&ego->comm);
     /* destroy children in reverse order of creation */
     X(plan_destroy_internal)(ego->cld3);
     X(plan_destroy_internal)(ego->cld2rest);
     X(plan_destroy_internal)(ego->cld2);
     X(plan_destroy_internal)(ego->cld1);
}
cannam@95 185
/* Emit the plan's printable representation; "/p" marks the variant in
   which the solver itself demanded input preservation
   (preserve_input == 2), followed by the four subplans. */
static void print(const plan *ego_, printer *p)
{
     const P *ego = (const P *) ego_;
     p->print(p, "(mpi-transpose-pairwise%s%(%p%)%(%p%)%(%p%)%(%p%))",
              ego->preserve_input==2 ?"/p":"",
              ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
}
cannam@95 193
cannam@95 194 /* Given a process which_pe and a number of processes npes, fills
cannam@95 195 the array sched[npes] with a sequence of processes to communicate
cannam@95 196 with for a deadlock-free, optimum-overlap all-to-all communication.
cannam@95 197 (All processes must call this routine to get their own schedules.)
cannam@95 198 The schedule can be re-ordered arbitrarily as long as all processes
cannam@95 199 apply the same permutation to their schedules.
cannam@95 200
cannam@95 201 The algorithm here is based upon the one described in:
cannam@95 202 J. A. M. Schreuder, "Constructing timetables for sport
cannam@95 203 competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
cannam@95 204 In a sport competition, you have N teams and want every team to
cannam@95 205 play every other team in as short a time as possible (maximum overlap
cannam@95 206 between games). This timetabling problem is therefore identical
cannam@95 207 to that of an all-to-all communications problem. In our case, there
cannam@95 208 is one wrinkle: as part of the schedule, the process must do
cannam@95 209 some data transfer with itself (local data movement), analogous
cannam@95 210 to a requirement that each team "play itself" in addition to other
cannam@95 211 teams. With this wrinkle, it turns out that an optimal timetable
cannam@95 212 (N parallel games) can be constructed for any N, not just for even
cannam@95 213 N as in the original problem described by Schreuder.
cannam@95 214 */
/* Fill sched[0..npes-1] with this process's peer order (see the long
   comment above).  This is the classic "circle method" round-robin
   tournament construction: conceptually n-1 rounds are played with n
   teams (n = npes rounded up to even); in each round pe, teams pair up
   so that everyone plays exactly once.  For each round we emit at most
   one peer into sched, so the rounds double as time slots and the
   resulting schedules are mutually deadlock-free. */
static void fill1_comm_sched(int *sched, int which_pe, int npes)
{
     int pe, i, n, s = 0;
     A(which_pe >= 0 && which_pe < npes);
     if (npes % 2 == 0) {
          /* even npes: the self-exchange gets its own extra slot
             up front */
          n = npes;
          sched[s++] = which_pe;
     }
     else
          /* odd npes: pad with a phantom team; the round in which we
             would "play" the phantom becomes our self-exchange slot */
          n = npes + 1;
     for (pe = 0; pe < n - 1; ++pe) {
          if (npes % 2 == 0) {
               /* even case: the fixed team npes-1 plays the round's
                  pivot pe; everyone else pairs by the rotation below */
               if (pe == which_pe) sched[s++] = npes - 1;
               else if (npes - 1 == which_pe) sched[s++] = pe;
          }
          else if (pe == which_pe) sched[s++] = pe; /* odd case: play self */

          if (pe != which_pe && which_pe < n - 1) {
               /* rotation pairing: in round pe, team a meets team b
                  when a + b == 2*pe (mod n-1); each branch emits the
                  peer when the circular distance puts it on our side */
               i = (pe - which_pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe + i) % (n - 1);

               i = (which_pe - pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe - i + (n - 1)) % (n - 1);
          }
     }
     A(s == npes); /* every peer (including self) scheduled exactly once */
}
cannam@95 244
cannam@95 245 /* Sort the communication schedule sched for npes so that the schedule
cannam@95 246 on process sortpe is ascending or descending (!ascending). This is
cannam@95 247 necessary to allow in-place transposes when the problem does not
cannam@95 248 divide equally among the processes. In this case there is one
cannam@95 249 process where the incoming blocks are bigger/smaller than the
cannam@95 250 outgoing blocks and thus have to be received in
cannam@95 251 descending/ascending order, respectively, to avoid overwriting data
cannam@95 252 before it is sent. */
/* Permute sched (for all npes processes identically) so that process
   sortpe's own schedule becomes ascending (or descending).  Works by
   regenerating sortpe's schedule, which tells us at which time slot
   sortpe meets each peer; we then scatter our own schedule into a
   scratch array indexed by that peer rank (or its mirror, for
   descending order) and copy it back. */
static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
{
     int *sortsched, i;
     /* first half of sortsched: sortpe's schedule; second half: the
        permuted copy of our sched */
     sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
     fill1_comm_sched(sortsched, sortpe, npes);
     if (ascending)
          for (i = 0; i < npes; ++i)
               sortsched[npes + sortsched[i]] = sched[i];
     else
          for (i = 0; i < npes; ++i)
               sortsched[2*npes - 1 - sortsched[i]] = sched[i];
     for (i = 0; i < npes; ++i)
          sched[i] = sortsched[npes + i];
     X(ifree)(sortsched);
}
cannam@95 268
cannam@95 269 /* make the plans to do the post-MPI transpositions (shared with
cannam@95 270 transpose-alltoall) */
/* make the plans to do the post-MPI transpositions (shared with
   transpose-alltoall).

   On success returns 1 with *cld2 (and possibly *cld2rest, *cld3) set;
   on failure destroys any partial subplans and returns 0.  rest_Ioff /
   rest_Ooff are the offsets at which cld2rest must be applied.  The
   first branch handles the cases that need at most two subplans
   (untransposed output with either evenly-divided rows or
   out-of-place operation); the else branch handles the general case
   with up to three. */
int XM(mkplans_posttranspose)(const problem_mpi_transpose *p, planner *plnr,
                              R *I, R *O, int my_pe,
                              plan **cld2, plan **cld2rest, plan **cld3,
                              INT *rest_Ioff, INT *rest_Ooff)
{
     INT vn = p->vn;
     INT b = p->block;
     INT bt = XM(block)(p->ny, p->tblock, my_pe); /* local transposed rows */
     INT nxb = p->nx / b; /* number of equal-sized blocks */
     INT nxr = p->nx - nxb * b; /* leftover rows after equal blocks */

     *cld2 = *cld2rest = *cld3 = NULL;
     *rest_Ioff = *rest_Ooff = 0;

     if (!(p->flags & TRANSPOSED_OUT) && (nxr == 0 || I != O)) {
          INT nx = p->nx * vn;
          b *= vn; /* fold the vector length into the row stride */
          /* cld2: transpose the nxb equal-size blocks */
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                      (nxb, bt * b, b,
                                                       bt, b, nx,
                                                       b, 1, 1),
                                                      I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          if (nxr > 0) {
               /* cld2rest: transpose the leftover nxr rows, which live
                  past the equal blocks in both arrays */
               *rest_Ioff = nxb * bt * b;
               *rest_Ooff = nxb * b;
               b = nxr * vn;
               *cld2rest = X(mkplan_f_d)(plnr,
                                         X(mkproblem_rdft_0_d)(X(mktensor_2d)
                                                               (bt, b, nx,
                                                                b, 1, 1),
                                                               I + *rest_Ioff,
                                                               O + *rest_Ooff),
                                         0, 0, NO_SLOW);
               if (!*cld2rest) goto nada;
          }
     }
     else {
          /* general case (TRANSPOSED_OUT, or in-place with leftover
             rows): cld2 transposes within each equal-size block */
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(
                                     X(mktensor_4d)
                                     (nxb, bt * b * vn, bt * b * vn,
                                      bt, b * vn, vn,
                                      b, vn, bt * vn,
                                      vn, 1, 1),
                                     I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          /* cld2rest: same for the leftover rows; created even when
             nxr == 0, presumably because the rank-0-size tensor then
             yields a trivial plan — TODO confirm */
          *rest_Ioff = *rest_Ooff = nxb * bt * b * vn;
          *cld2rest = X(mkplan_f_d)(plnr,
                                    X(mkproblem_rdft_0_d)(
                                         X(mktensor_3d)
                                         (bt, nxr * vn, vn,
                                          nxr, vn, bt * vn,
                                          vn, 1, 1),
                                         I + *rest_Ioff, O + *rest_Ooff),
                                    0, 0, NO_SLOW);
          if (!*cld2rest) goto nada;

          if (!(p->flags & TRANSPOSED_OUT)) {
               /* cld3: final in-place transpose of O from
                  block-transposed layout to ordinary row-major */
               *cld3 = X(mkplan_f_d)(plnr,
                                     X(mkproblem_rdft_0_d)(
                                          X(mktensor_3d)
                                          (p->nx, bt * vn, vn,
                                           bt, vn, p->nx * vn,
                                           vn, 1, 1),
                                          O, O),
                                     0, 0, NO_SLOW);
               if (!*cld3) goto nada;
          }
     }

     return 1;

 nada:
     /* unwind any subplans created before the failure */
     X(plan_destroy_internal)(*cld3);
     X(plan_destroy_internal)(*cld2rest);
     X(plan_destroy_internal)(*cld2);
     return 0;
}
cannam@95 355
/* Construct a transpose-pairwise plan for problem p_, or return NULL
   if not applicable / subplanning fails.  Builds the optional local
   pre-transpose (cld1), the post-transpose subplans, the per-peer
   block size/offset tables, and the pairwise communication schedule.
   Collective: all processes in p->comm must call this together
   (XM(any_true) synchronizes success/failure across ranks). */
static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_mpi_transpose *p;
     P *pln;
     plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
     INT b, bt, vn, rest_Ioff, rest_Ooff;
     INT *sbs, *sbo, *rbs, *rbo;
     int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
     R *I, *O;
     static const plan_adt padt = {
          XM(transpose_solve), awake, print, destroy
     };

     UNUSED(ego);

     if (!applicable(ego, p_, plnr))
          return (plan *) 0;

     p = (const problem_mpi_transpose *) p_;
     vn = p->vn;
     I = p->I; O = p->O;

     MPI_Comm_rank(p->comm, &my_pe);
     MPI_Comm_size(p->comm, &n_pes);

     b = XM(block)(p->nx, p->block, my_pe); /* local row count */

     if (!(p->flags & TRANSPOSED_IN)) { /* b x ny x vn -> ny x b x vn */
          cld1 = X(mkplan_f_d)(plnr,
                               X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                     (b, p->ny * vn, vn,
                                                      p->ny, vn, b * vn,
                                                      vn, 1, 1),
                                                     I, O),
                               0, 0, NO_SLOW);
          /* all ranks must agree to proceed, else all bail out */
          if (XM(any_true)(!cld1, p->comm)) goto nada;
     }
     /* if input must survive, all later stages work in O only */
     if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = O;

     if (XM(any_true)(!XM(mkplans_posttranspose)(p, plnr, I, O, my_pe,
                                                 &cld2, &cld2rest, &cld3,
                                                 &rest_Ioff, &rest_Ooff),
                      p->comm)) goto nada;

     pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);

     pln->cld1 = cld1;
     pln->cld2 = cld2;
     pln->cld2rest = cld2rest;
     pln->rest_Ioff = rest_Ioff;
     pln->rest_Ooff = rest_Ooff;
     pln->cld3 = cld3;
     /* 2 distinguishes "solver variant demands preservation" in print() */
     pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);

     MPI_Comm_dup(p->comm, &pln->comm);

     /* schedule over the number of blocks actually in use, which may be
        smaller than the communicator size */
     n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
                           XM(num_blocks)(p->ny, p->tblock));

     /* Compute sizes/offsets of blocks to exchange between processors */
     sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
     /* sbo/rbs/rbo are carved out of the same allocation; destroy()
        frees only sbs accordingly */
     sbo = sbs + n_pes;
     rbs = sbo + n_pes;
     rbo = rbs + n_pes;
     b = XM(block)(p->nx, p->block, my_pe);
     bt = XM(block)(p->ny, p->tblock, my_pe);
     for (pe = 0; pe < n_pes; ++pe) {
          INT db, dbt; /* destination block sizes */
          db = XM(block)(p->nx, p->block, pe);
          dbt = XM(block)(p->ny, p->tblock, pe);

          sbs[pe] = b * dbt * vn;
          sbo[pe] = pe * (b * p->tblock) * vn;
          rbs[pe] = db * bt * vn;
          rbo[pe] = pe * (p->block * bt) * vn;

          /* if a peer's incoming and outgoing blocks differ in size,
             the in-place exchange must be ordered so data is not
             overwritten before it is sent (see sort1_comm_sched) */
          if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
               A(sort_pe == -1); /* only one process should need sorting */
               sort_pe = pe;
               ascending = db * p->tblock > p->block * dbt;
          }
     }
     pln->n_pes = n_pes;
     pln->my_pe = my_pe;
     pln->send_block_sizes = sbs;
     pln->send_block_offsets = sbo;
     pln->recv_block_sizes = rbs;
     pln->recv_block_offsets = rbo;

     if (my_pe >= n_pes) {
          pln->sched = 0; /* this process is not doing anything */
     }
     else {
          pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
          fill1_comm_sched(pln->sched, my_pe, n_pes);
          if (sort_pe >= 0)
               sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
     }

     /* cost estimate = sum of subplan costs; MPI traffic not counted */
     X(ops_zero)(&pln->super.super.ops);
     if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
     if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
     if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
     if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
     /* FIXME: should MPI operations be counted in "other" somehow? */

     return &(pln->super.super);

 nada:
     X(plan_destroy_internal)(cld3);
     X(plan_destroy_internal)(cld2rest);
     X(plan_destroy_internal)(cld2);
     X(plan_destroy_internal)(cld1);
     return (plan *) 0;
}
cannam@95 472
/* Allocate one solver instance; preserve_input selects the variant
   (see struct S). */
static solver *mksolver(int preserve_input)
{
     static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
     S *slv = MKSOLVER(S, &sadt);
     slv->preserve_input = preserve_input;
     return &(slv->super);
}
cannam@95 480
cannam@95 481 void XM(transpose_pairwise_register)(planner *p)
cannam@95 482 {
cannam@95 483 int preserve_input;
cannam@95 484 for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
cannam@95 485 REGISTER_SOLVER(p, mksolver(preserve_input));
cannam@95 486 }