/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* Distributed transposes using a sequence of carefully scheduled
   pairwise exchanges.  This has the advantage that it can be done
   in-place, or out-of-place while preserving the input, using buffer
   space proportional to the local size divided by the number of
   processes (i.e. to the total array size divided by the number of
   processes squared). */
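/* (For example, an nx x ny array distributed over P processes stores
   roughly nx*ny/P elements per process, and the in-place exchange
   below buffers only a single send block of roughly nx*ny/P^2
   elements at a time.) */
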
#include "mpi-transpose.h"
#include <string.h>

typedef struct {
     solver super;
     int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
} S;

typedef struct {
     plan_mpi_transpose super;

     plan *cld1, *cld2, *cld2rest, *cld3;
     INT rest_Ioff, rest_Ooff;

     int n_pes, my_pe, *sched;
     INT *send_block_sizes, *send_block_offsets;
     INT *recv_block_sizes, *recv_block_offsets;
     MPI_Comm comm;
     int preserve_input;
} P;

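/* Exchange contiguous blocks according to sched: at each step a
   process either moves its own (diagonal) block locally or trades one
   block with its scheduled partner via MPI_Sendrecv.  In the in-place
   (I == O) case the outgoing block is first staged in a buffer of one
   block so that the matching incoming block can be received into the
   same array without clobbering data that has not yet been sent. */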
static void transpose_chunks(int *sched, int n_pes, int my_pe,
                             INT *sbs, INT *sbo, INT *rbs, INT *rbo,
                             MPI_Comm comm,
                             R *I, R *O)
{
     if (sched) {
          int i;
          MPI_Status status;

          /* TODO: explore non-synchronous send/recv? */

          if (I == O) {
               R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);

               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe) {
                         /* local block: move within O if needed */
                         if (rbo[pe] != sbo[pe])
                              memmove(O + rbo[pe], O + sbo[pe],
                                      sbs[pe] * sizeof(R));
                    }
                    else {
                         /* stage the outgoing block in buf so that the
                            incoming block can be received into the same
                            array; the tags (sender * n_pes + receiver,
                            truncated to 16 bits) match on both sides */
                         memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
                         MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
                    }
               }

               X(ifree)(buf);
          }
          else { /* I != O */
               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe) /* local block: just copy */
                         memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
                    else
                         MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, &status);
               }
          }
     }
}
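
/* Apply the transpose as a three-stage pipeline: an optional local
   transpose (cld1) that gathers each destination's data into
   contiguous chunks, the global pairwise exchange of those chunks,
   and one or two further local transposes (cld2 and cld2rest, plus
   cld3 when non-transposed output is required) to restore row-major
   order. */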
static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
          cld1->apply(ego->cld1, I, O);

          if (ego->preserve_input) I = O;

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, O, I);
     }
     else if (ego->preserve_input) {
          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, O);

          I = O;
     }
     else {
          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, I);
     }

     /* transpose locally, again, to get ordinary row-major;
        this may take two transposes if the block sizes are unequal
        (3 subplans, two of which operate on disjoint data) */
     cld2 = (plan_rdft *) ego->cld2;
     cld2->apply(ego->cld2, I, O);
     cld2rest = (plan_rdft *) ego->cld2rest;
     if (cld2rest) {
          cld2rest->apply(ego->cld2rest,
                          I + ego->rest_Ioff, O + ego->rest_Ooff);
          cld3 = (plan_rdft *) ego->cld3;
          if (cld3)
               cld3->apply(ego->cld3, O, O);
          /* else TRANSPOSED_OUT is true and user wants O transposed */
     }
}

static int applicable(const S *ego, const problem *p_,
                      const planner *plnr)
{
     const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
     /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
        the planner often prefers transpose-pairwise to transpose-alltoall,
        at least with LAM MPI on my machine. */
     return (1
             && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
                                          && p->I != p->O))
             && ONLY_TRANSPOSEDP(p->flags));
}

static void awake(plan *ego_, enum wakefulness wakefulness)
{
     P *ego = (P *) ego_;
     X(plan_awake)(ego->cld1, wakefulness);
     X(plan_awake)(ego->cld2, wakefulness);
     X(plan_awake)(ego->cld2rest, wakefulness);
     X(plan_awake)(ego->cld3, wakefulness);
}

static void destroy(plan *ego_)
{
     P *ego = (P *) ego_;
     X(ifree0)(ego->sched);
     /* send_block_sizes points to the single allocation that also
        holds the send/recv offset and recv size arrays */
     X(ifree0)(ego->send_block_sizes);
     MPI_Comm_free(&ego->comm);
     X(plan_destroy_internal)(ego->cld3);
     X(plan_destroy_internal)(ego->cld2rest);
     X(plan_destroy_internal)(ego->cld2);
     X(plan_destroy_internal)(ego->cld1);
}

static void print(const plan *ego_, printer *p)
{
     const P *ego = (const P *) ego_;
     p->print(p, "(mpi-transpose-pairwise%s%(%p%)%(%p%)%(%p%)%(%p%))",
              ego->preserve_input == 2 ? "/p" : "",
              ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
}

/* Given a process which_pe and a number of processes npes, fills
   the array sched[npes] with a sequence of processes to communicate
   with for a deadlock-free, optimum-overlap all-to-all communication.
   (All processes must call this routine to get their own schedules.)
   The schedule can be re-ordered arbitrarily as long as all processes
   apply the same permutation to their schedules.

   The algorithm here is based upon the one described in:
       J. A. M. Schreuder, "Constructing timetables for sport
       competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
   In a sport competition, you have N teams and want every team to
   play every other team in as short a time as possible (maximum overlap
   between games).  This timetabling problem is therefore identical
   to that of an all-to-all communications problem.  In our case, there
   is one wrinkle: as part of the schedule, the process must do
   some data transfer with itself (local data movement), analogous
   to a requirement that each team "play itself" in addition to other
   teams.  With this wrinkle, it turns out that an optimal timetable
   (N parallel games) can be constructed for any N, not just for even
   N as in the original problem described by Schreuder.
*/
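/* For instance, for npes = 4 the schedules constructed below are

        pe 0:  0 3 2 1
        pe 1:  1 2 3 0
        pe 2:  2 1 0 3
        pe 3:  3 0 1 2

   so that in each of the npes steps every process is paired with
   exactly one partner (or "plays itself"), and the all-to-all
   completes in npes parallel rounds. */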
static void fill1_comm_sched(int *sched, int which_pe, int npes)
{
     int pe, i, n, s = 0;
     A(which_pe >= 0 && which_pe < npes);
     if (npes % 2 == 0) {
          n = npes;
          sched[s++] = which_pe;
     }
     else
          n = npes + 1;
     for (pe = 0; pe < n - 1; ++pe) {
          if (npes % 2 == 0) {
               if (pe == which_pe) sched[s++] = npes - 1;
               else if (npes - 1 == which_pe) sched[s++] = pe;
          }
          else if (pe == which_pe) sched[s++] = pe;

          if (pe != which_pe && which_pe < n - 1) {
               i = (pe - which_pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe + i) % (n - 1);

               i = (which_pe - pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe - i + (n - 1)) % (n - 1);
          }
     }
     A(s == npes);
}

/* Sort the communication schedule sched for npes so that the schedule
   on process sortpe is ascending or descending (!ascending).  This is
   necessary to allow in-place transposes when the problem does not
   divide equally among the processes.  In this case there is one
   process where the incoming blocks are bigger/smaller than the
   outgoing blocks and thus have to be received in
   descending/ascending order, respectively, to avoid overwriting data
   before it is sent. */
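/* For instance, sorting the npes = 4 schedules shown above with
   sortpe = 3 and ascending order permutes every process's schedule
   by the same permutation of steps, preserving the round structure:

        pe 0:  3 2 1 0
        pe 1:  2 3 0 1
        pe 2:  1 0 3 2
        pe 3:  0 1 2 3

   so that process 3 now receives its blocks in ascending order. */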
static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
{
     int *sortsched, i;
     sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
     fill1_comm_sched(sortsched, sortpe, npes);
     /* sortsched[i] is sortpe's partner at step i; sending step i to
        position sortsched[i] (or to its mirror image, for descending
        order) makes sortpe's schedule ascending (descending) */
     if (ascending)
          for (i = 0; i < npes; ++i)
               sortsched[npes + sortsched[i]] = sched[i];
     else
          for (i = 0; i < npes; ++i)
               sortsched[2*npes - 1 - sortsched[i]] = sched[i];
     for (i = 0; i < npes; ++i)
          sched[i] = sortsched[npes + i];
     X(ifree)(sortsched);
}

/* make the plans to do the post-MPI transpositions (shared with
   transpose-alltoall) */
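/* The local output is handled as nxb equal blocks of `block` rows
   each plus, when the block size does not evenly divide p->nx, a
   remainder of nxr rows: cld2 transposes the equal blocks, cld2rest
   the remainder (at offsets rest_Ioff/rest_Ooff), and cld3, when
   non-NULL, finishes with an in-place transposition of O into
   non-transposed order. */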
int XM(mkplans_posttranspose)(const problem_mpi_transpose *p, planner *plnr,
                              R *I, R *O, int my_pe,
                              plan **cld2, plan **cld2rest, plan **cld3,
                              INT *rest_Ioff, INT *rest_Ooff)
{
     INT vn = p->vn;
     INT b = p->block;
     INT bt = XM(block)(p->ny, p->tblock, my_pe);
     INT nxb = p->nx / b; /* number of equal-sized blocks */
     INT nxr = p->nx - nxb * b; /* leftover rows after equal blocks */

     *cld2 = *cld2rest = *cld3 = NULL;
     *rest_Ioff = *rest_Ooff = 0;

     if (!(p->flags & TRANSPOSED_OUT) && (nxr == 0 || I != O)) {
          INT nx = p->nx * vn;
          b *= vn;
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                      (nxb, bt * b, b,
                                                       bt, b, nx,
                                                       b, 1, 1),
                                                      I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          if (nxr > 0) {
               *rest_Ioff = nxb * bt * b;
               *rest_Ooff = nxb * b;
               b = nxr * vn;
               *cld2rest = X(mkplan_f_d)(plnr,
                                         X(mkproblem_rdft_0_d)(X(mktensor_2d)
                                                               (bt, b, nx,
                                                                b, 1, 1),
                                                               I + *rest_Ioff,
                                                               O + *rest_Ooff),
                                         0, 0, NO_SLOW);
               if (!*cld2rest) goto nada;
          }
     }
     else {
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(
                                     X(mktensor_4d)
                                     (nxb, bt * b * vn, bt * b * vn,
                                      bt, b * vn, vn,
                                      b, vn, bt * vn,
                                      vn, 1, 1),
                                     I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          *rest_Ioff = *rest_Ooff = nxb * bt * b * vn;
          *cld2rest = X(mkplan_f_d)(plnr,
                                    X(mkproblem_rdft_0_d)(
                                         X(mktensor_3d)
                                         (bt, nxr * vn, vn,
                                          nxr, vn, bt * vn,
                                          vn, 1, 1),
                                         I + *rest_Ioff, O + *rest_Ooff),
                                    0, 0, NO_SLOW);
          if (!*cld2rest) goto nada;

          if (!(p->flags & TRANSPOSED_OUT)) {
               *cld3 = X(mkplan_f_d)(plnr,
                                     X(mkproblem_rdft_0_d)(
                                          X(mktensor_3d)
                                          (p->nx, bt * vn, vn,
                                           bt, vn, p->nx * vn,
                                           vn, 1, 1),
                                          O, O),
                                     0, 0, NO_SLOW);
               if (!*cld3) goto nada;
          }
     }

     return 1;

 nada:
     X(plan_destroy_internal)(*cld3);
     X(plan_destroy_internal)(*cld2rest);
     X(plan_destroy_internal)(*cld2);
     *cld2 = *cld2rest = *cld3 = NULL;
     return 0;
}

static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_mpi_transpose *p;
     P *pln;
     plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
     INT b, bt, vn, rest_Ioff, rest_Ooff;
     INT *sbs, *sbo, *rbs, *rbo;
     int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
     R *I, *O;
     static const plan_adt padt = {
          XM(transpose_solve), awake, print, destroy
     };

     UNUSED(ego);

     if (!applicable(ego, p_, plnr))
          return (plan *) 0;

     p = (const problem_mpi_transpose *) p_;
     vn = p->vn;
     I = p->I; O = p->O;

     MPI_Comm_rank(p->comm, &my_pe);
     MPI_Comm_size(p->comm, &n_pes);

     b = XM(block)(p->nx, p->block, my_pe);

     if (!(p->flags & TRANSPOSED_IN)) { /* b x ny x vn -> ny x b x vn */
          cld1 = X(mkplan_f_d)(plnr,
                               X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                     (b, p->ny * vn, vn,
                                                      p->ny, vn, b * vn,
                                                      vn, 1, 1),
                                                     I, O),
                               0, 0, NO_SLOW);
          if (XM(any_true)(!cld1, p->comm)) goto nada;
     }
     if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = O;

     if (XM(any_true)(!XM(mkplans_posttranspose)(p, plnr, I, O, my_pe,
                                                 &cld2, &cld2rest, &cld3,
                                                 &rest_Ioff, &rest_Ooff),
                      p->comm)) goto nada;

     pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);

     pln->cld1 = cld1;
     pln->cld2 = cld2;
     pln->cld2rest = cld2rest;
     pln->rest_Ioff = rest_Ioff;
     pln->rest_Ooff = rest_Ooff;
     pln->cld3 = cld3;
     pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);

     MPI_Comm_dup(p->comm, &pln->comm);

     n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
                           XM(num_blocks)(p->ny, p->tblock));

     /* Compute sizes/offsets of blocks to exchange between processors */
     sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
     sbo = sbs + n_pes;
     rbs = sbo + n_pes;
     rbo = rbs + n_pes;
     b = XM(block)(p->nx, p->block, my_pe);
     bt = XM(block)(p->ny, p->tblock, my_pe);
     for (pe = 0; pe < n_pes; ++pe) {
          INT db, dbt; /* destination block sizes */
          db = XM(block)(p->nx, p->block, pe);
          dbt = XM(block)(p->ny, p->tblock, pe);

          sbs[pe] = b * dbt * vn;
          sbo[pe] = pe * (b * p->tblock) * vn;
          rbs[pe] = db * bt * vn;
          rbo[pe] = pe * (p->block * bt) * vn;

          if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
               A(sort_pe == -1); /* only one process should need sorting */
               sort_pe = pe;
               ascending = db * p->tblock > p->block * dbt;
          }
     }
     pln->n_pes = n_pes;
     pln->my_pe = my_pe;
     pln->send_block_sizes = sbs;
     pln->send_block_offsets = sbo;
     pln->recv_block_sizes = rbs;
     pln->recv_block_offsets = rbo;

     if (my_pe >= n_pes) {
          pln->sched = 0; /* this process is not doing anything */
     }
     else {
          pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
          fill1_comm_sched(pln->sched, my_pe, n_pes);
          if (sort_pe >= 0)
               sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
     }

     X(ops_zero)(&pln->super.super.ops);
     if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
     if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
     if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
     if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
     /* FIXME: should MPI operations be counted in "other" somehow? */

     return &(pln->super.super);

 nada:
     X(plan_destroy_internal)(cld3);
     X(plan_destroy_internal)(cld2rest);
     X(plan_destroy_internal)(cld2);
     X(plan_destroy_internal)(cld1);
     return (plan *) 0;
}

static solver *mksolver(int preserve_input)
{
     static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
     S *slv = MKSOLVER(S, &sadt);
     slv->preserve_input = preserve_input;
     return &(slv->super);
}

void XM(transpose_pairwise_register)(planner *p)
{
     int preserve_input;
     for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
          REGISTER_SOLVER(p, mksolver(preserve_input));
}