/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */


/* direct DFT solver, if we have a codelet */

#include "dft/dft.h"

typedef struct {
     solver super;
     const kdft_desc *desc;
     kdft k;
     int bufferedp;
} S;

typedef struct {
     plan_dft super;

     stride is, os, bufstride;
     INT n, vl, ivs, ovs;
     kdft k;
     const S *slv;
} P;

static void dobatch(const P *ego, R *ri, R *ii, R *ro, R *io,
                    R *buf, INT batchsz)
{
     X(cpy2d_pair_ci)(ri, ii, buf, buf+1,
                      ego->n, WS(ego->is, 1), WS(ego->bufstride, 1),
                      batchsz, ego->ivs, 2);

     if (IABS(WS(ego->os, 1)) < IABS(ego->ovs)) {
          /* transform directly to output */
          ego->k(buf, buf+1, ro, io,
                 ego->bufstride, ego->os, batchsz, 2, ego->ovs);
     } else {
          /* transform to buffer and copy back */
          ego->k(buf, buf+1, buf, buf+1,
                 ego->bufstride, ego->bufstride, batchsz, 2, 2);
          X(cpy2d_pair_co)(buf, buf+1, ro, io,
                           ego->n, WS(ego->bufstride, 1), WS(ego->os, 1),
                           batchsz, 2, ego->ovs);
     }
}

static INT compute_batchsize(INT n)
{
     /* round up to multiple of 4 */
     n += 3;
     n &= -4;

     return (n + 2);
}

static void apply_buf(const plan *ego_, R *ri, R *ii, R *ro, R *io)
{
     const P *ego = (const P *) ego_;
     R *buf;
     INT vl = ego->vl, n = ego->n, batchsz = compute_batchsize(n);
     INT i;
     size_t bufsz = n * batchsz * 2 * sizeof(R);

     BUF_ALLOC(R *, buf, bufsz);

     for (i = 0; i < vl - batchsz; i += batchsz) {
          dobatch(ego, ri, ii, ro, io, buf, batchsz);
          ri += batchsz * ego->ivs; ii += batchsz * ego->ivs;
          ro += batchsz * ego->ovs; io += batchsz * ego->ovs;
     }
     dobatch(ego, ri, ii, ro, io, buf, vl - i);

     BUF_FREE(buf, bufsz);
}

static void apply(const plan *ego_, R *ri, R *ii, R *ro, R *io)
{
     const P *ego = (const P *) ego_;
     ASSERT_ALIGNED_DOUBLE;
     ego->k(ri, ii, ro, io, ego->is, ego->os, ego->vl, ego->ivs, ego->ovs);
}
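
/* Illustrative note on the buffered path above (comment only, nothing
   compiled here): for a codelet of size n = 13, compute_batchsize()
   yields ((13 + 3) & -4) + 2 = 18, so apply_buf() allocates a buffer of
   13 * 18 * 2 reals and dobatch() copies up to 18 transforms into it as
   interleaved complex columns (stride 2*18 reals between successive
   samples of one transform, stride 2 between transforms).  The "+ 2"
   padding of the batch count is assumed here to be intentional, to keep
   the buffer row stride away from a power of two; the source itself
   only documents the rounding up to a multiple of 4. */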

static void apply_extra_iter(const plan *ego_, R *ri, R *ii, R *ro, R *io)
{
     const P *ego = (const P *) ego_;
     INT vl = ego->vl;

     ASSERT_ALIGNED_DOUBLE;

     /* for 4-way SIMD when VL is odd: iterate over an
        even vector length VL, and then execute the last
        iteration as a 2-vector with vector stride 0. */
     ego->k(ri, ii, ro, io, ego->is, ego->os, vl - 1, ego->ivs, ego->ovs);

     ego->k(ri + (vl - 1) * ego->ivs, ii + (vl - 1) * ego->ivs,
            ro + (vl - 1) * ego->ovs, io + (vl - 1) * ego->ovs,
            ego->is, ego->os, 1, 0, 0);
}

static void destroy(plan *ego_)
{
     P *ego = (P *) ego_;
     X(stride_destroy)(ego->is);
     X(stride_destroy)(ego->os);
     X(stride_destroy)(ego->bufstride);
}

static void print(const plan *ego_, printer *p)
{
     const P *ego = (const P *) ego_;
     const S *s = ego->slv;
     const kdft_desc *d = s->desc;

     if (ego->slv->bufferedp)
          p->print(p, "(dft-directbuf/%D-%D%v \"%s\")",
                   compute_batchsize(d->sz), d->sz, ego->vl, d->nam);
     else
          p->print(p, "(dft-direct-%D%v \"%s\")", d->sz, ego->vl, d->nam);
}

static int applicable_buf(const solver *ego_, const problem *p_,
                          const planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_dft *p = (const problem_dft *) p_;
     const kdft_desc *d = ego->desc;
     INT vl;
     INT ivs, ovs;
     INT batchsz;

     return (
          1
          && p->sz->rnk == 1
          && p->vecsz->rnk == 1
          && p->sz->dims[0].n == d->sz

          /* check strides etc */
          && X(tensor_tornk1)(p->vecsz, &vl, &ivs, &ovs)

          /* UGLY if IS <= IVS */
          && !(NO_UGLYP(plnr) &&
               X(iabs)(p->sz->dims[0].is) <= X(iabs)(ivs))

          && (batchsz = compute_batchsize(d->sz), 1)
          && (d->genus->okp(d, 0, ((const R *)0) + 1, p->ro, p->io,
                            2 * batchsz, p->sz->dims[0].os,
                            batchsz, 2, ovs, plnr))
          && (d->genus->okp(d, 0, ((const R *)0) + 1, p->ro, p->io,
                            2 * batchsz, p->sz->dims[0].os,
                            vl % batchsz, 2, ovs, plnr))

          && (0
              /* can operate out-of-place */
              || p->ri != p->ro

              /* can operate in-place as long as strides are the same */
              || X(tensor_inplace_strides2)(p->sz, p->vecsz)

              /* can do it if the problem fits in the buffer, no matter
                 what the strides are */
              || vl <= batchsz
               )
          );
}
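
/* Illustrative note on the test below (comment only): applicable()
   mirrors apply_extra_iter() above.  When a SIMD codelet cannot handle
   an odd vector length VL with the problem's real strides, the solver
   still accepts the problem if the codelet can do VL - 1 transforms
   with strides (ivs, ovs) plus one final transform issued with vector
   stride 0, which is what the 2-vector okp() check with strides (0, 0)
   probes.  For example, VL = 7 is accepted as 6 ordinary transforms
   followed by one stride-0 iteration. */
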
static int applicable(const solver *ego_, const problem *p_,
                      const planner *plnr, int *extra_iterp)
{
     const S *ego = (const S *) ego_;
     const problem_dft *p = (const problem_dft *) p_;
     const kdft_desc *d = ego->desc;
     INT vl;
     INT ivs, ovs;

     return (
          1
          && p->sz->rnk == 1
          && p->vecsz->rnk <= 1
          && p->sz->dims[0].n == d->sz

          /* check strides etc */
          && X(tensor_tornk1)(p->vecsz, &vl, &ivs, &ovs)

          && ((*extra_iterp = 0,
               (d->genus->okp(d, p->ri, p->ii, p->ro, p->io,
                              p->sz->dims[0].is, p->sz->dims[0].os,
                              vl, ivs, ovs, plnr)))
              ||
              (*extra_iterp = 1,
               ((d->genus->okp(d, p->ri, p->ii, p->ro, p->io,
                               p->sz->dims[0].is, p->sz->dims[0].os,
                               vl - 1, ivs, ovs, plnr))
                &&
                (d->genus->okp(d, p->ri, p->ii, p->ro, p->io,
                               p->sz->dims[0].is, p->sz->dims[0].os,
                               2, 0, 0, plnr)))))

          && (0
              /* can operate out-of-place */
              || p->ri != p->ro

              /* can always compute one transform */
              || vl == 1

              /* can operate in-place as long as strides are the same */
              || X(tensor_inplace_strides2)(p->sz, p->vecsz)
               )
          );
}


static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     P *pln;
     const problem_dft *p;
     iodim *d;
     const kdft_desc *e = ego->desc;

     static const plan_adt padt = {
          X(dft_solve), X(null_awake), print, destroy
     };

     UNUSED(plnr);

     if (ego->bufferedp) {
          if (!applicable_buf(ego_, p_, plnr))
               return (plan *)0;
          pln = MKPLAN_DFT(P, &padt, apply_buf);
     } else {
          int extra_iterp = 0;
          if (!applicable(ego_, p_, plnr, &extra_iterp))
               return (plan *)0;
          pln = MKPLAN_DFT(P, &padt, extra_iterp ? apply_extra_iter : apply);
     }

     p = (const problem_dft *) p_;
     d = p->sz->dims;
     pln->k = ego->k;
     pln->n = d[0].n;
     pln->is = X(mkstride)(pln->n, d[0].is);
     pln->os = X(mkstride)(pln->n, d[0].os);
     pln->bufstride = X(mkstride)(pln->n, 2 * compute_batchsize(pln->n));

     X(tensor_tornk1)(p->vecsz, &pln->vl, &pln->ivs, &pln->ovs);
     pln->slv = ego;

     X(ops_zero)(&pln->super.super.ops);
     X(ops_madd2)(pln->vl / e->genus->vl, &e->ops, &pln->super.super.ops);

     if (ego->bufferedp)
          pln->super.super.ops.other += 4 * pln->n * pln->vl;

     pln->super.super.could_prune_now_p = !ego->bufferedp;
     return &(pln->super.super);
}

static solver *mksolver(kdft k, const kdft_desc *desc, int bufferedp)
{
     static const solver_adt sadt = { PROBLEM_DFT, mkplan, 0 };
     S *slv = MKSOLVER(S, &sadt);
     slv->k = k;
     slv->desc = desc;
     slv->bufferedp = bufferedp;
     return &(slv->super);
}

solver *X(mksolver_dft_direct)(kdft k, const kdft_desc *desc)
{
     return mksolver(k, desc, 0);
}

solver *X(mksolver_dft_directbuf)(kdft k, const kdft_desc *desc)
{
     return mksolver(k, desc, 1);
}
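
/* Usage sketch (assumption, for illustration only): generated codelets
   do not call these constructors directly.  In FFTW's source layout a
   small registration helper, typically X(kdft_register) in dft/kdft.c,
   is expected to wrap them along these lines:

        void X(kdft_register)(planner *p, kdft codelet, const kdft_desc *desc)
        {
             REGISTER_SOLVER(p, X(mksolver_dft_direct)(codelet, desc));
             REGISTER_SOLVER(p, X(mksolver_dft_directbuf)(codelet, desc));
        }

   so that each codelet contributes both a direct and a buffered solver
   to the planner.  The exact helper name and macro may differ between
   FFTW versions; verify against the tree this file comes from. */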