/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */


#include "verify.h"

/* copy real A into real B, using output stride of A and input stride of B */
typedef struct {
     dotens2_closure k;
     R *ra;
     R *rb;
} cpyr_closure;

static void cpyr0(dotens2_closure *k_,
                  int indxa, int ondxa, int indxb, int ondxb)
{
     cpyr_closure *k = (cpyr_closure *)k_;
     k->rb[indxb] = k->ra[ondxa];
     UNUSED(indxa); UNUSED(ondxb);
}

static void cpyr(R *ra, const bench_tensor *sza,
                 R *rb, const bench_tensor *szb)
{
     cpyr_closure k;
     k.k.apply = cpyr0;
     k.ra = ra; k.rb = rb;
     bench_dotens2(sza, szb, &k.k);
}

/* copy unpacked halfcomplex A[n] into packed-complex B[n], using output
   stride of A and input stride of B.  Only copies non-redundant half;
   other half must be copied via mkhermitian. */
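
/* Background (illustrative only): in halfcomplex (r2hc) storage a real
   transform of length n keeps just the n/2 + 1 non-redundant complex
   outputs, since for real input the DFT satisfies
   out[n - i] = conj(out[i]).  A minimal rank-1 sketch of the redundant
   half that mkhermitian reconstructs (mkhermitian itself handles the
   general multi-dimensional case) would be:

        for (i = 1; i < (n + 1) / 2; ++i) {
             c_re(out[n - i]) =  c_re(out[i]);
             c_im(out[n - i]) = -c_im(out[i]);
        }
*/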
typedef struct {
     dotens2_closure k;
     int n;
     int as;
     int scalea;
     R *ra, *ia;
     R *rb, *ib;
} cpyhc2_closure;

static void cpyhc20(dotens2_closure *k_,
                    int indxa, int ondxa, int indxb, int ondxb)
{
     cpyhc2_closure *k = (cpyhc2_closure *)k_;
     int i, n = k->n;
     int scalea = k->scalea;
     int as = k->as * scalea;
     R *ra = k->ra + ondxa * scalea, *ia = k->ia + ondxa * scalea;
     R *rb = k->rb + indxb, *ib = k->ib + indxb;
     UNUSED(indxa); UNUSED(ondxb);

     for (i = 0; i < n/2 + 1; ++i) {
          rb[2*i] = ra[as*i];
          ib[2*i] = ia[as*i];
     }
}

static void cpyhc2(R *ra, R *ia,
                   const bench_tensor *sza, const bench_tensor *vecsza,
                   int scalea,
                   R *rb, R *ib, const bench_tensor *szb)
{
     cpyhc2_closure k;
     BENCH_ASSERT(sza->rnk <= 1);
     k.k.apply = cpyhc20;
     k.n = tensor_sz(sza);
     k.scalea = scalea;
     if (!FINITE_RNK(sza->rnk) || sza->rnk == 0)
          k.as = 0;
     else
          k.as = sza->dims[0].os;
     k.ra = ra; k.ia = ia; k.rb = rb; k.ib = ib;
     bench_dotens2(vecsza, szb, &k.k);
}

/* icpyhc2 is the inverse of cpyhc2 */

static void icpyhc20(dotens2_closure *k_,
                     int indxa, int ondxa, int indxb, int ondxb)
{
     cpyhc2_closure *k = (cpyhc2_closure *)k_;
     int i, n = k->n;
     int scalea = k->scalea;
     int as = k->as * scalea;
     R *ra = k->ra + indxa * scalea, *ia = k->ia + indxa * scalea;
     R *rb = k->rb + ondxb, *ib = k->ib + ondxb;
     UNUSED(ondxa); UNUSED(indxb);

     for (i = 0; i < n/2 + 1; ++i) {
          ra[as*i] = rb[2*i];
          ia[as*i] = ib[2*i];
     }
}

static void icpyhc2(R *ra, R *ia,
                    const bench_tensor *sza, const bench_tensor *vecsza,
                    int scalea,
                    R *rb, R *ib, const bench_tensor *szb)
{
     cpyhc2_closure k;
     BENCH_ASSERT(sza->rnk <= 1);
     k.k.apply = icpyhc20;
     k.n = tensor_sz(sza);
     k.scalea = scalea;
     if (!FINITE_RNK(sza->rnk) || sza->rnk == 0)
          k.as = 0;
     else
          k.as = sza->dims[0].is;
     k.ra = ra; k.ia = ia; k.rb = rb; k.ib = ib;
     bench_dotens2(vecsza, szb, &k.k);
}

typedef struct {
     dofft_closure k;
     bench_problem *p;
} dofft_rdft2_closure;
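
/* rdft2_apply presents a real (r2hc/hc2r) problem to the generic
   verifier as if it were a full complex DFT: it copies the caller's
   packed-complex data into the problem's real/halfcomplex arrays,
   invokes the transform via doit(), and copies the result back, using
   mkhermitian (forward) to fill the conjugate-redundant half and
   mkreal (backward) to zero the imaginary parts of the real output
   before comparison. */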
static void rdft2_apply(dofft_closure *k_,
                        bench_complex *in, bench_complex *out)
{
     dofft_rdft2_closure *k = (dofft_rdft2_closure *)k_;
     bench_problem *p = k->p;
     bench_tensor *totalsz, *pckdsz, *totalsz_swap, *pckdsz_swap;
     bench_tensor *probsz2, *totalsz2, *pckdsz2;
     bench_tensor *probsz2_swap, *totalsz2_swap, *pckdsz2_swap;
     bench_real *ri, *ii, *ro, *io;
     int n2, totalscale;

     totalsz = tensor_append(p->vecsz, p->sz);
     pckdsz = verify_pack(totalsz, 2);
     n2 = tensor_sz(totalsz);
     if (FINITE_RNK(p->sz->rnk) && p->sz->rnk > 0)
          n2 = (n2 / p->sz->dims[p->sz->rnk - 1].n) *
               (p->sz->dims[p->sz->rnk - 1].n / 2 + 1);
     ri = (bench_real *) p->in;
     ro = (bench_real *) p->out;

     if (FINITE_RNK(p->sz->rnk) && p->sz->rnk > 0 && n2 > 0) {
          probsz2 = tensor_copy_sub(p->sz, p->sz->rnk - 1, 1);
          totalsz2 = tensor_copy_sub(totalsz, 0, totalsz->rnk - 1);
          pckdsz2 = tensor_copy_sub(pckdsz, 0, pckdsz->rnk - 1);
     }
     else {
          probsz2 = mktensor(0);
          totalsz2 = tensor_copy(totalsz);
          pckdsz2 = tensor_copy(pckdsz);
     }

     totalsz_swap = tensor_copy_swapio(totalsz);
     pckdsz_swap = tensor_copy_swapio(pckdsz);
     totalsz2_swap = tensor_copy_swapio(totalsz2);
     pckdsz2_swap = tensor_copy_swapio(pckdsz2);
     probsz2_swap = tensor_copy_swapio(probsz2);

     /* confusion: the stride is the distance between complex elements
        when using interleaved format, but it is the distance between
        real elements when using split format */
     if (p->split) {
          ii = p->ini ? (bench_real *) p->ini : ri + n2;
          io = p->outi ? (bench_real *) p->outi : ro + n2;
          totalscale = 1;
     } else {
          ii = p->ini ? (bench_real *) p->ini : ri + 1;
          io = p->outi ? (bench_real *) p->outi : ro + 1;
          totalscale = 2;
     }

     if (p->sign < 0) { /* R2HC */
          int N, vN, i;
          cpyr(&c_re(in[0]), pckdsz, ri, totalsz);
          after_problem_rcopy_from(p, ri);
          doit(1, p);
          after_problem_hccopy_to(p, ro, io);
          if (k->k.recopy_input)
               cpyr(ri, totalsz_swap, &c_re(in[0]), pckdsz_swap);
          cpyhc2(ro, io, probsz2, totalsz2, totalscale,
                 &c_re(out[0]), &c_im(out[0]), pckdsz2);
          N = tensor_sz(p->sz);
          vN = tensor_sz(p->vecsz);
          for (i = 0; i < vN; ++i)
               mkhermitian(out + i*N, p->sz->rnk, p->sz->dims, 1);
     }
     else { /* HC2R */
          icpyhc2(ri, ii, probsz2, totalsz2, totalscale,
                  &c_re(in[0]), &c_im(in[0]), pckdsz2);
          after_problem_hccopy_from(p, ri, ii);
          doit(1, p);
          after_problem_rcopy_to(p, ro);
          if (k->k.recopy_input)
               cpyhc2(ri, ii, probsz2_swap, totalsz2_swap, totalscale,
                      &c_re(in[0]), &c_im(in[0]), pckdsz2_swap);
          mkreal(out, tensor_sz(pckdsz));
          cpyr(ro, totalsz, &c_re(out[0]), pckdsz);
     }

     tensor_destroy(totalsz);
     tensor_destroy(pckdsz);
     tensor_destroy(totalsz_swap);
     tensor_destroy(pckdsz_swap);
     tensor_destroy(probsz2);
     tensor_destroy(totalsz2);
     tensor_destroy(pckdsz2);
     tensor_destroy(probsz2_swap);
     tensor_destroy(totalsz2_swap);
     tensor_destroy(pckdsz2_swap);
}
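
/* verify_rdft2 runs the standard verifier checks on a real transform:
   impulse response, linearity, and a time-shift (forward) or
   frequency-shift (backward) test over `rounds` random trials, plus an
   input-preservation check for out-of-place plans that must not destroy
   their input; the resulting error measures are stored in *e.

   A hypothetical call site (assuming `p` and `tol` come from the
   benchmark driver) might be:

        errors e;
        verify_rdft2(p, 0, tol, &e);

   where rounds == 0 selects the default of 20 trials. */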
void verify_rdft2(bench_problem *p, int rounds, double tol, errors *e)
{
     C *inA, *inB, *inC, *outA, *outB, *outC, *tmp;
     int n, vecn, N;
     dofft_rdft2_closure k;

     BENCH_ASSERT(p->kind == PROBLEM_REAL);

     if (!FINITE_RNK(p->sz->rnk) || !FINITE_RNK(p->vecsz->rnk))
          return; /* give up */

     k.k.apply = rdft2_apply;
     k.k.recopy_input = 0;
     k.p = p;

     if (rounds == 0)
          rounds = 20;  /* default value */

     n = tensor_sz(p->sz);
     vecn = tensor_sz(p->vecsz);
     N = n * vecn;

     inA = (C *) bench_malloc(N * sizeof(C));
     inB = (C *) bench_malloc(N * sizeof(C));
     inC = (C *) bench_malloc(N * sizeof(C));
     outA = (C *) bench_malloc(N * sizeof(C));
     outB = (C *) bench_malloc(N * sizeof(C));
     outC = (C *) bench_malloc(N * sizeof(C));
     tmp = (C *) bench_malloc(N * sizeof(C));

     e->i = impulse(&k.k, n, vecn, inA, inB, inC, outA, outB, outC,
                    tmp, rounds, tol);
     e->l = linear(&k.k, 1, N, inA, inB, inC, outA, outB, outC,
                   tmp, rounds, tol);

     e->s = 0.0;
     if (p->sign < 0)
          e->s = dmax(e->s, tf_shift(&k.k, 1, p->sz, n, vecn, p->sign,
                                     inA, inB, outA, outB,
                                     tmp, rounds, tol, TIME_SHIFT));
     else
          e->s = dmax(e->s, tf_shift(&k.k, 1, p->sz, n, vecn, p->sign,
                                     inA, inB, outA, outB,
                                     tmp, rounds, tol, FREQ_SHIFT));

     if (!p->in_place && !p->destroy_input)
          preserves_input(&k.k, p->sign < 0 ? mkreal : mkhermitian1,
                          N, inA, inB, outB, rounds);

     bench_free(tmp);
     bench_free(outC);
     bench_free(outB);
     bench_free(outA);
     bench_free(inC);
     bench_free(inB);
     bench_free(inA);
}

void accuracy_rdft2(bench_problem *p, int rounds, int impulse_rounds,
                    double t[6])
{
     dofft_rdft2_closure k;
     int n;
     C *a, *b;

     BENCH_ASSERT(p->kind == PROBLEM_REAL);
     BENCH_ASSERT(p->sz->rnk == 1);
     BENCH_ASSERT(p->vecsz->rnk == 0);

     k.k.apply = rdft2_apply;
     k.k.recopy_input = 0;
     k.p = p;
     n = tensor_sz(p->sz);

     a = (C *) bench_malloc(n * sizeof(C));
     b = (C *) bench_malloc(n * sizeof(C));
     accuracy_test(&k.k, p->sign < 0 ? mkreal : mkhermitian1, p->sign,
                   n, a, b, rounds, impulse_rounds, t);
     bench_free(b);
     bench_free(a);
}