annotate src/opus-1.3/celt/mdct.c @ 79:91c729825bca pa_catalina

Update build for AUDIO_COMPONENT_FIX
author Chris Cannam
date Wed, 30 Oct 2019 12:40:34 +0000
parents 7aeed7906520
children
rev   line source
Chris@69 1 /* Copyright (c) 2007-2008 CSIRO
Chris@69 2 Copyright (c) 2007-2008 Xiph.Org Foundation
Chris@69 3 Written by Jean-Marc Valin */
Chris@69 4 /*
Chris@69 5 Redistribution and use in source and binary forms, with or without
Chris@69 6 modification, are permitted provided that the following conditions
Chris@69 7 are met:
Chris@69 8
Chris@69 9 - Redistributions of source code must retain the above copyright
Chris@69 10 notice, this list of conditions and the following disclaimer.
Chris@69 11
Chris@69 12 - Redistributions in binary form must reproduce the above copyright
Chris@69 13 notice, this list of conditions and the following disclaimer in the
Chris@69 14 documentation and/or other materials provided with the distribution.
Chris@69 15
Chris@69 16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
Chris@69 17 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
Chris@69 18 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
Chris@69 19 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
Chris@69 20 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
Chris@69 21 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
Chris@69 22 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
Chris@69 23 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
Chris@69 24 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
Chris@69 25 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
Chris@69 26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Chris@69 27 */
Chris@69 28
Chris@69 29 /* This is a simple MDCT implementation that uses an N/4 complex FFT
Chris@69 30 to do most of the work. It should be relatively straightforward to
Chris@69 31 plug in pretty much any FFT here.
Chris@69 32
Chris@69 33 This replaces the Vorbis FFT (and uses the exact same API), which
Chris@69 34 was a bit too messy and was ending up duplicating code
Chris@69 35 (might as well use the same FFT everywhere).
Chris@69 36
Chris@69 37 The algorithm is similar to (and inspired by) Fabrice Bellard's
Chris@69 38 MDCT implementation in FFmpeg, but has differences in signs, ordering
Chris@69 39 and scaling in many places.
Chris@69 40 */
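For reference, the transform computed here is the standard MDCT, which maps 2*M time-domain samples to M frequency bins (M playing the role of N2 below). The sketch that follows is purely illustrative: as the comment above says, this file uses its own signs, ordering and scaling, and an N/4 complex FFT rather than this O(M^2) loop, so the sketch is not bit-exact against clt_mdct_forward_c().

/* Illustrative textbook MDCT: 2*M inputs -> M outputs.  Not the convention
   used in this file; shown only to make the underlying transform concrete. */
#include <math.h>
#include <stddef.h>

static void mdct_direct(const float *x, float *X, size_t M)
{
    for (size_t k = 0; k < M; k++) {
        double acc = 0.0;
        for (size_t n = 0; n < 2 * M; n++)
            acc += x[n] * cos(M_PI / M * (n + 0.5 + M / 2.0) * (k + 0.5));
        X[k] = (float)acc;
    }
}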
Chris@69 41
Chris@69 42 #ifndef SKIP_CONFIG_H
Chris@69 43 #ifdef HAVE_CONFIG_H
Chris@69 44 #include "config.h"
Chris@69 45 #endif
Chris@69 46 #endif
Chris@69 47
Chris@69 48 #include "mdct.h"
Chris@69 49 #include "kiss_fft.h"
Chris@69 50 #include "_kiss_fft_guts.h"
Chris@69 51 #include <math.h>
Chris@69 52 #include "os_support.h"
Chris@69 53 #include "mathops.h"
Chris@69 54 #include "stack_alloc.h"
Chris@69 55
Chris@69 56 #if defined(MIPSr1_ASM)
Chris@69 57 #include "mips/mdct_mipsr1.h"
Chris@69 58 #endif
Chris@69 59
Chris@69 60
Chris@69 61 #ifdef CUSTOM_MODES
Chris@69 62
Chris@69 63 int clt_mdct_init(mdct_lookup *l,int N, int maxshift, int arch)
Chris@69 64 {
Chris@69 65 int i;
Chris@69 66 kiss_twiddle_scalar *trig;
Chris@69 67 int shift;
Chris@69 68 int N2=N>>1;
Chris@69 69 l->n = N;
Chris@69 70 l->maxshift = maxshift;
Chris@69 71 for (i=0;i<=maxshift;i++)
Chris@69 72 {
Chris@69 73 if (i==0)
Chris@69 74 l->kfft[i] = opus_fft_alloc(N>>2>>i, 0, 0, arch);
Chris@69 75 else
Chris@69 76 l->kfft[i] = opus_fft_alloc_twiddles(N>>2>>i, 0, 0, l->kfft[0], arch);
Chris@69 77 #ifndef ENABLE_TI_DSPLIB55
Chris@69 78 if (l->kfft[i]==NULL)
Chris@69 79 return 0;
Chris@69 80 #endif
Chris@69 81 }
Chris@69 82 l->trig = trig = (kiss_twiddle_scalar*)opus_alloc((N-(N2>>maxshift))*sizeof(kiss_twiddle_scalar));
Chris@69 83 if (l->trig==NULL)
Chris@69 84 return 0;
Chris@69 85 for (shift=0;shift<=maxshift;shift++)
Chris@69 86 {
Chris@69 87 /* The cosine table covers enough points that a separate sine table isn't necessary */
Chris@69 88 #if defined(FIXED_POINT)
Chris@69 89 #if 1
Chris@69 90 for (i=0;i<N2;i++)
Chris@69 91 trig[i] = TRIG_UPSCALE*celt_cos_norm(DIV32(ADD32(SHL32(EXTEND32(i),17),N2+16384),N));
Chris@69 92 #else
Chris@69 93 for (i=0;i<N2;i++)
Chris@69 94 trig[i] = (kiss_twiddle_scalar)MAX32(-32767,MIN32(32767,floor(.5+32768*cos(2*M_PI*(i+.125)/N))));
Chris@69 95 #endif
Chris@69 96 #else
Chris@69 97 for (i=0;i<N2;i++)
Chris@69 98 trig[i] = (kiss_twiddle_scalar)cos(2*PI*(i+.125)/N);
Chris@69 99 #endif
Chris@69 100 trig += N2;
Chris@69 101 N2 >>= 1;
Chris@69 102 N >>= 1;
Chris@69 103 }
Chris@69 104 return 1;
Chris@69 105 }
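The trig buffer above packs one cosine table per shift level back to back: level s holds N2>>s entries, so the total is 2*N2 - (N2>>maxshift) = N - (N2>>maxshift), which is exactly the opus_alloc() size. A small standalone sketch of that arithmetic, using hypothetical values N = 1920 and maxshift = 3 (chosen for illustration, not taken from this file):

/* Illustration of the twiddle-table layout arithmetic in clt_mdct_init().
   N and maxshift are hypothetical example values. */
#include <stdio.h>

int main(void)
{
    const int N = 1920, maxshift = 3;
    const int N2 = N >> 1;
    int total = 0;
    for (int shift = 0; shift <= maxshift; shift++) {
        int level_size = N2 >> shift;   /* entries generated for this shift level */
        printf("shift %d: %d entries\n", shift, level_size);
        total += level_size;
    }
    /* Prints: total 1800 == 1800, i.e. N - (N2 >> maxshift). */
    printf("total %d == %d\n", total, N - (N2 >> maxshift));
    return 0;
}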
Chris@69 106
Chris@69 107 void clt_mdct_clear(mdct_lookup *l, int arch)
Chris@69 108 {
Chris@69 109 int i;
Chris@69 110 for (i=0;i<=l->maxshift;i++)
Chris@69 111 opus_fft_free(l->kfft[i], arch);
Chris@69 112 opus_free((kiss_twiddle_scalar*)l->trig);
Chris@69 113 }
Chris@69 114
Chris@69 115 #endif /* CUSTOM_MODES */
Chris@69 116
Chris@69 117 /* Forward MDCT trashes the input array */
Chris@69 118 #ifndef OVERRIDE_clt_mdct_forward
Chris@69 119 void clt_mdct_forward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * OPUS_RESTRICT out,
Chris@69 120 const opus_val16 *window, int overlap, int shift, int stride, int arch)
Chris@69 121 {
Chris@69 122 int i;
Chris@69 123 int N, N2, N4;
Chris@69 124 VARDECL(kiss_fft_scalar, f);
Chris@69 125 VARDECL(kiss_fft_cpx, f2);
Chris@69 126 const kiss_fft_state *st = l->kfft[shift];
Chris@69 127 const kiss_twiddle_scalar *trig;
Chris@69 128 opus_val16 scale;
Chris@69 129 #ifdef FIXED_POINT
Chris@69 130 /* Allows us to scale with MULT16_32_Q16(), which is faster than
Chris@69 131 MULT16_32_Q15() on ARM. */
Chris@69 132 int scale_shift = st->scale_shift-1;
Chris@69 133 #endif
Chris@69 134 SAVE_STACK;
Chris@69 135 (void)arch;
Chris@69 136 scale = st->scale;
Chris@69 137
Chris@69 138 N = l->n;
Chris@69 139 trig = l->trig;
Chris@69 140 for (i=0;i<shift;i++)
Chris@69 141 {
Chris@69 142 N >>= 1;
Chris@69 143 trig += N;
Chris@69 144 }
Chris@69 145 N2 = N>>1;
Chris@69 146 N4 = N>>2;
Chris@69 147
Chris@69 148 ALLOC(f, N2, kiss_fft_scalar);
Chris@69 149 ALLOC(f2, N4, kiss_fft_cpx);
Chris@69 150
Chris@69 151 /* Consider the input to be composed of four blocks: [a, b, c, d] */
Chris@69 152 /* Window, shuffle, fold */
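/* The three loops below fold the windowed input down to N4 complex values:
   the first and third loops apply the window to the overlap regions at either
   end of the input, while the middle loop copies the un-windowed middle
   samples straight through. */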
Chris@69 153 {
Chris@69 154 /* Temp pointers to make it really clear to the compiler what we're doing */
Chris@69 155 const kiss_fft_scalar * OPUS_RESTRICT xp1 = in+(overlap>>1);
Chris@69 156 const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+N2-1+(overlap>>1);
Chris@69 157 kiss_fft_scalar * OPUS_RESTRICT yp = f;
Chris@69 158 const opus_val16 * OPUS_RESTRICT wp1 = window+(overlap>>1);
Chris@69 159 const opus_val16 * OPUS_RESTRICT wp2 = window+(overlap>>1)-1;
Chris@69 160 for(i=0;i<((overlap+3)>>2);i++)
Chris@69 161 {
Chris@69 162 /* Real part arranged as -d-cR, Imag part arranged as -b+aR */
Chris@69 163 *yp++ = MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2);
Chris@69 164 *yp++ = MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]);
Chris@69 165 xp1+=2;
Chris@69 166 xp2-=2;
Chris@69 167 wp1+=2;
Chris@69 168 wp2-=2;
Chris@69 169 }
Chris@69 170 wp1 = window;
Chris@69 171 wp2 = window+overlap-1;
Chris@69 172 for(;i<N4-((overlap+3)>>2);i++)
Chris@69 173 {
Chris@69 174 /* Real part arranged as a-bR, Imag part arranged as -c-dR */
Chris@69 175 *yp++ = *xp2;
Chris@69 176 *yp++ = *xp1;
Chris@69 177 xp1+=2;
Chris@69 178 xp2-=2;
Chris@69 179 }
Chris@69 180 for(;i<N4;i++)
Chris@69 181 {
Chris@69 182 /* Real part arranged as a-bR, Imag part arranged as -c-dR */
Chris@69 183 *yp++ = -MULT16_32_Q15(*wp1, xp1[-N2]) + MULT16_32_Q15(*wp2, *xp2);
Chris@69 184 *yp++ = MULT16_32_Q15(*wp2, *xp1) + MULT16_32_Q15(*wp1, xp2[N2]);
Chris@69 185 xp1+=2;
Chris@69 186 xp2-=2;
Chris@69 187 wp1+=2;
Chris@69 188 wp2-=2;
Chris@69 189 }
Chris@69 190 }
Chris@69 191 /* Pre-rotation */
Chris@69 192 {
Chris@69 193 kiss_fft_scalar * OPUS_RESTRICT yp = f;
Chris@69 194 const kiss_twiddle_scalar *t = &trig[0];
Chris@69 195 for(i=0;i<N4;i++)
Chris@69 196 {
Chris@69 197 kiss_fft_cpx yc;
Chris@69 198 kiss_twiddle_scalar t0, t1;
Chris@69 199 kiss_fft_scalar re, im, yr, yi;
Chris@69 200 t0 = t[i];
Chris@69 201 t1 = t[N4+i];
Chris@69 202 re = *yp++;
Chris@69 203 im = *yp++;
Chris@69 204 yr = S_MUL(re,t0) - S_MUL(im,t1);
Chris@69 205 yi = S_MUL(im,t0) + S_MUL(re,t1);
Chris@69 206 yc.r = yr;
Chris@69 207 yc.i = yi;
Chris@69 208 yc.r = PSHR32(MULT16_32_Q16(scale, yc.r), scale_shift);
Chris@69 209 yc.i = PSHR32(MULT16_32_Q16(scale, yc.i), scale_shift);
Chris@69 210 f2[st->bitrev[i]] = yc;
Chris@69 211 }
Chris@69 212 }
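/* Note on the pre-rotation above: with the float twiddles
   trig[i] = cos(2*pi*(i+0.125)/N), the entry t[N4+i] equals -sin(theta)
   for theta = 2*pi*(i+0.125)/N, so yr + j*yi = (re + j*im)*(cos(theta) - j*sin(theta)),
   i.e. each folded sample is rotated by -theta (and scaled in fixed point)
   before being stored at the bit-reversed index expected by opus_fft_impl(). */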
Chris@69 213
Chris@69 214 /* N/4 complex FFT, does not downscale anymore */
Chris@69 215 opus_fft_impl(st, f2);
Chris@69 216
Chris@69 217 /* Post-rotate */
Chris@69 218 {
Chris@69 219 /* Temp pointers to make it really clear to the compiler what we're doing */
Chris@69 220 const kiss_fft_cpx * OPUS_RESTRICT fp = f2;
Chris@69 221 kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
Chris@69 222 kiss_fft_scalar * OPUS_RESTRICT yp2 = out+stride*(N2-1);
Chris@69 223 const kiss_twiddle_scalar *t = &trig[0];
Chris@69 225 for(i=0;i<N4;i++)
Chris@69 226 {
Chris@69 227 kiss_fft_scalar yr, yi;
Chris@69 228 yr = S_MUL(fp->i,t[N4+i]) - S_MUL(fp->r,t[i]);
Chris@69 229 yi = S_MUL(fp->r,t[N4+i]) + S_MUL(fp->i,t[i]);
Chris@69 230 *yp1 = yr;
Chris@69 231 *yp2 = yi;
Chris@69 232 fp++;
Chris@69 233 yp1 += 2*stride;
Chris@69 234 yp2 -= 2*stride;
Chris@69 235 }
Chris@69 236 }
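/* The post-rotation above writes the N2 outputs from both ends in a single
   pass: iteration i stores yr at out[2*i*stride] and yi at
   out[(N2-1-2*i)*stride], i.e. even-numbered bins ascending from the front
   and odd-numbered bins descending from the back. */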
Chris@69 237 RESTORE_STACK;
Chris@69 238 }
Chris@69 239 #endif /* OVERRIDE_clt_mdct_forward */
Chris@69 240
Chris@69 241 #ifndef OVERRIDE_clt_mdct_backward
Chris@69 242 void clt_mdct_backward_c(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * OPUS_RESTRICT out,
Chris@69 243 const opus_val16 * OPUS_RESTRICT window, int overlap, int shift, int stride, int arch)
Chris@69 244 {
Chris@69 245 int i;
Chris@69 246 int N, N2, N4;
Chris@69 247 const kiss_twiddle_scalar *trig;
Chris@69 248 (void) arch;
Chris@69 249
Chris@69 250 N = l->n;
Chris@69 251 trig = l->trig;
Chris@69 252 for (i=0;i<shift;i++)
Chris@69 253 {
Chris@69 254 N >>= 1;
Chris@69 255 trig += N;
Chris@69 256 }
Chris@69 257 N2 = N>>1;
Chris@69 258 N4 = N>>2;
Chris@69 259
Chris@69 260 /* Pre-rotate */
Chris@69 261 {
Chris@69 262 /* Temp pointers to make it really clear to the compiler what we're doing */
Chris@69 263 const kiss_fft_scalar * OPUS_RESTRICT xp1 = in;
Chris@69 264 const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+stride*(N2-1);
Chris@69 265 kiss_fft_scalar * OPUS_RESTRICT yp = out+(overlap>>1);
Chris@69 266 const kiss_twiddle_scalar * OPUS_RESTRICT t = &trig[0];
Chris@69 267 const opus_int16 * OPUS_RESTRICT bitrev = l->kfft[shift]->bitrev;
Chris@69 268 for(i=0;i<N4;i++)
Chris@69 269 {
Chris@69 270 int rev;
Chris@69 271 kiss_fft_scalar yr, yi;
Chris@69 272 rev = *bitrev++;
Chris@69 273 yr = ADD32_ovflw(S_MUL(*xp2, t[i]), S_MUL(*xp1, t[N4+i]));
Chris@69 274 yi = SUB32_ovflw(S_MUL(*xp1, t[i]), S_MUL(*xp2, t[N4+i]));
Chris@69 275 /* We swap real and imag because we use an FFT instead of an IFFT. */
Chris@69 276 yp[2*rev+1] = yr;
Chris@69 277 yp[2*rev] = yi;
Chris@69 278 /* Storing the pre-rotation directly in the bitrev order. */
Chris@69 279 xp1+=2*stride;
Chris@69 280 xp2-=2*stride;
Chris@69 281 }
Chris@69 282 }
Chris@69 283
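/* The pre-rotation above stored N4 interleaved (real, imag) pairs starting at
   out+(overlap>>1), so that region can be reinterpreted as kiss_fft_cpx and
   transformed in place. */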
Chris@69 284 opus_fft_impl(l->kfft[shift], (kiss_fft_cpx*)(out+(overlap>>1)));
Chris@69 285
Chris@69 286 /* Post-rotate and de-shuffle from both ends of the buffer at once to make
Chris@69 287 it in-place. */
Chris@69 288 {
Chris@69 289 kiss_fft_scalar * yp0 = out+(overlap>>1);
Chris@69 290 kiss_fft_scalar * yp1 = out+(overlap>>1)+N2-2;
Chris@69 291 const kiss_twiddle_scalar *t = &trig[0];
Chris@69 292 /* Loop to (N4+1)>>1 to handle odd N4. When N4 is odd, the
Chris@69 293 middle pair will be computed twice. */
Chris@69 294 for(i=0;i<(N4+1)>>1;i++)
Chris@69 295 {
Chris@69 296 kiss_fft_scalar re, im, yr, yi;
Chris@69 297 kiss_twiddle_scalar t0, t1;
Chris@69 298 /* We swap real and imag because we're using an FFT instead of an IFFT. */
Chris@69 299 re = yp0[1];
Chris@69 300 im = yp0[0];
Chris@69 301 t0 = t[i];
Chris@69 302 t1 = t[N4+i];
Chris@69 303 /* We'd scale up by 2 here, but instead it's done when mixing the windows */
Chris@69 304 yr = ADD32_ovflw(S_MUL(re,t0), S_MUL(im,t1));
Chris@69 305 yi = SUB32_ovflw(S_MUL(re,t1), S_MUL(im,t0));
Chris@69 306 /* We swap real and imag because we're using an FFT instead of an IFFT. */
Chris@69 307 re = yp1[1];
Chris@69 308 im = yp1[0];
Chris@69 309 yp0[0] = yr;
Chris@69 310 yp1[1] = yi;
Chris@69 311
Chris@69 312 t0 = t[(N4-i-1)];
Chris@69 313 t1 = t[(N2-i-1)];
Chris@69 314 /* We'd scale up by 2 here, but instead it's done when mixing the windows */
Chris@69 315 yr = ADD32_ovflw(S_MUL(re,t0), S_MUL(im,t1));
Chris@69 316 yi = SUB32_ovflw(S_MUL(re,t1), S_MUL(im,t0));
Chris@69 317 yp1[0] = yr;
Chris@69 318 yp0[1] = yi;
Chris@69 319 yp0 += 2;
Chris@69 320 yp1 -= 2;
Chris@69 321 }
Chris@69 322 }
Chris@69 323
Chris@69 324 /* Mirror on both sides for TDAC */
Chris@69 325 {
Chris@69 326 kiss_fft_scalar * OPUS_RESTRICT xp1 = out+overlap-1;
Chris@69 327 kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
Chris@69 328 const opus_val16 * OPUS_RESTRICT wp1 = window;
Chris@69 329 const opus_val16 * OPUS_RESTRICT wp2 = window+overlap-1;
Chris@69 330
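/* Each pass of the loop below combines out[i] with out[overlap-1-i] using
   window[i] and window[overlap-1-i], so the overlap region ends up windowed
   and mirrored as required for TDAC overlap-add with the adjacent frame. */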
Chris@69 331 for(i = 0; i < overlap/2; i++)
Chris@69 332 {
Chris@69 333 kiss_fft_scalar x1, x2;
Chris@69 334 x1 = *xp1;
Chris@69 335 x2 = *yp1;
Chris@69 336 *yp1++ = SUB32_ovflw(MULT16_32_Q15(*wp2, x2), MULT16_32_Q15(*wp1, x1));
Chris@69 337 *xp1-- = ADD32_ovflw(MULT16_32_Q15(*wp1, x2), MULT16_32_Q15(*wp2, x1));
Chris@69 338 wp1++;
Chris@69 339 wp2--;
Chris@69 340 }
Chris@69 341 }
Chris@69 342 }
Chris@69 343 #endif /* OVERRIDE_clt_mdct_backward */