/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "cwrs.h"
#include "vq.h"
#include "arch.h"
#include "os_support.h"
#include "bands.h"
#include "rate.h"
#include "pitch.h"

#ifndef OVERRIDE_vq_exp_rotation1
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
   /* Forward pass: rotate each pair (X[i], X[i+stride]). */
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr++      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   /* Backward pass over the same pairs, in reverse order. */
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr--      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */

void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */

   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing stride2 as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}
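/* Worked example for the stride2 search in exp_rotation() above (the numbers
   are illustrative, not taken from the original source): the loop test
      (stride2*stride2+stride2)*stride + (stride>>2) < len
   is (stride2+0.5)^2 < len/stride with both sides multiplied by stride,
   i.e. (stride2^2 + stride2 + 0.25)*stride < len, so the loop leaves stride2
   equal to sqrt(len/stride) rounded to the nearest integer.  For len==96 and
   stride==4 (len/stride==24, sqrt(24)~=4.9) the test still passes at
   stride2==4 (80+1 < 96) and fails at stride2==5 (120+1 >= 96), so the
   result is stride2==5. */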
/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   k = celt_ilog2(Ryy)>>1;
#endif
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}

static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned collapse_mask;
   int N0;
   int i;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      exp_rotation().*/
   N0 = celt_udiv(N, B);
   collapse_mask = 0;
   i=0; do {
      int j;
      unsigned tmp=0;
      j=0; do {
         tmp |= iy[i*N0+j];
      } while (++j<N0);
      collapse_mask |= (tmp!=0)<<i;
   } while (++i<B);
   return collapse_mask;
}

opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;
   opus_val16 yy;
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      }  while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         y[j] *= 2;
         pulsesLeft -= iy[j];
      }  while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: This is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}
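/* Note on the selection rule in op_pvq_search_c() above (an illustrative
   restatement, nothing beyond what the code already does): adding one pulse
   at position j gives the candidate correlation Rxy = xy + X[j] (up to the
   fixed-point shift by rshift) and the candidate energy
   Ryy = ||iy||^2 + 2*iy[j] + 1; since y[] is kept as 2*iy[] and the +1 is
   added once per outer iteration, this is simply Ryy = yy + y[j].  The best
   position maximises Rxy^2/Ryy.  Because Rxy is squared in place, the test
   MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num) compares Rxy^2/Ryy
   against best_num/best_den by cross-multiplication, avoiding a division for
   every candidate position. */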
unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc,
      opus_val16 gain, int resynth, int arch)
{
   VARDECL(int, iy);
   opus_val16 yy;
   unsigned collapse_mask;
   SAVE_STACK;

   celt_assert2(K>0, "alg_quant() needs at least one pulse");
   celt_assert2(N>1, "alg_quant() needs at least two dimensions");

   /* Covers vectorization by up to 4. */
   ALLOC(iy, N+3, int);

   exp_rotation(X, N, 1, B, K, spread);

   yy = op_pvq_search(X, iy, K, N, arch);

   encode_pulses(iy, N, K, enc);

   if (resynth)
   {
      normalise_residual(iy, X, N, yy, gain);
      exp_rotation(X, N, -1, B, K, spread);
   }

   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

/** Decode pulse vector and combine the result with the pitch vector to produce
    the final normalised signal in the current band. */
unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
      ec_dec *dec, opus_val16 gain)
{
   opus_val32 Ryy;
   unsigned collapse_mask;
   VARDECL(int, iy);
   SAVE_STACK;

   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
   ALLOC(iy, N, int);
   Ryy = decode_pulses(iy, N, K, dec);
   normalise_residual(iy, X, N, Ryy, gain);
   exp_rotation(X, N, -1, B, K, spread);
   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

#ifndef OVERRIDE_renormalise_vector
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
}
#endif /* OVERRIDE_renormalise_vector */
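#if 0
/* Minimal floating-point sketch of what renormalise_vector() above computes
   (illustrative only and never compiled; it omits the fixed-point scaling by
   k and the celt_rsqrt_norm() range reduction, and it assumes <math.h> for
   sqrt()): scale X so that its L2 norm becomes `gain`. */
static void renormalise_vector_float_sketch(float *X, int N, float gain)
{
   float E = 1e-15f;   /* small bias, standing in for EPSILON */
   float g;
   int i;
   for (i=0;i<N;i++)
      E += X[i]*X[i];
   g = gain/(float)sqrt(E);
   for (i=0;i<N;i++)
      X[i] *= g;
}
#endif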