/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
cannam@154: */ cannam@154: cannam@154: #ifdef HAVE_CONFIG_H cannam@154: #include "config.h" cannam@154: #endif cannam@154: cannam@154: #include "mathops.h" cannam@154: #include "cwrs.h" cannam@154: #include "vq.h" cannam@154: #include "arch.h" cannam@154: #include "os_support.h" cannam@154: #include "bands.h" cannam@154: #include "rate.h" cannam@154: #include "pitch.h" cannam@154: cannam@154: #ifndef OVERRIDE_vq_exp_rotation1 cannam@154: static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s) cannam@154: { cannam@154: int i; cannam@154: opus_val16 ms; cannam@154: celt_norm *Xptr; cannam@154: Xptr = X; cannam@154: ms = NEG16(s); cannam@154: for (i=0;i=0;i--) cannam@154: { cannam@154: celt_norm x1, x2; cannam@154: x1 = Xptr[0]; cannam@154: x2 = Xptr[stride]; cannam@154: Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15)); cannam@154: *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15)); cannam@154: } cannam@154: } cannam@154: #endif /* OVERRIDE_vq_exp_rotation1 */ cannam@154: cannam@154: void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread) cannam@154: { cannam@154: static const int SPREAD_FACTOR[3]={15,10,5}; cannam@154: int i; cannam@154: opus_val16 c, s; cannam@154: opus_val16 gain, theta; cannam@154: int stride2=0; cannam@154: int factor; cannam@154: cannam@154: if (2*K>=len || spread==SPREAD_NONE) cannam@154: return; cannam@154: factor = SPREAD_FACTOR[spread-1]; cannam@154: cannam@154: gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K)); cannam@154: theta = HALF16(MULT16_16_Q15(gain,gain)); cannam@154: cannam@154: c = celt_cos_norm(EXTEND32(theta)); cannam@154: s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */ cannam@154: cannam@154: if (len>=8*stride) cannam@154: { cannam@154: stride2 = 1; cannam@154: /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding. 
cannam@154: It's basically incrementing long as (stride2+0.5)^2 < len/stride. */ cannam@154: while ((stride2*stride2+stride2)*stride + (stride>>2) < len) cannam@154: stride2++; cannam@154: } cannam@154: /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for cannam@154: extract_collapse_mask().*/ cannam@154: len = celt_udiv(len, stride); cannam@154: for (i=0;i>1; cannam@154: #endif cannam@154: t = VSHR32(Ryy, 2*(k-7)); cannam@154: g = MULT16_16_P15(celt_rsqrt_norm(t),gain); cannam@154: cannam@154: i=0; cannam@154: do cannam@154: X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1)); cannam@154: while (++i < N); cannam@154: } cannam@154: cannam@154: static unsigned extract_collapse_mask(int *iy, int N, int B) cannam@154: { cannam@154: unsigned collapse_mask; cannam@154: int N0; cannam@154: int i; cannam@154: if (B<=1) cannam@154: return 1; cannam@154: /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for cannam@154: exp_rotation().*/ cannam@154: N0 = celt_udiv(N, B); cannam@154: collapse_mask = 0; cannam@154: i=0; do { cannam@154: int j; cannam@154: unsigned tmp=0; cannam@154: j=0; do { cannam@154: tmp |= iy[i*N0+j]; cannam@154: } while (++j (N>>1)) cannam@154: { cannam@154: opus_val16 rcp; cannam@154: j=0; do { cannam@154: sum += X[j]; cannam@154: } while (++j EPSILON && sum < 64)) cannam@154: #endif cannam@154: { cannam@154: X[0] = QCONST16(1.f,14); cannam@154: j=1; do cannam@154: X[j]=0; cannam@154: while (++j=0); cannam@154: cannam@154: /* This should never happen, but just in case it does (e.g. on silence) cannam@154: we fill the first bin with pulses. 
*/ cannam@154: #ifdef FIXED_POINT_DEBUG cannam@154: celt_sig_assert(pulsesLeft<=N+3); cannam@154: #endif cannam@154: if (pulsesLeft > N+3) cannam@154: { cannam@154: opus_val16 tmp = (opus_val16)pulsesLeft; cannam@154: yy = MAC16_16(yy, tmp, tmp); cannam@154: yy = MAC16_16(yy, tmp, y[0]); cannam@154: iy[0] += pulsesLeft; cannam@154: pulsesLeft=0; cannam@154: } cannam@154: cannam@154: for (i=0;i= best_num/best_den, but that way cannam@154: we can do it without any division */ cannam@154: /* OPT: It's not clear whether a cmov is faster than a branch here cannam@154: since the condition is more often false than true and using cannam@154: a cmov introduces data dependencies across iterations. The optimal cannam@154: choice may be architecture-dependent. */ cannam@154: if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num))) cannam@154: { cannam@154: best_den = Ryy; cannam@154: best_num = Rxy; cannam@154: best_id = j; cannam@154: } cannam@154: } while (++j0, "alg_quant() needs at least one pulse"); cannam@154: celt_assert2(N>1, "alg_quant() needs at least two dimensions"); cannam@154: cannam@154: /* Covers vectorization by up to 4. */ cannam@154: ALLOC(iy, N+3, int); cannam@154: cannam@154: exp_rotation(X, N, 1, B, K, spread); cannam@154: cannam@154: yy = op_pvq_search(X, iy, K, N, arch); cannam@154: cannam@154: encode_pulses(iy, N, K, enc); cannam@154: cannam@154: if (resynth) cannam@154: { cannam@154: normalise_residual(iy, X, N, yy, gain); cannam@154: exp_rotation(X, N, -1, B, K, spread); cannam@154: } cannam@154: cannam@154: collapse_mask = extract_collapse_mask(iy, N, B); cannam@154: RESTORE_STACK; cannam@154: return collapse_mask; cannam@154: } cannam@154: cannam@154: /** Decode pulse vector and combine the result with the pitch vector to produce cannam@154: the final normalised signal in the current band. 
*/ cannam@154: unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B, cannam@154: ec_dec *dec, opus_val16 gain) cannam@154: { cannam@154: opus_val32 Ryy; cannam@154: unsigned collapse_mask; cannam@154: VARDECL(int, iy); cannam@154: SAVE_STACK; cannam@154: cannam@154: celt_assert2(K>0, "alg_unquant() needs at least one pulse"); cannam@154: celt_assert2(N>1, "alg_unquant() needs at least two dimensions"); cannam@154: ALLOC(iy, N, int); cannam@154: Ryy = decode_pulses(iy, N, K, dec); cannam@154: normalise_residual(iy, X, N, Ryy, gain); cannam@154: exp_rotation(X, N, -1, B, K, spread); cannam@154: collapse_mask = extract_collapse_mask(iy, N, B); cannam@154: RESTORE_STACK; cannam@154: return collapse_mask; cannam@154: } cannam@154: cannam@154: #ifndef OVERRIDE_renormalise_vector cannam@154: void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch) cannam@154: { cannam@154: int i; cannam@154: #ifdef FIXED_POINT cannam@154: int k; cannam@154: #endif cannam@154: opus_val32 E; cannam@154: opus_val16 g; cannam@154: opus_val32 t; cannam@154: celt_norm *xptr; cannam@154: E = EPSILON + celt_inner_prod(X, X, N, arch); cannam@154: #ifdef FIXED_POINT cannam@154: k = celt_ilog2(E)>>1; cannam@154: #endif cannam@154: t = VSHR32(E, 2*(k-7)); cannam@154: g = MULT16_16_P15(celt_rsqrt_norm(t),gain); cannam@154: cannam@154: xptr = X; cannam@154: for (i=0;i