annotate src/opus-1.3/celt/vq.c @ 79:91c729825bca pa_catalina

Update build for AUDIO_COMPONENT_FIX
author Chris Cannam
date Wed, 30 Oct 2019 12:40:34 +0000
parents 7aeed7906520
children
/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "cwrs.h"
#include "vq.h"
#include "arch.h"
#include "os_support.h"
#include "bands.h"
#include "rate.h"
#include "pitch.h"

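/* Applies a stride-spaced two-dimensional rotation (a Givens rotation with
   cosine c and sine s) to the pairs (X[j], X[j+stride]), in a forward pass
   followed by a backward pass over the vector. Platform-specific code can
   provide its own version by defining OVERRIDE_vq_exp_rotation1. */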
#ifndef OVERRIDE_vq_exp_rotation1
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr++ = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */

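/* Spreading rotation applied around the PVQ search. In the float build the
   expressions below reduce to roughly:
      gain  = len / (len + factor*K)
      theta = 0.5 * gain^2
      c = cos(.5*pi*theta),  s = cos(.5*pi*(1-theta)) = sin(.5*pi*theta)
   with factor taken from SPREAD_FACTOR[] according to the spread decision.
   dir=+1 is used before the search in alg_quant(); dir=-1 applies the inverse
   rotation when resynthesising (see alg_quant() and alg_unquant()). */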
void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */

   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}

/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   k = celt_ilog2(Ryy)>>1;
#endif
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}

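/* Builds a B-bit mask in which bit i is set if sub-vector i (of length N/B)
   contains at least one non-zero pulse. */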
static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned collapse_mask;
   int N0;
   int i;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      exp_rotation().*/
   N0 = celt_udiv(N, B);
   collapse_mask = 0;
   i=0; do {
      int j;
      unsigned tmp=0;
      j=0; do {
         tmp |= iy[i*N0+j];
      } while (++j<N0);
      collapse_mask |= (tmp!=0)<<i;
   } while (++i<B);
   return collapse_mask;
}

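/* C reference of the PVQ codebook search: finds a vector iy of N integers
   with sum(abs(iy[j])) == K that (approximately) maximises the correlation
   xy/sqrt(yy) with the input X. When K > N/2, a projection onto the pyramid
   provides a starting point; the remaining pulses are then placed one at a
   time, greedily maximising Rxy^2/Ryy. Returns yy, the squared norm of iy. */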
opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;
   opus_val16 yy;
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      } while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         y[j] *= 2;
         pulsesLeft -= iy[j];
      } while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: This is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}

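/* Encoder side of the algebraic (PVQ) quantiser: spreads X with exp_rotation(),
   searches for the best K-pulse codeword, range-encodes its index and, when
   resynth is non-zero, rebuilds the normalised (times gain) vector the decoder
   will see. Returns the collapse mask for the band. */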
unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc,
      opus_val16 gain, int resynth, int arch)
{
   VARDECL(int, iy);
   opus_val16 yy;
   unsigned collapse_mask;
   SAVE_STACK;

   celt_assert2(K>0, "alg_quant() needs at least one pulse");
   celt_assert2(N>1, "alg_quant() needs at least two dimensions");

   /* Covers vectorization by up to 4. */
   ALLOC(iy, N+3, int);

   exp_rotation(X, N, 1, B, K, spread);

   yy = op_pvq_search(X, iy, K, N, arch);

   encode_pulses(iy, N, K, enc);

   if (resynth)
   {
      normalise_residual(iy, X, N, yy, gain);
      exp_rotation(X, N, -1, B, K, spread);
   }

   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

/** Decode pulse vector and combine the result with the pitch vector to produce
    the final normalised signal in the current band. */
unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
      ec_dec *dec, opus_val16 gain)
{
   opus_val32 Ryy;
   unsigned collapse_mask;
   VARDECL(int, iy);
   SAVE_STACK;

   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
   ALLOC(iy, N, int);
   Ryy = decode_pulses(iy, N, K, dec);
   normalise_residual(iy, X, N, Ryy, gain);
   exp_rotation(X, N, -1, B, K, spread);
   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

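/* Scales X so that its L2 norm becomes (approximately) gain, i.e. multiplies
   every element by gain/sqrt(E), where E is the energy of X. */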
#ifndef OVERRIDE_renormalise_vector
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}
#endif /* OVERRIDE_renormalise_vector */

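/* Returns the angle between the "mid" and "side" components of a vector pair
   as floor(.5 + 16384*(2/pi)*atan2(||side||, ||mid||)), i.e. a Q14 value in
   0..16384. With stereo != 0, mid and side are formed as (X+Y)/2 and (X-Y)/2;
   otherwise X and Y are taken to be the mid and side signals directly. */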
int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)
{
   int i;
   int itheta;
   opus_val16 mid, side;
   opus_val32 Emid, Eside;

   Emid = Eside = EPSILON;
   if (stereo)
   {
      for (i=0;i<N;i++)
      {
         celt_norm m, s;
         m = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
         s = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
         Emid = MAC16_16(Emid, m, m);
         Eside = MAC16_16(Eside, s, s);
      }
   } else {
      Emid += celt_inner_prod(X, X, N, arch);
      Eside += celt_inner_prod(Y, Y, N, arch);
   }
   mid = celt_sqrt(Emid);
   side = celt_sqrt(Eside);
#ifdef FIXED_POINT
   /* 0.63662 = 2/pi */
   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
#else
   itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid));
#endif

   return itheta;
}
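
/* Illustrative call sequence (a sketch, not code from this file): assuming X[]
   holds the N normalised coefficients of one band, K pulses have been
   allocated, B is the block count and enc/dec are initialised range coders,
   an encoder and decoder would typically mirror each other as:

      cm = alg_quant(X, N, K, SPREAD_NORMAL, B, enc, gain, resynth, arch);
      ...
      cm = alg_unquant(X, N, K, SPREAD_NORMAL, B, dec, gain);

   SPREAD_NORMAL is one of the spread constants declared in bands.h; the
   actual callers live in celt/bands.c. */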