annotate ffmpeg/libavcodec/amrwbdec.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * AMR wideband decoder
yading@10 3 * Copyright (c) 2010 Marcelo Galvao Povoa
yading@10 4 *
yading@10 5 * This file is part of FFmpeg.
yading@10 6 *
yading@10 7 * FFmpeg is free software; you can redistribute it and/or
yading@10 8 * modify it under the terms of the GNU Lesser General Public
yading@10 9 * License as published by the Free Software Foundation; either
yading@10 10 * version 2.1 of the License, or (at your option) any later version.
yading@10 11 *
yading@10 12 * FFmpeg is distributed in the hope that it will be useful,
yading@10 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 15 * Lesser General Public License for more details.
yading@10 16 *
yading@10 17 * You should have received a copy of the GNU Lesser General Public
yading@10 18 * License along with FFmpeg; if not, write to the Free Software
yading@10 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 20 */
yading@10 21
yading@10 22 /**
yading@10 23 * @file
yading@10 24 * AMR wideband decoder
yading@10 25 */
yading@10 26
yading@10 27 #include "libavutil/channel_layout.h"
yading@10 28 #include "libavutil/common.h"
yading@10 29 #include "libavutil/float_dsp.h"
yading@10 30 #include "libavutil/lfg.h"
yading@10 31
yading@10 32 #include "avcodec.h"
yading@10 33 #include "lsp.h"
yading@10 34 #include "celp_filters.h"
yading@10 35 #include "celp_math.h"
yading@10 36 #include "acelp_filters.h"
yading@10 37 #include "acelp_vectors.h"
yading@10 38 #include "acelp_pitch_delay.h"
yading@10 39 #include "internal.h"
yading@10 40
yading@10 41 #define AMR_USE_16BIT_TABLES
yading@10 42 #include "amr.h"
yading@10 43
yading@10 44 #include "amrwbdata.h"
yading@10 45 #include "mips/amrwbdec_mips.h"
yading@10 46
yading@10 47 typedef struct {
yading@10 48 AMRWBFrame frame; ///< AMRWB parameters decoded from bitstream
yading@10 49 enum Mode fr_cur_mode; ///< mode index of current frame
yading@10 50 uint8_t fr_quality; ///< frame quality index (FQI)
yading@10 51 float isf_cur[LP_ORDER]; ///< working ISF vector from current frame
yading@10 52 float isf_q_past[LP_ORDER]; ///< quantized ISF vector of the previous frame
yading@10 53 float isf_past_final[LP_ORDER]; ///< final processed ISF vector of the previous frame
yading@10 54 double isp[4][LP_ORDER]; ///< ISP vectors from current frame
yading@10 55 double isp_sub4_past[LP_ORDER]; ///< ISP vector for the 4th subframe of the previous frame
yading@10 56
yading@10 57 float lp_coef[4][LP_ORDER]; ///< Linear Prediction Coefficients from ISP vector
yading@10 58
yading@10 59 uint8_t base_pitch_lag; ///< integer part of pitch lag for the next relative subframe
yading@10 60 uint8_t pitch_lag_int; ///< integer part of pitch lag of the previous subframe
yading@10 61
yading@10 62 float excitation_buf[AMRWB_P_DELAY_MAX + LP_ORDER + 2 + AMRWB_SFR_SIZE]; ///< current excitation and all necessary excitation history
yading@10 63 float *excitation; ///< points to current excitation in excitation_buf[]
yading@10 64
yading@10 65 float pitch_vector[AMRWB_SFR_SIZE]; ///< adaptive codebook (pitch) vector for current subframe
yading@10 66 float fixed_vector[AMRWB_SFR_SIZE]; ///< algebraic codebook (fixed) vector for current subframe
yading@10 67
yading@10 68 float prediction_error[4]; ///< quantized prediction errors {20log10(^gamma_gc)} for the previous four subframes
yading@10 69 float pitch_gain[6]; ///< quantized pitch gains for the current and previous five subframes
yading@10 70 float fixed_gain[2]; ///< quantized fixed gains for the current and previous subframes
yading@10 71
yading@10 72 float tilt_coef; ///< {beta_1} related to the voicing of the previous subframe
yading@10 73
yading@10 74 float prev_sparse_fixed_gain; ///< previous fixed gain; used by anti-sparseness to determine "onset"
yading@10 75 uint8_t prev_ir_filter_nr; ///< previous impulse response filter "impNr": 0 - strong, 1 - medium, 2 - none
yading@10 76 float prev_tr_gain; ///< previous initial gain used by noise enhancer for threshold
yading@10 77
yading@10 78 float samples_az[LP_ORDER + AMRWB_SFR_SIZE]; ///< low-band samples and memory from synthesis at 12.8kHz
yading@10 79 float samples_up[UPS_MEM_SIZE + AMRWB_SFR_SIZE]; ///< low-band samples and memory processed for upsampling
yading@10 80 float samples_hb[LP_ORDER_16k + AMRWB_SFR_SIZE_16k]; ///< high-band samples and memory from synthesis at 16kHz
yading@10 81
yading@10 82 float hpf_31_mem[2], hpf_400_mem[2]; ///< previous values in the high pass filters
yading@10 83 float demph_mem[1]; ///< previous value in the de-emphasis filter
yading@10 84 float bpf_6_7_mem[HB_FIR_SIZE]; ///< previous values in the high-band band pass filter
yading@10 85 float lpf_7_mem[HB_FIR_SIZE]; ///< previous values in the high-band low pass filter
yading@10 86
yading@10 87 AVLFG prng; ///< random number generator for white noise excitation
yading@10 88 uint8_t first_frame; ///< flag active during decoding of the first frame
yading@10 89 ACELPFContext acelpf_ctx; ///< context for filters for ACELP-based codecs
yading@10 90 ACELPVContext acelpv_ctx; ///< context for vector operations for ACELP-based codecs
yading@10 91 CELPFContext celpf_ctx; ///< context for filters for CELP-based codecs
yading@10 92 CELPMContext celpm_ctx; ///< context for fixed point math operations
yading@10 93
yading@10 94 } AMRWBContext;
yading@10 95
yading@10 96 static av_cold int amrwb_decode_init(AVCodecContext *avctx)
yading@10 97 {
yading@10 98 AMRWBContext *ctx = avctx->priv_data;
yading@10 99 int i;
yading@10 100
yading@10 101 if (avctx->channels > 1) {
yading@10 102 avpriv_report_missing_feature(avctx, "multi-channel AMR");
yading@10 103 return AVERROR_PATCHWELCOME;
yading@10 104 }
yading@10 105
yading@10 106 avctx->channels = 1;
yading@10 107 avctx->channel_layout = AV_CH_LAYOUT_MONO;
yading@10 108 if (!avctx->sample_rate)
yading@10 109 avctx->sample_rate = 16000;
yading@10 110 avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
yading@10 111
yading@10 112 av_lfg_init(&ctx->prng, 1);
yading@10 113
yading@10 114 ctx->excitation = &ctx->excitation_buf[AMRWB_P_DELAY_MAX + LP_ORDER + 1];
yading@10 115 ctx->first_frame = 1;
yading@10 116
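/* isf_init is presumably stored in Q15 fixed point (this file is built with
 * AMR_USE_16BIT_TABLES); multiplying by 1.0/2^15 converts the table values
 * to the float range used throughout this decoder. */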
yading@10 117 for (i = 0; i < LP_ORDER; i++)
yading@10 118 ctx->isf_past_final[i] = isf_init[i] * (1.0f / (1 << 15));
yading@10 119
yading@10 120 for (i = 0; i < 4; i++)
yading@10 121 ctx->prediction_error[i] = MIN_ENERGY;
yading@10 122
yading@10 123 ff_acelp_filter_init(&ctx->acelpf_ctx);
yading@10 124 ff_acelp_vectors_init(&ctx->acelpv_ctx);
yading@10 125 ff_celp_filter_init(&ctx->celpf_ctx);
yading@10 126 ff_celp_math_init(&ctx->celpm_ctx);
yading@10 127
yading@10 128 return 0;
yading@10 129 }
yading@10 130
yading@10 131 /**
yading@10 132 * Decode the frame header in the "MIME/storage" format. This format
yading@10 133 * is simpler and does not carry the auxiliary frame information.
yading@10 134 *
yading@10 135 * @param[in] ctx The Context
yading@10 136 * @param[in] buf Pointer to the input buffer
yading@10 137 *
yading@10 138 * @return The decoded header length in bytes
yading@10 139 */
yading@10 140 static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf)
yading@10 141 {
yading@10 142 /* Decode frame header (1st octet) */
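/* The storage-format header octet is assumed here to follow RFC 4867:
 *   | P | FT (4 bits) | FQ | 2 padding bits |  (MSB first)
 * which matches the shifts below: bits 6..3 give the mode index and
 * bit 2 the frame quality flag. */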
yading@10 143 ctx->fr_cur_mode = buf[0] >> 3 & 0x0F;
yading@10 144 ctx->fr_quality = (buf[0] & 0x4) == 0x4;
yading@10 145
yading@10 146 return 1;
yading@10 147 }
yading@10 148
yading@10 149 /**
yading@10 150 * Decode quantized ISF vectors using 36-bit indexes (6K60 mode only).
yading@10 151 *
yading@10 152 * @param[in] ind Array of 5 indexes
yading@10 153 * @param[out] isf_q Buffer for isf_q[LP_ORDER]
yading@10 154 *
yading@10 155 */
yading@10 156 static void decode_isf_indices_36b(uint16_t *ind, float *isf_q)
yading@10 157 {
yading@10 158 int i;
yading@10 159
yading@10 160 for (i = 0; i < 9; i++)
yading@10 161 isf_q[i] = dico1_isf[ind[0]][i] * (1.0f / (1 << 15));
yading@10 162
yading@10 163 for (i = 0; i < 7; i++)
yading@10 164 isf_q[i + 9] = dico2_isf[ind[1]][i] * (1.0f / (1 << 15));
yading@10 165
yading@10 166 for (i = 0; i < 5; i++)
yading@10 167 isf_q[i] += dico21_isf_36b[ind[2]][i] * (1.0f / (1 << 15));
yading@10 168
yading@10 169 for (i = 0; i < 4; i++)
yading@10 170 isf_q[i + 5] += dico22_isf_36b[ind[3]][i] * (1.0f / (1 << 15));
yading@10 171
yading@10 172 for (i = 0; i < 7; i++)
yading@10 173 isf_q[i + 9] += dico23_isf_36b[ind[4]][i] * (1.0f / (1 << 15));
yading@10 174 }
yading@10 175
yading@10 176 /**
yading@10 177 * Decode quantized ISF vectors using 46-bit indexes (except 6K60 mode).
yading@10 178 *
yading@10 179 * @param[in] ind Array of 7 indexes
yading@10 180 * @param[out] isf_q Buffer for isf_q[LP_ORDER]
yading@10 181 *
yading@10 182 */
yading@10 183 static void decode_isf_indices_46b(uint16_t *ind, float *isf_q)
yading@10 184 {
yading@10 185 int i;
yading@10 186
yading@10 187 for (i = 0; i < 9; i++)
yading@10 188 isf_q[i] = dico1_isf[ind[0]][i] * (1.0f / (1 << 15));
yading@10 189
yading@10 190 for (i = 0; i < 7; i++)
yading@10 191 isf_q[i + 9] = dico2_isf[ind[1]][i] * (1.0f / (1 << 15));
yading@10 192
yading@10 193 for (i = 0; i < 3; i++)
yading@10 194 isf_q[i] += dico21_isf[ind[2]][i] * (1.0f / (1 << 15));
yading@10 195
yading@10 196 for (i = 0; i < 3; i++)
yading@10 197 isf_q[i + 3] += dico22_isf[ind[3]][i] * (1.0f / (1 << 15));
yading@10 198
yading@10 199 for (i = 0; i < 3; i++)
yading@10 200 isf_q[i + 6] += dico23_isf[ind[4]][i] * (1.0f / (1 << 15));
yading@10 201
yading@10 202 for (i = 0; i < 3; i++)
yading@10 203 isf_q[i + 9] += dico24_isf[ind[5]][i] * (1.0f / (1 << 15));
yading@10 204
yading@10 205 for (i = 0; i < 4; i++)
yading@10 206 isf_q[i + 12] += dico25_isf[ind[6]][i] * (1.0f / (1 << 15));
yading@10 207 }
yading@10 208
yading@10 209 /**
yading@10 210 * Apply mean and past ISF values using the prediction factor.
yading@10 211 * Updates past ISF vector.
yading@10 212 *
yading@10 213 * @param[in,out] isf_q Current quantized ISF
yading@10 214 * @param[in,out] isf_past Past quantized ISF
yading@10 215 *
yading@10 216 */
yading@10 217 static void isf_add_mean_and_past(float *isf_q, float *isf_past)
yading@10 218 {
yading@10 219 int i;
yading@10 220 float tmp;
yading@10 221
yading@10 222 for (i = 0; i < LP_ORDER; i++) {
yading@10 223 tmp = isf_q[i];
yading@10 224 isf_q[i] += isf_mean[i] * (1.0f / (1 << 15));
yading@10 225 isf_q[i] += PRED_FACTOR * isf_past[i];
yading@10 226 isf_past[i] = tmp;
yading@10 227 }
yading@10 228 }
yading@10 229
yading@10 230 /**
yading@10 231 * Interpolate the fourth ISP vector from current and past frames
yading@10 232 * to obtain an ISP vector for each subframe.
yading@10 233 *
yading@10 234 * @param[in,out] isp_q ISPs for each subframe
yading@10 235 * @param[in] isp4_past Past ISP for subframe 4
yading@10 236 */
yading@10 237 static void interpolate_isp(double isp_q[4][LP_ORDER], const double *isp4_past)
yading@10 238 {
yading@10 239 int i, k;
yading@10 240
yading@10 241 for (k = 0; k < 3; k++) {
yading@10 242 float c = isfp_inter[k];
yading@10 243 for (i = 0; i < LP_ORDER; i++)
yading@10 244 isp_q[k][i] = (1.0 - c) * isp4_past[i] + c * isp_q[3][i];
yading@10 245 }
yading@10 246 }
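/* Only the first three subframes are interpolated; isp_q[3] already holds
 * the ISP vector decoded for the current frame and is used directly for
 * the 4th subframe. */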
yading@10 247
yading@10 248 /**
yading@10 249 * Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes).
yading@10 250 * Calculate integer lag and fractional lag always using 1/4 resolution.
yading@10 251 * In 1st and 3rd subframes the index is relative to last subframe integer lag.
yading@10 252 *
yading@10 253 * @param[out] lag_int Decoded integer pitch lag
yading@10 254 * @param[out] lag_frac Decoded fractional pitch lag
yading@10 255 * @param[in] pitch_index Adaptive codebook pitch index
yading@10 256 * @param[in,out] base_lag_int Base integer lag used in relative subframes
yading@10 257 * @param[in] subframe Current subframe index (0 to 3)
yading@10 258 */
yading@10 259 static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index,
yading@10 260 uint8_t *base_lag_int, int subframe)
yading@10 261 {
yading@10 262 if (subframe == 0 || subframe == 2) {
yading@10 263 if (pitch_index < 376) {
yading@10 264 *lag_int = (pitch_index + 137) >> 2;
yading@10 265 *lag_frac = pitch_index - (*lag_int << 2) + 136;
yading@10 266 } else if (pitch_index < 440) {
yading@10 267 *lag_int = (pitch_index + 257 - 376) >> 1;
yading@10 268 *lag_frac = (pitch_index - (*lag_int << 1) + 256 - 376) << 1;
yading@10 269 /* the actual resolution is 1/2 but expressed as 1/4 */
yading@10 270 } else {
yading@10 271 *lag_int = pitch_index - 280;
yading@10 272 *lag_frac = 0;
yading@10 273 }
yading@10 274 /* minimum lag for next subframe */
yading@10 275 *base_lag_int = av_clip(*lag_int - 8 - (*lag_frac < 0),
yading@10 276 AMRWB_P_DELAY_MIN, AMRWB_P_DELAY_MAX - 15);
yading@10 277 // XXX: the spec states clearly that *base_lag_int should be
yading@10 278 // the nearest integer to *lag_int (minus 8), but the ref code
yading@10 279 // actually always uses its floor; this code follows the latter
yading@10 280 } else {
yading@10 281 *lag_int = (pitch_index + 1) >> 2;
yading@10 282 *lag_frac = pitch_index - (*lag_int << 2);
yading@10 283 *lag_int += *base_lag_int;
yading@10 284 }
yading@10 285 }
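/* Worked example of the mapping above: indexes 0..375 give lags from 34 to
 * 127 3/4 in steps of 1/4, indexes 376..439 give 128 to 159 1/2 in steps of
 * 1/2 (still expressed in quarters), and indexes 440 and up give integer
 * lags starting at 160 (440 -> 160, 441 -> 161, ...). */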
yading@10 286
yading@10 287 /**
yading@10 288 * Decode an adaptive codebook index into pitch lag for 8k85 and 6k60 modes.
yading@10 289 * The description is analogous to decode_pitch_lag_high, but in 6k60 the
yading@10 290 * relative index is used for all subframes except the first.
yading@10 291 */
yading@10 292 static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,
yading@10 293 uint8_t *base_lag_int, int subframe, enum Mode mode)
yading@10 294 {
yading@10 295 if (subframe == 0 || (subframe == 2 && mode != MODE_6k60)) {
yading@10 296 if (pitch_index < 116) {
yading@10 297 *lag_int = (pitch_index + 69) >> 1;
yading@10 298 *lag_frac = (pitch_index - (*lag_int << 1) + 68) << 1;
yading@10 299 } else {
yading@10 300 *lag_int = pitch_index - 24;
yading@10 301 *lag_frac = 0;
yading@10 302 }
yading@10 303 // XXX: same problem as before
yading@10 304 *base_lag_int = av_clip(*lag_int - 8 - (*lag_frac < 0),
yading@10 305 AMRWB_P_DELAY_MIN, AMRWB_P_DELAY_MAX - 15);
yading@10 306 } else {
yading@10 307 *lag_int = (pitch_index + 1) >> 1;
yading@10 308 *lag_frac = (pitch_index - (*lag_int << 1)) << 1;
yading@10 309 *lag_int += *base_lag_int;
yading@10 310 }
yading@10 311 }
yading@10 312
yading@10 313 /**
yading@10 314 * Find the pitch vector by interpolating the past excitation at the
yading@10 315 * pitch delay, which is obtained in this function.
yading@10 316 *
yading@10 317 * @param[in,out] ctx The context
yading@10 318 * @param[in] amr_subframe Current subframe data
yading@10 319 * @param[in] subframe Current subframe index (0 to 3)
yading@10 320 */
yading@10 321 static void decode_pitch_vector(AMRWBContext *ctx,
yading@10 322 const AMRWBSubFrame *amr_subframe,
yading@10 323 const int subframe)
yading@10 324 {
yading@10 325 int pitch_lag_int, pitch_lag_frac;
yading@10 326 int i;
yading@10 327 float *exc = ctx->excitation;
yading@10 328 enum Mode mode = ctx->fr_cur_mode;
yading@10 329
yading@10 330 if (mode <= MODE_8k85) {
yading@10 331 decode_pitch_lag_low(&pitch_lag_int, &pitch_lag_frac, amr_subframe->adap,
yading@10 332 &ctx->base_pitch_lag, subframe, mode);
yading@10 333 } else
yading@10 334 decode_pitch_lag_high(&pitch_lag_int, &pitch_lag_frac, amr_subframe->adap,
yading@10 335 &ctx->base_pitch_lag, subframe);
yading@10 336
yading@10 337 ctx->pitch_lag_int = pitch_lag_int;
yading@10 338 pitch_lag_int += pitch_lag_frac > 0;
yading@10 339
yading@10 340 /* Calculate the pitch vector by interpolating the past excitation at the
yading@10 341 pitch lag using a Hamming windowed sinc function */
yading@10 342 ctx->acelpf_ctx.acelp_interpolatef(exc,
yading@10 343 exc + 1 - pitch_lag_int,
yading@10 344 ac_inter, 4,
yading@10 345 pitch_lag_frac + (pitch_lag_frac > 0 ? 0 : 4),
yading@10 346 LP_ORDER, AMRWB_SFR_SIZE + 1);
yading@10 347
yading@10 348 /* Check which pitch signal path should be used
yading@10 349 * 6k60 and 8k85 modes have the ltp flag set to 0 */
yading@10 350 if (amr_subframe->ltp) {
yading@10 351 memcpy(ctx->pitch_vector, exc, AMRWB_SFR_SIZE * sizeof(float));
yading@10 352 } else {
yading@10 353 for (i = 0; i < AMRWB_SFR_SIZE; i++)
yading@10 354 ctx->pitch_vector[i] = 0.18 * exc[i - 1] + 0.64 * exc[i] +
yading@10 355 0.18 * exc[i + 1];
yading@10 356 memcpy(exc, ctx->pitch_vector, AMRWB_SFR_SIZE * sizeof(float));
yading@10 357 }
yading@10 358 }
yading@10 359
yading@10 360 /** Get x bits in the index interval [lsb,lsb+len-1] inclusive */
yading@10 361 #define BIT_STR(x,lsb,len) (((x) >> (lsb)) & ((1 << (len)) - 1))
yading@10 362
yading@10 363 /** Get the bit at specified position */
yading@10 364 #define BIT_POS(x, p) (((x) >> (p)) & 1)
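/* Example: with x = 0x2D (binary 101101), BIT_STR(x, 1, 3) extracts bits
 * 1..3 giving 6 (binary 110), and BIT_POS(x, 3) gives 1. */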
yading@10 365
yading@10 366 /**
yading@10 367 * The next six functions decode_[i]p_track decode exactly i pulse
yading@10 368 * positions and amplitudes (-1 or 1) in a subframe track using
yading@10 369 * encoded pulse indexing (TS 26.190 section 5.8.2).
yading@10 370 *
yading@10 371 * The results are given in out[], in which a negative number means
yading@10 372 * amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) ).
yading@10 373 *
yading@10 374 * @param[out] out Output buffer (writes i elements)
yading@10 375 * @param[in] code Pulse index (no. of bits varies, see below)
yading@10 376 * @param[in] m (log2) Number of potential positions
yading@10 377 * @param[in] off Offset for decoded positions
yading@10 378 */
yading@10 379 static inline void decode_1p_track(int *out, int code, int m, int off)
yading@10 380 {
yading@10 381 int pos = BIT_STR(code, 0, m) + off; ///code: m+1 bits
yading@10 382
yading@10 383 out[0] = BIT_POS(code, m) ? -pos : pos;
yading@10 384 }
yading@10 385
yading@10 386 static inline void decode_2p_track(int *out, int code, int m, int off) ///code: 2m+1 bits
yading@10 387 {
yading@10 388 int pos0 = BIT_STR(code, m, m) + off;
yading@10 389 int pos1 = BIT_STR(code, 0, m) + off;
yading@10 390
yading@10 391 out[0] = BIT_POS(code, 2*m) ? -pos0 : pos0;
yading@10 392 out[1] = BIT_POS(code, 2*m) ? -pos1 : pos1;
yading@10 393 out[1] = pos0 > pos1 ? -out[1] : out[1];
yading@10 394 }
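/* Only one sign bit is transmitted for the pair above: the second pulse
 * takes the same sign as the first unless its position index is smaller,
 * in which case its sign is flipped. */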
yading@10 395
yading@10 396 static void decode_3p_track(int *out, int code, int m, int off) ///code: 3m+1 bits
yading@10 397 {
yading@10 398 int half_2p = BIT_POS(code, 2*m - 1) << (m - 1);
yading@10 399
yading@10 400 decode_2p_track(out, BIT_STR(code, 0, 2*m - 1),
yading@10 401 m - 1, off + half_2p);
yading@10 402 decode_1p_track(out + 2, BIT_STR(code, 2*m, m + 1), m, off);
yading@10 403 }
yading@10 404
yading@10 405 static void decode_4p_track(int *out, int code, int m, int off) ///code: 4m bits
yading@10 406 {
yading@10 407 int half_4p, subhalf_2p;
yading@10 408 int b_offset = 1 << (m - 1);
yading@10 409
yading@10 410 switch (BIT_STR(code, 4*m - 2, 2)) { /* case ID (2 bits) */
yading@10 411 case 0: /* 0 pulses in A, 4 pulses in B or vice versa */
yading@10 412 half_4p = BIT_POS(code, 4*m - 3) << (m - 1); // which has 4 pulses
yading@10 413 subhalf_2p = BIT_POS(code, 2*m - 3) << (m - 2);
yading@10 414
yading@10 415 decode_2p_track(out, BIT_STR(code, 0, 2*m - 3),
yading@10 416 m - 2, off + half_4p + subhalf_2p);
yading@10 417 decode_2p_track(out + 2, BIT_STR(code, 2*m - 2, 2*m - 1),
yading@10 418 m - 1, off + half_4p);
yading@10 419 break;
yading@10 420 case 1: /* 1 pulse in A, 3 pulses in B */
yading@10 421 decode_1p_track(out, BIT_STR(code, 3*m - 2, m),
yading@10 422 m - 1, off);
yading@10 423 decode_3p_track(out + 1, BIT_STR(code, 0, 3*m - 2),
yading@10 424 m - 1, off + b_offset);
yading@10 425 break;
yading@10 426 case 2: /* 2 pulses in each half */
yading@10 427 decode_2p_track(out, BIT_STR(code, 2*m - 1, 2*m - 1),
yading@10 428 m - 1, off);
yading@10 429 decode_2p_track(out + 2, BIT_STR(code, 0, 2*m - 1),
yading@10 430 m - 1, off + b_offset);
yading@10 431 break;
yading@10 432 case 3: /* 3 pulses in A, 1 pulse in B */
yading@10 433 decode_3p_track(out, BIT_STR(code, m, 3*m - 2),
yading@10 434 m - 1, off);
yading@10 435 decode_1p_track(out + 3, BIT_STR(code, 0, m),
yading@10 436 m - 1, off + b_offset);
yading@10 437 break;
yading@10 438 }
yading@10 439 }
yading@10 440
yading@10 441 static void decode_5p_track(int *out, int code, int m, int off) ///code: 5m bits
yading@10 442 {
yading@10 443 int half_3p = BIT_POS(code, 5*m - 1) << (m - 1);
yading@10 444
yading@10 445 decode_3p_track(out, BIT_STR(code, 2*m + 1, 3*m - 2),
yading@10 446 m - 1, off + half_3p);
yading@10 447
yading@10 448 decode_2p_track(out + 3, BIT_STR(code, 0, 2*m + 1), m, off);
yading@10 449 }
yading@10 450
yading@10 451 static void decode_6p_track(int *out, int code, int m, int off) ///code: 6m-2 bits
yading@10 452 {
yading@10 453 int b_offset = 1 << (m - 1);
yading@10 454 /* which half has more pulses in cases 0 to 2 */
yading@10 455 int half_more = BIT_POS(code, 6*m - 5) << (m - 1);
yading@10 456 int half_other = b_offset - half_more;
yading@10 457
yading@10 458 switch (BIT_STR(code, 6*m - 4, 2)) { /* case ID (2 bits) */
yading@10 459 case 0: /* 0 pulses in A, 6 pulses in B or vice versa */
yading@10 460 decode_1p_track(out, BIT_STR(code, 0, m),
yading@10 461 m - 1, off + half_more);
yading@10 462 decode_5p_track(out + 1, BIT_STR(code, m, 5*m - 5),
yading@10 463 m - 1, off + half_more);
yading@10 464 break;
yading@10 465 case 1: /* 1 pulse in A, 5 pulses in B or vice versa */
yading@10 466 decode_1p_track(out, BIT_STR(code, 0, m),
yading@10 467 m - 1, off + half_other);
yading@10 468 decode_5p_track(out + 1, BIT_STR(code, m, 5*m - 5),
yading@10 469 m - 1, off + half_more);
yading@10 470 break;
yading@10 471 case 2: /* 2 pulses in A, 4 pulses in B or vice versa */
yading@10 472 decode_2p_track(out, BIT_STR(code, 0, 2*m - 1),
yading@10 473 m - 1, off + half_other);
yading@10 474 decode_4p_track(out + 2, BIT_STR(code, 2*m - 1, 4*m - 4),
yading@10 475 m - 1, off + half_more);
yading@10 476 break;
yading@10 477 case 3: /* 3 pulses in A, 3 pulses in B */
yading@10 478 decode_3p_track(out, BIT_STR(code, 3*m - 2, 3*m - 2),
yading@10 479 m - 1, off);
yading@10 480 decode_3p_track(out + 3, BIT_STR(code, 0, 3*m - 2),
yading@10 481 m - 1, off + b_offset);
yading@10 482 break;
yading@10 483 }
yading@10 484 }
yading@10 485
yading@10 486 /**
yading@10 487 * Decode the algebraic codebook index to pulse positions and signs,
yading@10 488 * then construct the algebraic codebook vector.
yading@10 489 *
yading@10 490 * @param[out] fixed_vector Buffer for the fixed codebook excitation
yading@10 491 * @param[in] pulse_hi MSBs part of the pulse index array (higher modes only)
yading@10 492 * @param[in] pulse_lo LSBs part of the pulse index array
yading@10 493 * @param[in] mode Mode of the current frame
yading@10 494 */
yading@10 495 static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi,
yading@10 496 const uint16_t *pulse_lo, const enum Mode mode)
yading@10 497 {
yading@10 498 /* sig_pos stores for each track the decoded pulse position indexes
yading@10 499 * (1-based) multiplied by its corresponding amplitude (+1 or -1) */
yading@10 500 int sig_pos[4][6];
yading@10 501 int spacing = (mode == MODE_6k60) ? 2 : 4;
yading@10 502 int i, j;
yading@10 503
yading@10 504 switch (mode) {
yading@10 505 case MODE_6k60:
yading@10 506 for (i = 0; i < 2; i++)
yading@10 507 decode_1p_track(sig_pos[i], pulse_lo[i], 5, 1);
yading@10 508 break;
yading@10 509 case MODE_8k85:
yading@10 510 for (i = 0; i < 4; i++)
yading@10 511 decode_1p_track(sig_pos[i], pulse_lo[i], 4, 1);
yading@10 512 break;
yading@10 513 case MODE_12k65:
yading@10 514 for (i = 0; i < 4; i++)
yading@10 515 decode_2p_track(sig_pos[i], pulse_lo[i], 4, 1);
yading@10 516 break;
yading@10 517 case MODE_14k25:
yading@10 518 for (i = 0; i < 2; i++)
yading@10 519 decode_3p_track(sig_pos[i], pulse_lo[i], 4, 1);
yading@10 520 for (i = 2; i < 4; i++)
yading@10 521 decode_2p_track(sig_pos[i], pulse_lo[i], 4, 1);
yading@10 522 break;
yading@10 523 case MODE_15k85:
yading@10 524 for (i = 0; i < 4; i++)
yading@10 525 decode_3p_track(sig_pos[i], pulse_lo[i], 4, 1);
yading@10 526 break;
yading@10 527 case MODE_18k25:
yading@10 528 for (i = 0; i < 4; i++)
yading@10 529 decode_4p_track(sig_pos[i], (int) pulse_lo[i] +
yading@10 530 ((int) pulse_hi[i] << 14), 4, 1);
yading@10 531 break;
yading@10 532 case MODE_19k85:
yading@10 533 for (i = 0; i < 2; i++)
yading@10 534 decode_5p_track(sig_pos[i], (int) pulse_lo[i] +
yading@10 535 ((int) pulse_hi[i] << 10), 4, 1);
yading@10 536 for (i = 2; i < 4; i++)
yading@10 537 decode_4p_track(sig_pos[i], (int) pulse_lo[i] +
yading@10 538 ((int) pulse_hi[i] << 14), 4, 1);
yading@10 539 break;
yading@10 540 case MODE_23k05:
yading@10 541 case MODE_23k85:
yading@10 542 for (i = 0; i < 4; i++)
yading@10 543 decode_6p_track(sig_pos[i], (int) pulse_lo[i] +
yading@10 544 ((int) pulse_hi[i] << 11), 4, 1);
yading@10 545 break;
yading@10 546 }
yading@10 547
yading@10 548 memset(fixed_vector, 0, sizeof(float) * AMRWB_SFR_SIZE);
yading@10 549
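/* The tracks are interleaved across the subframe: track i owns sample
 * positions i, i + spacing, i + 2*spacing, ... and sig_pos holds 1-based
 * positions whose sign carries the pulse amplitude, hence the
 * (|pos| - 1) * spacing + i mapping below. */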
yading@10 550 for (i = 0; i < 4; i++)
yading@10 551 for (j = 0; j < pulses_nb_per_mode_tr[mode][i]; j++) {
yading@10 552 int pos = (FFABS(sig_pos[i][j]) - 1) * spacing + i;
yading@10 553
yading@10 554 fixed_vector[pos] += sig_pos[i][j] < 0 ? -1.0 : 1.0;
yading@10 555 }
yading@10 556 }
yading@10 557
yading@10 558 /**
yading@10 559 * Decode pitch gain and fixed gain correction factor.
yading@10 560 *
yading@10 561 * @param[in] vq_gain Vector-quantized index for gains
yading@10 562 * @param[in] mode Mode of the current frame
yading@10 563 * @param[out] fixed_gain_factor Decoded fixed gain correction factor
yading@10 564 * @param[out] pitch_gain Decoded pitch gain
yading@10 565 */
yading@10 566 static void decode_gains(const uint8_t vq_gain, const enum Mode mode,
yading@10 567 float *fixed_gain_factor, float *pitch_gain)
yading@10 568 {
yading@10 569 const int16_t *gains = (mode <= MODE_8k85 ? qua_gain_6b[vq_gain] :
yading@10 570 qua_gain_7b[vq_gain]);
yading@10 571
yading@10 572 *pitch_gain = gains[0] * (1.0f / (1 << 14));
yading@10 573 *fixed_gain_factor = gains[1] * (1.0f / (1 << 11));
yading@10 574 }
yading@10 575
yading@10 576 /**
yading@10 577 * Apply pitch sharpening filters to the fixed codebook vector.
yading@10 578 *
yading@10 579 * @param[in] ctx The context
yading@10 580 * @param[in,out] fixed_vector Fixed codebook excitation
yading@10 581 */
yading@10 582 // XXX: Spec states this procedure should be applied when the pitch
yading@10 583 // lag is less than 64, but this check seems absent from the reference code and AMR-NB
yading@10 584 static void pitch_sharpening(AMRWBContext *ctx, float *fixed_vector)
yading@10 585 {
yading@10 586 int i;
yading@10 587
yading@10 588 /* Tilt part */
yading@10 589 for (i = AMRWB_SFR_SIZE - 1; i != 0; i--)
yading@10 590 fixed_vector[i] -= fixed_vector[i - 1] * ctx->tilt_coef;
yading@10 591
yading@10 592 /* Periodicity enhancement part */
yading@10 593 for (i = ctx->pitch_lag_int; i < AMRWB_SFR_SIZE; i++)
yading@10 594 fixed_vector[i] += fixed_vector[i - ctx->pitch_lag_int] * 0.85;
yading@10 595 }
yading@10 596
yading@10 597 /**
yading@10 598 * Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced).
yading@10 599 *
yading@10 600 * @param[in] p_vector, f_vector Pitch and fixed excitation vectors
yading@10 601 * @param[in] p_gain, f_gain Pitch and fixed gains
yading@10 602 * @param[in] ctx The context
yading@10 603 */
yading@10 604 // XXX: There is something wrong with the precision here! The magnitudes
yading@10 605 // of the energies are not correct. Please check the reference code carefully
yading@10 606 static float voice_factor(float *p_vector, float p_gain,
yading@10 607 float *f_vector, float f_gain,
yading@10 608 CELPMContext *ctx)
yading@10 609 {
yading@10 610 double p_ener = (double) ctx->dot_productf(p_vector, p_vector,
yading@10 611 AMRWB_SFR_SIZE) *
yading@10 612 p_gain * p_gain;
yading@10 613 double f_ener = (double) ctx->dot_productf(f_vector, f_vector,
yading@10 614 AMRWB_SFR_SIZE) *
yading@10 615 f_gain * f_gain;
yading@10 616
yading@10 617 return (p_ener - f_ener) / (p_ener + f_ener);
yading@10 618 }
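/* Both energies are non-negative, so for a non-zero excitation the ratio
 * (p_ener - f_ener) / (p_ener + f_ener) is bounded to [-1, 1], matching the
 * unvoiced..voiced range documented above. */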
yading@10 619
yading@10 620 /**
yading@10 621 * Reduce fixed vector sparseness by smoothing with one of three IR filters,
yading@10 622 * also known as "adaptive phase dispersion".
yading@10 623 *
yading@10 624 * @param[in] ctx The context
yading@10 625 * @param[in,out] fixed_vector Unfiltered fixed vector
yading@10 626 * @param[out] buf Space for modified vector if necessary
yading@10 627 *
yading@10 628 * @return The potentially overwritten filtered fixed vector address
yading@10 629 */
yading@10 630 static float *anti_sparseness(AMRWBContext *ctx,
yading@10 631 float *fixed_vector, float *buf)
yading@10 632 {
yading@10 633 int ir_filter_nr;
yading@10 634
yading@10 635 if (ctx->fr_cur_mode > MODE_8k85) // no filtering in higher modes
yading@10 636 return fixed_vector;
yading@10 637
yading@10 638 if (ctx->pitch_gain[0] < 0.6) {
yading@10 639 ir_filter_nr = 0; // strong filtering
yading@10 640 } else if (ctx->pitch_gain[0] < 0.9) {
yading@10 641 ir_filter_nr = 1; // medium filtering
yading@10 642 } else
yading@10 643 ir_filter_nr = 2; // no filtering
yading@10 644
yading@10 645 /* detect 'onset' */
yading@10 646 if (ctx->fixed_gain[0] > 3.0 * ctx->fixed_gain[1]) {
yading@10 647 if (ir_filter_nr < 2)
yading@10 648 ir_filter_nr++;
yading@10 649 } else {
yading@10 650 int i, count = 0;
yading@10 651
yading@10 652 for (i = 0; i < 6; i++)
yading@10 653 if (ctx->pitch_gain[i] < 0.6)
yading@10 654 count++;
yading@10 655
yading@10 656 if (count > 2)
yading@10 657 ir_filter_nr = 0;
yading@10 658
yading@10 659 if (ir_filter_nr > ctx->prev_ir_filter_nr + 1)
yading@10 660 ir_filter_nr--;
yading@10 661 }
yading@10 662
yading@10 663 /* update ir filter strength history */
yading@10 664 ctx->prev_ir_filter_nr = ir_filter_nr;
yading@10 665
yading@10 666 ir_filter_nr += (ctx->fr_cur_mode == MODE_8k85);
yading@10 667
yading@10 668 if (ir_filter_nr < 2) {
yading@10 669 int i;
yading@10 670 const float *coef = ir_filters_lookup[ir_filter_nr];
yading@10 671
yading@10 672 /* Circular convolution code in the reference
yading@10 673 * decoder was modified to avoid using one
yading@10 674 * extra array. The filtered vector is given by:
yading@10 675 *
yading@10 676 * c2(n) = sum(i,0,len-1){ c(i) * coef( (n - i + len) % len ) }
yading@10 677 */
yading@10 678
yading@10 679 memset(buf, 0, sizeof(float) * AMRWB_SFR_SIZE);
yading@10 680 for (i = 0; i < AMRWB_SFR_SIZE; i++)
yading@10 681 if (fixed_vector[i])
yading@10 682 ff_celp_circ_addf(buf, buf, coef, i, fixed_vector[i],
yading@10 683 AMRWB_SFR_SIZE);
yading@10 684 fixed_vector = buf;
yading@10 685 }
yading@10 686
yading@10 687 return fixed_vector;
yading@10 688 }
yading@10 689
yading@10 690 /**
yading@10 691 * Calculate a stability factor {theta} based on the distance between
yading@10 692 * the current and past ISFs. A value of 1 indicates maximum signal stability.
yading@10 693 */
yading@10 694 static float stability_factor(const float *isf, const float *isf_past)
yading@10 695 {
yading@10 696 int i;
yading@10 697 float acc = 0.0;
yading@10 698
yading@10 699 for (i = 0; i < LP_ORDER - 1; i++)
yading@10 700 acc += (isf[i] - isf_past[i]) * (isf[i] - isf_past[i]);
yading@10 701
yading@10 702 // XXX: This part is not so clear in the reference code;
yading@10 703 // the result is more accurate when changing the "/ 256" to "* 512"
yading@10 704 return FFMAX(0.0, 1.25 - acc * 0.8 * 512);
yading@10 705 }
yading@10 706
yading@10 707 /**
yading@10 708 * Apply a non-linear fixed gain smoothing in order to reduce
yading@10 709 * fluctuation in the energy of excitation.
yading@10 710 *
yading@10 711 * @param[in] fixed_gain Unsmoothed fixed gain
yading@10 712 * @param[in,out] prev_tr_gain Previous threshold gain (updated)
yading@10 713 * @param[in] voice_fac Frame voicing factor
yading@10 714 * @param[in] stab_fac Frame stability factor
yading@10 715 *
yading@10 716 * @return The smoothed gain
yading@10 717 */
yading@10 718 static float noise_enhancer(float fixed_gain, float *prev_tr_gain,
yading@10 719 float voice_fac, float stab_fac)
yading@10 720 {
yading@10 721 float sm_fac = 0.5 * (1 - voice_fac) * stab_fac;
yading@10 722 float g0;
yading@10 723
yading@10 724 // XXX: the following fixed-point constants used to increment/decrement
yading@10 725 // the gain by 1.5 dB were taken from the reference code; they could
yading@10 726 // perhaps be simplified
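// For reference: 6226 / 2^15 ~= 0.190 = 10^(1.5/20) - 1 and
// 27536 / 2^15 ~= 0.840 ~= 10^(-1.5/20), i.e. steps of +/- 1.5 dB.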
yading@10 727 if (fixed_gain < *prev_tr_gain) {
yading@10 728 g0 = FFMIN(*prev_tr_gain, fixed_gain + fixed_gain *
yading@10 729 (6226 * (1.0f / (1 << 15)))); // +1.5 dB
yading@10 730 } else
yading@10 731 g0 = FFMAX(*prev_tr_gain, fixed_gain *
yading@10 732 (27536 * (1.0f / (1 << 15)))); // -1.5 dB
yading@10 733
yading@10 734 *prev_tr_gain = g0; // update next frame threshold
yading@10 735
yading@10 736 return sm_fac * g0 + (1 - sm_fac) * fixed_gain;
yading@10 737 }
yading@10 738
yading@10 739 /**
yading@10 740 * Filter the fixed_vector to emphasize the higher frequencies.
yading@10 741 *
yading@10 742 * @param[in,out] fixed_vector Fixed codebook vector
yading@10 743 * @param[in] voice_fac Frame voicing factor
yading@10 744 */
yading@10 745 static void pitch_enhancer(float *fixed_vector, float voice_fac)
yading@10 746 {
yading@10 747 int i;
yading@10 748 float cpe = 0.125 * (1 + voice_fac);
yading@10 749 float last = fixed_vector[0]; // holds c(i - 1)
yading@10 750
yading@10 751 fixed_vector[0] -= cpe * fixed_vector[1];
yading@10 752
yading@10 753 for (i = 1; i < AMRWB_SFR_SIZE - 1; i++) {
yading@10 754 float cur = fixed_vector[i];
yading@10 755
yading@10 756 fixed_vector[i] -= cpe * (last + fixed_vector[i + 1]);
yading@10 757 last = cur;
yading@10 758 }
yading@10 759
yading@10 760 fixed_vector[AMRWB_SFR_SIZE - 1] -= cpe * last;
yading@10 761 }
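/* The loop above implements c'(n) = c(n) - cpe * (c(n-1) + c(n+1)); the
 * first and last samples use only their single available neighbour. */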
yading@10 762
yading@10 763 /**
yading@10 764 * Conduct 16th order linear predictive coding synthesis from excitation.
yading@10 765 *
yading@10 766 * @param[in] ctx Pointer to the AMRWBContext
yading@10 767 * @param[in] lpc Pointer to the LPC coefficients
yading@10 768 * @param[out] excitation Buffer for synthesis final excitation
yading@10 769 * @param[in] fixed_gain Fixed codebook gain for synthesis
yading@10 770 * @param[in] fixed_vector Algebraic codebook vector
yading@10 771 * @param[in,out] samples Pointer to the output samples and memory
yading@10 772 */
yading@10 773 static void synthesis(AMRWBContext *ctx, float *lpc, float *excitation,
yading@10 774 float fixed_gain, const float *fixed_vector,
yading@10 775 float *samples)
yading@10 776 {
yading@10 777 ctx->acelpv_ctx.weighted_vector_sumf(excitation, ctx->pitch_vector, fixed_vector,
yading@10 778 ctx->pitch_gain[0], fixed_gain, AMRWB_SFR_SIZE);
yading@10 779
yading@10 780 /* emphasize pitch vector contribution in low bitrate modes */
yading@10 781 if (ctx->pitch_gain[0] > 0.5 && ctx->fr_cur_mode <= MODE_8k85) {
yading@10 782 int i;
yading@10 783 float energy = ctx->celpm_ctx.dot_productf(excitation, excitation,
yading@10 784 AMRWB_SFR_SIZE);
yading@10 785
yading@10 786 // XXX: Weird part in both ref code and spec. An unknown parameter
yading@10 787 // {beta} seems to be identical to the current pitch gain
yading@10 788 float pitch_factor = 0.25 * ctx->pitch_gain[0] * ctx->pitch_gain[0];
yading@10 789
yading@10 790 for (i = 0; i < AMRWB_SFR_SIZE; i++)
yading@10 791 excitation[i] += pitch_factor * ctx->pitch_vector[i];
yading@10 792
yading@10 793 ff_scale_vector_to_given_sum_of_squares(excitation, excitation,
yading@10 794 energy, AMRWB_SFR_SIZE);
yading@10 795 }
yading@10 796
yading@10 797 ctx->celpf_ctx.celp_lp_synthesis_filterf(samples, lpc, excitation,
yading@10 798 AMRWB_SFR_SIZE, LP_ORDER);
yading@10 799 }
yading@10 800
yading@10 801 /**
yading@10 802 * Apply to synthesis a de-emphasis filter of the form:
yading@10 803 * H(z) = 1 / (1 - m * z^-1)
yading@10 804 *
yading@10 805 * @param[out] out Output buffer
yading@10 806 * @param[in] in Input samples array with in[-1]
yading@10 807 * @param[in] m Filter coefficient
yading@10 808 * @param[in,out] mem State from last filtering
yading@10 809 */
yading@10 810 static void de_emphasis(float *out, float *in, float m, float mem[1])
yading@10 811 {
yading@10 812 int i;
yading@10 813
yading@10 814 out[0] = in[0] + m * mem[0];
yading@10 815
yading@10 816 for (i = 1; i < AMRWB_SFR_SIZE; i++)
yading@10 817 out[i] = in[i] + out[i - 1] * m;
yading@10 818
yading@10 819 mem[0] = out[AMRWB_SFR_SIZE - 1];
yading@10 820 }
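/* This undoes the pre-emphasis filter 1 - m * z^-1 applied at the encoder;
 * it is called below with m = PREEMPH_FAC. */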
yading@10 821
yading@10 822 /**
yading@10 823 * Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using
yading@10 824 * a FIR interpolation filter. Uses past data from before the *in address.
yading@10 825 *
yading@10 826 * @param[out] out Buffer for interpolated signal
yading@10 827 * @param[in] in Current signal data (length 0.8*o_size)
yading@10 828 * @param[in] o_size Output signal length
yading@10 829 * @param[in] ctx The context
yading@10 830 */
yading@10 831 static void upsample_5_4(float *out, const float *in, int o_size, CELPMContext *ctx)
yading@10 832 {
yading@10 833 const float *in0 = in - UPS_FIR_SIZE + 1;
yading@10 834 int i, j, k;
yading@10 835 int int_part = 0, frac_part;
yading@10 836
yading@10 837 i = 0;
yading@10 838 for (j = 0; j < o_size / 5; j++) {
yading@10 839 out[i] = in[int_part];
yading@10 840 frac_part = 4;
yading@10 841 i++;
yading@10 842
yading@10 843 for (k = 1; k < 5; k++) {
yading@10 844 out[i] = ctx->dot_productf(in0 + int_part,
yading@10 845 upsample_fir[4 - frac_part],
yading@10 846 UPS_MEM_SIZE);
yading@10 847 int_part++;
yading@10 848 frac_part--;
yading@10 849 i++;
yading@10 850 }
yading@10 851 }
yading@10 852 }
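/* For every 4 low-band input samples, 5 output samples are produced: one is
 * copied directly and the other four are FIR-interpolated at fractional
 * phases from the upsample_fir table, giving the 12.8 kHz -> 16 kHz (5/4)
 * rate change. */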
yading@10 853
yading@10 854 /**
yading@10 855 * Calculate the high-band gain based on encoded index (23k85 mode) or
yading@10 856 * on the low-band speech signal and the Voice Activity Detection flag.
yading@10 857 *
yading@10 858 * @param[in] ctx The context
yading@10 859 * @param[in] synth LB speech synthesis at 12.8k
yading@10 860 * @param[in] hb_idx Gain index for mode 23k85 only
yading@10 861 * @param[in] vad VAD flag for the frame
yading@10 862 */
yading@10 863 static float find_hb_gain(AMRWBContext *ctx, const float *synth,
yading@10 864 uint16_t hb_idx, uint8_t vad)
yading@10 865 {
yading@10 866 int wsp = (vad > 0);
yading@10 867 float tilt;
yading@10 868
yading@10 869 if (ctx->fr_cur_mode == MODE_23k85)
yading@10 870 return qua_hb_gain[hb_idx] * (1.0f / (1 << 14));
yading@10 871
yading@10 872 tilt = ctx->celpm_ctx.dot_productf(synth, synth + 1, AMRWB_SFR_SIZE - 1) /
yading@10 873 ctx->celpm_ctx.dot_productf(synth, synth, AMRWB_SFR_SIZE);
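/* tilt is the lag-1 normalized autocorrelation of the low-band synthesis;
 * values close to 1 indicate a low-pass (strongly voiced) spectrum and
 * reduce the high-band gain computed below. */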
yading@10 874
yading@10 875 /* return gain bounded by [0.1, 1.0] */
yading@10 876 return av_clipf((1.0 - FFMAX(0.0, tilt)) * (1.25 - 0.25 * wsp), 0.1, 1.0);
yading@10 877 }
yading@10 878
yading@10 879 /**
yading@10 880 * Generate the high-band excitation with the same energy as the low-band
yading@10 881 * one, scaled by the given gain.
yading@10 882 *
yading@10 883 * @param[in] ctx The context
yading@10 884 * @param[out] hb_exc Buffer for the excitation
yading@10 885 * @param[in] synth_exc Low-band excitation used for synthesis
yading@10 886 * @param[in] hb_gain Wanted excitation gain
yading@10 887 */
yading@10 888 static void scaled_hb_excitation(AMRWBContext *ctx, float *hb_exc,
yading@10 889 const float *synth_exc, float hb_gain)
yading@10 890 {
yading@10 891 int i;
yading@10 892 float energy = ctx->celpm_ctx.dot_productf(synth_exc, synth_exc,
yading@10 893 AMRWB_SFR_SIZE);
yading@10 894
yading@10 895 /* Generate a white-noise excitation */
yading@10 896 for (i = 0; i < AMRWB_SFR_SIZE_16k; i++)
yading@10 897 hb_exc[i] = 32768.0 - (uint16_t) av_lfg_get(&ctx->prng);
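/* The (uint16_t) cast keeps the low 16 bits of the LFG output, so the raw
 * noise is uniformly distributed over roughly [-32767, 32768] before being
 * rescaled to the target energy below. */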
yading@10 898
yading@10 899 ff_scale_vector_to_given_sum_of_squares(hb_exc, hb_exc,
yading@10 900 energy * hb_gain * hb_gain,
yading@10 901 AMRWB_SFR_SIZE_16k);
yading@10 902 }
yading@10 903
yading@10 904 /**
yading@10 905 * Calculate the auto-correlation for the ISF difference vector.
yading@10 906 */
yading@10 907 static float auto_correlation(float *diff_isf, float mean, int lag)
yading@10 908 {
yading@10 909 int i;
yading@10 910 float sum = 0.0;
yading@10 911
yading@10 912 for (i = 7; i < LP_ORDER - 2; i++) {
yading@10 913 float prod = (diff_isf[i] - mean) * (diff_isf[i - lag] - mean);
yading@10 914 sum += prod * prod;
yading@10 915 }
yading@10 916 return sum;
yading@10 917 }
yading@10 918
yading@10 919 /**
yading@10 920 * Extrapolate an ISF vector to the 16kHz range (20th order LP)
yading@10 921 * used in mode 6k60 for the high frequency band LP filter.
yading@10 922 *
yading@10 923 * @param[out] isf Buffer for extrapolated isf; contains LP_ORDER
yading@10 924 * values on input
yading@10 925 */
yading@10 926 static void extrapolate_isf(float isf[LP_ORDER_16k])
yading@10 927 {
yading@10 928 float diff_isf[LP_ORDER - 2], diff_mean;
yading@10 929 float corr_lag[3];
yading@10 930 float est, scale;
yading@10 931 int i, j, i_max_corr;
yading@10 932
yading@10 933 isf[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
yading@10 934
yading@10 935 /* Calculate the difference vector */
yading@10 936 for (i = 0; i < LP_ORDER - 2; i++)
yading@10 937 diff_isf[i] = isf[i + 1] - isf[i];
yading@10 938
yading@10 939 diff_mean = 0.0;
yading@10 940 for (i = 2; i < LP_ORDER - 2; i++)
yading@10 941 diff_mean += diff_isf[i] * (1.0f / (LP_ORDER - 4));
yading@10 942
yading@10 943 /* Find which is the maximum autocorrelation */
yading@10 944 i_max_corr = 0;
yading@10 945 for (i = 0; i < 3; i++) {
yading@10 946 corr_lag[i] = auto_correlation(diff_isf, diff_mean, i + 2);
yading@10 947
yading@10 948 if (corr_lag[i] > corr_lag[i_max_corr])
yading@10 949 i_max_corr = i;
yading@10 950 }
yading@10 951 i_max_corr++;
yading@10 952
yading@10 953 for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
yading@10 954 isf[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
yading@10 955 - isf[i - 2 - i_max_corr];
yading@10 956
yading@10 957 /* Calculate an estimate for ISF(18) and scale ISF based on the error */
yading@10 958 est = 7965 + (isf[2] - isf[3] - isf[4]) / 6.0;
yading@10 959 scale = 0.5 * (FFMIN(est, 7600) - isf[LP_ORDER - 2]) /
yading@10 960 (isf[LP_ORDER_16k - 2] - isf[LP_ORDER - 2]);
yading@10 961
yading@10 962 for (i = LP_ORDER - 1, j = 0; i < LP_ORDER_16k - 1; i++, j++)
yading@10 963 diff_isf[j] = scale * (isf[i] - isf[i - 1]);
yading@10 964
yading@10 965 /* Stability insurance */
yading@10 966 for (i = 1; i < LP_ORDER_16k - LP_ORDER; i++)
yading@10 967 if (diff_isf[i] + diff_isf[i - 1] < 5.0) {
yading@10 968 if (diff_isf[i] > diff_isf[i - 1]) {
yading@10 969 diff_isf[i - 1] = 5.0 - diff_isf[i];
yading@10 970 } else
yading@10 971 diff_isf[i] = 5.0 - diff_isf[i - 1];
yading@10 972 }
yading@10 973
yading@10 974 for (i = LP_ORDER - 1, j = 0; i < LP_ORDER_16k - 1; i++, j++)
yading@10 975 isf[i] = isf[i - 1] + diff_isf[j] * (1.0f / (1 << 15));
yading@10 976
yading@10 977 /* Scale the ISF vector for 16000 Hz */
yading@10 978 for (i = 0; i < LP_ORDER_16k - 1; i++)
yading@10 979 isf[i] *= 0.8;
yading@10 980 }
yading@10 981
yading@10 982 /**
yading@10 983 * Spectrally expand the LP coefficients using the equation:
yading@10 984 * y[i] = x[i] * (gamma ** i)
yading@10 985 *
yading@10 986 * @param[out] out Output buffer (may use input array)
yading@10 987 * @param[in] lpc LP coefficients array
yading@10 988 * @param[in] gamma Weighting factor
yading@10 989 * @param[in] size LP array size
yading@10 990 */
yading@10 991 static void lpc_weighting(float *out, const float *lpc, float gamma, int size)
yading@10 992 {
yading@10 993 int i;
yading@10 994 float fac = gamma;
yading@10 995
yading@10 996 for (i = 0; i < size; i++) {
yading@10 997 out[i] = lpc[i] * fac;
yading@10 998 fac *= gamma;
yading@10 999 }
yading@10 1000 }
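/* Since fac starts at gamma, element i is scaled by gamma^(i+1); assuming
 * out[0] holds the first LP coefficient a_1 (a_0 = 1 being implicit), this
 * is the usual bandwidth expansion a_k -> a_k * gamma^k for k = 1..size. */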
yading@10 1001
yading@10 1002 /**
yading@10 1003 * Conduct 20th order linear predictive coding synthesis for the high
yading@10 1004 * frequency band excitation at 16kHz.
yading@10 1005 *
yading@10 1006 * @param[in] ctx The context
yading@10 1007 * @param[in] subframe Current subframe index (0 to 3)
yading@10 1008 * @param[in,out] samples Pointer to the output speech samples
yading@10 1009 * @param[in] exc Generated white-noise scaled excitation
yading@10 1010 * @param[in] isf Current frame isf vector
yading@10 1011 * @param[in] isf_past Past frame final isf vector
yading@10 1012 */
yading@10 1013 static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
yading@10 1014 const float *exc, const float *isf, const float *isf_past)
yading@10 1015 {
yading@10 1016 float hb_lpc[LP_ORDER_16k];
yading@10 1017 enum Mode mode = ctx->fr_cur_mode;
yading@10 1018
yading@10 1019 if (mode == MODE_6k60) {
yading@10 1020 float e_isf[LP_ORDER_16k]; // ISF vector for extrapolation
yading@10 1021 double e_isp[LP_ORDER_16k];
yading@10 1022
yading@10 1023 ctx->acelpv_ctx.weighted_vector_sumf(e_isf, isf_past, isf, isfp_inter[subframe],
yading@10 1024 1.0 - isfp_inter[subframe], LP_ORDER);
yading@10 1025
yading@10 1026 extrapolate_isf(e_isf);
yading@10 1027
yading@10 1028 e_isf[LP_ORDER_16k - 1] *= 2.0;
yading@10 1029 ff_acelp_lsf2lspd(e_isp, e_isf, LP_ORDER_16k);
yading@10 1030 ff_amrwb_lsp2lpc(e_isp, hb_lpc, LP_ORDER_16k);
yading@10 1031
yading@10 1032 lpc_weighting(hb_lpc, hb_lpc, 0.9, LP_ORDER_16k);
yading@10 1033 } else {
yading@10 1034 lpc_weighting(hb_lpc, ctx->lp_coef[subframe], 0.6, LP_ORDER);
yading@10 1035 }
yading@10 1036
yading@10 1037 ctx->celpf_ctx.celp_lp_synthesis_filterf(samples, hb_lpc, exc, AMRWB_SFR_SIZE_16k,
yading@10 1038 (mode == MODE_6k60) ? LP_ORDER_16k : LP_ORDER);
yading@10 1039 }
yading@10 1040
yading@10 1041 /**
yading@10 1042 * Apply a 15th order filter to high-band samples.
yading@10 1043 * The filter characteristic depends on the given coefficients.
yading@10 1044 *
yading@10 1045 * @param[out] out Buffer for filtered output
yading@10 1046 * @param[in] fir_coef Filter coefficients
yading@10 1047 * @param[in,out] mem State from last filtering (updated)
yading@10 1048 * @param[in] in Input speech data (high-band)
yading@10 1049 *
yading@10 1050 * @remark It is safe to pass the same array as the in and out parameters
yading@10 1051 */
yading@10 1052
yading@10 1053 #ifndef hb_fir_filter
yading@10 1054 static void hb_fir_filter(float *out, const float fir_coef[HB_FIR_SIZE + 1],
yading@10 1055 float mem[HB_FIR_SIZE], const float *in)
yading@10 1056 {
yading@10 1057 int i, j;
yading@10 1058 float data[AMRWB_SFR_SIZE_16k + HB_FIR_SIZE]; // past and current samples
yading@10 1059
yading@10 1060 memcpy(data, mem, HB_FIR_SIZE * sizeof(float));
yading@10 1061 memcpy(data + HB_FIR_SIZE, in, AMRWB_SFR_SIZE_16k * sizeof(float));
yading@10 1062
yading@10 1063 for (i = 0; i < AMRWB_SFR_SIZE_16k; i++) {
yading@10 1064 out[i] = 0.0;
yading@10 1065 for (j = 0; j <= HB_FIR_SIZE; j++)
yading@10 1066 out[i] += data[i + j] * fir_coef[j];
yading@10 1067 }
yading@10 1068
yading@10 1069 memcpy(mem, data + AMRWB_SFR_SIZE_16k, HB_FIR_SIZE * sizeof(float));
yading@10 1070 }
yading@10 1071 #endif /* hb_fir_filter */
yading@10 1072
yading@10 1073 /**
yading@10 1074 * Update context state before the next subframe.
yading@10 1075 */
yading@10 1076 static void update_sub_state(AMRWBContext *ctx)
yading@10 1077 {
yading@10 1078 memmove(&ctx->excitation_buf[0], &ctx->excitation_buf[AMRWB_SFR_SIZE],
yading@10 1079 (AMRWB_P_DELAY_MAX + LP_ORDER + 1) * sizeof(float));
yading@10 1080
yading@10 1081 memmove(&ctx->pitch_gain[1], &ctx->pitch_gain[0], 5 * sizeof(float));
yading@10 1082 memmove(&ctx->fixed_gain[1], &ctx->fixed_gain[0], 1 * sizeof(float));
yading@10 1083
yading@10 1084 memmove(&ctx->samples_az[0], &ctx->samples_az[AMRWB_SFR_SIZE],
yading@10 1085 LP_ORDER * sizeof(float));
yading@10 1086 memmove(&ctx->samples_up[0], &ctx->samples_up[AMRWB_SFR_SIZE],
yading@10 1087 UPS_MEM_SIZE * sizeof(float));
yading@10 1088 memmove(&ctx->samples_hb[0], &ctx->samples_hb[AMRWB_SFR_SIZE_16k],
yading@10 1089 LP_ORDER_16k * sizeof(float));
yading@10 1090 }
yading@10 1091
yading@10 1092 static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
yading@10 1093 int *got_frame_ptr, AVPacket *avpkt)
yading@10 1094 {
yading@10 1095 AMRWBContext *ctx = avctx->priv_data;
yading@10 1096 AVFrame *frame = data;
yading@10 1097 AMRWBFrame *cf = &ctx->frame;
yading@10 1098 const uint8_t *buf = avpkt->data;
yading@10 1099 int buf_size = avpkt->size;
yading@10 1100 int expected_fr_size, header_size;
yading@10 1101 float *buf_out;
yading@10 1102 float spare_vector[AMRWB_SFR_SIZE]; // extra stack space to hold result from anti-sparseness processing
yading@10 1103 float fixed_gain_factor; // fixed gain correction factor (gamma)
yading@10 1104 float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use
yading@10 1105 float synth_fixed_gain; // the fixed gain that synthesis should use
yading@10 1106 float voice_fac, stab_fac; // parameters used for gain smoothing
yading@10 1107 float synth_exc[AMRWB_SFR_SIZE]; // post-processed excitation for synthesis
yading@10 1108 float hb_exc[AMRWB_SFR_SIZE_16k]; // excitation for the high frequency band
yading@10 1109 float hb_samples[AMRWB_SFR_SIZE_16k]; // filtered high-band samples from synthesis
yading@10 1110 float hb_gain;
yading@10 1111 int sub, i, ret;
yading@10 1112
yading@10 1113 /* get output buffer */
yading@10 1114 frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
yading@10 1115 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
yading@10 1116 return ret;
yading@10 1117 buf_out = (float *)frame->data[0];
yading@10 1118
yading@10 1119 header_size = decode_mime_header(ctx, buf);
yading@10 1120 if (ctx->fr_cur_mode > MODE_SID) {
yading@10 1121 av_log(avctx, AV_LOG_ERROR,
yading@10 1122 "Invalid mode %d\n", ctx->fr_cur_mode);
yading@10 1123 return AVERROR_INVALIDDATA;
yading@10 1124 }
yading@10 1125 expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
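/* cf_sizes_wb holds the per-mode payload size in bits; rounding up to whole
 * bytes and adding the header octet consumed by decode_mime_header() gives
 * the expected frame size in the storage format. */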
yading@10 1126
yading@10 1127 if (buf_size < expected_fr_size) {
yading@10 1128 av_log(avctx, AV_LOG_ERROR,
yading@10 1129 "Frame too small (%d bytes). Truncated file?\n", buf_size);
yading@10 1130 *got_frame_ptr = 0;
yading@10 1131 return AVERROR_INVALIDDATA;
yading@10 1132 }
yading@10 1133
yading@10 1134 if (!ctx->fr_quality || ctx->fr_cur_mode > MODE_SID)
yading@10 1135 av_log(avctx, AV_LOG_ERROR, "Encountered a bad or corrupted frame\n");
yading@10 1136
yading@10 1137 if (ctx->fr_cur_mode == MODE_SID) { /* Comfort noise frame */
yading@10 1138 avpriv_request_sample(avctx, "SID mode");
yading@10 1139 return AVERROR_PATCHWELCOME;
yading@10 1140 }
yading@10 1141
yading@10 1142 ff_amr_bit_reorder((uint16_t *) &ctx->frame, sizeof(AMRWBFrame),
yading@10 1143 buf + header_size, amr_bit_orderings_by_mode[ctx->fr_cur_mode]);
yading@10 1144
yading@10 1145 /* Decode the quantized ISF vector */
yading@10 1146 if (ctx->fr_cur_mode == MODE_6k60) {
yading@10 1147 decode_isf_indices_36b(cf->isp_id, ctx->isf_cur);
yading@10 1148 } else {
yading@10 1149 decode_isf_indices_46b(cf->isp_id, ctx->isf_cur);
yading@10 1150 }
yading@10 1151
yading@10 1152 isf_add_mean_and_past(ctx->isf_cur, ctx->isf_q_past);
yading@10 1153 ff_set_min_dist_lsf(ctx->isf_cur, MIN_ISF_SPACING, LP_ORDER - 1);
yading@10 1154
yading@10 1155 stab_fac = stability_factor(ctx->isf_cur, ctx->isf_past_final);
yading@10 1156
yading@10 1157 ctx->isf_cur[LP_ORDER - 1] *= 2.0;
yading@10 1158 ff_acelp_lsf2lspd(ctx->isp[3], ctx->isf_cur, LP_ORDER);
yading@10 1159
yading@10 1160 /* Generate an ISP vector for each subframe */
yading@10 1161 if (ctx->first_frame) {
yading@10 1162 ctx->first_frame = 0;
yading@10 1163 memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(double));
yading@10 1164 }
yading@10 1165 interpolate_isp(ctx->isp, ctx->isp_sub4_past);
yading@10 1166
yading@10 1167 for (sub = 0; sub < 4; sub++)
yading@10 1168 ff_amrwb_lsp2lpc(ctx->isp[sub], ctx->lp_coef[sub], LP_ORDER);
yading@10 1169
yading@10 1170 for (sub = 0; sub < 4; sub++) {
yading@10 1171 const AMRWBSubFrame *cur_subframe = &cf->subframe[sub];
yading@10 1172 float *sub_buf = buf_out + sub * AMRWB_SFR_SIZE_16k;
yading@10 1173
yading@10 1174 /* Decode adaptive codebook (pitch vector) */
yading@10 1175 decode_pitch_vector(ctx, cur_subframe, sub);
yading@10 1176 /* Decode innovative codebook (fixed vector) */
yading@10 1177 decode_fixed_vector(ctx->fixed_vector, cur_subframe->pul_ih,
yading@10 1178 cur_subframe->pul_il, ctx->fr_cur_mode);
yading@10 1179
yading@10 1180 pitch_sharpening(ctx, ctx->fixed_vector);
yading@10 1181
yading@10 1182 decode_gains(cur_subframe->vq_gain, ctx->fr_cur_mode,
yading@10 1183 &fixed_gain_factor, &ctx->pitch_gain[0]);
yading@10 1184
yading@10 1185 ctx->fixed_gain[0] =
yading@10 1186 ff_amr_set_fixed_gain(fixed_gain_factor,
yading@10 1187 ctx->celpm_ctx.dot_productf(ctx->fixed_vector,
yading@10 1188 ctx->fixed_vector,
yading@10 1189 AMRWB_SFR_SIZE) /
yading@10 1190 AMRWB_SFR_SIZE,
yading@10 1191 ctx->prediction_error,
yading@10 1192 ENERGY_MEAN, energy_pred_fac);
yading@10 1193
yading@10 1194 /* Calculate voice factor and store tilt for next subframe */
yading@10 1195 voice_fac = voice_factor(ctx->pitch_vector, ctx->pitch_gain[0],
yading@10 1196 ctx->fixed_vector, ctx->fixed_gain[0],
yading@10 1197 &ctx->celpm_ctx);
yading@10 1198 ctx->tilt_coef = voice_fac * 0.25 + 0.25;
yading@10 1199
yading@10 1200 /* Construct current excitation */
yading@10 1201 for (i = 0; i < AMRWB_SFR_SIZE; i++) {
yading@10 1202 ctx->excitation[i] *= ctx->pitch_gain[0];
yading@10 1203 ctx->excitation[i] += ctx->fixed_gain[0] * ctx->fixed_vector[i];
yading@10 1204 ctx->excitation[i] = truncf(ctx->excitation[i]);
yading@10 1205 }
yading@10 1206
yading@10 1207 /* Post-processing of excitation elements */
yading@10 1208 synth_fixed_gain = noise_enhancer(ctx->fixed_gain[0], &ctx->prev_tr_gain,
yading@10 1209 voice_fac, stab_fac);
yading@10 1210
yading@10 1211 synth_fixed_vector = anti_sparseness(ctx, ctx->fixed_vector,
yading@10 1212 spare_vector);
yading@10 1213
yading@10 1214 pitch_enhancer(synth_fixed_vector, voice_fac);
yading@10 1215
yading@10 1216 synthesis(ctx, ctx->lp_coef[sub], synth_exc, synth_fixed_gain,
yading@10 1217 synth_fixed_vector, &ctx->samples_az[LP_ORDER]);
yading@10 1218
yading@10 1219 /* Synthesis speech post-processing */
yading@10 1220 de_emphasis(&ctx->samples_up[UPS_MEM_SIZE],
yading@10 1221 &ctx->samples_az[LP_ORDER], PREEMPH_FAC, ctx->demph_mem);
yading@10 1222
yading@10 1223 ctx->acelpf_ctx.acelp_apply_order_2_transfer_function(&ctx->samples_up[UPS_MEM_SIZE],
yading@10 1224 &ctx->samples_up[UPS_MEM_SIZE], hpf_zeros, hpf_31_poles,
yading@10 1225 hpf_31_gain, ctx->hpf_31_mem, AMRWB_SFR_SIZE);
yading@10 1226
yading@10 1227 upsample_5_4(sub_buf, &ctx->samples_up[UPS_FIR_SIZE],
yading@10 1228 AMRWB_SFR_SIZE_16k, &ctx->celpm_ctx);
yading@10 1229
yading@10 1230 /* High frequency band (6.4 - 7.0 kHz) generation part */
yading@10 1231 ctx->acelpf_ctx.acelp_apply_order_2_transfer_function(hb_samples,
yading@10 1232 &ctx->samples_up[UPS_MEM_SIZE], hpf_zeros, hpf_400_poles,
yading@10 1233 hpf_400_gain, ctx->hpf_400_mem, AMRWB_SFR_SIZE);
yading@10 1234
yading@10 1235 hb_gain = find_hb_gain(ctx, hb_samples,
yading@10 1236 cur_subframe->hb_gain, cf->vad);
yading@10 1237
yading@10 1238 scaled_hb_excitation(ctx, hb_exc, synth_exc, hb_gain);
yading@10 1239
yading@10 1240 hb_synthesis(ctx, sub, &ctx->samples_hb[LP_ORDER_16k],
yading@10 1241 hb_exc, ctx->isf_cur, ctx->isf_past_final);
yading@10 1242
yading@10 1243 /* High-band post-processing filters */
yading@10 1244 hb_fir_filter(hb_samples, bpf_6_7_coef, ctx->bpf_6_7_mem,
yading@10 1245 &ctx->samples_hb[LP_ORDER_16k]);
yading@10 1246
yading@10 1247 if (ctx->fr_cur_mode == MODE_23k85)
yading@10 1248 hb_fir_filter(hb_samples, lpf_7_coef, ctx->lpf_7_mem,
yading@10 1249 hb_samples);
yading@10 1250
yading@10 1251 /* Add the low and high frequency bands */
yading@10 1252 for (i = 0; i < AMRWB_SFR_SIZE_16k; i++)
yading@10 1253 sub_buf[i] = (sub_buf[i] + hb_samples[i]) * (1.0f / (1 << 15));
yading@10 1254
yading@10 1255 /* Update buffers and history */
yading@10 1256 update_sub_state(ctx);
yading@10 1257 }
yading@10 1258
yading@10 1259 /* update state for next frame */
yading@10 1260 memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0]));
yading@10 1261 memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
yading@10 1262
yading@10 1263 *got_frame_ptr = 1;
yading@10 1264
yading@10 1265 return expected_fr_size;
yading@10 1266 }
yading@10 1267
yading@10 1268 AVCodec ff_amrwb_decoder = {
yading@10 1269 .name = "amrwb",
yading@10 1270 .type = AVMEDIA_TYPE_AUDIO,
yading@10 1271 .id = AV_CODEC_ID_AMR_WB,
yading@10 1272 .priv_data_size = sizeof(AMRWBContext),
yading@10 1273 .init = amrwb_decode_init,
yading@10 1274 .decode = amrwb_decode_frame,
yading@10 1275 .capabilities = CODEC_CAP_DR1,
yading@10 1276 .long_name = NULL_IF_CONFIG_SMALL("AMR-WB (Adaptive Multi-Rate WideBand)"),
yading@10 1277 .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
yading@10 1278 AV_SAMPLE_FMT_NONE },
yading@10 1279 };