annotate ffmpeg/libavcodec/amrnbdec.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * AMR narrowband decoder
yading@10 3 * Copyright (c) 2006-2007 Robert Swain
yading@10 4 * Copyright (c) 2009 Colin McQuillan
yading@10 5 *
yading@10 6 * This file is part of FFmpeg.
yading@10 7 *
yading@10 8 * FFmpeg is free software; you can redistribute it and/or
yading@10 9 * modify it under the terms of the GNU Lesser General Public
yading@10 10 * License as published by the Free Software Foundation; either
yading@10 11 * version 2.1 of the License, or (at your option) any later version.
yading@10 12 *
yading@10 13 * FFmpeg is distributed in the hope that it will be useful,
yading@10 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 16 * Lesser General Public License for more details.
yading@10 17 *
yading@10 18 * You should have received a copy of the GNU Lesser General Public
yading@10 19 * License along with FFmpeg; if not, write to the Free Software
yading@10 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 21 */
yading@10 22
yading@10 23
yading@10 24 /**
yading@10 25 * @file
yading@10 26 * AMR narrowband decoder
yading@10 27 *
yading@10 28 * This decoder uses floats for simplicity and so is not bit-exact. One
yading@10 29 * difference is that differences in phase can accumulate. The test sequences
yading@10 30 * in 3GPP TS 26.074 can still be useful.
yading@10 31 *
yading@10 32 * - Comparing this file's output to the output of the reference decoder gives a
yading@10 33 * PSNR of 30 to 80 dB. Plotting the output samples shows a difference in
yading@10 34 * phase in some areas.
yading@10 35 *
yading@10 36 * - Comparing both decoders against their input, this decoder gives a similar
yading@10 37 * PSNR. If the test sequence homing frames are removed (this decoder does
yading@10 38 * not detect them), the PSNR is at least as good as the reference on 140
yading@10 39 * out of 169 tests.
yading@10 40 */
yading@10 41
yading@10 42
yading@10 43 #include <string.h>
yading@10 44 #include <math.h>
yading@10 45
yading@10 46 #include "libavutil/channel_layout.h"
yading@10 47 #include "libavutil/float_dsp.h"
yading@10 48 #include "avcodec.h"
yading@10 49 #include "libavutil/common.h"
yading@10 50 #include "libavutil/avassert.h"
yading@10 51 #include "celp_math.h"
yading@10 52 #include "celp_filters.h"
yading@10 53 #include "acelp_filters.h"
yading@10 54 #include "acelp_vectors.h"
yading@10 55 #include "acelp_pitch_delay.h"
yading@10 56 #include "lsp.h"
yading@10 57 #include "amr.h"
yading@10 58 #include "internal.h"
yading@10 59
yading@10 60 #include "amrnbdata.h"
yading@10 61
yading@10 62 #define AMR_BLOCK_SIZE 160 ///< samples per frame
yading@10 63 #define AMR_SAMPLE_BOUND 32768.0 ///< threshold for synthesis overflow
yading@10 64
yading@10 65 /**
yading@10 66 * Scale from constructed speech to [-1,1]
yading@10 67 *
yading@10 68 * AMR is designed to produce 16-bit PCM samples (3GPP TS 26.090 4.2) but
yading@10 69 * upscales by two (section 6.2.2).
yading@10 70 *
yading@10 71 * Fundamentally, this scale is determined by energy_mean through
yading@10 72 * the fixed vector contribution to the excitation vector.
yading@10 73 */
yading@10 74 #define AMR_SAMPLE_SCALE (2.0 / 32768.0)
yading@10 75
yading@10 76 /** Prediction factor for 12.2kbit/s mode */
yading@10 77 #define PRED_FAC_MODE_12k2 0.65
yading@10 78
yading@10 79 #define LSF_R_FAC (8000.0 / 32768.0) ///< LSF residual tables to Hertz
yading@10 80 #define MIN_LSF_SPACING (50.0488 / 8000.0) ///< Ensures stability of LPC filter
yading@10 81 #define PITCH_LAG_MIN_MODE_12k2 18 ///< Lower bound on decoded lag search in 12.2kbit/s mode
yading@10 82
yading@10 83 /** Initial energy in dB. Also used for bad frames (unimplemented). */
yading@10 84 #define MIN_ENERGY -14.0
yading@10 85
yading@10 86 /** Maximum sharpening factor
yading@10 87 *
yading@10 88 * The specification says 0.8, which should be 13107, but the reference C code
yading@10 89 * uses 13017 instead. (Amusingly the same applies to SHARP_MAX in g729dec.c.)
yading@10 90 */
yading@10 91 #define SHARP_MAX 0.79449462890625
yading@10 92
yading@10 93 /** Number of impulse response coefficients used for tilt factor */
yading@10 94 #define AMR_TILT_RESPONSE 22
yading@10 95 /** Tilt factor = 1st reflection coefficient * gamma_t */
yading@10 96 #define AMR_TILT_GAMMA_T 0.8
yading@10 97 /** Adaptive gain control factor used in post-filter */
yading@10 98 #define AMR_AGC_ALPHA 0.9
yading@10 99
yading@10 100 typedef struct AMRContext {
yading@10 101 AMRNBFrame frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
yading@10 102 uint8_t bad_frame_indicator; ///< bad frame ? 1 : 0
yading@10 103 enum Mode cur_frame_mode;
yading@10 104
yading@10 105 int16_t prev_lsf_r[LP_FILTER_ORDER]; ///< residual LSF vector from previous subframe
yading@10 106 double lsp[4][LP_FILTER_ORDER]; ///< lsp vectors from current frame
yading@10 107 double prev_lsp_sub4[LP_FILTER_ORDER]; ///< lsp vector for the 4th subframe of the previous frame
yading@10 108
yading@10 109 float lsf_q[4][LP_FILTER_ORDER]; ///< Interpolated LSF vector for fixed gain smoothing
yading@10 110 float lsf_avg[LP_FILTER_ORDER]; ///< vector of averaged lsf vector
yading@10 111
yading@10 112 float lpc[4][LP_FILTER_ORDER]; ///< lpc coefficient vectors for 4 subframes
yading@10 113
yading@10 114 uint8_t pitch_lag_int; ///< integer part of pitch lag from current subframe
yading@10 115
yading@10 116 float excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1 + AMR_SUBFRAME_SIZE]; ///< current excitation and all necessary excitation history
yading@10 117 float *excitation; ///< pointer to the current excitation vector in excitation_buf
yading@10 118
yading@10 119 float pitch_vector[AMR_SUBFRAME_SIZE]; ///< adaptive code book (pitch) vector
yading@10 120 float fixed_vector[AMR_SUBFRAME_SIZE]; ///< algebraic codebook (fixed) vector (must be kept zero between frames)
yading@10 121
yading@10 122 float prediction_error[4]; ///< quantized prediction errors {20log10(^gamma_gc)} for previous four subframes
yading@10 123 float pitch_gain[5]; ///< quantized pitch gains for the current and previous four subframes
yading@10 124 float fixed_gain[5]; ///< quantized fixed gains for the current and previous four subframes
yading@10 125
yading@10 126 float beta; ///< previous pitch_gain, bounded by [0.0,SHARP_MAX]
yading@10 127 uint8_t diff_count; ///< the number of subframes for which diff has been above 0.65
yading@10 128 uint8_t hang_count; ///< the number of subframes since a hangover period started
yading@10 129
yading@10 130 float prev_sparse_fixed_gain; ///< previous fixed gain; used by anti-sparseness processing to determine "onset"
yading@10 131 uint8_t prev_ir_filter_nr; ///< previous impulse response filter "impNr": 0 - strong, 1 - medium, 2 - none
yading@10 132 uint8_t ir_filter_onset; ///< flag for impulse response filter strength
yading@10 133
yading@10 134 float postfilter_mem[10]; ///< previous intermediate values in the formant filter
yading@10 135 float tilt_mem; ///< previous input to tilt compensation filter
yading@10 136 float postfilter_agc; ///< previous factor used for adaptive gain control
yading@10 137 float high_pass_mem[2]; ///< previous intermediate values in the high-pass filter
yading@10 138
yading@10 139 float samples_in[LP_FILTER_ORDER + AMR_SUBFRAME_SIZE]; ///< floating point samples
yading@10 140
yading@10 141 ACELPFContext acelpf_ctx; ///< context for filters for ACELP-based codecs
yading@10 142 ACELPVContext acelpv_ctx; ///< context for vector operations for ACELP-based codecs
yading@10 143 CELPFContext celpf_ctx; ///< context for filters for CELP-based codecs
yading@10 144 CELPMContext celpm_ctx; ///< context for fixed point math operations
yading@10 145
yading@10 146 } AMRContext;
yading@10 147
yading@10 148 /** Double version of ff_weighted_vector_sumf() */
yading@10 149 static void weighted_vector_sumd(double *out, const double *in_a,
yading@10 150 const double *in_b, double weight_coeff_a,
yading@10 151 double weight_coeff_b, int length)
yading@10 152 {
yading@10 153 int i;
yading@10 154
yading@10 155 for (i = 0; i < length; i++)
yading@10 156 out[i] = weight_coeff_a * in_a[i]
yading@10 157 + weight_coeff_b * in_b[i];
yading@10 158 }
yading@10 159
yading@10 160 static av_cold int amrnb_decode_init(AVCodecContext *avctx)
yading@10 161 {
yading@10 162 AMRContext *p = avctx->priv_data;
yading@10 163 int i;
yading@10 164
yading@10 165 if (avctx->channels > 1) {
yading@10 166 avpriv_report_missing_feature(avctx, "multi-channel AMR");
yading@10 167 return AVERROR_PATCHWELCOME;
yading@10 168 }
yading@10 169
yading@10 170 avctx->channels = 1;
yading@10 171 avctx->channel_layout = AV_CH_LAYOUT_MONO;
yading@10 172 if (!avctx->sample_rate)
yading@10 173 avctx->sample_rate = 8000;
yading@10 174 avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
yading@10 175
yading@10 176 // p->excitation always points to the same position in p->excitation_buf
yading@10 177 p->excitation = &p->excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1];
yading@10 178
yading@10 179 for (i = 0; i < LP_FILTER_ORDER; i++) {
yading@10 180 p->prev_lsp_sub4[i] = lsp_sub4_init[i] * 1000 / (float)(1 << 15);
yading@10 181 p->lsf_avg[i] = p->lsf_q[3][i] = lsp_avg_init[i] / (float)(1 << 15);
yading@10 182 }
yading@10 183
yading@10 184 for (i = 0; i < 4; i++)
yading@10 185 p->prediction_error[i] = MIN_ENERGY;
yading@10 186
yading@10 187 ff_acelp_filter_init(&p->acelpf_ctx);
yading@10 188 ff_acelp_vectors_init(&p->acelpv_ctx);
yading@10 189 ff_celp_filter_init(&p->celpf_ctx);
yading@10 190 ff_celp_math_init(&p->celpm_ctx);
yading@10 191
yading@10 192 return 0;
yading@10 193 }
yading@10 194
yading@10 195
yading@10 196 /**
yading@10 197 * Unpack an RFC4867 speech frame into the AMR frame mode and parameters.
yading@10 198 *
yading@10 199 * The order of speech bits is specified by 3GPP TS 26.101.
yading@10 200 *
yading@10 201 * @param p the context
yading@10 202 * @param buf pointer to the input buffer
yading@10 203 * @param buf_size size of the input buffer
yading@10 204 *
yading@10 205 * @return the frame mode
yading@10 206 */
yading@10 207 static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
yading@10 208 int buf_size)
yading@10 209 {
yading@10 210 enum Mode mode;
yading@10 211
yading@10 212 // Decode the first octet.
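// RFC 4867 (storage format) places a one-octet frame header before the speech
// bits: the frame type (mode) sits in bits 3-6 and the frame-quality bit in
// bit 2, so Q == 0 marks a damaged frame.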
yading@10 213 mode = buf[0] >> 3 & 0x0F; // frame type
yading@10 214 p->bad_frame_indicator = (buf[0] & 0x4) != 0x4; // quality bit
yading@10 215
yading@10 216 if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
yading@10 217 return NO_DATA;
yading@10 218 }
yading@10 219
yading@10 220 if (mode < MODE_DTX)
yading@10 221 ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
yading@10 222 amr_unpacking_bitmaps_per_mode[mode]);
yading@10 223
yading@10 224 return mode;
yading@10 225 }
yading@10 226
yading@10 227
yading@10 228 /// @name AMR pitch LPC coefficient decoding functions
yading@10 229 /// @{
yading@10 230
yading@10 231 /**
yading@10 232 * Interpolate the LSF vector (used for fixed gain smoothing).
yading@10 233 * The interpolation is done over all four subframes even in MODE_12k2.
yading@10 234 *
yading@10 235 * @param[in] ctx The Context
yading@10 236 * @param[in,out] lsf_q LSFs in [0,1] for each subframe
yading@10 237 * @param[in] lsf_new New LSFs in [0,1] for subframe 4
yading@10 238 */
yading@10 239 static void interpolate_lsf(ACELPVContext *ctx, float lsf_q[4][LP_FILTER_ORDER], float *lsf_new)
yading@10 240 {
yading@10 241 int i;
yading@10 242
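// The weights step by 1/4 per subframe: subframe 0 uses 3/4 of the previous
// frame's LSFs (lsf_q[3]) and 1/4 of the new ones; subframe 3 uses only the
// new LSF vector.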
yading@10 243 for (i = 0; i < 4; i++)
yading@10 244 ctx->weighted_vector_sumf(lsf_q[i], lsf_q[3], lsf_new,
yading@10 245 0.25 * (3 - i), 0.25 * (i + 1),
yading@10 246 LP_FILTER_ORDER);
yading@10 247 }
yading@10 248
yading@10 249 /**
yading@10 250 * Decode a set of 5 split-matrix quantized lsf indexes into an lsp vector.
yading@10 251 *
yading@10 252 * @param p the context
yading@10 253 * @param lsp output LSP vector
yading@10 254 * @param lsf_no_r LSF vector without the residual vector added
yading@10 255 * @param lsf_quantizer pointers to LSF dictionary tables
yading@10 256 * @param quantizer_offset offset in tables
yading@10 257 * @param sign sign applied to the 3rd dictionary table
yading@10 258 * @param update store data for computing the next frame's LSFs
yading@10 259 */
yading@10 260 static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER],
yading@10 261 const float lsf_no_r[LP_FILTER_ORDER],
yading@10 262 const int16_t *lsf_quantizer[5],
yading@10 263 const int quantizer_offset,
yading@10 264 const int sign, const int update)
yading@10 265 {
yading@10 266 int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
yading@10 267 float lsf_q[LP_FILTER_ORDER]; // quantized LSF vector
yading@10 268 int i;
yading@10 269
yading@10 270 for (i = 0; i < LP_FILTER_ORDER >> 1; i++)
yading@10 271 memcpy(&lsf_r[i << 1], &lsf_quantizer[i][quantizer_offset],
yading@10 272 2 * sizeof(*lsf_r));
yading@10 273
yading@10 274 if (sign) {
yading@10 275 lsf_r[4] *= -1;
yading@10 276 lsf_r[5] *= -1;
yading@10 277 }
yading@10 278
yading@10 279 if (update)
yading@10 280 memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));
yading@10 281
yading@10 282 for (i = 0; i < LP_FILTER_ORDER; i++)
yading@10 283 lsf_q[i] = lsf_r[i] * (LSF_R_FAC / 8000.0) + lsf_no_r[i] * (1.0 / 8000.0);
yading@10 284
yading@10 285 ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);
yading@10 286
yading@10 287 if (update)
yading@10 288 interpolate_lsf(&p->acelpv_ctx, p->lsf_q, lsf_q);
yading@10 289
yading@10 290 ff_acelp_lsf2lspd(lsp, lsf_q, LP_FILTER_ORDER);
yading@10 291 }
yading@10 292
yading@10 293 /**
yading@10 294 * Decode a set of 5 split-matrix quantized lsf indexes into 2 lsp vectors.
yading@10 295 *
yading@10 296 * @param p pointer to the AMRContext
yading@10 297 */
yading@10 298 static void lsf2lsp_5(AMRContext *p)
yading@10 299 {
yading@10 300 const uint16_t *lsf_param = p->frame.lsf;
yading@10 301 float lsf_no_r[LP_FILTER_ORDER]; // LSFs without the residual vector
yading@10 302 const int16_t *lsf_quantizer[5];
yading@10 303 int i;
yading@10 304
yading@10 305 lsf_quantizer[0] = lsf_5_1[lsf_param[0]];
yading@10 306 lsf_quantizer[1] = lsf_5_2[lsf_param[1]];
yading@10 307 lsf_quantizer[2] = lsf_5_3[lsf_param[2] >> 1];
yading@10 308 lsf_quantizer[3] = lsf_5_4[lsf_param[3]];
yading@10 309 lsf_quantizer[4] = lsf_5_5[lsf_param[4]];
yading@10 310
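// First-order moving-average prediction: the LSF estimate before adding the
// new residual is the long-term mean plus the previous frame's residual
// scaled by PRED_FAC_MODE_12k2; the decoded residual is added in
// lsf2lsp_for_mode12k2().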
yading@10 311 for (i = 0; i < LP_FILTER_ORDER; i++)
yading@10 312 lsf_no_r[i] = p->prev_lsf_r[i] * LSF_R_FAC * PRED_FAC_MODE_12k2 + lsf_5_mean[i];
yading@10 313
yading@10 314 lsf2lsp_for_mode12k2(p, p->lsp[1], lsf_no_r, lsf_quantizer, 0, lsf_param[2] & 1, 0);
yading@10 315 lsf2lsp_for_mode12k2(p, p->lsp[3], lsf_no_r, lsf_quantizer, 2, lsf_param[2] & 1, 1);
yading@10 316
yading@10 317 // interpolate LSP vectors at subframes 1 and 3
yading@10 318 weighted_vector_sumd(p->lsp[0], p->prev_lsp_sub4, p->lsp[1], 0.5, 0.5, LP_FILTER_ORDER);
yading@10 319 weighted_vector_sumd(p->lsp[2], p->lsp[1] , p->lsp[3], 0.5, 0.5, LP_FILTER_ORDER);
yading@10 320 }
yading@10 321
yading@10 322 /**
yading@10 323 * Decode a set of 3 split-matrix quantized lsf indexes into an lsp vector.
yading@10 324 *
yading@10 325 * @param p pointer to the AMRContext
yading@10 326 */
yading@10 327 static void lsf2lsp_3(AMRContext *p)
yading@10 328 {
yading@10 329 const uint16_t *lsf_param = p->frame.lsf;
yading@10 330 int16_t lsf_r[LP_FILTER_ORDER]; // residual LSF vector
yading@10 331 float lsf_q[LP_FILTER_ORDER]; // quantized LSF vector
yading@10 332 const int16_t *lsf_quantizer;
yading@10 333 int i, j;
yading@10 334
yading@10 335 lsf_quantizer = (p->cur_frame_mode == MODE_7k95 ? lsf_3_1_MODE_7k95 : lsf_3_1)[lsf_param[0]];
yading@10 336 memcpy(lsf_r, lsf_quantizer, 3 * sizeof(*lsf_r));
yading@10 337
yading@10 338 lsf_quantizer = lsf_3_2[lsf_param[1] << (p->cur_frame_mode <= MODE_5k15)];
yading@10 339 memcpy(lsf_r + 3, lsf_quantizer, 3 * sizeof(*lsf_r));
yading@10 340
yading@10 341 lsf_quantizer = (p->cur_frame_mode <= MODE_5k15 ? lsf_3_3_MODE_5k15 : lsf_3_3)[lsf_param[2]];
yading@10 342 memcpy(lsf_r + 6, lsf_quantizer, 4 * sizeof(*lsf_r));
yading@10 343
yading@10 344 // calculate mean-removed LSF vector and add mean
yading@10 345 for (i = 0; i < LP_FILTER_ORDER; i++)
yading@10 346 lsf_q[i] = (lsf_r[i] + p->prev_lsf_r[i] * pred_fac[i]) * (LSF_R_FAC / 8000.0) + lsf_3_mean[i] * (1.0 / 8000.0);
yading@10 347
yading@10 348 ff_set_min_dist_lsf(lsf_q, MIN_LSF_SPACING, LP_FILTER_ORDER);
yading@10 349
yading@10 350 // store data for computing the next frame's LSFs
yading@10 351 interpolate_lsf(&p->acelpv_ctx, p->lsf_q, lsf_q);
yading@10 352 memcpy(p->prev_lsf_r, lsf_r, LP_FILTER_ORDER * sizeof(*lsf_r));
yading@10 353
yading@10 354 ff_acelp_lsf2lspd(p->lsp[3], lsf_q, LP_FILTER_ORDER);
yading@10 355
yading@10 356 // interpolate LSP vectors at subframes 1, 2 and 3
yading@10 357 for (i = 1; i <= 3; i++)
yading@10 358 for(j = 0; j < LP_FILTER_ORDER; j++)
yading@10 359 p->lsp[i-1][j] = p->prev_lsp_sub4[j] +
yading@10 360 (p->lsp[3][j] - p->prev_lsp_sub4[j]) * 0.25 * i;
yading@10 361 }
yading@10 362
yading@10 363 /// @}
yading@10 364
yading@10 365
yading@10 366 /// @name AMR pitch vector decoding functions
yading@10 367 /// @{
yading@10 368
yading@10 369 /**
yading@10 370 * Like ff_decode_pitch_lag(), but with 1/6 resolution
yading@10 371 */
yading@10 372 static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index,
yading@10 373 const int prev_lag_int, const int subframe)
yading@10 374 {
yading@10 375 if (subframe == 0 || subframe == 2) {
yading@10 376 if (pitch_index < 463) {
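// 10923 / 2^16 is roughly 1/6, so lag_int ~= (pitch_index + 107) / 6 and the
// leftover becomes the fractional part, giving 1/6-sample lag resolution.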
yading@10 377 *lag_int = (pitch_index + 107) * 10923 >> 16;
yading@10 378 *lag_frac = pitch_index - *lag_int * 6 + 105;
yading@10 379 } else {
yading@10 380 *lag_int = pitch_index - 368;
yading@10 381 *lag_frac = 0;
yading@10 382 }
yading@10 383 } else {
yading@10 384 *lag_int = ((pitch_index + 5) * 10923 >> 16) - 1;
yading@10 385 *lag_frac = pitch_index - *lag_int * 6 - 3;
yading@10 386 *lag_int += av_clip(prev_lag_int - 5, PITCH_LAG_MIN_MODE_12k2,
yading@10 387 PITCH_DELAY_MAX - 9);
yading@10 388 }
yading@10 389 }
yading@10 390
yading@10 391 static void decode_pitch_vector(AMRContext *p,
yading@10 392 const AMRNBSubframe *amr_subframe,
yading@10 393 const int subframe)
yading@10 394 {
yading@10 395 int pitch_lag_int, pitch_lag_frac;
yading@10 396 enum Mode mode = p->cur_frame_mode;
yading@10 397
yading@10 398 if (p->cur_frame_mode == MODE_12k2) {
yading@10 399 decode_pitch_lag_1_6(&pitch_lag_int, &pitch_lag_frac,
yading@10 400 amr_subframe->p_lag, p->pitch_lag_int,
yading@10 401 subframe);
yading@10 402 } else
yading@10 403 ff_decode_pitch_lag(&pitch_lag_int, &pitch_lag_frac,
yading@10 404 amr_subframe->p_lag,
yading@10 405 p->pitch_lag_int, subframe,
yading@10 406 mode != MODE_4k75 && mode != MODE_5k15,
yading@10 407 mode <= MODE_6k7 ? 4 : (mode == MODE_7k95 ? 5 : 6));
yading@10 408
yading@10 409 p->pitch_lag_int = pitch_lag_int; // store previous lag in a uint8_t
yading@10 410
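// Modes below 12.2 kbit/s code the fraction in 1/3-sample steps; doubling it
// puts it on the 1/6-sample grid used by the interpolation below.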
yading@10 411 pitch_lag_frac <<= (p->cur_frame_mode != MODE_12k2);
yading@10 412
yading@10 413 pitch_lag_int += pitch_lag_frac > 0;
yading@10 414
yading@10 415 /* Calculate the pitch vector by interpolating the past excitation at the
yading@10 416 pitch lag using a b60 Hamming-windowed sinc function. */
yading@10 417 p->acelpf_ctx.acelp_interpolatef(p->excitation,
yading@10 418 p->excitation + 1 - pitch_lag_int,
yading@10 419 ff_b60_sinc, 6,
yading@10 420 pitch_lag_frac + 6 - 6*(pitch_lag_frac > 0),
yading@10 421 10, AMR_SUBFRAME_SIZE);
yading@10 422
yading@10 423 memcpy(p->pitch_vector, p->excitation, AMR_SUBFRAME_SIZE * sizeof(float));
yading@10 424 }
yading@10 425
yading@10 426 /// @}
yading@10 427
yading@10 428
yading@10 429 /// @name AMR algebraic code book (fixed) vector decoding functions
yading@10 430 /// @{
yading@10 431
yading@10 432 /**
yading@10 433 * Decode a 10-bit algebraic codebook index from a 10.2 kbit/s frame.
yading@10 434 */
yading@10 435 static void decode_10bit_pulse(int code, int pulse_position[8],
yading@10 436 int i1, int i2, int i3)
yading@10 437 {
yading@10 438 // coded using 7+3 bits with the 3 LSBs being, individually, the LSB of 1 of
yading@10 439 // the 3 pulses and the upper 7 bits being coded in base 5
yading@10 440 const uint8_t *positions = base_five_table[code >> 3];
yading@10 441 pulse_position[i1] = (positions[2] << 1) + ( code & 1);
yading@10 442 pulse_position[i2] = (positions[1] << 1) + ((code >> 1) & 1);
yading@10 443 pulse_position[i3] = (positions[0] << 1) + ((code >> 2) & 1);
yading@10 444 }
yading@10 445
yading@10 446 /**
yading@10 447 * Decode the algebraic codebook index to pulse positions and signs and
yading@10 448 * construct the algebraic codebook vector for MODE_10k2.
yading@10 449 *
yading@10 450 * @param fixed_index positions of the eight pulses
yading@10 451 * @param fixed_sparse pointer to the algebraic codebook vector
yading@10 452 */
yading@10 453 static void decode_8_pulses_31bits(const int16_t *fixed_index,
yading@10 454 AMRFixed *fixed_sparse)
yading@10 455 {
yading@10 456 int pulse_position[8];
yading@10 457 int i, temp;
yading@10 458
yading@10 459 decode_10bit_pulse(fixed_index[4], pulse_position, 0, 4, 1);
yading@10 460 decode_10bit_pulse(fixed_index[5], pulse_position, 2, 6, 5);
yading@10 461
yading@10 462 // coded using 5+2 bits with the 2 LSBs being, individually, the LSB of 1 of
yading@10 463 // the 2 pulses and the upper 5 bits being coded in base 5
yading@10 464 temp = ((fixed_index[6] >> 2) * 25 + 12) >> 5;
yading@10 465 pulse_position[3] = temp % 5;
yading@10 466 pulse_position[7] = temp / 5;
yading@10 467 if (pulse_position[7] & 1)
yading@10 468 pulse_position[3] = 4 - pulse_position[3];
yading@10 469 pulse_position[3] = (pulse_position[3] << 1) + ( fixed_index[6] & 1);
yading@10 470 pulse_position[7] = (pulse_position[7] << 1) + ((fixed_index[6] >> 1) & 1);
yading@10 471
yading@10 472 fixed_sparse->n = 8;
yading@10 473 for (i = 0; i < 4; i++) {
yading@10 474 const int pos1 = (pulse_position[i] << 2) + i;
yading@10 475 const int pos2 = (pulse_position[i + 4] << 2) + i;
yading@10 476 const float sign = fixed_index[i] ? -1.0 : 1.0;
yading@10 477 fixed_sparse->x[i ] = pos1;
yading@10 478 fixed_sparse->x[i + 4] = pos2;
yading@10 479 fixed_sparse->y[i ] = sign;
yading@10 480 fixed_sparse->y[i + 4] = pos2 < pos1 ? -sign : sign;
yading@10 481 }
yading@10 482 }
yading@10 483
yading@10 484 /**
yading@10 485 * Decode the algebraic codebook index to pulse positions and signs,
yading@10 486 * then construct the algebraic codebook vector.
yading@10 487 *
yading@10 488 * mode                   | nb of pulses | bits encoding pulses
yading@10 489 * MODE_4k75 or MODE_5k15 | 2            | 1-3, 4-6, 7
yading@10 490 * MODE_5k9               | 2            | 1, 2-4, 5-6, 7-9
yading@10 491 * MODE_6k7               | 3            | 1-3, 4, 5-7, 8, 9-11
yading@10 492 * MODE_7k4 or MODE_7k95  | 4            | 1-3, 4-6, 7-9, 10, 11-13
yading@10 493 *
yading@10 494 * @param fixed_sparse pointer to the algebraic codebook vector
yading@10 495 * @param pulses algebraic codebook indexes
yading@10 496 * @param mode mode of the current frame
yading@10 497 * @param subframe current subframe number
yading@10 498 */
yading@10 499 static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses,
yading@10 500 const enum Mode mode, const int subframe)
yading@10 501 {
yading@10 502 av_assert1(MODE_4k75 <= (signed)mode && mode <= MODE_12k2);
yading@10 503
yading@10 504 if (mode == MODE_12k2) {
yading@10 505 ff_decode_10_pulses_35bits(pulses, fixed_sparse, gray_decode, 5, 3);
yading@10 506 } else if (mode == MODE_10k2) {
yading@10 507 decode_8_pulses_31bits(pulses, fixed_sparse);
yading@10 508 } else {
yading@10 509 int *pulse_position = fixed_sparse->x;
yading@10 510 int i, pulse_subset;
yading@10 511 const int fixed_index = pulses[0];
yading@10 512
yading@10 513 if (mode <= MODE_5k15) {
yading@10 514 pulse_subset = ((fixed_index >> 3) & 8) + (subframe << 1);
yading@10 515 pulse_position[0] = ( fixed_index & 7) * 5 + track_position[pulse_subset];
yading@10 516 pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1];
yading@10 517 fixed_sparse->n = 2;
yading@10 518 } else if (mode == MODE_5k9) {
yading@10 519 pulse_subset = ((fixed_index & 1) << 1) + 1;
yading@10 520 pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset;
yading@10 521 pulse_subset = (fixed_index >> 4) & 3;
yading@10 522 pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset + (pulse_subset == 3 ? 1 : 0);
yading@10 523 fixed_sparse->n = pulse_position[0] == pulse_position[1] ? 1 : 2;
yading@10 524 } else if (mode == MODE_6k7) {
yading@10 525 pulse_position[0] = (fixed_index & 7) * 5;
yading@10 526 pulse_subset = (fixed_index >> 2) & 2;
yading@10 527 pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1;
yading@10 528 pulse_subset = (fixed_index >> 6) & 2;
yading@10 529 pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2;
yading@10 530 fixed_sparse->n = 3;
yading@10 531 } else { // mode <= MODE_7k95
yading@10 532 pulse_position[0] = gray_decode[ fixed_index & 7];
yading@10 533 pulse_position[1] = gray_decode[(fixed_index >> 3) & 7] + 1;
yading@10 534 pulse_position[2] = gray_decode[(fixed_index >> 6) & 7] + 2;
yading@10 535 pulse_subset = (fixed_index >> 9) & 1;
yading@10 536 pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3;
yading@10 537 fixed_sparse->n = 4;
yading@10 538 }
yading@10 539 for (i = 0; i < fixed_sparse->n; i++)
yading@10 540 fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0;
yading@10 541 }
yading@10 542 }
yading@10 543
yading@10 544 /**
yading@10 545 * Apply pitch lag to obtain the sharpened fixed vector (section 6.1.2)
yading@10 546 *
yading@10 547 * @param p the context
yading@10 548 * @param subframe unpacked amr subframe
yading@10 549 * @param mode mode of the current frame
yading@10 550 * @param fixed_sparse sparse representation of the fixed vector
yading@10 551 */
yading@10 552 static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode,
yading@10 553 AMRFixed *fixed_sparse)
yading@10 554 {
yading@10 555 // The spec suggests the current pitch gain is always used, but in other
yading@10 556 // modes the pitch and codebook gains are jointly quantized (sec 5.8.2)
yading@10 557 // so the codebook gain cannot depend on the quantized pitch gain.
yading@10 558 if (mode == MODE_12k2)
yading@10 559 p->beta = FFMIN(p->pitch_gain[4], 1.0);
yading@10 560
yading@10 561 fixed_sparse->pitch_lag = p->pitch_lag_int;
yading@10 562 fixed_sparse->pitch_fac = p->beta;
yading@10 563
yading@10 564 // Save pitch sharpening factor for the next subframe
yading@10 565 // MODE_4k75 only updates on the 2nd and 4th subframes - this follows from
yading@10 566 // the fact that the gains for two subframes are jointly quantized.
yading@10 567 if (mode != MODE_4k75 || subframe & 1)
yading@10 568 p->beta = av_clipf(p->pitch_gain[4], 0.0, SHARP_MAX);
yading@10 569 }
yading@10 570 /// @}
yading@10 571
yading@10 572
yading@10 573 /// @name AMR gain decoding functions
yading@10 574 /// @{
yading@10 575
yading@10 576 /**
yading@10 577 * fixed gain smoothing
yading@10 578 * Note that where the spec specifies the "spectrum in the q domain"
yading@10 579 * in section 6.1.4, in fact frequencies should be used.
yading@10 580 *
yading@10 581 * @param p the context
yading@10 582 * @param lsf LSFs for the current subframe, in the range [0,1]
yading@10 583 * @param lsf_avg averaged LSFs
yading@10 584 * @param mode mode of the current frame
yading@10 585 *
yading@10 586 * @return fixed gain smoothed
yading@10 587 */
yading@10 588 static float fixed_gain_smooth(AMRContext *p , const float *lsf,
yading@10 589 const float *lsf_avg, const enum Mode mode)
yading@10 590 {
yading@10 591 float diff = 0.0;
yading@10 592 int i;
yading@10 593
yading@10 594 for (i = 0; i < LP_FILTER_ORDER; i++)
yading@10 595 diff += fabs(lsf_avg[i] - lsf[i]) / lsf_avg[i];
yading@10 596
yading@10 597 // If diff is large for ten subframes, disable smoothing for a 40-subframe
yading@10 598 // hangover period.
yading@10 599 p->diff_count++;
yading@10 600 if (diff <= 0.65)
yading@10 601 p->diff_count = 0;
yading@10 602
yading@10 603 if (p->diff_count > 10) {
yading@10 604 p->hang_count = 0;
yading@10 605 p->diff_count--; // don't let diff_count overflow
yading@10 606 }
yading@10 607
yading@10 608 if (p->hang_count < 40) {
yading@10 609 p->hang_count++;
yading@10 610 } else if (mode < MODE_7k4 || mode == MODE_10k2) {
yading@10 611 const float smoothing_factor = av_clipf(4.0 * diff - 1.6, 0.0, 1.0);
yading@10 612 const float fixed_gain_mean = (p->fixed_gain[0] + p->fixed_gain[1] +
yading@10 613 p->fixed_gain[2] + p->fixed_gain[3] +
yading@10 614 p->fixed_gain[4]) * 0.2;
yading@10 615 return smoothing_factor * p->fixed_gain[4] +
yading@10 616 (1.0 - smoothing_factor) * fixed_gain_mean;
yading@10 617 }
yading@10 618 return p->fixed_gain[4];
yading@10 619 }
yading@10 620
yading@10 621 /**
yading@10 622 * Decode pitch gain and fixed gain factor (part of section 6.1.3).
yading@10 623 *
yading@10 624 * @param p the context
yading@10 625 * @param amr_subframe unpacked amr subframe
yading@10 626 * @param mode mode of the current frame
yading@10 627 * @param subframe current subframe number
yading@10 628 * @param fixed_gain_factor decoded gain correction factor
yading@10 629 */
yading@10 630 static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
yading@10 631 const enum Mode mode, const int subframe,
yading@10 632 float *fixed_gain_factor)
yading@10 633 {
yading@10 634 if (mode == MODE_12k2 || mode == MODE_7k95) {
yading@10 635 p->pitch_gain[4] = qua_gain_pit [amr_subframe->p_gain ]
yading@10 636 * (1.0 / 16384.0);
yading@10 637 *fixed_gain_factor = qua_gain_code[amr_subframe->fixed_gain]
yading@10 638 * (1.0 / 2048.0);
yading@10 639 } else {
yading@10 640 const uint16_t *gains;
yading@10 641
yading@10 642 if (mode >= MODE_6k7) {
yading@10 643 gains = gains_high[amr_subframe->p_gain];
yading@10 644 } else if (mode >= MODE_5k15) {
yading@10 645 gains = gains_low [amr_subframe->p_gain];
yading@10 646 } else {
yading@10 647 // gain index is only coded in subframes 0,2 for MODE_4k75
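// subframe & 2 selects the subframe (0 or 2) that carries the shared gain
// index; subframe & 1 picks which half of the jointly quantized pair applies
// to this subframe.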
yading@10 648 gains = gains_MODE_4k75[(p->frame.subframe[subframe & 2].p_gain << 1) + (subframe & 1)];
yading@10 649 }
yading@10 650
yading@10 651 p->pitch_gain[4] = gains[0] * (1.0 / 16384.0);
yading@10 652 *fixed_gain_factor = gains[1] * (1.0 / 4096.0);
yading@10 653 }
yading@10 654 }
yading@10 655
yading@10 656 /// @}
yading@10 657
yading@10 658
yading@10 659 /// @name AMR preprocessing functions
yading@10 660 /// @{
yading@10 661
yading@10 662 /**
yading@10 663 * Circularly convolve a sparse fixed vector with a phase dispersion impulse
yading@10 664 * response filter (D.6.2 of G.729 and 6.1.5 of AMR).
yading@10 665 *
yading@10 666 * @param out vector with filter applied
yading@10 667 * @param in source vector
yading@10 668 * @param filter phase filter coefficients
yading@10 669 *
yading@10 670 * out[n] = sum(i,0,len-1){ in[i] * filter[(len + n - i)%len] }
yading@10 671 */
yading@10 672 static void apply_ir_filter(float *out, const AMRFixed *in,
yading@10 673 const float *filter)
yading@10 674 {
yading@10 675 float filter1[AMR_SUBFRAME_SIZE], ///< filters at pitch lag*1 and *2
yading@10 676 filter2[AMR_SUBFRAME_SIZE];
yading@10 677 int lag = in->pitch_lag;
yading@10 678 float fac = in->pitch_fac;
yading@10 679 int i;
yading@10 680
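// Build copies of the dispersion filter with the pitch-sharpening term added
// in circularly at lag (filter1) and again at 2*lag (filter2); the loop below
// picks one per pulse depending on how many sharpened repetitions of that
// pulse fall inside the subframe.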
yading@10 681 if (lag < AMR_SUBFRAME_SIZE) {
yading@10 682 ff_celp_circ_addf(filter1, filter, filter, lag, fac,
yading@10 683 AMR_SUBFRAME_SIZE);
yading@10 684
yading@10 685 if (lag < AMR_SUBFRAME_SIZE >> 1)
yading@10 686 ff_celp_circ_addf(filter2, filter, filter1, lag, fac,
yading@10 687 AMR_SUBFRAME_SIZE);
yading@10 688 }
yading@10 689
yading@10 690 memset(out, 0, sizeof(float) * AMR_SUBFRAME_SIZE);
yading@10 691 for (i = 0; i < in->n; i++) {
yading@10 692 int x = in->x[i];
yading@10 693 float y = in->y[i];
yading@10 694 const float *filterp;
yading@10 695
yading@10 696 if (x >= AMR_SUBFRAME_SIZE - lag) {
yading@10 697 filterp = filter;
yading@10 698 } else if (x >= AMR_SUBFRAME_SIZE - (lag << 1)) {
yading@10 699 filterp = filter1;
yading@10 700 } else
yading@10 701 filterp = filter2;
yading@10 702
yading@10 703 ff_celp_circ_addf(out, out, filterp, x, y, AMR_SUBFRAME_SIZE);
yading@10 704 }
yading@10 705 }
yading@10 706
yading@10 707 /**
yading@10 708 * Reduce fixed vector sparseness by smoothing with one of three IR filters.
yading@10 709 * Also known as "adaptive phase dispersion".
yading@10 710 *
yading@10 711 * This implements 3GPP TS 26.090 section 6.1(5).
yading@10 712 *
yading@10 713 * @param p the context
yading@10 714 * @param fixed_sparse algebraic codebook vector
yading@10 715 * @param fixed_vector unfiltered fixed vector
yading@10 716 * @param fixed_gain smoothed gain
yading@10 717 * @param out space for modified vector if necessary
yading@10 718 */
yading@10 719 static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse,
yading@10 720 const float *fixed_vector,
yading@10 721 float fixed_gain, float *out)
yading@10 722 {
yading@10 723 int ir_filter_nr;
yading@10 724
yading@10 725 if (p->pitch_gain[4] < 0.6) {
yading@10 726 ir_filter_nr = 0; // strong filtering
yading@10 727 } else if (p->pitch_gain[4] < 0.9) {
yading@10 728 ir_filter_nr = 1; // medium filtering
yading@10 729 } else
yading@10 730 ir_filter_nr = 2; // no filtering
yading@10 731
yading@10 732 // detect 'onset'
yading@10 733 if (fixed_gain > 2.0 * p->prev_sparse_fixed_gain) {
yading@10 734 p->ir_filter_onset = 2;
yading@10 735 } else if (p->ir_filter_onset)
yading@10 736 p->ir_filter_onset--;
yading@10 737
yading@10 738 if (!p->ir_filter_onset) {
yading@10 739 int i, count = 0;
yading@10 740
yading@10 741 for (i = 0; i < 5; i++)
yading@10 742 if (p->pitch_gain[i] < 0.6)
yading@10 743 count++;
yading@10 744 if (count > 2)
yading@10 745 ir_filter_nr = 0;
yading@10 746
yading@10 747 if (ir_filter_nr > p->prev_ir_filter_nr + 1)
yading@10 748 ir_filter_nr--;
yading@10 749 } else if (ir_filter_nr < 2)
yading@10 750 ir_filter_nr++;
yading@10 751
yading@10 752 // Disable filtering for very low levels of fixed_gain.
yading@10 753 // Note this step is not specified in the technical description but is in
yading@10 754 // the reference source in the function Ph_disp.
yading@10 755 if (fixed_gain < 5.0)
yading@10 756 ir_filter_nr = 2;
yading@10 757
yading@10 758 if (p->cur_frame_mode != MODE_7k4 && p->cur_frame_mode < MODE_10k2
yading@10 759 && ir_filter_nr < 2) {
yading@10 760 apply_ir_filter(out, fixed_sparse,
yading@10 761 (p->cur_frame_mode == MODE_7k95 ?
yading@10 762 ir_filters_lookup_MODE_7k95 :
yading@10 763 ir_filters_lookup)[ir_filter_nr]);
yading@10 764 fixed_vector = out;
yading@10 765 }
yading@10 766
yading@10 767 // update ir filter strength history
yading@10 768 p->prev_ir_filter_nr = ir_filter_nr;
yading@10 769 p->prev_sparse_fixed_gain = fixed_gain;
yading@10 770
yading@10 771 return fixed_vector;
yading@10 772 }
yading@10 773
yading@10 774 /// @}
yading@10 775
yading@10 776
yading@10 777 /// @name AMR synthesis functions
yading@10 778 /// @{
yading@10 779
yading@10 780 /**
yading@10 781 * Conduct 10th order linear predictive coding synthesis.
yading@10 782 *
yading@10 783 * @param p pointer to the AMRContext
yading@10 784 * @param lpc pointer to the LPC coefficients
yading@10 785 * @param fixed_gain fixed codebook gain for synthesis
yading@10 786 * @param fixed_vector algebraic codebook vector
yading@10 787 * @param samples pointer to the output speech samples
yading@10 788 * @param overflow 16-bit overflow flag
yading@10 789 */
yading@10 790 static int synthesis(AMRContext *p, float *lpc,
yading@10 791 float fixed_gain, const float *fixed_vector,
yading@10 792 float *samples, uint8_t overflow)
yading@10 793 {
yading@10 794 int i;
yading@10 795 float excitation[AMR_SUBFRAME_SIZE];
yading@10 796
yading@10 797 // if an overflow has been detected, the pitch vector is scaled down by a
yading@10 798 // factor of 4
yading@10 799 if (overflow)
yading@10 800 for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
yading@10 801 p->pitch_vector[i] *= 0.25;
yading@10 802
yading@10 803 p->acelpv_ctx.weighted_vector_sumf(excitation, p->pitch_vector, fixed_vector,
yading@10 804 p->pitch_gain[4], fixed_gain, AMR_SUBFRAME_SIZE);
yading@10 805
yading@10 806 // emphasize pitch vector contribution
yading@10 807 if (p->pitch_gain[4] > 0.5 && !overflow) {
yading@10 808 float energy = p->celpm_ctx.dot_productf(excitation, excitation,
yading@10 809 AMR_SUBFRAME_SIZE);
yading@10 810 float pitch_factor =
yading@10 811 p->pitch_gain[4] *
yading@10 812 (p->cur_frame_mode == MODE_12k2 ?
yading@10 813 0.25 * FFMIN(p->pitch_gain[4], 1.0) :
yading@10 814 0.5 * FFMIN(p->pitch_gain[4], SHARP_MAX));
yading@10 815
yading@10 816 for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
yading@10 817 excitation[i] += pitch_factor * p->pitch_vector[i];
yading@10 818
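// Rescale so the emphasized excitation keeps the energy it had before the
// pitch contribution was emphasized.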
yading@10 819 ff_scale_vector_to_given_sum_of_squares(excitation, excitation, energy,
yading@10 820 AMR_SUBFRAME_SIZE);
yading@10 821 }
yading@10 822
yading@10 823 p->celpf_ctx.celp_lp_synthesis_filterf(samples, lpc, excitation,
yading@10 824 AMR_SUBFRAME_SIZE,
yading@10 825 LP_FILTER_ORDER);
yading@10 826
yading@10 827 // detect overflow
yading@10 828 for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
yading@10 829 if (fabsf(samples[i]) > AMR_SAMPLE_BOUND) {
yading@10 830 return 1;
yading@10 831 }
yading@10 832
yading@10 833 return 0;
yading@10 834 }
yading@10 835
yading@10 836 /// @}
yading@10 837
yading@10 838
yading@10 839 /// @name AMR update functions
yading@10 840 /// @{
yading@10 841
yading@10 842 /**
yading@10 843 * Update buffers and history at the end of decoding a subframe.
yading@10 844 *
yading@10 845 * @param p pointer to the AMRContext
yading@10 846 */
yading@10 847 static void update_state(AMRContext *p)
yading@10 848 {
yading@10 849 memcpy(p->prev_lsp_sub4, p->lsp[3], LP_FILTER_ORDER * sizeof(p->lsp[3][0]));
yading@10 850
yading@10 851 memmove(&p->excitation_buf[0], &p->excitation_buf[AMR_SUBFRAME_SIZE],
yading@10 852 (PITCH_DELAY_MAX + LP_FILTER_ORDER + 1) * sizeof(float));
yading@10 853
yading@10 854 memmove(&p->pitch_gain[0], &p->pitch_gain[1], 4 * sizeof(float));
yading@10 855 memmove(&p->fixed_gain[0], &p->fixed_gain[1], 4 * sizeof(float));
yading@10 856
yading@10 857 memmove(&p->samples_in[0], &p->samples_in[AMR_SUBFRAME_SIZE],
yading@10 858 LP_FILTER_ORDER * sizeof(float));
yading@10 859 }
yading@10 860
yading@10 861 /// @}
yading@10 862
yading@10 863
yading@10 864 /// @name AMR Postprocessing functions
yading@10 865 /// @{
yading@10 866
yading@10 867 /**
yading@10 868 * Get the tilt factor of a formant filter from its transfer function
yading@10 869 *
yading@10 870 * @param p The Context
yading@10 871 * @param lpc_n LP_FILTER_ORDER coefficients of the numerator
yading@10 872 * @param lpc_d LP_FILTER_ORDER coefficients of the denominator
yading@10 873 */
yading@10 874 static float tilt_factor(AMRContext *p, float *lpc_n, float *lpc_d)
yading@10 875 {
yading@10 876 float rh0, rh1; // autocorrelation at lag 0 and 1
yading@10 877
yading@10 878 // LP_FILTER_ORDER prior zeros are needed for ff_celp_lp_synthesis_filterf
yading@10 879 float impulse_buffer[LP_FILTER_ORDER + AMR_TILT_RESPONSE] = { 0 };
yading@10 880 float *hf = impulse_buffer + LP_FILTER_ORDER; // start of impulse response
yading@10 881
yading@10 882 hf[0] = 1.0;
yading@10 883 memcpy(hf + 1, lpc_n, sizeof(float) * LP_FILTER_ORDER);
yading@10 884 p->celpf_ctx.celp_lp_synthesis_filterf(hf, lpc_d, hf,
yading@10 885 AMR_TILT_RESPONSE,
yading@10 886 LP_FILTER_ORDER);
yading@10 887
yading@10 888 rh0 = p->celpm_ctx.dot_productf(hf, hf, AMR_TILT_RESPONSE);
yading@10 889 rh1 = p->celpm_ctx.dot_productf(hf, hf + 1, AMR_TILT_RESPONSE - 1);
yading@10 890
yading@10 891 // The spec only specifies this check for 12.2 and 10.2 kbit/s
yading@10 892 // modes. But in the ref source the tilt is always non-negative.
yading@10 893 return rh1 >= 0.0 ? rh1 / rh0 * AMR_TILT_GAMMA_T : 0.0;
yading@10 894 }
yading@10 895
yading@10 896 /**
yading@10 897 * Perform adaptive post-filtering to enhance the quality of the speech.
yading@10 898 * See section 6.2.1.
yading@10 899 *
yading@10 900 * @param p pointer to the AMRContext
yading@10 901 * @param lpc interpolated LP coefficients for this subframe
yading@10 902 * @param buf_out output of the filter
yading@10 903 */
yading@10 904 static void postfilter(AMRContext *p, float *lpc, float *buf_out)
yading@10 905 {
yading@10 906 int i;
yading@10 907 float *samples = p->samples_in + LP_FILTER_ORDER; // Start of input
yading@10 908
yading@10 909 float speech_gain = p->celpm_ctx.dot_productf(samples, samples,
yading@10 910 AMR_SUBFRAME_SIZE);
yading@10 911
yading@10 912 float pole_out[AMR_SUBFRAME_SIZE + LP_FILTER_ORDER]; // Output of pole filter
yading@10 913 const float *gamma_n, *gamma_d; // Formant filter factor table
yading@10 914 float lpc_n[LP_FILTER_ORDER], lpc_d[LP_FILTER_ORDER]; // Transfer function coefficients
yading@10 915
yading@10 916 if (p->cur_frame_mode == MODE_12k2 || p->cur_frame_mode == MODE_10k2) {
yading@10 917 gamma_n = ff_pow_0_7;
yading@10 918 gamma_d = ff_pow_0_75;
yading@10 919 } else {
yading@10 920 gamma_n = ff_pow_0_55;
yading@10 921 gamma_d = ff_pow_0_7;
yading@10 922 }
yading@10 923
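// Formant postfilter H(z) = A(z/gamma_n) / A(z/gamma_d): weight the LP
// coefficients with the precomputed powers of gamma to form the numerator
// and denominator (3GPP TS 26.090 section 6.2.1).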
yading@10 924 for (i = 0; i < LP_FILTER_ORDER; i++) {
yading@10 925 lpc_n[i] = lpc[i] * gamma_n[i];
yading@10 926 lpc_d[i] = lpc[i] * gamma_d[i];
yading@10 927 }
yading@10 928
yading@10 929 memcpy(pole_out, p->postfilter_mem, sizeof(float) * LP_FILTER_ORDER);
yading@10 930 p->celpf_ctx.celp_lp_synthesis_filterf(pole_out + LP_FILTER_ORDER, lpc_d, samples,
yading@10 931 AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
yading@10 932 memcpy(p->postfilter_mem, pole_out + AMR_SUBFRAME_SIZE,
yading@10 933 sizeof(float) * LP_FILTER_ORDER);
yading@10 934
yading@10 935 p->celpf_ctx.celp_lp_zero_synthesis_filterf(buf_out, lpc_n,
yading@10 936 pole_out + LP_FILTER_ORDER,
yading@10 937 AMR_SUBFRAME_SIZE, LP_FILTER_ORDER);
yading@10 938
yading@10 939 ff_tilt_compensation(&p->tilt_mem, tilt_factor(p, lpc_n, lpc_d), buf_out,
yading@10 940 AMR_SUBFRAME_SIZE);
yading@10 941
yading@10 942 ff_adaptive_gain_control(buf_out, buf_out, speech_gain, AMR_SUBFRAME_SIZE,
yading@10 943 AMR_AGC_ALPHA, &p->postfilter_agc);
yading@10 944 }
yading@10 945
yading@10 946 /// @}
yading@10 947
yading@10 948 static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
yading@10 949 int *got_frame_ptr, AVPacket *avpkt)
yading@10 950 {
yading@10 951
yading@10 952 AMRContext *p = avctx->priv_data; // pointer to private data
yading@10 953 AVFrame *frame = data;
yading@10 954 const uint8_t *buf = avpkt->data;
yading@10 955 int buf_size = avpkt->size;
yading@10 956 float *buf_out; // pointer to the output data buffer
yading@10 957 int i, subframe, ret;
yading@10 958 float fixed_gain_factor;
yading@10 959 AMRFixed fixed_sparse = {0}; // fixed vector up to anti-sparseness processing
yading@10 960 float spare_vector[AMR_SUBFRAME_SIZE]; // extra stack space to hold result from anti-sparseness processing
yading@10 961 float synth_fixed_gain; // the fixed gain that synthesis should use
yading@10 962 const float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use
yading@10 963
yading@10 964 /* get output buffer */
yading@10 965 frame->nb_samples = AMR_BLOCK_SIZE;
yading@10 966 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
yading@10 967 return ret;
yading@10 968 buf_out = (float *)frame->data[0];
yading@10 969
yading@10 970 p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
yading@10 971 if (p->cur_frame_mode == NO_DATA) {
yading@10 972 av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
yading@10 973 return AVERROR_INVALIDDATA;
yading@10 974 }
yading@10 975 if (p->cur_frame_mode == MODE_DTX) {
yading@10 976 avpriv_report_missing_feature(avctx, "dtx mode");
yading@10 977 av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
yading@10 978 return AVERROR_PATCHWELCOME;
yading@10 979 }
yading@10 980
yading@10 981 if (p->cur_frame_mode == MODE_12k2) {
yading@10 982 lsf2lsp_5(p);
yading@10 983 } else
yading@10 984 lsf2lsp_3(p);
yading@10 985
yading@10 986 for (i = 0; i < 4; i++)
yading@10 987 ff_acelp_lspd2lpc(p->lsp[i], p->lpc[i], 5);
yading@10 988
yading@10 989 for (subframe = 0; subframe < 4; subframe++) {
yading@10 990 const AMRNBSubframe *amr_subframe = &p->frame.subframe[subframe];
yading@10 991
yading@10 992 decode_pitch_vector(p, amr_subframe, subframe);
yading@10 993
yading@10 994 decode_fixed_sparse(&fixed_sparse, amr_subframe->pulses,
yading@10 995 p->cur_frame_mode, subframe);
yading@10 996
yading@10 997 // The fixed gain (section 6.1.3) depends on the fixed vector
yading@10 998 // (section 6.1.2), but the fixed vector calculation uses
yading@10 999 // pitch sharpening based on the pitch gain (section 6.1.3).
yading@10 1000 // So the correct order is: pitch gain, pitch sharpening, fixed gain.
yading@10 1001 decode_gains(p, amr_subframe, p->cur_frame_mode, subframe,
yading@10 1002 &fixed_gain_factor);
yading@10 1003
yading@10 1004 pitch_sharpening(p, subframe, p->cur_frame_mode, &fixed_sparse);
yading@10 1005
yading@10 1006 if (fixed_sparse.pitch_lag == 0) {
yading@10 1007 av_log(avctx, AV_LOG_ERROR, "The file is corrupted, pitch_lag = 0 is not allowed\n");
yading@10 1008 return AVERROR_INVALIDDATA;
yading@10 1009 }
yading@10 1010 ff_set_fixed_vector(p->fixed_vector, &fixed_sparse, 1.0,
yading@10 1011 AMR_SUBFRAME_SIZE);
yading@10 1012
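// Fixed gain (section 6.1.3): the decoded correction factor scales a gain
// predicted from the fixed-vector energy, the mode's mean energy and the
// quantized energy errors of the previous four subframes.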
yading@10 1013 p->fixed_gain[4] =
yading@10 1014 ff_amr_set_fixed_gain(fixed_gain_factor,
yading@10 1015 p->celpm_ctx.dot_productf(p->fixed_vector,
yading@10 1016 p->fixed_vector,
yading@10 1017 AMR_SUBFRAME_SIZE) /
yading@10 1018 AMR_SUBFRAME_SIZE,
yading@10 1019 p->prediction_error,
yading@10 1020 energy_mean[p->cur_frame_mode], energy_pred_fac);
yading@10 1021
yading@10 1022 // The excitation feedback is calculated without any processing such
yading@10 1023 // as fixed gain smoothing. This isn't mentioned in the specification.
yading@10 1024 for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
yading@10 1025 p->excitation[i] *= p->pitch_gain[4];
yading@10 1026 ff_set_fixed_vector(p->excitation, &fixed_sparse, p->fixed_gain[4],
yading@10 1027 AMR_SUBFRAME_SIZE);
yading@10 1028
yading@10 1029 // In the ref decoder, excitation is stored with no fractional bits.
yading@10 1030 // This step prevents buzz in silent periods. The ref encoder can
yading@10 1031 // emit long sequences with pitch factor greater than one. This
yading@10 1032 // creates unwanted feedback if the excitation vector is nonzero.
yading@10 1033 // (e.g. test sequence T19_795.COD in 3GPP TS 26.074)
yading@10 1034 for (i = 0; i < AMR_SUBFRAME_SIZE; i++)
yading@10 1035 p->excitation[i] = truncf(p->excitation[i]);
yading@10 1036
yading@10 1037 // Smooth fixed gain.
yading@10 1038 // The specification is ambiguous, but in the reference source, the
yading@10 1039 // smoothed value is NOT fed back into later fixed gain smoothing.
yading@10 1040 synth_fixed_gain = fixed_gain_smooth(p, p->lsf_q[subframe],
yading@10 1041 p->lsf_avg, p->cur_frame_mode);
yading@10 1042
yading@10 1043 synth_fixed_vector = anti_sparseness(p, &fixed_sparse, p->fixed_vector,
yading@10 1044 synth_fixed_gain, spare_vector);
yading@10 1045
yading@10 1046 if (synthesis(p, p->lpc[subframe], synth_fixed_gain,
yading@10 1047 synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 0))
yading@10 1048 // overflow detected -> rerun synthesis scaling pitch vector down
yading@10 1049 // by a factor of 4, skipping pitch vector contribution emphasis
yading@10 1050 // and adaptive gain control
yading@10 1051 synthesis(p, p->lpc[subframe], synth_fixed_gain,
yading@10 1052 synth_fixed_vector, &p->samples_in[LP_FILTER_ORDER], 1);
yading@10 1053
yading@10 1054 postfilter(p, p->lpc[subframe], buf_out + subframe * AMR_SUBFRAME_SIZE);
yading@10 1055
yading@10 1056 // update buffers and history
yading@10 1057 ff_clear_fixed_vector(p->fixed_vector, &fixed_sparse, AMR_SUBFRAME_SIZE);
yading@10 1058 update_state(p);
yading@10 1059 }
yading@10 1060
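// Post-processing: second-order high-pass filter applied across the whole
// frame, with the filter gain folded together with AMR_SAMPLE_SCALE to
// produce [-1,1] float output.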
yading@10 1061 p->acelpf_ctx.acelp_apply_order_2_transfer_function(buf_out,
yading@10 1062 buf_out, highpass_zeros,
yading@10 1063 highpass_poles,
yading@10 1064 highpass_gain * AMR_SAMPLE_SCALE,
yading@10 1065 p->high_pass_mem, AMR_BLOCK_SIZE);
yading@10 1066
yading@10 1067 /* Update averaged lsf vector (used for fixed gain smoothing).
yading@10 1068 *
yading@10 1069 * Note that lsf_avg should not incorporate the current frame's LSFs
yading@10 1070 * for fixed_gain_smooth.
yading@10 1071 * The specification has an incorrect formula: the reference decoder uses
yading@10 1072 * qbar(n-1) rather than qbar(n) in section 6.1(4) equation 71. */
yading@10 1073 p->acelpv_ctx.weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
yading@10 1074 0.84, 0.16, LP_FILTER_ORDER);
yading@10 1075
yading@10 1076 *got_frame_ptr = 1;
yading@10 1077
yading@10 1078 /* return the number of bytes consumed if everything was OK */
yading@10 1079 return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
yading@10 1080 }
yading@10 1081
yading@10 1082
yading@10 1083 AVCodec ff_amrnb_decoder = {
yading@10 1084 .name = "amrnb",
yading@10 1085 .type = AVMEDIA_TYPE_AUDIO,
yading@10 1086 .id = AV_CODEC_ID_AMR_NB,
yading@10 1087 .priv_data_size = sizeof(AMRContext),
yading@10 1088 .init = amrnb_decode_init,
yading@10 1089 .decode = amrnb_decode_frame,
yading@10 1090 .capabilities = CODEC_CAP_DR1,
yading@10 1091 .long_name = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"),
yading@10 1092 .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
yading@10 1093 AV_SAMPLE_FMT_NONE },
yading@10 1094 };