yading@11: /* yading@11: * Copyright (c) 2012 Justin Ruggles yading@11: * yading@11: * Triangular with Noise Shaping is based on opusfile. yading@11: * Copyright (c) 1994-2012 by the Xiph.Org Foundation and contributors yading@11: * yading@11: * This file is part of Libav. yading@11: * yading@11: * Libav is free software; you can redistribute it and/or yading@11: * modify it under the terms of the GNU Lesser General Public yading@11: * License as published by the Free Software Foundation; either yading@11: * version 2.1 of the License, or (at your option) any later version. yading@11: * yading@11: * Libav is distributed in the hope that it will be useful, yading@11: * but WITHOUT ANY WARRANTY; without even the implied warranty of yading@11: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU yading@11: * Lesser General Public License for more details. yading@11: * yading@11: * You should have received a copy of the GNU Lesser General Public yading@11: * License along with Libav; if not, write to the Free Software yading@11: * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA yading@11: */ yading@11: yading@11: /** yading@11: * @file yading@11: * Dithered Audio Sample Quantization yading@11: * yading@11: * Converts from dbl, flt, or s32 to s16 using dithering. yading@11: */ yading@11: yading@11: #include yading@11: #include yading@11: yading@11: #include "libavutil/common.h" yading@11: #include "libavutil/lfg.h" yading@11: #include "libavutil/mem.h" yading@11: #include "libavutil/samplefmt.h" yading@11: #include "audio_convert.h" yading@11: #include "dither.h" yading@11: #include "internal.h" yading@11: yading@11: typedef struct DitherState { yading@11: int mute; yading@11: unsigned int seed; yading@11: AVLFG lfg; yading@11: float *noise_buf; yading@11: int noise_buf_size; yading@11: int noise_buf_ptr; yading@11: float dither_a[4]; yading@11: float dither_b[4]; yading@11: } DitherState; yading@11: yading@11: struct DitherContext { yading@11: DitherDSPContext ddsp; yading@11: enum AVResampleDitherMethod method; yading@11: int apply_map; yading@11: ChannelMapInfo *ch_map_info; yading@11: yading@11: int mute_dither_threshold; // threshold for disabling dither yading@11: int mute_reset_threshold; // threshold for resetting noise shaping yading@11: const float *ns_coef_b; // noise shaping coeffs yading@11: const float *ns_coef_a; // noise shaping coeffs yading@11: yading@11: int channels; yading@11: DitherState *state; // dither states for each channel yading@11: yading@11: AudioData *flt_data; // input data in fltp yading@11: AudioData *s16_data; // dithered output in s16p yading@11: AudioConvert *ac_in; // converter for input to fltp yading@11: AudioConvert *ac_out; // converter for s16p to s16 (if needed) yading@11: yading@11: void (*quantize)(int16_t *dst, const float *src, float *dither, int len); yading@11: int samples_align; yading@11: }; yading@11: yading@11: /* mute threshold, in seconds */ yading@11: #define MUTE_THRESHOLD_SEC 0.000333 yading@11: yading@11: /* scale factor for 16-bit output. yading@11: The signal is attenuated slightly to avoid clipping */ yading@11: #define S16_SCALE 32753.0f yading@11: yading@11: /* scale to convert lfg from INT_MIN/INT_MAX to -0.5/0.5 */ yading@11: #define LFG_SCALE (1.0f / (2.0f * INT32_MAX)) yading@11: yading@11: /* noise shaping coefficients */ yading@11: yading@11: static const float ns_48_coef_b[4] = { yading@11: 2.2374f, -0.7339f, -0.1251f, -0.6033f yading@11: }; yading@11: yading@11: static const float ns_48_coef_a[4] = { yading@11: 0.9030f, 0.0116f, -0.5853f, -0.2571f yading@11: }; yading@11: yading@11: static const float ns_44_coef_b[4] = { yading@11: 2.2061f, -0.4707f, -0.2534f, -0.6213f yading@11: }; yading@11: yading@11: static const float ns_44_coef_a[4] = { yading@11: 1.0587f, 0.0676f, -0.6054f, -0.2738f yading@11: }; yading@11: yading@11: static void dither_int_to_float_rectangular_c(float *dst, int *src, int len) yading@11: { yading@11: int i; yading@11: for (i = 0; i < len; i++) yading@11: dst[i] = src[i] * LFG_SCALE; yading@11: } yading@11: yading@11: static void dither_int_to_float_triangular_c(float *dst, int *src0, int len) yading@11: { yading@11: int i; yading@11: int *src1 = src0 + len; yading@11: yading@11: for (i = 0; i < len; i++) { yading@11: float r = src0[i] * LFG_SCALE; yading@11: r += src1[i] * LFG_SCALE; yading@11: dst[i] = r; yading@11: } yading@11: } yading@11: yading@11: static void quantize_c(int16_t *dst, const float *src, float *dither, int len) yading@11: { yading@11: int i; yading@11: for (i = 0; i < len; i++) yading@11: dst[i] = av_clip_int16(lrintf(src[i] * S16_SCALE + dither[i])); yading@11: } yading@11: yading@11: #define SQRT_1_6 0.40824829046386301723f yading@11: yading@11: static void dither_highpass_filter(float *src, int len) yading@11: { yading@11: int i; yading@11: yading@11: /* filter is from libswresample in FFmpeg */ yading@11: for (i = 0; i < len - 2; i++) yading@11: src[i] = (-src[i] + 2 * src[i + 1] - src[i + 2]) * SQRT_1_6; yading@11: } yading@11: yading@11: static int generate_dither_noise(DitherContext *c, DitherState *state, yading@11: int min_samples) yading@11: { yading@11: int i; yading@11: int nb_samples = FFALIGN(min_samples, 16) + 16; yading@11: int buf_samples = nb_samples * yading@11: (c->method == AV_RESAMPLE_DITHER_RECTANGULAR ? 1 : 2); yading@11: unsigned int *noise_buf_ui; yading@11: yading@11: av_freep(&state->noise_buf); yading@11: state->noise_buf_size = state->noise_buf_ptr = 0; yading@11: yading@11: state->noise_buf = av_malloc(buf_samples * sizeof(*state->noise_buf)); yading@11: if (!state->noise_buf) yading@11: return AVERROR(ENOMEM); yading@11: state->noise_buf_size = FFALIGN(min_samples, 16); yading@11: noise_buf_ui = (unsigned int *)state->noise_buf; yading@11: yading@11: av_lfg_init(&state->lfg, state->seed); yading@11: for (i = 0; i < buf_samples; i++) yading@11: noise_buf_ui[i] = av_lfg_get(&state->lfg); yading@11: yading@11: c->ddsp.dither_int_to_float(state->noise_buf, noise_buf_ui, nb_samples); yading@11: yading@11: if (c->method == AV_RESAMPLE_DITHER_TRIANGULAR_HP) yading@11: dither_highpass_filter(state->noise_buf, nb_samples); yading@11: yading@11: return 0; yading@11: } yading@11: yading@11: static void quantize_triangular_ns(DitherContext *c, DitherState *state, yading@11: int16_t *dst, const float *src, yading@11: int nb_samples) yading@11: { yading@11: int i, j; yading@11: float *dither = &state->noise_buf[state->noise_buf_ptr]; yading@11: yading@11: if (state->mute > c->mute_reset_threshold) yading@11: memset(state->dither_a, 0, sizeof(state->dither_a)); yading@11: yading@11: for (i = 0; i < nb_samples; i++) { yading@11: float err = 0; yading@11: float sample = src[i] * S16_SCALE; yading@11: yading@11: for (j = 0; j < 4; j++) { yading@11: err += c->ns_coef_b[j] * state->dither_b[j] - yading@11: c->ns_coef_a[j] * state->dither_a[j]; yading@11: } yading@11: for (j = 3; j > 0; j--) { yading@11: state->dither_a[j] = state->dither_a[j - 1]; yading@11: state->dither_b[j] = state->dither_b[j - 1]; yading@11: } yading@11: state->dither_a[0] = err; yading@11: sample -= err; yading@11: yading@11: if (state->mute > c->mute_dither_threshold) { yading@11: dst[i] = av_clip_int16(lrintf(sample)); yading@11: state->dither_b[0] = 0; yading@11: } else { yading@11: dst[i] = av_clip_int16(lrintf(sample + dither[i])); yading@11: state->dither_b[0] = av_clipf(dst[i] - sample, -1.5f, 1.5f); yading@11: } yading@11: yading@11: state->mute++; yading@11: if (src[i]) yading@11: state->mute = 0; yading@11: } yading@11: } yading@11: yading@11: static int convert_samples(DitherContext *c, int16_t **dst, float * const *src, yading@11: int channels, int nb_samples) yading@11: { yading@11: int ch, ret; yading@11: int aligned_samples = FFALIGN(nb_samples, 16); yading@11: yading@11: for (ch = 0; ch < channels; ch++) { yading@11: DitherState *state = &c->state[ch]; yading@11: yading@11: if (state->noise_buf_size < aligned_samples) { yading@11: ret = generate_dither_noise(c, state, nb_samples); yading@11: if (ret < 0) yading@11: return ret; yading@11: } else if (state->noise_buf_size - state->noise_buf_ptr < aligned_samples) { yading@11: state->noise_buf_ptr = 0; yading@11: } yading@11: yading@11: if (c->method == AV_RESAMPLE_DITHER_TRIANGULAR_NS) { yading@11: quantize_triangular_ns(c, state, dst[ch], src[ch], nb_samples); yading@11: } else { yading@11: c->quantize(dst[ch], src[ch], yading@11: &state->noise_buf[state->noise_buf_ptr], yading@11: FFALIGN(nb_samples, c->samples_align)); yading@11: } yading@11: yading@11: state->noise_buf_ptr += aligned_samples; yading@11: } yading@11: yading@11: return 0; yading@11: } yading@11: yading@11: int ff_convert_dither(DitherContext *c, AudioData *dst, AudioData *src) yading@11: { yading@11: int ret; yading@11: AudioData *flt_data; yading@11: yading@11: /* output directly to dst if it is planar */ yading@11: if (dst->sample_fmt == AV_SAMPLE_FMT_S16P) yading@11: c->s16_data = dst; yading@11: else { yading@11: /* make sure s16_data is large enough for the output */ yading@11: ret = ff_audio_data_realloc(c->s16_data, src->nb_samples); yading@11: if (ret < 0) yading@11: return ret; yading@11: } yading@11: yading@11: if (src->sample_fmt != AV_SAMPLE_FMT_FLTP || c->apply_map) { yading@11: /* make sure flt_data is large enough for the input */ yading@11: ret = ff_audio_data_realloc(c->flt_data, src->nb_samples); yading@11: if (ret < 0) yading@11: return ret; yading@11: flt_data = c->flt_data; yading@11: } yading@11: yading@11: if (src->sample_fmt != AV_SAMPLE_FMT_FLTP) { yading@11: /* convert input samples to fltp and scale to s16 range */ yading@11: ret = ff_audio_convert(c->ac_in, flt_data, src); yading@11: if (ret < 0) yading@11: return ret; yading@11: } else if (c->apply_map) { yading@11: ret = ff_audio_data_copy(flt_data, src, c->ch_map_info); yading@11: if (ret < 0) yading@11: return ret; yading@11: } else { yading@11: flt_data = src; yading@11: } yading@11: yading@11: /* check alignment and padding constraints */ yading@11: if (c->method != AV_RESAMPLE_DITHER_TRIANGULAR_NS) { yading@11: int ptr_align = FFMIN(flt_data->ptr_align, c->s16_data->ptr_align); yading@11: int samples_align = FFMIN(flt_data->samples_align, c->s16_data->samples_align); yading@11: int aligned_len = FFALIGN(src->nb_samples, c->ddsp.samples_align); yading@11: yading@11: if (!(ptr_align % c->ddsp.ptr_align) && samples_align >= aligned_len) { yading@11: c->quantize = c->ddsp.quantize; yading@11: c->samples_align = c->ddsp.samples_align; yading@11: } else { yading@11: c->quantize = quantize_c; yading@11: c->samples_align = 1; yading@11: } yading@11: } yading@11: yading@11: ret = convert_samples(c, (int16_t **)c->s16_data->data, yading@11: (float * const *)flt_data->data, src->channels, yading@11: src->nb_samples); yading@11: if (ret < 0) yading@11: return ret; yading@11: yading@11: c->s16_data->nb_samples = src->nb_samples; yading@11: yading@11: /* interleave output to dst if needed */ yading@11: if (dst->sample_fmt == AV_SAMPLE_FMT_S16) { yading@11: ret = ff_audio_convert(c->ac_out, dst, c->s16_data); yading@11: if (ret < 0) yading@11: return ret; yading@11: } else yading@11: c->s16_data = NULL; yading@11: yading@11: return 0; yading@11: } yading@11: yading@11: void ff_dither_free(DitherContext **cp) yading@11: { yading@11: DitherContext *c = *cp; yading@11: int ch; yading@11: yading@11: if (!c) yading@11: return; yading@11: ff_audio_data_free(&c->flt_data); yading@11: ff_audio_data_free(&c->s16_data); yading@11: ff_audio_convert_free(&c->ac_in); yading@11: ff_audio_convert_free(&c->ac_out); yading@11: for (ch = 0; ch < c->channels; ch++) yading@11: av_free(c->state[ch].noise_buf); yading@11: av_free(c->state); yading@11: av_freep(cp); yading@11: } yading@11: yading@11: static void dither_init(DitherDSPContext *ddsp, yading@11: enum AVResampleDitherMethod method) yading@11: { yading@11: ddsp->quantize = quantize_c; yading@11: ddsp->ptr_align = 1; yading@11: ddsp->samples_align = 1; yading@11: yading@11: if (method == AV_RESAMPLE_DITHER_RECTANGULAR) yading@11: ddsp->dither_int_to_float = dither_int_to_float_rectangular_c; yading@11: else yading@11: ddsp->dither_int_to_float = dither_int_to_float_triangular_c; yading@11: yading@11: if (ARCH_X86) yading@11: ff_dither_init_x86(ddsp, method); yading@11: } yading@11: yading@11: DitherContext *ff_dither_alloc(AVAudioResampleContext *avr, yading@11: enum AVSampleFormat out_fmt, yading@11: enum AVSampleFormat in_fmt, yading@11: int channels, int sample_rate, int apply_map) yading@11: { yading@11: AVLFG seed_gen; yading@11: DitherContext *c; yading@11: int ch; yading@11: yading@11: if (av_get_packed_sample_fmt(out_fmt) != AV_SAMPLE_FMT_S16 || yading@11: av_get_bytes_per_sample(in_fmt) <= 2) { yading@11: av_log(avr, AV_LOG_ERROR, "dithering %s to %s is not supported\n", yading@11: av_get_sample_fmt_name(in_fmt), av_get_sample_fmt_name(out_fmt)); yading@11: return NULL; yading@11: } yading@11: yading@11: c = av_mallocz(sizeof(*c)); yading@11: if (!c) yading@11: return NULL; yading@11: yading@11: c->apply_map = apply_map; yading@11: if (apply_map) yading@11: c->ch_map_info = &avr->ch_map_info; yading@11: yading@11: if (avr->dither_method == AV_RESAMPLE_DITHER_TRIANGULAR_NS && yading@11: sample_rate != 48000 && sample_rate != 44100) { yading@11: av_log(avr, AV_LOG_WARNING, "sample rate must be 48000 or 44100 Hz " yading@11: "for triangular_ns dither. using triangular_hp instead.\n"); yading@11: avr->dither_method = AV_RESAMPLE_DITHER_TRIANGULAR_HP; yading@11: } yading@11: c->method = avr->dither_method; yading@11: dither_init(&c->ddsp, c->method); yading@11: yading@11: if (c->method == AV_RESAMPLE_DITHER_TRIANGULAR_NS) { yading@11: if (sample_rate == 48000) { yading@11: c->ns_coef_b = ns_48_coef_b; yading@11: c->ns_coef_a = ns_48_coef_a; yading@11: } else { yading@11: c->ns_coef_b = ns_44_coef_b; yading@11: c->ns_coef_a = ns_44_coef_a; yading@11: } yading@11: } yading@11: yading@11: /* Either s16 or s16p output format is allowed, but s16p is used yading@11: internally, so we need to use a temp buffer and interleave if the output yading@11: format is s16 */ yading@11: if (out_fmt != AV_SAMPLE_FMT_S16P) { yading@11: c->s16_data = ff_audio_data_alloc(channels, 1024, AV_SAMPLE_FMT_S16P, yading@11: "dither s16 buffer"); yading@11: if (!c->s16_data) yading@11: goto fail; yading@11: yading@11: c->ac_out = ff_audio_convert_alloc(avr, out_fmt, AV_SAMPLE_FMT_S16P, yading@11: channels, sample_rate, 0); yading@11: if (!c->ac_out) yading@11: goto fail; yading@11: } yading@11: yading@11: if (in_fmt != AV_SAMPLE_FMT_FLTP || c->apply_map) { yading@11: c->flt_data = ff_audio_data_alloc(channels, 1024, AV_SAMPLE_FMT_FLTP, yading@11: "dither flt buffer"); yading@11: if (!c->flt_data) yading@11: goto fail; yading@11: } yading@11: if (in_fmt != AV_SAMPLE_FMT_FLTP) { yading@11: c->ac_in = ff_audio_convert_alloc(avr, AV_SAMPLE_FMT_FLTP, in_fmt, yading@11: channels, sample_rate, c->apply_map); yading@11: if (!c->ac_in) yading@11: goto fail; yading@11: } yading@11: yading@11: c->state = av_mallocz(channels * sizeof(*c->state)); yading@11: if (!c->state) yading@11: goto fail; yading@11: c->channels = channels; yading@11: yading@11: /* calculate thresholds for turning off dithering during periods of yading@11: silence to avoid replacing digital silence with quiet dither noise */ yading@11: c->mute_dither_threshold = lrintf(sample_rate * MUTE_THRESHOLD_SEC); yading@11: c->mute_reset_threshold = c->mute_dither_threshold * 4; yading@11: yading@11: /* initialize dither states */ yading@11: av_lfg_init(&seed_gen, 0xC0FFEE); yading@11: for (ch = 0; ch < channels; ch++) { yading@11: DitherState *state = &c->state[ch]; yading@11: state->mute = c->mute_reset_threshold + 1; yading@11: state->seed = av_lfg_get(&seed_gen); yading@11: generate_dither_noise(c, state, FFMAX(32768, sample_rate / 2)); yading@11: } yading@11: yading@11: return c; yading@11: yading@11: fail: yading@11: ff_dither_free(&c); yading@11: return NULL; yading@11: }