/*
 * Copyright (c) CMU 1993 Computer Science, Speech Group
 *                        Chengxiang Lu and Alex Hauptmann
 * Copyright (c) 2005 Steve Underwood
 * Copyright (c) 2009 Kenan Gillet
 * Copyright (c) 2010 Martin Storsjo
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * G.722 ADPCM audio encoder
 */

#include "libavutil/avassert.h"
#include "avcodec.h"
#include "internal.h"
#include "g722.h"
#include "libavutil/common.h"

#define FREEZE_INTERVAL 128

/* This is an arbitrary value. Allowing insanely large values leads to strange
   problems, so we limit it to a reasonable value */
#define MAX_FRAME_SIZE 32768

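/* For reference: at the nominal 16 kHz G.722 input rate, with one output byte
   per pair of input samples, a MAX_FRAME_SIZE frame is roughly 2 seconds of
   audio and 16384 bytes of packet payload. */
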
/* We clip the value of avctx->trellis to prevent data type overflows and
   undefined behavior. Using larger values is insanely slow anyway. */
#define MIN_TRELLIS 0
#define MAX_TRELLIS 16

static av_cold int g722_encode_close(AVCodecContext *avctx)
{
    G722Context *c = avctx->priv_data;
    int i;
    for (i = 0; i < 2; i++) {
        av_freep(&c->paths[i]);
        av_freep(&c->node_buf[i]);
        av_freep(&c->nodep_buf[i]);
    }
    return 0;
}

static av_cold int g722_encode_init(AVCodecContext * avctx)
{
    G722Context *c = avctx->priv_data;
    int ret;

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
        return AVERROR_INVALIDDATA;
    }

    c->band[0].scale_factor = 8;
    c->band[1].scale_factor = 2;
    c->prev_samples_pos = 22;

    if (avctx->trellis) {
        int frontier = 1 << avctx->trellis;
        int max_paths = frontier * FREEZE_INTERVAL;
        int i;
        for (i = 0; i < 2; i++) {
            c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths));
            c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf));
            c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf));
            if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
        }
    }

    if (avctx->frame_size) {
        /* validate frame size */
        if (avctx->frame_size & 1 || avctx->frame_size > MAX_FRAME_SIZE) {
            int new_frame_size;

            if (avctx->frame_size == 1)
                new_frame_size = 2;
            else if (avctx->frame_size > MAX_FRAME_SIZE)
                new_frame_size = MAX_FRAME_SIZE;
            else
                new_frame_size = avctx->frame_size - 1;

            av_log(avctx, AV_LOG_WARNING, "Requested frame size is not "
                   "allowed. Using %d instead of %d\n", new_frame_size,
                   avctx->frame_size);
            avctx->frame_size = new_frame_size;
        }
    } else {
        /* This is arbitrary. We use 320 because it's 20ms @ 16kHz, which is
           a common packet size for VoIP applications */
        avctx->frame_size = 320;
    }

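    /* The QMF in filter_samples() works on 24 samples at a time: the two new
       input samples plus 22 samples of history kept in prev_samples. The
       encoder output therefore lags the input by 22 samples, which is
       reported as the codec delay and subtracted from the packet pts. */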
    avctx->delay = 22;

    if (avctx->trellis) {
        /* validate trellis */
        if (avctx->trellis < MIN_TRELLIS || avctx->trellis > MAX_TRELLIS) {
            int new_trellis = av_clip(avctx->trellis, MIN_TRELLIS, MAX_TRELLIS);
            av_log(avctx, AV_LOG_WARNING, "Requested trellis value is not "
                   "allowed. Using %d instead of %d\n", new_trellis,
                   avctx->trellis);
            avctx->trellis = new_trellis;
        }
    }

    return 0;
error:
    g722_encode_close(avctx);
    return ret;
}

static const int16_t low_quant[33] = {
      35,   72,  110,  150,  190,  233,  276,  323,
     370,  422,  473,  530,  587,  650,  714,  786,
     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
    1765, 1980, 2195, 2557, 2919
};

static inline void filter_samples(G722Context *c, const int16_t *samples,
                                  int *xlow, int *xhigh)
{
    int xout1, xout2;
    c->prev_samples[c->prev_samples_pos++] = samples[0];
    c->prev_samples[c->prev_samples_pos++] = samples[1];
    ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
    *xlow  = xout1 + xout2 >> 14;
    *xhigh = xout1 - xout2 >> 14;
    if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
        memmove(c->prev_samples,
                c->prev_samples + c->prev_samples_pos - 22,
                22 * sizeof(c->prev_samples[0]));
        c->prev_samples_pos = 22;
    }
}

static inline int encode_high(const struct G722Band *state, int xhigh)
{
    int diff = av_clip_int16(xhigh - state->s_predictor);
    int pred = 141 * state->scale_factor >> 8;
    /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
    return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
}

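/* Both encode_high() above and encode_low() below fold the sign of diff with
   the same branchless trick: diff ^ (diff >> (sizeof(diff)*8-1)) equals diff
   for diff >= 0 and ~diff = -(diff + 1) for diff < 0 (this relies on an
   arithmetic right shift of negative values). For example, with a 32-bit int,
   diff = -100 gives diff >> 31 == -1 and -100 ^ -1 == 99. */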
static inline int encode_low(const struct G722Band* state, int xlow)
{
    int diff = av_clip_int16(xlow - state->s_predictor);
    /* = diff >= 0 ? diff : -(diff + 1) */
    int limit = diff ^ (diff >> (sizeof(diff)*8-1));
    int i = 0;
    limit = limit + 1 << 10;
    if (limit > low_quant[8] * state->scale_factor)
        i = 9;
    while (i < 29 && limit > low_quant[i] * state->scale_factor)
        i++;
    return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
}

static void g722_encode_trellis(G722Context *c, int trellis,
                                uint8_t *dst, int nb_samples,
                                const int16_t *samples)
{
    int i, j, k;
    int frontier = 1 << trellis;
    struct TrellisNode **nodes[2];
    struct TrellisNode **nodes_next[2];
    int pathn[2] = {0, 0}, froze = -1;
    struct TrellisPath *p[2];

    for (i = 0; i < 2; i++) {
        nodes[i] = c->nodep_buf[i];
        nodes_next[i] = c->nodep_buf[i] + frontier;
        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
        nodes[i][0] = c->node_buf[i] + frontier;
        nodes[i][0]->ssd = 0;
        nodes[i][0]->path = 0;
        nodes[i][0]->state = c->band[i];
    }

    for (i = 0; i < nb_samples >> 1; i++) {
        int xlow, xhigh;
        struct TrellisNode *next[2];
        int heap_pos[2] = {0, 0};

        for (j = 0; j < 2; j++) {
            next[j] = c->node_buf[j] + frontier*(i & 1);
            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
        }

        filter_samples(c, &samples[2*i], &xlow, &xhigh);

        for (j = 0; j < frontier && nodes[0][j]; j++) {
            /* Only k >> 2 affects the future adaptive state, therefore testing
             * small steps that don't change k >> 2 is useless, the original
             * value from encode_low is better than them. Since we step k
             * in steps of 4, make sure range is a multiple of 4, so that
             * we don't miss the original value from encode_low. */
            int range = j < frontier/2 ? 4 : 0;
            struct TrellisNode *cur_node = nodes[0][j];

            int ilow = encode_low(&cur_node->state, xlow);

            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
                int decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                if (k < 0)
                    continue;

                decoded = av_clip((cur_node->state.scale_factor *
                                  ff_g722_low_inv_quant6[k] >> 10)
                                + cur_node->state.s_predictor, -16384, 16383);
                dec_diff = xlow - decoded;

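/* STORE_NODE(index, UPDATE, VALUE) turns the candidate that has just been
 * decoded into a trellis node for band `index` in the next frontier: it
 * accumulates the squared decoding error into ssd, inserts the node into a
 * binary min-heap of up to `frontier` entries ordered by ssd (possibly
 * replacing a worse leaf when the heap is full), copies the adaptive state
 * from the parent and advances it via UPDATE, and records VALUE (the
 * quantizer index) plus a link to the parent in the path history that is
 * rewound whenever the trellis is frozen. */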
#define STORE_NODE(index, UPDATE, VALUE)\
                ssd = cur_node->ssd + dec_diff*dec_diff;\
                /* Check for wraparound. Using 64 bit ssd counters would \
                 * be simpler, but is slower on x86 32 bit. */\
                if (ssd < cur_node->ssd)\
                    continue;\
                if (heap_pos[index] < frontier) {\
                    pos = heap_pos[index]++;\
                    av_assert2(pathn[index] < FREEZE_INTERVAL * frontier);\
                    node = nodes_next[index][pos] = next[index]++;\
                    node->path = pathn[index]++;\
                } else {\
                    /* Try to replace one of the leaf nodes with the new \
                     * one, but not always testing the same leaf position */\
                    pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
                    if (ssd >= nodes_next[index][pos]->ssd)\
                        continue;\
                    heap_pos[index]++;\
                    node = nodes_next[index][pos];\
                }\
                node->ssd = ssd;\
                node->state = cur_node->state;\
                UPDATE;\
                c->paths[index][node->path].value = VALUE;\
                c->paths[index][node->path].prev = cur_node->path;\
                /* Sift the newly inserted node up in the heap to restore \
                 * the heap property */\
                while (pos > 0) {\
                    int parent = (pos - 1) >> 1;\
                    if (nodes_next[index][parent]->ssd <= ssd)\
                        break;\
                    FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
                                                nodes_next[index][pos]);\
                    pos = parent;\
                }
                STORE_NODE(0, ff_g722_update_low_predictor(&node->state, k >> 2), k);
            }
        }

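        /* The high band runs its own, independent trellis over nodes[1] and
           paths[1]; the two bands are only combined when a stored path is
           rewound into bytes (high-band code in bits 6-7, low-band code in
           bits 0-5). */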
        for (j = 0; j < frontier && nodes[1][j]; j++) {
            int ihigh;
            struct TrellisNode *cur_node = nodes[1][j];

            /* We don't try to get any initial guess for ihigh via
             * encode_high - since there's only 4 possible values, test
             * them all. Testing all of these gives a much, much larger
             * gain than testing a larger range around ilow. */
            for (ihigh = 0; ihigh < 4; ihigh++) {
                int dhigh, decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                dhigh = cur_node->state.scale_factor *
                        ff_g722_high_inv_quant[ihigh] >> 10;
                decoded = av_clip(dhigh + cur_node->state.s_predictor,
                                  -16384, 16383);
                dec_diff = xhigh - decoded;

                STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
            }
        }

        for (j = 0; j < 2; j++) {
            FFSWAP(struct TrellisNode**, nodes[j], nodes_next[j]);

            if (nodes[j][0]->ssd > (1 << 16)) {
                for (k = 1; k < frontier && nodes[j][k]; k++)
                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
                nodes[j][0]->ssd = 0;
            }
        }

        if (i == froze + FREEZE_INTERVAL) {
            p[0] = &c->paths[0][nodes[0][0]->path];
            p[1] = &c->paths[1][nodes[1][0]->path];
            for (j = i; j > froze; j--) {
                dst[j] = p[1]->value << 6 | p[0]->value;
                p[0] = &c->paths[0][p[0]->prev];
                p[1] = &c->paths[1][p[1]->prev];
            }
            froze = i;
            pathn[0] = pathn[1] = 0;
            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
        }
    }

    p[0] = &c->paths[0][nodes[0][0]->path];
    p[1] = &c->paths[1][nodes[1][0]->path];
    for (j = i; j > froze; j--) {
        dst[j] = p[1]->value << 6 | p[0]->value;
        p[0] = &c->paths[0][p[0]->prev];
        p[1] = &c->paths[1][p[1]->prev];
    }
    c->band[0] = nodes[0][0]->state;
    c->band[1] = nodes[1][0]->state;
}

static av_always_inline void encode_byte(G722Context *c, uint8_t *dst,
                                         const int16_t *samples)
{
    int xlow, xhigh, ilow, ihigh;
    filter_samples(c, samples, &xlow, &xhigh);
    ihigh = encode_high(&c->band[1], xhigh);
    ilow  = encode_low (&c->band[0], xlow);
    ff_g722_update_high_predictor(&c->band[1], c->band[1].scale_factor *
                                  ff_g722_high_inv_quant[ihigh] >> 10, ihigh);
    ff_g722_update_low_predictor(&c->band[0], ilow >> 2);
    *dst = ihigh << 6 | ilow;
}

static void g722_encode_no_trellis(G722Context *c,
                                   uint8_t *dst, int nb_samples,
                                   const int16_t *samples)
{
    int i;
    for (i = 0; i < nb_samples; i += 2)
        encode_byte(c, dst++, &samples[i]);
}

static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    G722Context *c = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    int nb_samples, out_size, ret;

    out_size = (frame->nb_samples + 1) / 2;
    if ((ret = ff_alloc_packet2(avctx, avpkt, out_size)) < 0)
        return ret;

    nb_samples = frame->nb_samples - (frame->nb_samples & 1);

    if (avctx->trellis)
        g722_encode_trellis(c, avctx->trellis, avpkt->data, nb_samples, samples);
    else
        g722_encode_no_trellis(c, avpkt->data, nb_samples, samples);

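    /* Because CODEC_CAP_SMALL_LAST_FRAME is set, only the final frame may
       have an odd number of samples. The leftover sample is duplicated below
       so that the QMF still receives a full pair and one more byte is
       emitted. */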
    /* handle last frame with odd frame_size */
    if (nb_samples < frame->nb_samples) {
        int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] };
        encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples);
    }

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);
    *got_packet_ptr = 1;
    return 0;
}

AVCodec ff_adpcm_g722_encoder = {
    .name           = "g722",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ADPCM_G722,
    .priv_data_size = sizeof(G722Context),
    .init           = g722_encode_init,
    .close          = g722_encode_close,
    .encode2        = g722_encode_frame,
    .capabilities   = CODEC_CAP_SMALL_LAST_FRAME,
    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                     AV_SAMPLE_FMT_NONE },
};
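
/* A minimal usage sketch (not part of this file), assuming the encode API of
 * this libavcodec generation (avcodec_encode_audio2) and a caller that
 * already holds one frame of 16-bit mono PCM at 16 kHz in `pcm`; error
 * handling and frame/packet cleanup are omitted:
 *
 *     avcodec_register_all();
 *     AVCodec *codec      = avcodec_find_encoder_by_name("g722");
 *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
 *     ctx->sample_rate    = 16000;
 *     ctx->channels       = 1;
 *     ctx->sample_fmt     = AV_SAMPLE_FMT_S16;
 *     avcodec_open2(ctx, codec, NULL);
 *
 *     AVFrame *frame    = avcodec_alloc_frame();
 *     frame->nb_samples = ctx->frame_size;
 *     frame->data[0]    = (uint8_t *)pcm;
 *
 *     AVPacket pkt = { 0 };
 *     int got_packet;
 *     av_init_packet(&pkt);
 *     avcodec_encode_audio2(ctx, &pkt, frame, &got_packet);
 *
 * On success pkt holds frame->nb_samples / 2 bytes of G.722 data, one byte
 * per pair of input samples.
 */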