102 #define WMAPRO_MAX_CHANNELS 8 103 #define MAX_SUBFRAMES 32
105 #define MAX_FRAMESIZE 32768
107 #define WMAPRO_BLOCK_MIN_BITS 6 108 #define WMAPRO_BLOCK_MAX_BITS 13
109 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS)
110 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1)
114 #define SCALEVLCBITS 8 115 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS) 116 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS) 117 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS) 118 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS) 119 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS) 180 uint32_t decode_flags;
182 uint8_t dynamic_range_compression;
184 uint16_t samples_per_frame;
185 uint16_t log2_frame_size;
190 uint16_t min_samples_per_subframe;
198 int next_packet_start;
200 uint8_t packet_sequence_number;
213 int8_t parsed_all_subframes;
216 int16_t subframe_len;
217 int8_t channels_for_cur_subframe;
220 int8_t transmit_num_vec_coeffs;
221 int16_t* cur_sfb_offsets;
238 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b); 239 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b); 241 PRINT(
"ed sample bit depth", s->bits_per_sample);
242 PRINT_HEX(
"ed decode flags", s->decode_flags);
243 PRINT(
"samples per frame", s->samples_per_frame);
244 PRINT(
"log2 frame size", s->log2_frame_size);
245 PRINT(
"max num subframes", s->max_num_subframes);
246 PRINT(
"len prefix", s->len_prefix);
275 unsigned int channel_mask;
277 int log2_max_num_subframes;
278 int num_possible_block_sizes;
293 s->decode_flags =
AV_RL16(edata_ptr+14);
294 channel_mask =
AV_RL32(edata_ptr+2);
295 s->bits_per_sample =
AV_RL16(edata_ptr);
312 s->len_prefix = (s->decode_flags & 0x40);
320 s->samples_per_frame = 1 <<
bits;
323 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
324 s->max_num_subframes = 1 << log2_max_num_subframes;
325 if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
326 s->max_subframe_len_bit = 1;
327 s->subframe_len_bits =
av_log2(log2_max_num_subframes) + 1;
329 num_possible_block_sizes = log2_max_num_subframes + 1;
330 s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
331 s->dynamic_range_compression = (s->decode_flags & 0x80);
335 s->max_num_subframes);
341 s->min_samples_per_subframe);
361 for (i = 0; i < avctx->
channels; i++)
362 s->channel[i].prev_block_len = s->samples_per_frame;
367 if (channel_mask & 8) {
369 for (mask = 1; mask < 16; mask <<= 1) {
370 if (channel_mask & mask)
405 for (i = 0; i < num_possible_block_sizes; i++) {
406 int subframe_len = s->samples_per_frame >>
i;
410 s->sfb_offsets[
i][0] = 0;
412 for (x = 0; x <
MAX_BANDS-1 && s->sfb_offsets[
i][band - 1] < subframe_len; x++) {
416 if (offset > s->sfb_offsets[i][band - 1])
417 s->sfb_offsets[
i][band++] =
offset;
419 s->sfb_offsets[
i][band - 1] = subframe_len;
420 s->num_sfb[
i] = band - 1;
421 if (s->num_sfb[i] <= 0) {
433 for (i = 0; i < num_possible_block_sizes; i++) {
435 for (b = 0; b < s->num_sfb[
i]; b++) {
438 + s->sfb_offsets[
i][b + 1] - 1) << i) >> 1;
439 for (x = 0; x < num_possible_block_sizes; x++) {
441 while (s->sfb_offsets[x][v + 1] << x < offset)
443 s->sf_offsets[
i][
x][
b] =
v;
452 / (1 << (s->bits_per_sample - 1)));
462 for (i = 0; i < num_possible_block_sizes; i++) {
463 int block_size = s->samples_per_frame >>
i;
466 s->subwoofer_cutoffs[
i] = av_clip(cutoff, 4, block_size);
470 for (i = 0; i < 33; i++)
489 int frame_len_shift = 0;
493 if (offset == s->samples_per_frame - s->min_samples_per_subframe)
494 return s->min_samples_per_subframe;
497 if (s->max_subframe_len_bit) {
499 frame_len_shift = 1 +
get_bits(&s->gb, s->subframe_len_bits-1);
501 frame_len_shift =
get_bits(&s->gb, s->subframe_len_bits);
503 subframe_len = s->samples_per_frame >> frame_len_shift;
506 if (subframe_len < s->min_samples_per_subframe ||
507 subframe_len > s->samples_per_frame) {
540 int fixed_channel_layout = 0;
541 int min_channel_len = 0;
552 s->channel[c].num_subframes = 0;
554 if (s->max_num_subframes == 1 ||
get_bits1(&s->gb))
555 fixed_channel_layout = 1;
563 if (num_samples[c] == min_channel_len) {
564 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
565 (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
566 contains_subframe[
c] = 1;
570 contains_subframe[
c] = 0;
578 min_channel_len += subframe_len;
582 if (contains_subframe[c]) {
585 "broken frame: num subframes > 31\n");
589 num_samples[
c] += subframe_len;
591 if (num_samples[c] > s->samples_per_frame) {
593 "channel len > samples_per_frame\n");
596 }
else if (num_samples[c] <= min_channel_len) {
597 if (num_samples[c] < min_channel_len) {
598 channels_for_cur_subframe = 0;
599 min_channel_len = num_samples[
c];
601 ++channels_for_cur_subframe;
604 }
while (min_channel_len < s->samples_per_frame);
609 for (i = 0; i < s->channel[
c].num_subframes; i++) {
611 " len %i\n", s->frame_num, c, i,
612 s->channel[c].subframe_len[i]);
613 s->channel[
c].subframe_offset[
i] =
offset;
614 offset += s->channel[
c].subframe_len[
i];
636 rotation_offset[i] =
get_bits(&s->gb, 6);
644 for (x = 0; x <
i; x++) {
646 for (y = 0; y < i + 1; y++) {
649 int n = rotation_offset[offset +
x];
658 cosv = -
sin64[n - 32];
662 (v1 * sinv) - (v2 * cosv);
664 (v1 * cosv) + (v2 * sinv);
687 int remaining_channels = s->channels_for_cur_subframe;
691 "Channel transform bit");
695 for (s->num_chgroups = 0; remaining_channels &&
696 s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {
703 if (remaining_channels > 2) {
704 for (i = 0; i < s->channels_for_cur_subframe; i++) {
705 int channel_idx = s->channel_indexes_for_cur_subframe[
i];
706 if (!s->channel[channel_idx].grouped
709 s->channel[channel_idx].grouped = 1;
710 *channel_data++ = s->channel[channel_idx].coeffs;
715 for (i = 0; i < s->channels_for_cur_subframe; i++) {
716 int channel_idx = s->channel_indexes_for_cur_subframe[
i];
717 if (!s->channel[channel_idx].grouped)
718 *channel_data++ = s->channel[channel_idx].coeffs;
719 s->channel[channel_idx].grouped = 1;
728 "Unknown channel transform type");
754 "Coupled channels > 6");
770 for (i = 0; i < s->num_bands; i++) {
794 static const uint32_t fval_tab[16] = {
795 0x00000000, 0x3f800000, 0x40000000, 0x40400000,
796 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
797 0x41000000, 0x41100000, 0x41200000, 0x41300000,
798 0x41400000, 0x41500000, 0x41600000, 0x41700000,
809 av_dlog(s->
avctx,
"decode coefficients for channel %i\n", c);
812 vlc = &coef_vlc[vlctable];
824 while ((s->transmit_num_vec_coeffs || !rl_mode) &&
833 for (i = 0; i < 4; i += 2) {
847 vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
852 vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
853 vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
854 vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
858 for (i = 0; i < 4; i++) {
864 ci->
coeffs[cur_coeff] = 0;
867 rl_mode |= (++num_zeros > s->subframe_len >> 8);
874 if (cur_coeff < s->subframe_len) {
875 memset(&ci->
coeffs[cur_coeff], 0,
876 sizeof(*ci->
coeffs) * (s->subframe_len - cur_coeff));
878 level, run, 1, ci->
coeffs,
879 cur_coeff, s->subframe_len,
880 s->subframe_len, s->esc_len, 0))
900 for (i = 0; i < s->channels_for_cur_subframe; i++) {
901 int c = s->channel_indexes_for_cur_subframe[
i];
904 s->channel[
c].scale_factors = s->channel[
c].saved_scale_factors[!s->channel[
c].scale_factor_idx];
905 sf_end = s->channel[
c].scale_factors + s->num_bands;
912 if (s->channel[c].reuse_sf) {
913 const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[
c].table_idx];
915 for (b = 0; b < s->num_bands; b++)
916 s->channel[c].scale_factors[b] =
917 s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
920 if (!s->channel[c].cur_subframe ||
get_bits1(&s->gb)) {
922 if (!s->channel[c].reuse_sf) {
925 s->channel[
c].scale_factor_step =
get_bits(&s->gb, 2) + 1;
926 val = 45 / s->channel[
c].scale_factor_step;
927 for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
934 for (i = 0; i < s->num_bands; i++) {
945 sign = (code & 1) - 1;
946 skip = (code & 0x3f) >> 1;
947 }
else if (idx == 1) {
956 if (i >= s->num_bands) {
958 "invalid scale factor coding\n");
961 s->channel[
c].scale_factors[
i] += (val ^ sign) - sign;
965 s->channel[
c].scale_factor_idx = !s->channel[
c].scale_factor_idx;
966 s->channel[
c].table_idx = s->table_idx;
967 s->channel[
c].reuse_sf = 1;
971 s->channel[
c].max_scale_factor = s->channel[
c].scale_factors[0];
972 for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
973 s->channel[
c].max_scale_factor =
974 FFMAX(s->channel[c].max_scale_factor, *sf);
989 for (i = 0; i < s->num_chgroups; i++) {
990 if (s->chgroup[i].transform) {
992 const int num_channels = s->chgroup[
i].num_channels;
993 float** ch_data = s->chgroup[
i].channel_data;
994 float** ch_end = ch_data + num_channels;
995 const int8_t*
tb = s->chgroup[
i].transform_band;
999 for (sfb = s->cur_sfb_offsets;
1000 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1004 for (y = sfb[0]; y <
FFMIN(sfb[1], s->subframe_len); y++) {
1005 const float* mat = s->chgroup[
i].decorrelation_matrix;
1006 const float* data_end = data + num_channels;
1007 float* data_ptr =
data;
1010 for (ch = ch_data; ch < ch_end; ch++)
1011 *data_ptr++ = (*ch)[
y];
1013 for (ch = ch_data; ch < ch_end; ch++) {
1016 while (data_ptr < data_end)
1017 sum += *data_ptr++ * *mat++;
1023 int len =
FFMIN(sfb[1], s->subframe_len) - sfb[0];
1025 ch_data[0] + sfb[0],
1028 ch_data[1] + sfb[0],
1043 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1044 int c = s->channel_indexes_for_cur_subframe[
i];
1046 int winlen = s->channel[
c].prev_block_len;
1047 float*
start = s->channel[
c].coeffs - (winlen >> 1);
1049 if (s->subframe_len < winlen) {
1050 start += (winlen - s->subframe_len) >> 1;
1051 winlen = s->subframe_len;
1061 s->channel[
c].prev_block_len = s->subframe_len;
1072 int offset = s->samples_per_frame;
1073 int subframe_len = s->samples_per_frame;
1075 int total_samples = s->samples_per_frame * s->
avctx->
channels;
1076 int transmit_coeffs = 0;
1077 int cur_subwoofer_cutoff;
1086 s->channel[
i].grouped = 0;
1087 if (offset > s->channel[i].decoded_samples) {
1088 offset = s->channel[
i].decoded_samples;
1090 s->channel[
i].subframe_len[s->channel[
i].cur_subframe];
1095 "processing subframe with offset %i len %i\n", offset, subframe_len);
1098 s->channels_for_cur_subframe = 0;
1100 const int cur_subframe = s->channel[
i].cur_subframe;
1102 total_samples -= s->channel[
i].decoded_samples;
1105 if (offset == s->channel[i].decoded_samples &&
1106 subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1107 total_samples -= s->channel[
i].subframe_len[cur_subframe];
1108 s->channel[
i].decoded_samples +=
1109 s->channel[
i].subframe_len[cur_subframe];
1110 s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] =
i;
1111 ++s->channels_for_cur_subframe;
1118 s->parsed_all_subframes = 1;
1122 s->channels_for_cur_subframe);
1125 s->table_idx =
av_log2(s->samples_per_frame/subframe_len);
1126 s->num_bands = s->num_sfb[s->table_idx];
1127 s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1128 cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1131 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1132 int c = s->channel_indexes_for_cur_subframe[
i];
1134 s->channel[
c].coeffs = &s->channel[
c].out[(s->samples_per_frame >> 1)
1138 s->subframe_len = subframe_len;
1139 s->esc_len =
av_log2(s->subframe_len - 1) + 1;
1144 if (!(num_fill_bits =
get_bits(&s->gb, 2))) {
1146 num_fill_bits = (len ?
get_bits(&s->gb, len) : 0) + 1;
1149 if (num_fill_bits >= 0) {
1150 if (
get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1170 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1171 int c = s->channel_indexes_for_cur_subframe[
i];
1172 if ((s->channel[c].transmit_coefs =
get_bits1(&s->gb)))
1173 transmit_coeffs = 1;
1177 if (transmit_coeffs) {
1179 int quant_step = 90 * s->bits_per_sample >> 4;
1182 if ((s->transmit_num_vec_coeffs =
get_bits1(&s->gb))) {
1183 int num_bits =
av_log2((s->subframe_len + 3)/4) + 1;
1184 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1185 int c = s->channel_indexes_for_cur_subframe[
i];
1186 int num_vec_coeffs =
get_bits(&s->gb, num_bits) << 2;
1187 if (num_vec_coeffs > s->subframe_len) {
1191 s->channel[
c].num_vec_coeffs = num_vec_coeffs;
1194 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1195 int c = s->channel_indexes_for_cur_subframe[
i];
1196 s->channel[
c].num_vec_coeffs = s->subframe_len;
1202 if (step == -32 || step == 31) {
1203 const int sign = (step == 31) - 1;
1206 (step =
get_bits(&s->gb, 5)) == 31) {
1209 quant_step += ((quant +
step) ^ sign) - sign;
1211 if (quant_step < 0) {
1217 if (s->channels_for_cur_subframe == 1) {
1218 s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1220 int modifier_len =
get_bits(&s->gb, 3);
1221 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1222 int c = s->channel_indexes_for_cur_subframe[
i];
1223 s->channel[
c].quant_step = quant_step;
1226 s->channel[
c].quant_step +=
get_bits(&s->gb, modifier_len) + 1;
1228 ++s->channel[
c].quant_step;
1238 av_dlog(s->
avctx,
"BITSTREAM: subframe header length was %i\n",
1242 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1243 int c = s->channel_indexes_for_cur_subframe[
i];
1244 if (s->channel[c].transmit_coefs &&
1248 memset(s->channel[c].coeffs, 0,
1249 sizeof(*s->channel[c].coeffs) * subframe_len);
1252 av_dlog(s->
avctx,
"BITSTREAM: subframe length was %i\n",
1255 if (transmit_coeffs) {
1259 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1260 int c = s->channel_indexes_for_cur_subframe[
i];
1261 const int* sf = s->channel[
c].scale_factors;
1264 if (c == s->lfe_channel)
1265 memset(&s->tmp[cur_subwoofer_cutoff], 0,
sizeof(*s->tmp) *
1266 (subframe_len - cur_subwoofer_cutoff));
1269 for (b = 0; b < s->num_bands; b++) {
1270 const int end =
FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
1271 const int exp = s->channel[
c].quant_step -
1272 (s->channel[
c].max_scale_factor - *sf++) *
1273 s->channel[c].scale_factor_step;
1274 const float quant = pow(10.0, exp / 20.0);
1275 int start = s->cur_sfb_offsets[
b];
1277 s->channel[c].coeffs + start,
1278 quant, end - start);
1282 mdct->
imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1290 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1291 int c = s->channel_indexes_for_cur_subframe[
i];
1292 if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1296 ++s->channel[
c].cur_subframe;
1312 int more_frames = 0;
1318 len =
get_bits(gb, s->log2_frame_size);
1320 av_dlog(s->
avctx,
"decoding frame with length %x\n", len);
1337 if (s->dynamic_range_compression) {
1361 av_dlog(s->
avctx,
"BITSTREAM: frame header length was %i\n",
1365 s->parsed_all_subframes = 0;
1366 for (i = 0; i < avctx->
channels; i++) {
1367 s->channel[
i].decoded_samples = 0;
1368 s->channel[
i].cur_subframe = 0;
1369 s->channel[
i].reuse_sf = 0;
1373 while (!s->parsed_all_subframes) {
1388 for (i = 0; i < avctx->
channels; i++)
1390 s->samples_per_frame *
sizeof(*s->channel[i].out));
1392 for (i = 0; i < avctx->
channels; i++) {
1394 memcpy(&s->channel[i].out[0],
1395 &s->channel[i].out[s->samples_per_frame],
1396 s->samples_per_frame *
sizeof(*s->channel[i].out) >> 1);
1399 if (s->skip_frame) {
1407 if (s->len_prefix) {
1411 "frame[%i] would have to skip %i bits\n", s->frame_num,
1460 s->num_saved_bits = s->frame_offset;
1472 s->num_saved_bits +=
len;
1478 align =
FFMIN(align, len);
1502 int *got_frame_ptr,
AVPacket* avpkt)
1507 int buf_size = avpkt->
size;
1508 int num_bits_prev_frame;
1509 int packet_sequence_number;
1513 if (s->packet_done || s->packet_loss) {
1517 if (buf_size < avctx->block_align) {
1523 s->next_packet_start = buf_size - avctx->
block_align;
1525 s->buf_bit_size = buf_size << 3;
1529 packet_sequence_number =
get_bits(gb, 4);
1533 num_bits_prev_frame =
get_bits(gb, s->log2_frame_size);
1535 num_bits_prev_frame);
1538 if (!s->packet_loss &&
1539 ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1542 s->packet_sequence_number, packet_sequence_number);
1544 s->packet_sequence_number = packet_sequence_number;
1546 if (num_bits_prev_frame > 0) {
1547 int remaining_packet_bits = s->buf_bit_size -
get_bits_count(gb);
1548 if (num_bits_prev_frame >= remaining_packet_bits) {
1549 num_bits_prev_frame = remaining_packet_bits;
1555 save_bits(s, gb, num_bits_prev_frame, 1);
1556 av_dlog(avctx,
"accumulated %x bits of frame data\n",
1557 s->num_saved_bits - s->frame_offset);
1560 if (!s->packet_loss)
1562 }
else if (s->num_saved_bits - s->frame_offset) {
1563 av_dlog(avctx,
"ignoring %x previously saved bits\n",
1564 s->num_saved_bits - s->frame_offset);
1567 if (s->packet_loss) {
1571 s->num_saved_bits = 0;
1577 s->buf_bit_size = (avpkt->
size - s->next_packet_start) << 3;
1580 if (s->len_prefix &&
remaining_bits(s, gb) > s->log2_frame_size &&
1581 (frame_size =
show_bits(gb, s->log2_frame_size)) &&
1584 s->packet_done = !
decode_frame(s, data, got_frame_ptr);
1585 }
else if (!s->len_prefix
1594 s->packet_done = !
decode_frame(s, data, got_frame_ptr);
1599 if (s->packet_done && !s->packet_loss &&
1623 for (i = 0; i < avctx->
channels; i++)
1624 memset(s->channel[i].out, 0, s->samples_per_frame *
1625 sizeof(*s->channel[i].out));
float * channel_data[WMAPRO_MAX_CHANNELS]
transformation coefficients
static const uint16_t critical_freq[]
frequencies to divide the frequency spectrum into scale factor bands
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int decode_tilehdr(WMAProDecodeCtx *s)
Decode how the data in the frame is split into subframes.
This structure describes decoded (raw) audio or video data.
static const uint16_t vec1_huffcodes[HUFF_VEC1_SIZE]
static const float coef0_level[HUFF_COEF0_SIZE]
static const uint32_t scale_rl_huffcodes[HUFF_SCALE_RL_SIZE]
uint16_t num_vec_coeffs
number of vector coded coefficients
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void skip_bits_long(GetBitContext *s, int n)
static av_cold int init(AVCodecContext *avctx)
AVCodecContext * avctx
codec context for av_log
static const uint16_t vec4_huffcodes[HUFF_VEC4_SIZE]
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
int8_t scale_factor_step
scaling step for the current subframe
#define DECLARE_ALIGNED(n, t, v)
int ff_wma_run_level_decode(AVCodecContext *avctx, GetBitContext *gb, VLC *vlc, const float *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits)
Decode run level compressed coefficients.
static const uint8_t scale_huffbits[HUFF_SCALE_SIZE]
uint8_t table_idx
index in sf_offsets for the scale factor reference block
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
PutBitContext pb
context for filling the frame_data buffer
#define WMAPRO_MAX_CHANNELS
current decoder limitations
int block_align
number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs...
static int get_sbits(GetBitContext *s, int n)
static VLC vec1_vlc
1 coefficient per symbol
static av_cold void dump_context(WMAProDecodeCtx *s)
helper function to print the most important members of the context
static const uint8_t scale_rl_run[HUFF_SCALE_RL_SIZE]
static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Decode a single WMA packet.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const float coef1_level[HUFF_COEF1_SIZE]
enum AVSampleFormat sample_fmt
audio sample format
static void inverse_channel_transform(WMAProDecodeCtx *s)
Reconstruct the individual channel data.
#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size)
uint8_t frame_data[MAX_FRAMESIZE+FF_INPUT_BUFFER_PADDING_SIZE]
compressed frame data
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static uint8_t * append(uint8_t *buf, const uint8_t *src, int size)
#define MAX_FRAMESIZE
maximum compressed frame size
int16_t prev_block_len
length of the previous block
uint8_t grouped
channel is part of a group
static int get_bits_count(const GetBitContext *s)
#define WMAPRO_BLOCK_MAX_BITS
log2 of max block size
int * scale_factors
pointer to the scale factor values used for decoding
bitstream reader API header.
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static const uint16_t scale_huffcodes[HUFF_SCALE_SIZE]
static const uint32_t coef1_huffcodes[555]
static VLC vec4_vlc
4 coefficients per symbol
static const uint8_t frame_size[4]
void(* vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, int len)
Overlap/add with window function.
static int decode_subframe(WMAProDecodeCtx *s)
Decode a single subframe (block).
static const uint16_t mask[17]
#define MAX_SUBFRAMES
max number of subframes per channel
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static const uint8_t coef0_huffbits[666]
static float sin64[33]
sine table for decorrelation
void av_log(void *avcl, int level, const char *fmt,...)
int max_scale_factor
maximum scale factor for the current subframe
const char * name
Name of the codec implementation.
overlapping window(triangular window to avoid too much overlapping) ovidx
static void put_bits(J2kEncoderContext *s, int val, int n)
put n times val bit
static const uint8_t offset[127][2]
AVCodec ff_wmapro_decoder
wmapro decoder
static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
Decode the subframe length.
static const uint8_t coef1_huffbits[555]
int quant_step
quantization step for the current subframe
static const uint16_t coef1_run[HUFF_COEF1_SIZE]
uint64_t channel_layout
Audio channel layout.
frame specific decoder context for a single channel
static int put_bits_count(PutBitContext *s)
static VLC sf_vlc
scale factor DPCM vlc
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static const uint8_t scale_rl_huffbits[HUFF_SCALE_RL_SIZE]
static void flush(AVCodecContext *avctx)
Clear decoder buffers (for seeking).
static const uint16_t symbol_to_vec4[HUFF_VEC4_SIZE]
struct WMAProDecodeCtx WMAProDecodeCtx
main decoder context
static int decode_coeffs(WMAProDecodeCtx *s, int c)
Extract the coefficients from the bitstream.
int8_t transform
transform on / off
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
uint16_t decoded_samples
number of already processed samples
static const float *const default_decorrelation[]
default decorrelation matrix offsets
static void save_bits(WMAProDecodeCtx *s, GetBitContext *gb, int len, int append)
Fill the bit reservoir with a (partial) frame.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
#define MAX_BANDS
max number of scale factor bands
FIXME Range Coding of cr are level
int8_t transform_band[MAX_BANDS]
controls if the transform is enabled for a certain band
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
const AVS_VideoInfo int align
or the Software in violation of any applicable export control laws in any jurisdiction Except as provided by mandatorily applicable UPF has no obligation to provide you with source code to the Software In the event Software contains any source code
static VLC vec2_vlc
2 coefficients per symbol
channel group for channel transformations
int sample_rate
samples per second
#define WMAPRO_BLOCK_SIZES
possible block sizes
static const uint8_t vec4_huffbits[HUFF_VEC4_SIZE]
main external API structure.
static void close(AVCodecParserContext *s)
static const uint8_t symbol_to_vec2[HUFF_VEC2_SIZE]
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
tables for wmapro decoding
static VLC coef_vlc[2]
coefficient run length vlc codes
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
int8_t reuse_sf
share scale factors between subframes
synthesis window for stochastic i
SINETABLE_CONST float *const ff_sine_windows[14]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int decode_scale_factors(WMAProDecodeCtx *s)
Extract scale factors from the bitstream.
void(* vector_fmul_scalar)(float *dst, const float *src, float mul, int len)
Multiply a vector of floats by a scalar float.
uint8_t num_channels
number of channels in the group
static VLC sf_rl_vlc
scale factor run length vlc
float * coeffs
pointer to the subframe decode buffer
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t cur_subframe
current subframe number
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
Calculate remaining input buffer length.
void(* imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input)
static int decode_channel_transform(WMAProDecodeCtx *s)
Decode channel transformation parameters.
av_cold int ff_wma_get_frame_len_bits(int sample_rate, int version, unsigned int decode_flags)
Get the samples per frame for this stream.
FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]
MDCT context per block size.
int8_t scale_factor_idx
index for the transmitted scale factor values (used for resampling)
#define WMAPRO_BLOCK_MAX_SIZE
maximum block size
static av_cold int decode_init(AVCodecContext *avctx)
Initialize the decoder.
uint16_t subframe_len[MAX_SUBFRAMES]
subframe length in samples
#define CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
common internal api header.
static void decode_decorrelation_matrix(WMAProDecodeCtx *s, WMAProChannelGrp *chgroup)
Calculate a decorrelation matrix from the bitstream parameters.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
AVSampleFormat
Audio Sample Formats.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static void wmapro_window(WMAProDecodeCtx *s)
Apply sine window and reconstruct the output buffer.
#define WMAPRO_BLOCK_MIN_BITS
log2 of min block size
static const uint8_t scale_rl_level[HUFF_SCALE_RL_SIZE]
#define FF_DEBUG_BITSTREAM
int channels
number of audio channels
VLC_TYPE(* table)[2]
code, bits
unsigned int ff_wma_get_large_val(GetBitContext *gb)
Decode an uncompressed coefficient.
static const uint32_t coef0_huffcodes[666]
static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
Decode one WMA frame.
int frame_number
Frame counter, set by libavcodec.
static const uint8_t vec1_huffbits[HUFF_VEC1_SIZE]
static const uint16_t coef0_run[HUFF_COEF0_SIZE]
static const uint16_t vec2_huffcodes[HUFF_VEC2_SIZE]
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
float decorrelation_matrix[WMAPRO_MAX_CHANNELS *WMAPRO_MAX_CHANNELS]
void avpriv_float_dsp_init(AVFloatDSPContext *fdsp, int bit_exact)
Initialize a float DSP context.
uint8_t ** extended_data
pointers to the data planes/channels.
static av_cold int decode_end(AVCodecContext *avctx)
Uninitialize the decoder and free all resources.
This structure stores compressed data.
#define HUFF_SCALE_RL_SIZE
int nb_samples
number of audio samples (per channel) described by this frame
void ff_init_ff_sine_windows(int index)
initialize the specified entry of ff_sine_windows
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
static const uint8_t vec2_huffbits[HUFF_VEC2_SIZE]