apedec.c
1 /*
2  * Monkey's Audio lossless audio decoder
3  * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4  * based upon libdemac from Dave Chapman.
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/avassert.h"
24 #include "libavutil/channel_layout.h"
25 #include "libavutil/opt.h"
26 #include "avcodec.h"
27 #include "dsputil.h"
28 #include "bytestream.h"
29 #include "internal.h"
30 #include "get_bits.h"
31 #include "unary.h"
32 
33 /**
34  * @file
35  * Monkey's Audio lossless audio decoder
36  */
37 
38 #define MAX_CHANNELS 2
39 #define MAX_BYTESPERSAMPLE 3
40 
41 #define APE_FRAMECODE_MONO_SILENCE 1
42 #define APE_FRAMECODE_STEREO_SILENCE 3
43 #define APE_FRAMECODE_PSEUDO_STEREO 4
44 
45 #define HISTORY_SIZE 512
46 #define PREDICTOR_ORDER 8
47 /** Total size of all predictor histories */
48 #define PREDICTOR_SIZE 50
49 
50 #define YDELAYA (18 + PREDICTOR_ORDER*4)
51 #define YDELAYB (18 + PREDICTOR_ORDER*3)
52 #define XDELAYA (18 + PREDICTOR_ORDER*2)
53 #define XDELAYB (18 + PREDICTOR_ORDER)
54 
55 #define YADAPTCOEFFSA 18
56 #define XADAPTCOEFFSA 14
57 #define YADAPTCOEFFSB 10
58 #define XADAPTCOEFFSB 5
59 
60 /**
61  * Possible compression levels
62  * @{
63  */
64 enum APECompressionLevel {
65  COMPRESSION_LEVEL_FAST = 1000,
66  COMPRESSION_LEVEL_NORMAL = 2000,
67  COMPRESSION_LEVEL_HIGH = 3000,
68  COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
69  COMPRESSION_LEVEL_INSANE = 5000
70 };
71 /** @} */
72 
73 #define APE_FILTER_LEVELS 3
74 
75 /** Filter orders depending on compression level */
76 static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
77  { 0, 0, 0 },
78  { 16, 0, 0 },
79  { 64, 0, 0 },
80  { 32, 256, 0 },
81  { 16, 256, 1280 }
82 };
83 
84 /** Filter fraction bits depending on compression level */
85 static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
86  { 0, 0, 0 },
87  { 11, 0, 0 },
88  { 11, 0, 0 },
89  { 10, 13, 0 },
90  { 11, 13, 15 }
91 };
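/*
 * The row index into both tables is fset = compression_level / 1000 - 1,
 * so e.g. COMPRESSION_LEVEL_NORMAL (2000) selects a single 16-tap filter
 * with 11 fractional bits, while COMPRESSION_LEVEL_INSANE (5000) cascades
 * 16-, 256- and 1280-tap filters with 11, 13 and 15 fractional bits.
 */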
92 
93 
94 /** Filters applied to the decoded data */
95 typedef struct APEFilter {
96  int16_t *coeffs; ///< actual coefficients used in filtering
97  int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting the actual filter coefficients
98  int16_t *historybuffer; ///< filter memory
99  int16_t *delay; ///< filtered values
100 
101  int avg;
102 } APEFilter;
103 
104 typedef struct APERice {
105  uint32_t k;
106  uint32_t ksum;
107 } APERice;
108 
109 typedef struct APERangecoder {
110  uint32_t low; ///< low end of interval
111  uint32_t range; ///< length of interval
112  uint32_t help; ///< bytes_to_follow resp. intermediate value
113  unsigned int buffer; ///< buffer for input/output
114 } APERangecoder;
115 
116 /** Filter histories */
117 typedef struct APEPredictor {
118  int32_t *buf;
119 
120  int32_t lastA[2];
121 
122  int32_t filterA[2];
123  int32_t filterB[2];
124 
125  int32_t coeffsA[2][4]; ///< adaption coefficients
126  int32_t coeffsB[2][5]; ///< adaption coefficients
127  int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
128 
129  unsigned int sample_pos;
130 } APEPredictor;
131 
132 /** Decoder context */
133 typedef struct APEContext {
134  AVClass *class; ///< class for AVOptions
135  AVCodecContext *avctx;
136  DSPContext dsp;
137  int channels;
138  int samples; ///< samples left to decode in current frame
139  int bps;
140 
141  int fileversion; ///< codec version, very important in decoding process
142  int compression_level; ///< compression levels
143  int fset; ///< which filter set to use (calculated from compression level)
144  int flags; ///< global decoder flags
145 
146  uint32_t CRC; ///< frame CRC
147  int frameflags; ///< frame flags
148  APEPredictor predictor; ///< predictor used for final reconstruction
149 
150  int32_t *decoded_buffer;
151  int decoded_size;
152  int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
153  int blocks_per_loop; ///< maximum number of samples to decode for each call
154 
155  int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
156 
157  APERangecoder rc; ///< rangecoder used to decode actual values
158  APERice riceX; ///< rice code parameters for the second channel
159  APERice riceY; ///< rice code parameters for the first channel
160  APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
161  GetBitContext gb;
162 
163  uint8_t *data; ///< current frame data
164  uint8_t *data_end; ///< frame data end
165  int data_size; ///< frame data allocated size
166  const uint8_t *ptr; ///< current position in frame data
167 
168  int error;
169 
170  void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
171  void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
172  void (*predictor_decode_mono)(struct APEContext *ctx, int count);
173  void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
174 } APEContext;
175 
176 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
177  int32_t *decoded1, int count);
178 
179 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
180 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
181 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
182 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
183 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
184 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
185 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
186 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
187 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
188 
189 static void predictor_decode_mono_3800(APEContext *ctx, int count);
190 static void predictor_decode_stereo_3800(APEContext *ctx, int count);
191 static void predictor_decode_mono_3930(APEContext *ctx, int count);
192 static void predictor_decode_stereo_3930(APEContext *ctx, int count);
193 static void predictor_decode_mono_3950(APEContext *ctx, int count);
194 static void predictor_decode_stereo_3950(APEContext *ctx, int count);
195 
196 // TODO: dsputilize
197 
198 static av_cold int ape_decode_close(AVCodecContext *avctx)
199 {
200  APEContext *s = avctx->priv_data;
201  int i;
202 
203  for (i = 0; i < APE_FILTER_LEVELS; i++)
204  av_freep(&s->filterbuf[i]);
205 
206  av_freep(&s->decoded_buffer);
207  av_freep(&s->data);
208  s->decoded_size = s->data_size = 0;
209 
210  return 0;
211 }
212 
213 static av_cold int ape_decode_init(AVCodecContext *avctx)
214 {
215  APEContext *s = avctx->priv_data;
216  int i;
217 
218  if (avctx->extradata_size != 6) {
219  av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
220  return AVERROR(EINVAL);
221  }
222  if (avctx->channels > 2) {
223  av_log(avctx, AV_LOG_ERROR, "Only mono and stereo are supported\n");
224  return AVERROR(EINVAL);
225  }
226  s->bps = avctx->bits_per_coded_sample;
227  switch (s->bps) {
228  case 8:
229  avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
230  break;
231  case 16:
232  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
233  break;
234  case 24:
235  avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
236  break;
237  default:
238  avpriv_request_sample(avctx,
239  "%d bits per coded sample", s->bps);
240  return AVERROR_PATCHWELCOME;
241  }
242  s->avctx = avctx;
243  s->channels = avctx->channels;
244  s->fileversion = AV_RL16(avctx->extradata);
245  s->compression_level = AV_RL16(avctx->extradata + 2);
246  s->flags = AV_RL16(avctx->extradata + 4);
247 
248  av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n",
249  s->compression_level, s->flags);
250  if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
251  !s->compression_level ||
252  (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
253  av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
254  s->compression_level);
255  return AVERROR_INVALIDDATA;
256  }
257  s->fset = s->compression_level / 1000 - 1;
258  for (i = 0; i < APE_FILTER_LEVELS; i++) {
259  if (!ape_filter_orders[s->fset][i])
260  break;
261  FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
262  (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
263  filter_alloc_fail);
264  }
265 
266  if (s->fileversion < 3860) {
267  s->entropy_decode_mono = entropy_decode_mono_0000;
268  s->entropy_decode_stereo = entropy_decode_stereo_0000;
269  } else if (s->fileversion < 3900) {
270  s->entropy_decode_mono = entropy_decode_mono_3860;
271  s->entropy_decode_stereo = entropy_decode_stereo_3860;
272  } else if (s->fileversion < 3930) {
273  s->entropy_decode_mono = entropy_decode_mono_3900;
274  s->entropy_decode_stereo = entropy_decode_stereo_3900;
275  } else if (s->fileversion < 3990) {
276  s->entropy_decode_mono = entropy_decode_mono_3900;
277  s->entropy_decode_stereo = entropy_decode_stereo_3930;
278  } else {
279  s->entropy_decode_mono = entropy_decode_mono_3990;
280  s->entropy_decode_stereo = entropy_decode_stereo_3990;
281  }
282 
283  if (s->fileversion < 3930) {
284  s->predictor_decode_mono = predictor_decode_mono_3800;
285  s->predictor_decode_stereo = predictor_decode_stereo_3800;
286  } else if (s->fileversion < 3950) {
287  s->predictor_decode_mono = predictor_decode_mono_3930;
288  s->predictor_decode_stereo = predictor_decode_stereo_3930;
289  } else {
290  s->predictor_decode_mono = predictor_decode_mono_3950;
291  s->predictor_decode_stereo = predictor_decode_stereo_3950;
292  }
293 
294  ff_dsputil_init(&s->dsp, avctx);
295  avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
296 
297  return 0;
298 filter_alloc_fail:
299  ape_decode_close(avctx);
300  return AVERROR(ENOMEM);
301 }
302 
303 /**
304  * @name APE range decoding functions
305  * @{
306  */
307 
308 #define CODE_BITS 32
309 #define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
310 #define SHIFT_BITS (CODE_BITS - 9)
311 #define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
312 #define BOTTOM_VALUE (TOP_VALUE >> 8)
313 
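/*
 * The range decoder keeps a conventional (low, range) pair; whenever range
 * drops to BOTTOM_VALUE or below, range_dec_normalize() shifts in one more
 * byte of frame data and scales both values by 256.  Running out of input
 * only sets ctx->error, so the failure is reported at frame level.
 */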
314 /** Start the decoder */
315 static inline void range_start_decoding(APEContext *ctx)
316 {
317  ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
318  ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
319  ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
320 }
321 
322 /** Perform normalization */
323 static inline void range_dec_normalize(APEContext *ctx)
324 {
325  while (ctx->rc.range <= BOTTOM_VALUE) {
326  ctx->rc.buffer <<= 8;
327  if(ctx->ptr < ctx->data_end) {
328  ctx->rc.buffer += *ctx->ptr;
329  ctx->ptr++;
330  } else {
331  ctx->error = 1;
332  }
333  ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
334  ctx->rc.range <<= 8;
335  }
336 }
337 
338 /**
339  * Calculate cumulative frequency for next symbol. Does NO update!
340  * @param ctx decoder context
341  * @param tot_f is the total frequency or (code_value)1<<shift
342  * @return the cumulative frequency
343  */
344 static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
345 {
346  range_dec_normalize(ctx);
347  ctx->rc.help = ctx->rc.range / tot_f;
348  return ctx->rc.low / ctx->rc.help;
349 }
350 
351 /**
352  * Decode value with given size in bits
353  * @param ctx decoder context
354  * @param shift number of bits to decode
355  */
356 static inline int range_decode_culshift(APEContext *ctx, int shift)
357 {
358  range_dec_normalize(ctx);
359  ctx->rc.help = ctx->rc.range >> shift;
360  return ctx->rc.low / ctx->rc.help;
361 }
362 
363 
364 /**
365  * Update decoding state
366  * @param ctx decoder context
367  * @param sy_f the interval length (frequency of the symbol)
368  * @param lt_f the lower end (frequency sum of < symbols)
369  */
370 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
371 {
372  ctx->rc.low -= ctx->rc.help * lt_f;
373  ctx->rc.range = ctx->rc.help * sy_f;
374 }
375 
376 /** Decode n bits (n <= 16) without modelling */
377 static inline int range_decode_bits(APEContext *ctx, int n)
378 {
379  int sym = range_decode_culshift(ctx, n);
380  range_decode_update(ctx, 1, sym);
381  return sym;
382 }
383 
384 
385 #define MODEL_ELEMENTS 64
386 
387 /**
388  * Fixed probabilities for symbols in Monkey Audio version 3.97
389  */
390 static const uint16_t counts_3970[22] = {
391  0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
392  62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
393  65450, 65469, 65480, 65487, 65491, 65493,
394 };
395 
396 /**
397  * Probability ranges for symbols in Monkey Audio version 3.97
398  */
399 static const uint16_t counts_diff_3970[21] = {
400  14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
401  1104, 677, 415, 248, 150, 89, 54, 31,
402  19, 11, 7, 4, 2,
403 };
404 
405 /**
406  * Fixed probabilities for symbols in Monkey Audio version 3.98
407  */
408 static const uint16_t counts_3980[22] = {
409  0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
410  64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
411  65485, 65488, 65490, 65491, 65492, 65493,
412 };
413 
414 /**
415  * Probability ranges for symbols in Monkey Audio version 3.98
416  */
417 static const uint16_t counts_diff_3980[21] = {
418  19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
419  261, 119, 65, 31, 19, 10, 6, 3,
420  3, 2, 1, 1, 1,
421 };
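/*
 * Both pairs of tables are cumulative frequencies on a 16-bit scale:
 * counts[] holds the lower bound of each symbol's interval and
 * counts_diff[] its width.  Only the most probable values are modelled
 * explicitly; cumulative frequencies above 65492 are mapped directly to
 * the remaining rare symbols in range_get_symbol().
 */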
422 
423 /**
424  * Decode symbol
425  * @param ctx decoder context
426  * @param counts probability range start position
427  * @param counts_diff probability range widths
428  */
429 static inline int range_get_symbol(APEContext *ctx,
430  const uint16_t counts[],
431  const uint16_t counts_diff[])
432 {
433  int symbol, cf;
434 
435  cf = range_decode_culshift(ctx, 16);
436 
437  if(cf > 65492){
438  symbol= cf - 65535 + 63;
439  range_decode_update(ctx, 1, cf);
440  if(cf > 65535)
441  ctx->error=1;
442  return symbol;
443  }
444  /* figure out the symbol inefficiently; a binary search would be much better */
445  for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
446 
447  range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
448 
449  return symbol;
450 }
451 /** @} */ // group rangecoder
452 
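/*
 * update_rice() keeps ksum as a decaying sum of recent magnitudes and
 * steers the Rice parameter k so that 2^(k+4) <= ksum < 2^(k+5),
 * i.e. k roughly tracks the log2 of the average residual magnitude.
 */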
453 static inline void update_rice(APERice *rice, unsigned int x)
454 {
455  int lim = rice->k ? (1 << (rice->k + 4)) : 0;
456  rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
457 
458  if (rice->ksum < lim)
459  rice->k--;
460  else if (rice->ksum >= (1 << (rice->k + 5)))
461  rice->k++;
462 }
463 
464 static inline int get_rice_ook(GetBitContext *gb, int k)
465 {
466  unsigned int x;
467 
468  x = get_unary(gb, 1, get_bits_left(gb));
469 
470  if (k)
471  x = (x << k) | get_bits(gb, k);
472 
473  return x;
474 }
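/*
 * All of the value decoders below store the sign in the least significant
 * bit of the unsigned code: an odd x becomes (x >> 1) + 1 and an even x
 * becomes -(x >> 1), so 0, 1, 2, 3, ... decode to 0, +1, -1, +2, ...
 */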
475 
476 static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
477  APERice *rice)
478 {
479  unsigned int x, overflow;
480 
481  overflow = get_unary(gb, 1, get_bits_left(gb));
482 
483  if (ctx->fileversion > 3880) {
484  while (overflow >= 16) {
485  overflow -= 16;
486  rice->k += 4;
487  }
488  }
489 
490  if (!rice->k)
491  x = overflow;
492  else
493  x = (overflow << rice->k) + get_bits(gb, rice->k);
494 
495  rice->ksum += x - (rice->ksum + 8 >> 4);
496  if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
497  rice->k--;
498  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
499  rice->k++;
500 
501  /* Convert to signed */
502  if (x & 1)
503  return (x >> 1) + 1;
504  else
505  return -(x >> 1);
506 }
507 
508 static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
509 {
510  unsigned int x, overflow;
511  int tmpk;
512 
513  overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
514 
515  if (overflow == (MODEL_ELEMENTS - 1)) {
516  tmpk = range_decode_bits(ctx, 5);
517  overflow = 0;
518  } else
519  tmpk = (rice->k < 1) ? 0 : rice->k - 1;
520 
521  if (tmpk <= 16 || ctx->fileversion < 3910)
522  x = range_decode_bits(ctx, tmpk);
523  else if (tmpk <= 32) {
524  x = range_decode_bits(ctx, 16);
525  x |= (range_decode_bits(ctx, tmpk - 16) << 16);
526  } else {
527  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
528  return AVERROR_INVALIDDATA;
529  }
530  x += overflow << tmpk;
531 
532  update_rice(rice, x);
533 
534  /* Convert to signed */
535  if (x & 1)
536  return (x >> 1) + 1;
537  else
538  return -(x >> 1);
539 }
540 
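/*
 * From version 3.99 on, each residual is coded as overflow * pivot + base:
 * pivot is derived from the running magnitude estimate (ksum >> 5), the
 * overflow uses the fixed 3.98 symbol model, and base is coded uniformly,
 * split into two steps when pivot does not fit into 16 bits.
 */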
541 static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
542 {
543  unsigned int x, overflow;
544  int base, pivot;
545 
546  pivot = rice->ksum >> 5;
547  if (pivot == 0)
548  pivot = 1;
549 
550  overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
551 
552  if (overflow == (MODEL_ELEMENTS - 1)) {
553  overflow = range_decode_bits(ctx, 16) << 16;
554  overflow |= range_decode_bits(ctx, 16);
555  }
556 
557  if (pivot < 0x10000) {
558  base = range_decode_culfreq(ctx, pivot);
559  range_decode_update(ctx, 1, base);
560  } else {
561  int base_hi = pivot, base_lo;
562  int bbits = 0;
563 
564  while (base_hi & ~0xFFFF) {
565  base_hi >>= 1;
566  bbits++;
567  }
568  base_hi = range_decode_culfreq(ctx, base_hi + 1);
569  range_decode_update(ctx, 1, base_hi);
570  base_lo = range_decode_culfreq(ctx, 1 << bbits);
571  range_decode_update(ctx, 1, base_lo);
572 
573  base = (base_hi << bbits) + base_lo;
574  }
575 
576  x = base + overflow * pivot;
577 
578  update_rice(rice, x);
579 
580  /* Convert to signed */
581  if (x & 1)
582  return (x >> 1) + 1;
583  else
584  return -(x >> 1);
585 }
586 
587 static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
588  int32_t *out, APERice *rice, int blockstodecode)
589 {
590  int i;
591  int ksummax, ksummin;
592 
593  rice->ksum = 0;
594  for (i = 0; i < 5; i++) {
595  out[i] = get_rice_ook(&ctx->gb, 10);
596  rice->ksum += out[i];
597  }
598  rice->k = av_log2(rice->ksum / 10) + 1;
599  for (; i < 64; i++) {
600  out[i] = get_rice_ook(&ctx->gb, rice->k);
601  rice->ksum += out[i];
602  rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
603  }
604  ksummax = 1 << rice->k + 7;
605  ksummin = rice->k ? (1 << rice->k + 6) : 0;
606  for (; i < blockstodecode; i++) {
607  out[i] = get_rice_ook(&ctx->gb, rice->k);
608  rice->ksum += out[i] - out[i - 64];
609  while (rice->ksum < ksummin) {
610  rice->k--;
611  ksummin = rice->k ? ksummin >> 1 : 0;
612  ksummax >>= 1;
613  }
614  while (rice->ksum >= ksummax) {
615  rice->k++;
616  if (rice->k > 24)
617  return;
618  ksummax <<= 1;
619  ksummin = ksummin ? ksummin << 1 : 128;
620  }
621  }
622 
623  for (i = 0; i < blockstodecode; i++) {
624  if (out[i] & 1)
625  out[i] = (out[i] >> 1) + 1;
626  else
627  out[i] = -(out[i] >> 1);
628  }
629 }
630 
631 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
632 {
633  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
634  blockstodecode);
635 }
636 
637 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
638 {
639  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
640  blockstodecode);
641  decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
642  blockstodecode);
643 }
644 
645 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
646 {
647  int32_t *decoded0 = ctx->decoded[0];
648 
649  while (blockstodecode--)
650  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
651 }
652 
653 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
654 {
655  int32_t *decoded0 = ctx->decoded[0];
656  int32_t *decoded1 = ctx->decoded[1];
657  int blocks = blockstodecode;
658 
659  while (blockstodecode--)
660  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
661  while (blocks--)
662  *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
663 }
664 
665 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
666 {
667  int32_t *decoded0 = ctx->decoded[0];
668 
669  while (blockstodecode--)
670  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
671 }
672 
673 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
674 {
675  int32_t *decoded0 = ctx->decoded[0];
676  int32_t *decoded1 = ctx->decoded[1];
677  int blocks = blockstodecode;
678 
679  while (blockstodecode--)
680  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
681  range_dec_normalize(ctx);
682  // because of some implementation peculiarities we need to backpedal here
683  ctx->ptr -= 1;
684  range_start_decoding(ctx);
685  while (blocks--)
686  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
687 }
688 
689 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
690 {
691  int32_t *decoded0 = ctx->decoded[0];
692  int32_t *decoded1 = ctx->decoded[1];
693 
694  while (blockstodecode--) {
695  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
696  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
697  }
698 }
699 
700 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
701 {
702  int32_t *decoded0 = ctx->decoded[0];
703 
704  while (blockstodecode--)
705  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
706 }
707 
708 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
709 {
710  int32_t *decoded0 = ctx->decoded[0];
711  int32_t *decoded1 = ctx->decoded[1];
712 
713  while (blockstodecode--) {
714  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
715  *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
716  }
717 }
718 
719 static int init_entropy_decoder(APEContext *ctx)
720 {
721  /* Read the CRC */
722  if (ctx->fileversion >= 3900) {
723  if (ctx->data_end - ctx->ptr < 6)
724  return AVERROR_INVALIDDATA;
725  ctx->CRC = bytestream_get_be32(&ctx->ptr);
726  } else {
727  ctx->CRC = get_bits_long(&ctx->gb, 32);
728  }
729 
730  /* Read the frame flags if they exist */
731  ctx->frameflags = 0;
732  if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
733  ctx->CRC &= ~0x80000000;
734 
735  if (ctx->data_end - ctx->ptr < 6)
736  return AVERROR_INVALIDDATA;
737  ctx->frameflags = bytestream_get_be32(&ctx->ptr);
738  }
739 
740  /* Initialize the rice structs */
741  ctx->riceX.k = 10;
742  ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
743  ctx->riceY.k = 10;
744  ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
745 
746  if (ctx->fileversion >= 3900) {
747  /* The first 8 bits of input are ignored. */
748  ctx->ptr++;
749 
750  range_start_decoding(ctx);
751  }
752 
753  return 0;
754 }
755 
756 static const int32_t initial_coeffs_fast_3320[1] = {
757  375,
758 };
759 
760 static const int32_t initial_coeffs_a_3800[3] = {
761  64, 115, 64,
762 };
763 
764 static const int32_t initial_coeffs_b_3800[2] = {
765  740, 0
766 };
767 
768 static const int32_t initial_coeffs_3930[4] = {
769  360, 317, -109, 98
770 };
771 
772 static void init_predictor_decoder(APEContext *ctx)
773 {
774  APEPredictor *p = &ctx->predictor;
775 
776  /* Zero the history buffers */
777  memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
778  p->buf = p->historybuffer;
779 
780  /* Initialize and zero the coefficients */
781  if (ctx->fileversion < 3930) {
782  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
783  memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
784  sizeof(initial_coeffs_fast_3320));
785  memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
786  sizeof(initial_coeffs_fast_3320));
787  } else {
788  memcpy(p->coeffsA[0], initial_coeffs_a_3800,
789  sizeof(initial_coeffs_a_3800));
790  memcpy(p->coeffsA[1], initial_coeffs_a_3800,
791  sizeof(initial_coeffs_a_3800));
792  }
793  } else {
794  memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
795  memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
796  }
797  memset(p->coeffsB, 0, sizeof(p->coeffsB));
798  if (ctx->fileversion < 3930) {
799  memcpy(p->coeffsB[0], initial_coeffs_b_3800,
800  sizeof(initial_coeffs_b_3800));
801  memcpy(p->coeffsB[1], initial_coeffs_b_3800,
802  sizeof(initial_coeffs_b_3800));
803  }
804 
805  p->filterA[0] = p->filterA[1] = 0;
806  p->filterB[0] = p->filterB[1] = 0;
807  p->lastA[0] = p->lastA[1] = 0;
808 
809  p->sample_pos = 0;
810 }
811 
812 /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
813 static inline int APESIGN(int32_t x) {
814  return (x < 0) - (x > 0);
815 }
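/*
 * The predictors below adapt their coefficients in sign-sign LMS fashion:
 * each coefficient is nudged by a small fixed step whose direction is the
 * product of the sign of the corresponding history sample and the sign of
 * the current residual.
 */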
816 
817 static av_always_inline int filter_fast_3320(APEPredictor *p,
818  const int decoded, const int filter,
819  const int delayA)
820 {
821  int32_t predictionA;
822 
823  p->buf[delayA] = p->lastA[filter];
824  if (p->sample_pos < 3) {
825  p->lastA[filter] = decoded;
826  p->filterA[filter] = decoded;
827  return decoded;
828  }
829 
830  predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1];
831  p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9);
832 
833  if ((decoded ^ predictionA) > 0)
834  p->coeffsA[filter][0]++;
835  else
836  p->coeffsA[filter][0]--;
837 
838  p->filterA[filter] += p->lastA[filter];
839 
840  return p->filterA[filter];
841 }
842 
843 static av_always_inline int filter_3800(APEPredictor *p,
844  const int decoded, const int filter,
845  const int delayA, const int delayB,
846  const int start, const int shift)
847 {
848  int32_t predictionA, predictionB, sign;
849  int32_t d0, d1, d2, d3, d4;
850 
851  p->buf[delayA] = p->lastA[filter];
852  p->buf[delayB] = p->filterB[filter];
853  if (p->sample_pos < start) {
854  predictionA = decoded + p->filterA[filter];
855  p->lastA[filter] = decoded;
856  p->filterB[filter] = decoded;
857  p->filterA[filter] = predictionA;
858  return predictionA;
859  }
860  d2 = p->buf[delayA];
861  d1 = (p->buf[delayA] - p->buf[delayA - 1]) << 1;
862  d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) << 3);
863  d3 = p->buf[delayB] * 2 - p->buf[delayB - 1];
864  d4 = p->buf[delayB];
865 
866  predictionA = d0 * p->coeffsA[filter][0] +
867  d1 * p->coeffsA[filter][1] +
868  d2 * p->coeffsA[filter][2];
869 
870  sign = APESIGN(decoded);
871  p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
872  p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
873  p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
874 
875  predictionB = d3 * p->coeffsB[filter][0] -
876  d4 * p->coeffsB[filter][1];
877  p->lastA[filter] = decoded + (predictionA >> 11);
878  sign = APESIGN(p->lastA[filter]);
879  p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
880  p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
881 
882  p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
883  p->filterA[filter] = p->filterB[filter] + ((p->filterA[filter] * 31) >> 5);
884 
885  return p->filterA[filter];
886 }
887 
888 static void long_filter_high_3800(int32_t *buffer, int order, int shift,
889  int32_t *coeffs, int32_t *delay, int length)
890 {
891  int i, j;
892  int32_t dotprod, sign;
893 
894  memset(coeffs, 0, order * sizeof(*coeffs));
895  for (i = 0; i < order; i++)
896  delay[i] = buffer[i];
897  for (i = order; i < length; i++) {
898  dotprod = 0;
899  sign = APESIGN(buffer[i]);
900  for (j = 0; j < order; j++) {
901  dotprod += delay[j] * coeffs[j];
902  coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign;
903  }
904  buffer[i] -= dotprod >> shift;
905  for (j = 0; j < order - 1; j++)
906  delay[j] = delay[j + 1];
907  delay[order - 1] = buffer[i];
908  }
909 }
910 
911 static void long_filter_ehigh_3830(int32_t *buffer, int length)
912 {
913  int i, j;
914  int32_t dotprod, sign;
915  int32_t coeffs[8], delay[8];
916 
917  memset(coeffs, 0, sizeof(coeffs));
918  memset(delay, 0, sizeof(delay));
919  for (i = 0; i < length; i++) {
920  dotprod = 0;
921  sign = APESIGN(buffer[i]);
922  for (j = 7; j >= 0; j--) {
923  dotprod += delay[j] * coeffs[j];
924  coeffs[j] -= (((delay[j] >> 30) & 2) - 1) * sign;
925  }
926  for (j = 7; j > 0; j--)
927  delay[j] = delay[j - 1];
928  delay[0] = buffer[i];
929  buffer[i] -= dotprod >> 9;
930  }
931 }
932 
933 static void predictor_decode_stereo_3800(APEContext *ctx, int count)
934 {
935  APEPredictor *p = &ctx->predictor;
936  int32_t *decoded0 = ctx->decoded[0];
937  int32_t *decoded1 = ctx->decoded[1];
938  int32_t coeffs[256], delay[256];
939  int start = 4, shift = 10;
940 
941  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
942  start = 16;
943  long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count);
944  long_filter_high_3800(decoded1, 16, 9, coeffs, delay, count);
945  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
946  int order = 128, shift2 = 11;
947 
948  if (ctx->fileversion >= 3830) {
949  order <<= 1;
950  shift++;
951  shift2++;
952  long_filter_ehigh_3830(decoded0 + order, count - order);
953  long_filter_ehigh_3830(decoded1 + order, count - order);
954  }
955  start = order;
956  long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count);
957  long_filter_high_3800(decoded1, order, shift2, coeffs, delay, count);
958  }
959 
960  while (count--) {
961  int X = *decoded0, Y = *decoded1;
962  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
963  *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
964  decoded0++;
965  *decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
966  decoded1++;
967  } else {
968  *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
969  start, shift);
970  decoded0++;
971  *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
972  start, shift);
973  decoded1++;
974  }
975 
976  /* Combined */
977  p->buf++;
978  p->sample_pos++;
979 
980  /* Have we filled the history buffer? */
981  if (p->buf == p->historybuffer + HISTORY_SIZE) {
982  memmove(p->historybuffer, p->buf,
983  PREDICTOR_SIZE * sizeof(*p->historybuffer));
984  p->buf = p->historybuffer;
985  }
986  }
987 }
988 
989 static void predictor_decode_mono_3800(APEContext *ctx, int count)
990 {
991  APEPredictor *p = &ctx->predictor;
992  int32_t *decoded0 = ctx->decoded[0];
993  int32_t coeffs[256], delay[256];
994  int start = 4, shift = 10;
995 
996  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
997  start = 16;
998  long_filter_high_3800(decoded0, 16, 9, coeffs, delay, count);
999  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
1000  int order = 128, shift2 = 11;
1001 
1002  if (ctx->fileversion >= 3830) {
1003  order <<= 1;
1004  shift++;
1005  shift2++;
1006  long_filter_ehigh_3830(decoded0 + order, count - order);
1007  }
1008  start = order;
1009  long_filter_high_3800(decoded0, order, shift2, coeffs, delay, count);
1010  }
1011 
1012  while (count--) {
1013  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
1014  *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
1015  decoded0++;
1016  } else {
1017  *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
1018  start, shift);
1019  decoded0++;
1020  }
1021 
1022  /* Combined */
1023  p->buf++;
1024  p->sample_pos++;
1025 
1026  /* Have we filled the history buffer? */
1027  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1028  memmove(p->historybuffer, p->buf,
1029  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1030  p->buf = p->historybuffer;
1031  }
1032  }
1033 }
1034 
1035 static av_always_inline int predictor_update_3930(APEPredictor *p,
1036  const int decoded, const int filter,
1037  const int delayA)
1038 {
1039  int32_t predictionA, sign;
1040  int32_t d0, d1, d2, d3;
1041 
1042  p->buf[delayA] = p->lastA[filter];
1043  d0 = p->buf[delayA ];
1044  d1 = p->buf[delayA ] - p->buf[delayA - 1];
1045  d2 = p->buf[delayA - 1] - p->buf[delayA - 2];
1046  d3 = p->buf[delayA - 2] - p->buf[delayA - 3];
1047 
1048  predictionA = d0 * p->coeffsA[filter][0] +
1049  d1 * p->coeffsA[filter][1] +
1050  d2 * p->coeffsA[filter][2] +
1051  d3 * p->coeffsA[filter][3];
1052 
1053  p->lastA[filter] = decoded + (predictionA >> 9);
1054  p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);
1055 
1056  sign = APESIGN(decoded);
1057  p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign;
1058  p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign;
1059  p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign;
1060  p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign;
1061 
1062  return p->filterA[filter];
1063 }
1064 
1065 static void predictor_decode_stereo_3930(APEContext *ctx, int count)
1066 {
1067  APEPredictor *p = &ctx->predictor;
1068  int32_t *decoded0 = ctx->decoded[0];
1069  int32_t *decoded1 = ctx->decoded[1];
1070 
1071  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1072 
1073  while (count--) {
1074  /* Predictor Y */
1075  int Y = *decoded1, X = *decoded0;
1076  *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
1077  decoded0++;
1078  *decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
1079  decoded1++;
1080 
1081  /* Combined */
1082  p->buf++;
1083 
1084  /* Have we filled the history buffer? */
1085  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1086  memmove(p->historybuffer, p->buf,
1087  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1088  p->buf = p->historybuffer;
1089  }
1090  }
1091 }
1092 
1093 static void predictor_decode_mono_3930(APEContext *ctx, int count)
1094 {
1095  APEPredictor *p = &ctx->predictor;
1096  int32_t *decoded0 = ctx->decoded[0];
1097 
1098  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1099 
1100  while (count--) {
1101  *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
1102  decoded0++;
1103 
1104  p->buf++;
1105 
1106  /* Have we filled the history buffer? */
1107  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1108  memmove(p->historybuffer, p->buf,
1109  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1110  p->buf = p->historybuffer;
1111  }
1112  }
1113 }
1114 
1115 static av_always_inline int predictor_update_filter(APEPredictor *p,
1116  const int decoded, const int filter,
1117  const int delayA, const int delayB,
1118  const int adaptA, const int adaptB)
1119 {
1120  int32_t predictionA, predictionB, sign;
1121 
1122  p->buf[delayA] = p->lastA[filter];
1123  p->buf[adaptA] = APESIGN(p->buf[delayA]);
1124  p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
1125  p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
1126 
1127  predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
1128  p->buf[delayA - 1] * p->coeffsA[filter][1] +
1129  p->buf[delayA - 2] * p->coeffsA[filter][2] +
1130  p->buf[delayA - 3] * p->coeffsA[filter][3];
1131 
1132  /* Apply a scaled first-order filter compression */
1133  p->buf[delayB] = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
1134  p->buf[adaptB] = APESIGN(p->buf[delayB]);
1135  p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
1136  p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
1137  p->filterB[filter] = p->filterA[filter ^ 1];
1138 
1139  predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
1140  p->buf[delayB - 1] * p->coeffsB[filter][1] +
1141  p->buf[delayB - 2] * p->coeffsB[filter][2] +
1142  p->buf[delayB - 3] * p->coeffsB[filter][3] +
1143  p->buf[delayB - 4] * p->coeffsB[filter][4];
1144 
1145  p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 10);
1146  p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);
1147 
1148  sign = APESIGN(decoded);
1149  p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
1150  p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
1151  p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
1152  p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
1153  p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
1154  p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
1155  p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
1156  p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
1157  p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
1158 
1159  return p->filterA[filter];
1160 }
1161 
1162 static void predictor_decode_stereo_3950(APEContext *ctx, int count)
1163 {
1164  APEPredictor *p = &ctx->predictor;
1165  int32_t *decoded0 = ctx->decoded[0];
1166  int32_t *decoded1 = ctx->decoded[1];
1167 
1168  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1169 
1170  while (count--) {
1171  /* Predictor Y */
1172  *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
1173  YADAPTCOEFFSA, YADAPTCOEFFSB);
1174  decoded0++;
1175  *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
1176  XADAPTCOEFFSA, XADAPTCOEFFSB);
1177  decoded1++;
1178 
1179  /* Combined */
1180  p->buf++;
1181 
1182  /* Have we filled the history buffer? */
1183  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1184  memmove(p->historybuffer, p->buf,
1185  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1186  p->buf = p->historybuffer;
1187  }
1188  }
1189 }
1190 
1191 static void predictor_decode_mono_3950(APEContext *ctx, int count)
1192 {
1193  APEPredictor *p = &ctx->predictor;
1194  int32_t *decoded0 = ctx->decoded[0];
1195  int32_t predictionA, currentA, A, sign;
1196 
1197  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1198 
1199  currentA = p->lastA[0];
1200 
1201  while (count--) {
1202  A = *decoded0;
1203 
1204  p->buf[YDELAYA] = currentA;
1205  p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];
1206 
1207  predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
1208  p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
1209  p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
1210  p->buf[YDELAYA - 3] * p->coeffsA[0][3];
1211 
1212  currentA = A + (predictionA >> 10);
1213 
1214  p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
1215  p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
1216 
1217  sign = APESIGN(A);
1218  p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
1219  p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
1220  p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
1221  p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
1222 
1223  p->buf++;
1224 
1225  /* Have we filled the history buffer? */
1226  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1227  memmove(p->historybuffer, p->buf,
1228  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1229  p->buf = p->historybuffer;
1230  }
1231 
1232  p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
1233  *(decoded0++) = p->filterA[0];
1234  }
1235 
1236  p->lastA[0] = currentA;
1237 }
1238 
1239 static void do_init_filter(APEFilter *f, int16_t *buf, int order)
1240 {
1241  f->coeffs = buf;
1242  f->historybuffer = buf + order;
1243  f->delay = f->historybuffer + order * 2;
1244  f->adaptcoeffs = f->historybuffer + order;
1245 
1246  memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
1247  memset(f->coeffs, 0, order * sizeof(*f->coeffs));
1248  f->avg = 0;
1249 }
1250 
1251 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
1252 {
1253  do_init_filter(&f[0], buf, order);
1254  do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
1255 }
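/*
 * do_apply_filter() below runs one adaptive FIR stage per channel: the dot
 * product of coeffs and the clipped output history is rounded down by
 * fracbits fractional bits and added to the residual, after which the
 * adaptation vector is updated from the sign (and, for 3.98+ files, the
 * magnitude relative to the running average) of the new output.
 */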
1256 
1257 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
1258  int32_t *data, int count, int order, int fracbits)
1259 {
1260  int res;
1261  int absres;
1262 
1263  while (count--) {
1264  /* round fixedpoint scalar product */
1265  res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order,
1266  f->adaptcoeffs - order,
1267  order, APESIGN(*data));
1268  res = (res + (1 << (fracbits - 1))) >> fracbits;
1269  res += *data;
1270  *data++ = res;
1271 
1272  /* Update the output history */
1273  *f->delay++ = av_clip_int16(res);
1274 
1275  if (version < 3980) {
1276  /* Version ??? to < 3.98 files (untested) */
1277  f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
1278  f->adaptcoeffs[-4] >>= 1;
1279  f->adaptcoeffs[-8] >>= 1;
1280  } else {
1281  /* Version 3.98 and later files */
1282 
1283  /* Update the adaption coefficients */
1284  absres = FFABS(res);
1285  if (absres)
1286  *f->adaptcoeffs = ((res & (-1<<31)) ^ (-1<<30)) >>
1287  (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
1288  else
1289  *f->adaptcoeffs = 0;
1290 
1291  f->avg += (absres - f->avg) / 16;
1292 
1293  f->adaptcoeffs[-1] >>= 1;
1294  f->adaptcoeffs[-2] >>= 1;
1295  f->adaptcoeffs[-8] >>= 1;
1296  }
1297 
1298  f->adaptcoeffs++;
1299 
1300  /* Have we filled the history buffer? */
1301  if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
1302  memmove(f->historybuffer, f->delay - (order * 2),
1303  (order * 2) * sizeof(*f->historybuffer));
1304  f->delay = f->historybuffer + order * 2;
1305  f->adaptcoeffs = f->historybuffer + order;
1306  }
1307  }
1308 }
1309 
1310 static void apply_filter(APEContext *ctx, APEFilter *f,
1311  int32_t *data0, int32_t *data1,
1312  int count, int order, int fracbits)
1313 {
1314  do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
1315  if (data1)
1316  do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
1317 }
1318 
1319 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
1320  int32_t *decoded1, int count)
1321 {
1322  int i;
1323 
1324  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1325  if (!ape_filter_orders[ctx->fset][i])
1326  break;
1327  apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
1328  ape_filter_orders[ctx->fset][i],
1329  ape_filter_fracbits[ctx->fset][i]);
1330  }
1331 }
1332 
1333 static int init_frame_decoder(APEContext *ctx)
1334 {
1335  int i, ret;
1336  if ((ret = init_entropy_decoder(ctx)) < 0)
1337  return ret;
1338  init_predictor_decoder(ctx);
1339 
1340  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1341  if (!ape_filter_orders[ctx->fset][i])
1342  break;
1343  init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
1344  ape_filter_orders[ctx->fset][i]);
1345  }
1346  return 0;
1347 }
1348 
1349 static void ape_unpack_mono(APEContext *ctx, int count)
1350 {
1351  if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
1352  /* We are pure silence, so we're done. */
1353  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
1354  return;
1355  }
1356 
1357  ctx->entropy_decode_mono(ctx, count);
1358 
1359  /* Now apply the predictor decoding */
1360  ctx->predictor_decode_mono(ctx, count);
1361 
1362  /* Pseudo-stereo - just copy left channel to right channel */
1363  if (ctx->channels == 2) {
1364  memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
1365  }
1366 }
1367 
1368 static void ape_unpack_stereo(APEContext *ctx, int count)
1369 {
1370  int32_t left, right;
1371  int32_t *decoded0 = ctx->decoded[0];
1372  int32_t *decoded1 = ctx->decoded[1];
1373 
1374  if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
1375  /* We are pure silence, so we're done. */
1376  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
1377  return;
1378  }
1379 
1380  ctx->entropy_decode_stereo(ctx, count);
1381 
1382  /* Now apply the predictor decoding */
1383  ctx->predictor_decode_stereo(ctx, count);
1384 
1385  /* Decorrelate and scale to output depth */
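/* decoded0 carries the channel difference and decoded1 the mid signal;
 * e.g. *decoded0 == 2 and *decoded1 == 100 give left = 100 - 1 = 99 and
 * right = 99 + 2 = 101. */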
1386  while (count--) {
1387  left = *decoded1 - (*decoded0 / 2);
1388  right = left + *decoded0;
1389 
1390  *(decoded0++) = left;
1391  *(decoded1++) = right;
1392  }
1393 }
1394 
1395 static int ape_decode_frame(AVCodecContext *avctx, void *data,
1396  int *got_frame_ptr, AVPacket *avpkt)
1397 {
1398  AVFrame *frame = data;
1399  const uint8_t *buf = avpkt->data;
1400  APEContext *s = avctx->priv_data;
1401  uint8_t *sample8;
1402  int16_t *sample16;
1403  int32_t *sample24;
1404  int i, ch, ret;
1405  int blockstodecode;
1406 
1407  /* this should never be negative, but bad things will happen if it is, so
1408  check it just to make sure. */
1409  av_assert0(s->samples >= 0);
1410 
1411  if(!s->samples){
1412  uint32_t nblocks, offset;
1413  int buf_size;
1414 
1415  if (!avpkt->size) {
1416  *got_frame_ptr = 0;
1417  return 0;
1418  }
1419  if (avpkt->size < 8) {
1420  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1421  return AVERROR_INVALIDDATA;
1422  }
1423  buf_size = avpkt->size & ~3;
1424  if (buf_size != avpkt->size) {
1425  av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
1426  "extra bytes at the end will be skipped.\n");
1427  }
1428  if (s->fileversion < 3950) // previous versions overread two bytes
1429  buf_size += 2;
1430  av_fast_malloc(&s->data, &s->data_size, buf_size);
1431  if (!s->data)
1432  return AVERROR(ENOMEM);
1433  s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
1434  memset(s->data + (buf_size & ~3), 0, buf_size & 3);
1435  s->ptr = s->data;
1436  s->data_end = s->data + buf_size;
1437 
1438  nblocks = bytestream_get_be32(&s->ptr);
1439  offset = bytestream_get_be32(&s->ptr);
1440  if (s->fileversion >= 3900) {
1441  if (offset > 3) {
1442  av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
1443  s->data = NULL;
1444  return AVERROR_INVALIDDATA;
1445  }
1446  if (s->data_end - s->ptr < offset) {
1447  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1448  return AVERROR_INVALIDDATA;
1449  }
1450  s->ptr += offset;
1451  } else {
1452  init_get_bits(&s->gb, s->ptr, (s->data_end - s->ptr) * 8);
1453  if (s->fileversion > 3800)
1454  skip_bits_long(&s->gb, offset * 8);
1455  else
1456  skip_bits_long(&s->gb, offset);
1457  }
1458 
1459  if (!nblocks || nblocks > INT_MAX) {
1460  av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
1461  return AVERROR_INVALIDDATA;
1462  }
1463  s->samples = nblocks;
1464 
1465  /* Initialize the frame decoder */
1466  if (init_frame_decoder(s) < 0) {
1467  av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
1468  return AVERROR_INVALIDDATA;
1469  }
1470  }
1471 
1472  if (!s->data) {
1473  *got_frame_ptr = 0;
1474  return avpkt->size;
1475  }
1476 
1477  blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
1478  // for old files coefficients were not interleaved,
1479  // so we need to decode all of them at once
1480  if (s->fileversion < 3930)
1481  blockstodecode = s->samples;
1482 
1483  /* reallocate decoded sample buffer if needed */
1484  av_fast_malloc(&s->decoded_buffer, &s->decoded_size,
1485  2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer));
1486  if (!s->decoded_buffer)
1487  return AVERROR(ENOMEM);
1488  memset(s->decoded_buffer, 0, s->decoded_size);
1489  s->decoded[0] = s->decoded_buffer;
1490  s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
1491 
1492  /* get output buffer */
1493  frame->nb_samples = blockstodecode;
1494  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1495  return ret;
1496 
1497  s->error=0;
1498 
1499  if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
1500  ape_unpack_mono(s, blockstodecode);
1501  else
1502  ape_unpack_stereo(s, blockstodecode);
1503  emms_c();
1504 
1505  if (s->error) {
1506  s->samples=0;
1507  av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
1508  return AVERROR_INVALIDDATA;
1509  }
1510 
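/*
 * Decoded samples sit at their native depth in the low bits of each
 * int32_t: 8-bit output is re-biased to unsigned with +0x80, 16-bit is
 * copied directly, and 24-bit is shifted left by 8 to fill the signed
 * 32-bit planar output.
 */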
1511  switch (s->bps) {
1512  case 8:
1513  for (ch = 0; ch < s->channels; ch++) {
1514  sample8 = (uint8_t *)frame->data[ch];
1515  for (i = 0; i < blockstodecode; i++)
1516  *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
1517  }
1518  break;
1519  case 16:
1520  for (ch = 0; ch < s->channels; ch++) {
1521  sample16 = (int16_t *)frame->data[ch];
1522  for (i = 0; i < blockstodecode; i++)
1523  *sample16++ = s->decoded[ch][i];
1524  }
1525  break;
1526  case 24:
1527  for (ch = 0; ch < s->channels; ch++) {
1528  sample24 = (int32_t *)frame->data[ch];
1529  for (i = 0; i < blockstodecode; i++)
1530  *sample24++ = s->decoded[ch][i] << 8;
1531  }
1532  break;
1533  }
1534 
1535  s->samples -= blockstodecode;
1536 
1537  *got_frame_ptr = 1;
1538 
1539  return !s->samples ? avpkt->size : 0;
1540 }
1541 
1542 static void ape_flush(AVCodecContext *avctx)
1543 {
1544  APEContext *s = avctx->priv_data;
1545  s->samples= 0;
1546 }
1547 
1548 #define OFFSET(x) offsetof(APEContext, x)
1549 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
1550 static const AVOption options[] = {
1551  { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
1552  { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
1553  { NULL},
1554 };
1555 
1556 static const AVClass ape_decoder_class = {
1557  .class_name = "APE decoder",
1558  .item_name = av_default_item_name,
1559  .option = options,
1560  .version = LIBAVUTIL_VERSION_INT,
1561 };
1562 
1563 AVCodec ff_ape_decoder = {
1564  .name = "ape",
1565  .type = AVMEDIA_TYPE_AUDIO,
1566  .id = AV_CODEC_ID_APE,
1567  .priv_data_size = sizeof(APEContext),
1568  .init = ape_decode_init,
1569  .close = ape_decode_close,
1570  .decode = ape_decode_frame,
1571  .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
1572  .flush = ape_flush,
1573  .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
1574  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
1575  AV_SAMPLE_FMT_S16P,
1576  AV_SAMPLE_FMT_S32P,
1577  AV_SAMPLE_FMT_NONE },
1578  .priv_class = &ape_decoder_class,
1579 };
static int init_frame_decoder(APEContext *ctx)
Definition: apedec.c:1333
static const int32_t initial_coeffs_3930[4]
Definition: apedec.c:768
static void decode_array_0000(APEContext *ctx, GetBitContext *gb, int32_t *out, APERice *rice, int blockstodecode)
Definition: apedec.c:587
int compression_level
compression levels
Definition: apedec.c:142
AVCodec ff_ape_decoder
Definition: apedec.c:1563
#define MODEL_ELEMENTS
Definition: apedec.c:385
static av_always_inline int filter_3800(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int start, const int shift)
Definition: apedec.c:843
Definition: start.py:1
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2675
const char * s
Definition: avisynth_c.h:668
int32_t coeffsB[2][5]
adaption coefficients
Definition: apedec.c:126
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int decoded_size
Definition: apedec.c:151
#define YADAPTCOEFFSB
Definition: apedec.c:57
static int shift(int a, int b)
Definition: sonic.c:86
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static void range_start_decoding(APEContext *ctx)
Start the decoder.
Definition: apedec.c:315
AVOption.
Definition: opt.h:251
#define XDELAYA
Definition: apedec.c:52
static void apply_filter(APEContext *ctx, APEFilter *f, int32_t *data0, int32_t *data1, int count, int order, int fracbits)
Definition: apedec.c:1310
int fileversion
codec version, very important in decoding process
Definition: apedec.c:141
av_default_item_name
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:637
int32_t filterA[2]
Definition: apedec.c:122
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:198
void(* entropy_decode_mono)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:170
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
void(* entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:171
static int APESIGN(int32_t x)
Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero)
Definition: apedec.c:813
static void update_rice(APERice *rice, unsigned int x)
Definition: apedec.c:453
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:673
static av_cold int ape_decode_init(AVCodecContext *avctx)
Definition: apedec.c:213
unsigned int buffer
buffer for input/output
Definition: apedec.c:113
Sinusoidal phase f
static int init_entropy_decoder(APEContext *ctx)
Definition: apedec.c:719
#define AV_RL16
static void ape_flush(AVCodecContext *avctx)
Definition: apedec.c:1542
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
Definition: apedec.c:689
int version
Definition: avisynth_c.h:666
static av_always_inline int predictor_update_3930(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:1035
#define AV_CH_LAYOUT_STEREO
#define OFFSET(x)
Definition: apedec.c:1548
#define XADAPTCOEFFSA
Definition: apedec.c:56
int16_t * filterbuf[APE_FILTER_LEVELS]
filter memory
Definition: apedec.c:155
static void predictor_decode_mono_3800(APEContext *ctx, int count)
Definition: apedec.c:989
#define FFALIGN(x, a)
Definition: common.h:63
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:55
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int ape_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: apedec.c:1395
Filter histories.
Definition: apedec.c:117
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
enum AVSampleFormat sample_fmt
audio sample format
uint8_t
#define av_cold
Definition: attributes.h:78
int16_t * delay
filtered values
Definition: apedec.c:99
AVOptions.
#define Y
Definition: vf_boxblur.c:76
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1239
static const int32_t initial_coeffs_a_3800[3]
Definition: apedec.c:760
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:653
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:700
static void ape_unpack_mono(APEContext *ctx, int count)
Definition: apedec.c:1349
#define emms_c()
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
APERangecoder rc
rangecoder used to decode actual values
Definition: apedec.c:157
#define YDELAYB
Definition: apedec.c:51
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
the mask is usually to keep the same permissions Filters should remove permissions on reference they give to output whenever necessary It can be automatically done by setting the rej_perms field on the output pad Here are a few guidelines corresponding to common then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS]
Filter fraction bits depending on compression level.
Definition: apedec.c:85
uint8_t * data
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, int32_t *decoded1, int count)
Definition: apedec.c:1319
bitstream reader API header.
for audio filters
signed 32 bits, planar
Definition: samplefmt.h:59
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Decoder context.
Definition: apedec.c:133
#define A(x)
static const uint16_t counts_3970[22]
Fixed probabilities for symbols in Monkey Audio version 3.97.
Definition: apedec.c:390
static void range_dec_normalize(APEContext *ctx)
Perform normalization.
Definition: apedec.c:323
frame
Definition: stft.m:14
Discrete Time axis x
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:557
static const uint16_t counts_diff_3980[21]
Probability ranges for symbols in Monkey Audio version 3.98.
Definition: apedec.c:417
int bps
Definition: apedec.c:139
void(* predictor_decode_mono)(struct APEContext *ctx, int count)
Definition: apedec.c:172
#define YDELAYA
Definition: apedec.c:50
int32_t lastA[2]
Definition: apedec.c:120
static av_cold int ape_decode_close(AVCodecContext *avctx)
Definition: apedec.c:198
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static int ape_decode_value_3900(APEContext *ctx, APERice *rice)
Definition: apedec.c:508
int32_t historybuffer[HISTORY_SIZE+PREDICTOR_SIZE]
Definition: apedec.c:127
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Spectrum Plot time data
#define XDELAYB
Definition: apedec.c:53
int32_t * decoded_buffer
Definition: apedec.c:150
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
init variable d2
int avg
Definition: apedec.c:101
const char * name
Name of the codec implementation.
struct APEContext APEContext
Decoder context.
static int range_decode_culshift(APEContext *ctx, int shift)
Decode value with given size in bits.
Definition: apedec.c:356
#define APE_FILTER_LEVELS
Definition: apedec.c:73
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
int error
Definition: apedec.c:168
external API header
uint64_t channel_layout
Audio channel layout.
static int range_decode_bits(APEContext *ctx, int n)
Decode n bits (n <= 16) without modelling.
Definition: apedec.c:377
struct APEPredictor APEPredictor
Filter histories.
audio channel layout utility functions
static void predictor_decode_mono_3930(APEContext *ctx, int count)
Definition: apedec.c:1093
uint8_t * data
current frame data
Definition: apedec.c:163
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS]
Filter orders depending on compression level.
Definition: apedec.c:76
#define FFMIN(a, b)
Definition: common.h:58
int32_t(* scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int len, int mul)
Calculate scalar product of v1 and v2, and v1[i] += v3[i] * mul.
Definition: dsputil.h:281
static int get_rice_ook(GetBitContext *gb, int k)
Definition: apedec.c:464
ret
Definition: avfilter.c:821
static void long_filter_high_3800(int32_t *buffer, int order, int shift, int32_t *coeffs, int32_t *delay, int length)
Definition: apedec.c:888
Sampled sinusoid X
static av_always_inline int filter_fast_3320(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:817
AVCodecContext * avctx
Definition: apedec.c:135
static void ape_unpack_stereo(APEContext *ctx, int count)
Definition: apedec.c:1368
const uint8_t * ptr
current position in frame data
Definition: apedec.c:166
int32_t
static int range_decode_culfreq(APEContext *ctx, int tot_f)
Calculate culmulative frequency for next symbol.
Definition: apedec.c:344
#define FFABS(a)
Definition: common.h:53
static void predictor_decode_stereo_3930(APEContext *ctx, int count)
Definition: apedec.c:1065
uint32_t ksum
Definition: apedec.c:106
uint32_t help
bytes_to_follow resp. intermediate value
Definition: apedec.c:112
LIBAVUTIL_VERSION_INT
Definition: eval.c:55
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:708
#define APE_FRAMECODE_PSEUDO_STEREO
Definition: apedec.c:43
static void flush(AVCodecContext *avctx)
uint32_t range
length of interval
Definition: apedec.c:111
int samples
samples left to decode in current frame
Definition: apedec.c:138
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int fset
which filter set to use (calculated from compression level)
Definition: apedec.c:143
for k
static int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, APERice *rice)
Definition: apedec.c:476
NULL
Definition: eval.c:55
APERice riceX
rice code parameters for the second channel
Definition: apedec.c:158
static void predictor_decode_stereo_3950(APEContext *ctx, int count)
Definition: apedec.c:1162
typedef void(RENAME(mix_any_func_type))
static void predictor_decode_stereo_3800(APEContext *ctx, int count)
Definition: apedec.c:933
#define APE_FRAMECODE_STEREO_SILENCE
Definition: apedec.c:42
static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1251
int frameflags
frame flags
Definition: apedec.c:147
main external API structure.
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
static int ape_decode_value_3990(APEContext *ctx, APERice *rice)
Definition: apedec.c:541
uint32_t CRC
frame CRC
Definition: apedec.c:146
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
void * buf
Definition: avisynth_c.h:594
unsigned int sample_pos
Definition: apedec.c:129
static const uint16_t counts_3980[22]
Fixed probabilities for symbols in Monkey's Audio version 3.98.
Definition: apedec.c:408
static int range_get_symbol(APEContext *ctx, const uint16_t counts[], const uint16_t counts_diff[])
Decode symbol.
Definition: apedec.c:429
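
What "decode symbol" amounts to: take a cumulative frequency from the range decoder and scan a non-decreasing counts[] table until the matching symbol is found. A self-contained illustration of that lookup (linear scan, as a sketch; the toy table below is made up, not counts_3980):

#include <stdint.h>
#include <stdio.h>

/* Given a cumulative frequency cf in [0, counts[n - 1]), return the symbol s
 * with counts[s] <= cf < counts[s + 1]. */
static int symbol_from_culfreq(const uint16_t counts[], int n, uint32_t cf)
{
    int s = 0;
    while (s + 1 < n && counts[s + 1] <= cf)
        s++;
    return s;
}

int main(void)
{
    /* toy cumulative table: frequencies 5, 3, 2 -> cumulative 0, 5, 8, 10 */
    static const uint16_t counts[] = { 0, 5, 8, 10 };
    printf("%d\n", symbol_from_culfreq(counts, 4, 6)); /* prints 1 */
    return 0;
}
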
Describe the class of an AVClass context structure.
Definition: log.h:50
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: dsputil.h:208
uint32_t low
low end of interval
Definition: apedec.c:110
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
int flags
global decoder flags
Definition: apedec.c:144
APECompressionLevel
Possible compression levels.
Definition: apedec.c:64
void(* predictor_decode_stereo)(struct APEContext *ctx, int count)
Definition: apedec.c:173
#define EXTRA_BITS
Definition: apedec.c:311
int32_t coeffsA[2][4]
adaptation coefficients
Definition: apedec.c:125
static void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
Update decoding state.
Definition: apedec.c:370
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:665
uint32_t k
Definition: apedec.c:105
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:306
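
Reading 0-32 bits has two quirks worth handling: n == 0 must return 0 without touching the reader, and a full 32-bit read is usually assembled from two smaller reads because the bit cache cannot guarantee 32 valid bits at once. A self-contained sketch of that split (not the get_bits.h implementation):

#include <stdint.h>
#include <stdio.h>

typedef struct { const uint8_t *buf; unsigned pos; } BitReader;

/* Read up to 16 bits, MSB first. */
static unsigned read_bits16(BitReader *br, int n)
{
    unsigned v = 0;
    while (n--) {
        v = (v << 1) | ((br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1);
        br->pos++;
    }
    return v;
}

/* Read 0-32 bits by splitting large reads into two halves. */
static uint32_t read_bits_long(BitReader *br, int n)
{
    if (n <= 0)
        return 0;                                  /* a 0-bit read is a no-op */
    if (n <= 16)
        return read_bits16(br, n);
    return ((uint32_t)read_bits16(br, 16) << (n - 16)) | read_bits16(br, n - 16);
}

int main(void)
{
    const uint8_t data[] = { 0xDE, 0xAD, 0xBE, 0xEF };
    BitReader br = { data, 0 };
    printf("0x%08X\n", (unsigned)read_bits_long(&br, 32)); /* prints 0xDEADBEEF */
    return 0;
}
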
#define MAX_CHANNELS
Definition: apedec.c:38
static const int32_t initial_coeffs_fast_3320[1]
Definition: apedec.c:756
struct APEFilter APEFilter
Filters applied to the decoded data.
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
Definition: apedec.c:1257
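
do_apply_filter has no surviving doc comment; the underlying technique is a fixed-point FIR predictor whose taps are nudged by the sign of each residual (sign-LMS). A self-contained sketch of one such step with hypothetical names (not the actual apedec.c routine, which processes whole blocks through the DSP hooks):

#include <stdint.h>

/* One step of a fixed-point FIR predictor with sign-LMS adaptation (sketch).
 * coeffs[]  - filter taps, updated in place
 * history[] - the most recent 'order' input samples
 * adapt[]   - per-tap adaptation steps
 * residual  - the entropy-decoded prediction error for this sample */
static int32_t adaptive_filter_step(int16_t *coeffs, const int16_t *history,
                                    const int16_t *adapt, int order,
                                    int fracbits, int32_t residual)
{
    int64_t pred = 0;
    int32_t out;
    int i;

    for (i = 0; i < order; i++)                 /* fixed-point prediction */
        pred += (int32_t)coeffs[i] * history[i];

    out = residual + (int32_t)(pred >> fracbits);

    /* sign-LMS: nudge every tap by its adaptation step,
     * scaled by the sign of the residual */
    if (residual > 0)
        for (i = 0; i < order; i++) coeffs[i] += adapt[i];
    else if (residual < 0)
        for (i = 0; i < order; i++) coeffs[i] -= adapt[i];

    return out;
}
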
struct APERangecoder APERangecoder
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
#define PREDICTOR_SIZE
Total size of all predictor histories.
Definition: apedec.c:48
static const uint16_t counts_diff_3970[21]
Probability ranges for symbols in Monkey's Audio version 3.97.
Definition: apedec.c:399
int blocks_per_loop
maximum number of samples to decode for each call
Definition: apedec.c:153
#define CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
uint8_t * data_end
frame data end
Definition: apedec.c:164
common internal API header.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
APERice riceY
rice code parameters for the first channel
Definition: apedec.c:159
static const int shift2[6]
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
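
Semantics of the unary helper: count bits that differ from the stop bit, consuming the terminating stop bit, but never read more than len bits. A self-contained toy equivalent over a 32-bit word (the real helper works on a GetBitContext):

#include <stdint.h>
#include <stdio.h>

/* Toy get_unary, MSB first: returns the number of bits read that differ
 * from 'stop'; the terminating stop bit, if reached, is also consumed. */
static int get_unary_toy(uint32_t word, int *pos, int stop, int len)
{
    int i;
    for (i = 0; i < len; i++) {
        int bit = (word >> (31 - *pos)) & 1;
        (*pos)++;
        if (bit == stop)
            break;
    }
    return i;
}

int main(void)
{
    int pos = 0;
    /* bits 001..... -> two bits before the stop bit 1 */
    printf("%d\n", get_unary_toy(0x20000000u, &pos, 1, 32)); /* prints 2 */
    return 0;
}
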
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
APEFilter filters[APE_FILTER_LEVELS][2]
filters used for reconstruction
Definition: apedec.c:160
static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
Definition: apedec.c:1115
AVSampleFormat
Audio Sample Formats.
Definition: samplefmt.h:49
DSPContext dsp
Definition: apedec.c:136
int16_t * coeffs
actual coefficients used in filtering
Definition: apedec.c:96
int32_t filterB[2]
Definition: apedec.c:123
DSP utils.
#define YADAPTCOEFFSA
Definition: apedec.c:55
#define PAR
Definition: apedec.c:1549
static void init_predictor_decoder(APEContext *ctx)
Definition: apedec.c:772
static const int32_t initial_coeffs_b_3800[2]
Definition: apedec.c:764
APEPredictor predictor
predictor used for final reconstruction
Definition: apedec.c:148
static const AVClass ape_decoder_class
Definition: apedec.c:1556
unsigned 8 bits, planar
Definition: samplefmt.h:57
int channels
number of audio channels
static void long_filter_ehigh_3830(int32_t *buffer, int length)
Definition: apedec.c:911
#define av_log2
Definition: intmath.h:89
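
av_log2(v) is floor(log2(v)), i.e. the index of the highest set bit. A portable stand-in for illustration (the real macro resolves to table lookups or compiler intrinsics):

#include <stdio.h>

/* floor(log2(v)) for v > 0: index of the highest set bit. */
static int log2_floor(unsigned v)
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}

int main(void)
{
    printf("%d %d %d\n", log2_floor(1), log2_floor(16), log2_floor(1000)); /* 0 4 9 */
    return 0;
}
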
static void predictor_decode_mono_3950(APEContext *ctx, int count)
Definition: apedec.c:1191
GetBitContext gb
Definition: apedec.c:161
Filters applied to the decoded data.
Definition: apedec.c:95
#define XADAPTCOEFFSB
Definition: apedec.c:58
signed 16 bits, planar
Definition: samplefmt.h:58
int32_t * decoded[MAX_CHANNELS]
decoded data for each channel
Definition: apedec.c:152
int32_t * buf
Definition: apedec.c:118
void INT64 INT64 count
Definition: avisynth_c.h:594
#define HISTORY_SIZE
Definition: apedec.c:45
#define av_always_inline
Definition: attributes.h:41
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
const char int length
Definition: avisynth_c.h:668
struct APERice APERice
int data_size
frame data allocated size
Definition: apedec.c:165
static const AVOption options[]
Definition: apedec.c:1550
#define AV_CH_LAYOUT_MONO
int16_t * adaptcoeffs
adaptive filter coefficients used to correct the actual filter coefficients
Definition: apedec.c:97
int channels
Definition: apedec.c:137
#define BOTTOM_VALUE
Definition: apedec.c:312
This structure stores compressed data.
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:127
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:631
DSPContext.
Definition: dsputil.h:127
int16_t * historybuffer
filter memory
Definition: apedec.c:98
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:645