1 /*
2  * Copyright (c) 2001-2003 The ffmpeg Project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "put_bits.h"
35 #include "bytestream.h"
36 #include "adpcm.h"
37 #include "adpcm_data.h"
38 #include "internal.h"
39 
40 /**
41  * @file
42  * ADPCM decoders
43  * Features and limitations:
44  *
45  * Reference documents:
46  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
47  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
48  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
49  * http://openquicktime.sourceforge.net/
50  * XAnim sources (xa_codec.c) http://xanim.polter.net/
51  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
52  * SoX source code http://sox.sourceforge.net/
53  *
54  * CD-ROM XA:
55  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
56  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
57  * readstr http://www.geocities.co.jp/Playtown/2004/
58  */
59 
60 /* These are for CD-ROM XA ADPCM */
61 static const int xa_adpcm_table[5][2] = {
62  { 0, 0 },
63  { 60, 0 },
64  { 115, -52 },
65  { 98, -55 },
66  { 122, -60 }
67 };
68 
69 static const int ea_adpcm_table[] = {
70  0, 240, 460, 392,
71  0, 0, -208, -220,
72  0, 1, 3, 4,
73  7, 8, 10, 11,
74  0, -1, -3, -4
75 };
76 
77 // padded to zero where table size is less than 16
78 static const int swf_index_tables[4][16] = {
79  /*2*/ { -1, 2 },
80  /*3*/ { -1, -1, 2, 4 },
81  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
82  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
83 };
84 
85 /* end of tables */
86 
87 typedef struct ADPCMDecodeContext {
88  ADPCMChannelStatus status[6];
89  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
90 } ADPCMDecodeContext;
91 
92 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
99  case AV_CODEC_ID_ADPCM_EA:
100  min_channels = 2;
101  break;
102  case AV_CODEC_ID_ADPCM_AFC:
103  case AV_CODEC_ID_ADPCM_EA_R1:
104  case AV_CODEC_ID_ADPCM_EA_R2:
105  case AV_CODEC_ID_ADPCM_EA_R3:
106  case AV_CODEC_ID_ADPCM_EA_XAS:
107  case AV_CODEC_ID_ADPCM_THP:
108  max_channels = 6;
109  break;
110  }
111  if (avctx->channels < min_channels || avctx->channels > max_channels) {
112  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
113  return AVERROR(EINVAL);
114  }
115 
116  switch(avctx->codec->id) {
117  case AV_CODEC_ID_ADPCM_CT:
118  c->status[0].step = c->status[1].step = 511;
119  break;
120  case AV_CODEC_ID_ADPCM_IMA_WAV:
121  if (avctx->bits_per_coded_sample != 4) {
122  av_log(avctx, AV_LOG_ERROR, "Only 4-bit ADPCM IMA WAV files are supported\n");
123  return -1;
124  }
125  break;
126  case AV_CODEC_ID_ADPCM_IMA_APC:
127  if (avctx->extradata && avctx->extradata_size >= 8) {
128  c->status[0].predictor = AV_RL32(avctx->extradata);
129  c->status[1].predictor = AV_RL32(avctx->extradata + 4);
130  }
131  break;
132  case AV_CODEC_ID_ADPCM_IMA_WS:
133  if (avctx->extradata && avctx->extradata_size >= 2)
134  c->vqa_version = AV_RL16(avctx->extradata);
135  break;
136  default:
137  break;
138  }
139 
140  switch(avctx->codec->id) {
141  case AV_CODEC_ID_ADPCM_IMA_QT:
142  case AV_CODEC_ID_ADPCM_IMA_WAV:
143  case AV_CODEC_ID_ADPCM_4XM:
144  case AV_CODEC_ID_ADPCM_XA:
145  case AV_CODEC_ID_ADPCM_EA_R1:
146  case AV_CODEC_ID_ADPCM_EA_R2:
147  case AV_CODEC_ID_ADPCM_EA_R3:
148  case AV_CODEC_ID_ADPCM_EA_XAS:
149  case AV_CODEC_ID_ADPCM_THP:
150  case AV_CODEC_ID_ADPCM_AFC:
151  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
152  break;
153  case AV_CODEC_ID_ADPCM_IMA_WS:
154  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
155                                            AV_SAMPLE_FMT_S16;
156  break;
157  default:
158  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
159  }
160 
161  return 0;
162 }
163 
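/*
 * Expand one IMA ADPCM nibble: scale the current step by the nibble
 * magnitude, add or subtract the result from the predictor, then advance
 * the step index via ff_adpcm_index_table and clamp it to 0..88.
 * 'shift' selects the scaling precision (3 for the common 4-bit layout).
 */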
164 static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
165 {
166  int step_index;
167  int predictor;
168  int sign, delta, diff, step;
169 
170  step = ff_adpcm_step_table[c->step_index];
171  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
172  step_index = av_clip(step_index, 0, 88);
173 
174  sign = nibble & 8;
175  delta = nibble & 7;
176  /* perform direct multiplication instead of series of jumps proposed by
177  * the reference ADPCM implementation since modern CPUs can do the mults
178  * quickly enough */
179  diff = ((2 * delta + 1) * step) >> shift;
180  predictor = c->predictor;
181  if (sign) predictor -= diff;
182  else predictor += diff;
183 
184  c->predictor = av_clip_int16(predictor);
185  c->step_index = step_index;
186 
187  return (short)c->predictor;
188 }
189 
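/*
 * QuickTime flavour of the IMA expansion: the difference is accumulated
 * from step, step/2 and step/4 plus a step/8 rounding term, as in the
 * reference IMA code, instead of the (2 * delta + 1) * step shortcut above.
 */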
190 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
191 {
192  int step_index;
193  int predictor;
194  int diff, step;
195 
196  step = ff_adpcm_step_table[c->step_index];
197  step_index = c->step_index + ff_adpcm_index_table[nibble];
198  step_index = av_clip(step_index, 0, 88);
199 
200  diff = step >> 3;
201  if (nibble & 4) diff += step;
202  if (nibble & 2) diff += step >> 1;
203  if (nibble & 1) diff += step >> 2;
204 
205  if (nibble & 8)
206  predictor = c->predictor - diff;
207  else
208  predictor = c->predictor + diff;
209 
210  c->predictor = av_clip_int16(predictor);
211  c->step_index = step_index;
212 
213  return c->predictor;
214 }
215 
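/*
 * Microsoft ADPCM: predict from the previous two samples using the
 * per-block coefficient pair, add the signed nibble scaled by idelta,
 * then adapt idelta through ff_adpcm_AdaptationTable (lower bound 16).
 */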
216 static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
217 {
218  int predictor;
219 
220  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
221  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
222 
223  c->sample2 = c->sample1;
224  c->sample1 = av_clip_int16(predictor);
225  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
226  if (c->idelta < 16) c->idelta = 16;
227 
228  return c->sample1;
229 }
230 
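/*
 * Dialogic/OKI variant of IMA ADPCM: 12-bit predictor clamped to
 * [-2048, 2047], 49-entry step table, result scaled back up to 16 bits.
 */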
231 static inline short adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
232 {
233  int step_index, predictor, sign, delta, diff, step;
234 
235  step = ff_adpcm_oki_step_table[c->step_index];
236  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
237  step_index = av_clip(step_index, 0, 48);
238 
239  sign = nibble & 8;
240  delta = nibble & 7;
241  diff = ((2 * delta + 1) * step) >> 3;
242  predictor = c->predictor;
243  if (sign) predictor -= diff;
244  else predictor += diff;
245 
246  c->predictor = av_clip(predictor, -2048, 2047);
247  c->step_index = step_index;
248 
249  return c->predictor << 4;
250 }
251 
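/*
 * Creative Technology ADPCM: leaky predictor (scaled by 254/256 before the
 * update) and a step adapted via ff_adpcm_AdaptationTable, kept in 511..32767.
 */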
252 static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
253 {
254  int sign, delta, diff;
255  int new_step;
256 
257  sign = nibble & 8;
258  delta = nibble & 7;
259  /* perform direct multiplication instead of series of jumps proposed by
260  * the reference ADPCM implementation since modern CPUs can do the mults
261  * quickly enough */
262  diff = ((2 * delta + 1) * c->step) >> 3;
263  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
264  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
265  c->predictor = av_clip_int16(c->predictor);
266  /* calculate new step and clamp it to range 511..32767 */
267  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
268  c->step = av_clip(new_step, 511, 32767);
269 
270  return (short)c->predictor;
271 }
272 
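/*
 * Sound Blaster Pro ADPCM (2-, 2.6- and 4-bit): 'size' is the code width in
 * bits and c->step is a small shift count rather than a table index; the
 * predictor is kept within [-16384, 16256].
 */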
273 static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
274 {
275  int sign, delta, diff;
276 
277  sign = nibble & (1<<(size-1));
278  delta = nibble & ((1<<(size-1))-1);
279  diff = delta << (7 + c->step + shift);
280 
281  /* clamp result */
282  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
283 
284  /* calculate new step */
285  if (delta >= (2*size - 3) && c->step < 3)
286  c->step++;
287  else if (delta == 0 && c->step > 0)
288  c->step--;
289 
290  return (short) c->predictor;
291 }
292 
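/*
 * Yamaha ADPCM: difference and step adaptation come from the shared
 * ff_adpcm_yamaha_difflookup/indexscale tables; a zero step marks an
 * uninitialized channel and resets the predictor.
 */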
293 static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
294 {
295  if(!c->step) {
296  c->predictor = 0;
297  c->step = 127;
298  }
299 
300  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
301  c->predictor = av_clip_int16(c->predictor);
302  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
303  c->step = av_clip(c->step, 127, 24567);
304  return c->predictor;
305 }
306 
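/*
 * Decode one 128-byte CD-ROM XA sound group: 8 sound units of 28 samples.
 * Each unit header byte packs a shift (low nibble) and a filter index
 * (high nibble) selecting a predictor pair from xa_adpcm_table; low and
 * high data nibbles feed the two channels (or consecutive mono units).
 */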
307 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
308  const uint8_t *in, ADPCMChannelStatus *left,
309  ADPCMChannelStatus *right, int channels, int sample_offset)
310 {
311  int i, j;
312  int shift,filter,f0,f1;
313  int s_1,s_2;
314  int d,s,t;
315 
316  out0 += sample_offset;
317  if (channels == 1)
318  out1 = out0 + 28;
319  else
320  out1 += sample_offset;
321 
322  for(i=0;i<4;i++) {
323  shift = 12 - (in[4+i*2] & 15);
324  filter = in[4+i*2] >> 4;
325  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
326  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
327  filter=0;
328  }
329  f0 = xa_adpcm_table[filter][0];
330  f1 = xa_adpcm_table[filter][1];
331 
332  s_1 = left->sample1;
333  s_2 = left->sample2;
334 
335  for(j=0;j<28;j++) {
336  d = in[16+i+j*4];
337 
338  t = sign_extend(d, 4);
339  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
340  s_2 = s_1;
341  s_1 = av_clip_int16(s);
342  out0[j] = s_1;
343  }
344 
345  if (channels == 2) {
346  left->sample1 = s_1;
347  left->sample2 = s_2;
348  s_1 = right->sample1;
349  s_2 = right->sample2;
350  }
351 
352  shift = 12 - (in[5+i*2] & 15);
353  filter = in[5+i*2] >> 4;
354  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
355  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
356  filter=0;
357  }
358 
359  f0 = xa_adpcm_table[filter][0];
360  f1 = xa_adpcm_table[filter][1];
361 
362  for(j=0;j<28;j++) {
363  d = in[16+i+j*4];
364 
365  t = sign_extend(d >> 4, 4);
366  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
367  s_2 = s_1;
368  s_1 = av_clip_int16(s);
369  out1[j] = s_1;
370  }
371 
372  if (channels == 2) {
373  right->sample1 = s_1;
374  right->sample2 = s_2;
375  } else {
376  left->sample1 = s_1;
377  left->sample2 = s_2;
378  }
379 
380  out0 += 28 * (3 - channels);
381  out1 += 28 * (3 - channels);
382  }
383 
384  return 0;
385 }
386 
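/*
 * Shockwave Flash ADPCM: a leading 2-bit code selects the nibble width
 * (2..5 bits); each block of up to 4096 samples per channel starts with a
 * raw 16-bit sample and a 6-bit step index for every channel.
 */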
387 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
388 {
389  ADPCMDecodeContext *c = avctx->priv_data;
390  GetBitContext gb;
391  const int *table;
392  int k0, signmask, nb_bits, count;
393  int size = buf_size*8;
394  int i;
395 
396  init_get_bits(&gb, buf, size);
397 
398  //read bits & initial values
399  nb_bits = get_bits(&gb, 2)+2;
400  table = swf_index_tables[nb_bits-2];
401  k0 = 1 << (nb_bits-2);
402  signmask = 1 << (nb_bits-1);
403 
404  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
405  for (i = 0; i < avctx->channels; i++) {
406  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
407  c->status[i].step_index = get_bits(&gb, 6);
408  }
409 
410  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
411  int i;
412 
413  for (i = 0; i < avctx->channels; i++) {
414  // similar to IMA adpcm
415  int delta = get_bits(&gb, nb_bits);
416  int step = ff_adpcm_step_table[c->status[i].step_index];
417  long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
418  int k = k0;
419 
420  do {
421  if (delta & k)
422  vpdiff += step;
423  step >>= 1;
424  k >>= 1;
425  } while(k);
426  vpdiff += step;
427 
428  if (delta & signmask)
429  c->status[i].predictor -= vpdiff;
430  else
431  c->status[i].predictor += vpdiff;
432 
433  c->status[i].step_index += table[delta & (~signmask)];
434 
435  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
436  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
437 
438  *samples++ = c->status[i].predictor;
439  }
440  }
441  }
442 }
443 
444 /**
445  * Get the number of samples that will be decoded from the packet.
446  * In one case, this is actually the maximum number of samples possible to
447  * decode with the given buf_size.
448  *
449  * @param[out] coded_samples set to the number of samples as coded in the
450  * packet, or 0 if the codec does not encode the
451  * number of samples in each frame.
452  */
453 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
454  int buf_size, int *coded_samples)
455 {
456  ADPCMDecodeContext *s = avctx->priv_data;
457  int nb_samples = 0;
458  int ch = avctx->channels;
459  int has_coded_samples = 0;
460  int header_size;
461 
462  *coded_samples = 0;
463 
464  if(ch <= 0)
465  return 0;
466 
467  switch (avctx->codec->id) {
468  /* constant, only check buf_size */
469  case AV_CODEC_ID_ADPCM_EA_XAS:
470  if (buf_size < 76 * ch)
471  return 0;
472  nb_samples = 128;
473  break;
474  case AV_CODEC_ID_ADPCM_IMA_QT:
475  if (buf_size < 34 * ch)
476  return 0;
477  nb_samples = 64;
478  break;
479  /* simple 4-bit adpcm */
480  case AV_CODEC_ID_ADPCM_CT:
481  case AV_CODEC_ID_ADPCM_IMA_APC:
482  case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
483  case AV_CODEC_ID_ADPCM_IMA_OKI:
484  case AV_CODEC_ID_ADPCM_IMA_WS:
485  case AV_CODEC_ID_ADPCM_YAMAHA:
486  nb_samples = buf_size * 2 / ch;
487  break;
488  }
489  if (nb_samples)
490  return nb_samples;
491 
492  /* simple 4-bit adpcm, with header */
493  header_size = 0;
494  switch (avctx->codec->id) {
495  case AV_CODEC_ID_ADPCM_4XM:
496  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
497  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
498  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
499  }
500  if (header_size > 0)
501  return (buf_size - header_size) * 2 / ch;
502 
503  /* more complex formats */
504  switch (avctx->codec->id) {
505  case AV_CODEC_ID_ADPCM_EA:
506  has_coded_samples = 1;
507  *coded_samples = bytestream2_get_le32(gb);
508  *coded_samples -= *coded_samples % 28;
509  nb_samples = (buf_size - 12) / 30 * 28;
510  break;
511  case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
512  has_coded_samples = 1;
513  *coded_samples = bytestream2_get_le32(gb);
514  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
515  break;
516  case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
517  nb_samples = (buf_size - ch) / ch * 2;
518  break;
519  case AV_CODEC_ID_ADPCM_EA_R1:
520  case AV_CODEC_ID_ADPCM_EA_R2:
521  case AV_CODEC_ID_ADPCM_EA_R3:
522  /* maximum number of samples */
523  /* has internal offsets and a per-frame switch to signal raw 16-bit */
524  has_coded_samples = 1;
525  switch (avctx->codec->id) {
526  case AV_CODEC_ID_ADPCM_EA_R1:
527  header_size = 4 + 9 * ch;
528  *coded_samples = bytestream2_get_le32(gb);
529  break;
530  case AV_CODEC_ID_ADPCM_EA_R2:
531  header_size = 4 + 5 * ch;
532  *coded_samples = bytestream2_get_le32(gb);
533  break;
534  case AV_CODEC_ID_ADPCM_EA_R3:
535  header_size = 4 + 5 * ch;
536  *coded_samples = bytestream2_get_be32(gb);
537  break;
538  }
539  *coded_samples -= *coded_samples % 28;
540  nb_samples = (buf_size - header_size) * 2 / ch;
541  nb_samples -= nb_samples % 28;
542  break;
543  case AV_CODEC_ID_ADPCM_IMA_DK3:
544  if (avctx->block_align > 0)
545  buf_size = FFMIN(buf_size, avctx->block_align);
546  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
547  break;
548  case AV_CODEC_ID_ADPCM_IMA_DK4:
549  if (avctx->block_align > 0)
550  buf_size = FFMIN(buf_size, avctx->block_align);
551  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
552  break;
553  case AV_CODEC_ID_ADPCM_IMA_WAV:
554  if (avctx->block_align > 0)
555  buf_size = FFMIN(buf_size, avctx->block_align);
556  nb_samples = 1 + (buf_size - 4 * ch) / (4 * ch) * 8;
557  break;
558  case AV_CODEC_ID_ADPCM_MS:
559  if (avctx->block_align > 0)
560  buf_size = FFMIN(buf_size, avctx->block_align);
561  nb_samples = 2 + (buf_size - 7 * ch) * 2 / ch;
562  break;
563  case AV_CODEC_ID_ADPCM_SBPRO_2:
564  case AV_CODEC_ID_ADPCM_SBPRO_3:
565  case AV_CODEC_ID_ADPCM_SBPRO_4:
566  {
567  int samples_per_byte;
568  switch (avctx->codec->id) {
569  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
570  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
571  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
572  }
573  if (!s->status[0].step_index) {
574  nb_samples++;
575  buf_size -= ch;
576  }
577  nb_samples += buf_size * samples_per_byte / ch;
578  break;
579  }
580  case AV_CODEC_ID_ADPCM_SWF:
581  {
582  int buf_bits = buf_size * 8 - 2;
583  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
584  int block_hdr_size = 22 * ch;
585  int block_size = block_hdr_size + nbits * ch * 4095;
586  int nblocks = buf_bits / block_size;
587  int bits_left = buf_bits - nblocks * block_size;
588  nb_samples = nblocks * 4096;
589  if (bits_left >= block_hdr_size)
590  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
591  break;
592  }
593  case AV_CODEC_ID_ADPCM_THP:
594  has_coded_samples = 1;
595  bytestream2_skip(gb, 4); // channel size
596  *coded_samples = bytestream2_get_be32(gb);
597  *coded_samples -= *coded_samples % 14;
598  nb_samples = (buf_size - (8 + 36 * ch)) / (8 * ch) * 14;
599  break;
600  case AV_CODEC_ID_ADPCM_AFC:
601  nb_samples = buf_size / (9 * ch) * 16;
602  break;
603  case AV_CODEC_ID_ADPCM_XA:
604  nb_samples = (buf_size / 128) * 224 / ch;
605  break;
606  }
607 
608  /* validate coded sample count */
609  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
610  return AVERROR_INVALIDDATA;
611 
612  return nb_samples;
613 }
614 
615 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
616  int *got_frame_ptr, AVPacket *avpkt)
617 {
618  AVFrame *frame = data;
619  const uint8_t *buf = avpkt->data;
620  int buf_size = avpkt->size;
621  ADPCMDecodeContext *c = avctx->priv_data;
622  ADPCMChannelStatus *cs;
623  int n, m, channel, i;
624  short *samples;
625  int16_t **samples_p;
626  int st; /* stereo */
627  int count1, count2;
628  int nb_samples, coded_samples, ret;
629  GetByteContext gb;
630 
631  bytestream2_init(&gb, buf, buf_size);
632  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples);
633  if (nb_samples <= 0) {
634  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
635  return AVERROR_INVALIDDATA;
636  }
637 
638  /* get output buffer */
639  frame->nb_samples = nb_samples;
640  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
641  return ret;
642  samples = (short *)frame->data[0];
643  samples_p = (int16_t **)frame->extended_data;
644 
645  /* use coded_samples when applicable */
646  /* it is always <= nb_samples, so the output buffer will be large enough */
647  if (coded_samples) {
648  if (coded_samples != nb_samples)
649  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
650  frame->nb_samples = nb_samples = coded_samples;
651  }
652 
653  st = avctx->channels == 2 ? 1 : 0;
654 
655  switch(avctx->codec->id) {
656  case AV_CODEC_ID_ADPCM_IMA_QT:
657  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
658  Channel data is interleaved per-chunk. */
659  for (channel = 0; channel < avctx->channels; channel++) {
660  int predictor;
661  int step_index;
662  cs = &(c->status[channel]);
663  /* (pppppp) (piiiiiii) */
664 
665  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
666  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
667  step_index = predictor & 0x7F;
668  predictor &= ~0x7F;
669 
670  if (cs->step_index == step_index) {
671  int diff = predictor - cs->predictor;
672  if (diff < 0)
673  diff = - diff;
674  if (diff > 0x7f)
675  goto update;
676  } else {
677  update:
678  cs->step_index = step_index;
679  cs->predictor = predictor;
680  }
681 
682  if (cs->step_index > 88u){
683  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
684  channel, cs->step_index);
685  return AVERROR_INVALIDDATA;
686  }
687 
688  samples = samples_p[channel];
689 
690  for (m = 0; m < 64; m += 2) {
691  int byte = bytestream2_get_byteu(&gb);
692  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
693  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
694  }
695  }
696  break;
697  case AV_CODEC_ID_ADPCM_IMA_WAV:
698  for(i=0; i<avctx->channels; i++){
699  cs = &(c->status[i]);
700  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
701 
702  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
703  if (cs->step_index > 88u){
704  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
705  i, cs->step_index);
706  return AVERROR_INVALIDDATA;
707  }
708  }
709 
710  for (n = 0; n < (nb_samples - 1) / 8; n++) {
711  for (i = 0; i < avctx->channels; i++) {
712  cs = &c->status[i];
713  samples = &samples_p[i][1 + n * 8];
714  for (m = 0; m < 8; m += 2) {
715  int v = bytestream2_get_byteu(&gb);
716  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
717  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
718  }
719  }
720  }
721  break;
722  case AV_CODEC_ID_ADPCM_4XM:
723  for (i = 0; i < avctx->channels; i++)
724  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
725 
726  for (i = 0; i < avctx->channels; i++) {
727  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
728  if (c->status[i].step_index > 88u) {
729  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
730  i, c->status[i].step_index);
731  return AVERROR_INVALIDDATA;
732  }
733  }
734 
735  for (i = 0; i < avctx->channels; i++) {
736  samples = (int16_t *)frame->data[i];
737  cs = &c->status[i];
738  for (n = nb_samples >> 1; n > 0; n--) {
739  int v = bytestream2_get_byteu(&gb);
740  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
741  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
742  }
743  }
744  break;
745  case AV_CODEC_ID_ADPCM_MS:
746  {
747  int block_predictor;
748 
749  block_predictor = bytestream2_get_byteu(&gb);
750  if (block_predictor > 6) {
751  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
752  block_predictor);
753  return AVERROR_INVALIDDATA;
754  }
755  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
756  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
757  if (st) {
758  block_predictor = bytestream2_get_byteu(&gb);
759  if (block_predictor > 6) {
760  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
761  block_predictor);
762  return AVERROR_INVALIDDATA;
763  }
764  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
765  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
766  }
767  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
768  if (st){
769  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
770  }
771 
772  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
773  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
774  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
775  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
776 
777  *samples++ = c->status[0].sample2;
778  if (st) *samples++ = c->status[1].sample2;
779  *samples++ = c->status[0].sample1;
780  if (st) *samples++ = c->status[1].sample1;
781  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
782  int byte = bytestream2_get_byteu(&gb);
783  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
784  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
785  }
786  break;
787  }
788  case AV_CODEC_ID_ADPCM_IMA_DK4:
789  for (channel = 0; channel < avctx->channels; channel++) {
790  cs = &c->status[channel];
791  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
792  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
793  if (cs->step_index > 88u){
794  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
795  channel, cs->step_index);
796  return AVERROR_INVALIDDATA;
797  }
798  }
799  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
800  int v = bytestream2_get_byteu(&gb);
801  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
802  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
803  }
804  break;
805  case AV_CODEC_ID_ADPCM_IMA_DK3:
806  {
807  int last_byte = 0;
808  int nibble;
809  int decode_top_nibble_next = 0;
810  int diff_channel;
811  const int16_t *samples_end = samples + avctx->channels * nb_samples;
812 
813  bytestream2_skipu(&gb, 10);
814  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
815  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
816  c->status[0].step_index = bytestream2_get_byteu(&gb);
817  c->status[1].step_index = bytestream2_get_byteu(&gb);
818  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
819  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
820  c->status[0].step_index, c->status[1].step_index);
821  return AVERROR_INVALIDDATA;
822  }
823  /* sign extend the predictors */
824  diff_channel = c->status[1].predictor;
825 
826  /* DK3 ADPCM support macro */
827 #define DK3_GET_NEXT_NIBBLE() \
828  if (decode_top_nibble_next) { \
829  nibble = last_byte >> 4; \
830  decode_top_nibble_next = 0; \
831  } else { \
832  last_byte = bytestream2_get_byteu(&gb); \
833  nibble = last_byte & 0x0F; \
834  decode_top_nibble_next = 1; \
835  }
836 
837  while (samples < samples_end) {
838 
839  /* for this algorithm, c->status[0] is the sum channel and
840  * c->status[1] is the diff channel */
841 
842  /* process the first predictor of the sum channel */
843  DK3_GET_NEXT_NIBBLE();
844  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
845 
846  /* process the diff channel predictor */
847  DK3_GET_NEXT_NIBBLE();
848  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
849 
850  /* process the first pair of stereo PCM samples */
851  diff_channel = (diff_channel + c->status[1].predictor) / 2;
852  *samples++ = c->status[0].predictor + c->status[1].predictor;
853  *samples++ = c->status[0].predictor - c->status[1].predictor;
854 
855  /* process the second predictor of the sum channel */
856  DK3_GET_NEXT_NIBBLE();
857  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
858 
859  /* process the second pair of stereo PCM samples */
860  diff_channel = (diff_channel + c->status[1].predictor) / 2;
861  *samples++ = c->status[0].predictor + c->status[1].predictor;
862  *samples++ = c->status[0].predictor - c->status[1].predictor;
863  }
864  break;
865  }
866  case AV_CODEC_ID_ADPCM_IMA_ISS:
867  for (channel = 0; channel < avctx->channels; channel++) {
868  cs = &c->status[channel];
869  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
870  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
871  if (cs->step_index > 88u){
872  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
873  channel, cs->step_index);
874  return AVERROR_INVALIDDATA;
875  }
876  }
877 
878  for (n = nb_samples >> (1 - st); n > 0; n--) {
879  int v1, v2;
880  int v = bytestream2_get_byteu(&gb);
881  /* nibbles are swapped for mono */
882  if (st) {
883  v1 = v >> 4;
884  v2 = v & 0x0F;
885  } else {
886  v2 = v >> 4;
887  v1 = v & 0x0F;
888  }
889  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
890  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
891  }
892  break;
893  case AV_CODEC_ID_ADPCM_IMA_APC:
894  while (bytestream2_get_bytes_left(&gb) > 0) {
895  int v = bytestream2_get_byteu(&gb);
896  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
897  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
898  }
899  break;
900  case AV_CODEC_ID_ADPCM_IMA_OKI:
901  while (bytestream2_get_bytes_left(&gb) > 0) {
902  int v = bytestream2_get_byteu(&gb);
903  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
904  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
905  }
906  break;
907  case AV_CODEC_ID_ADPCM_IMA_WS:
908  if (c->vqa_version == 3) {
909  for (channel = 0; channel < avctx->channels; channel++) {
910  int16_t *smp = samples_p[channel];
911 
912  for (n = nb_samples / 2; n > 0; n--) {
913  int v = bytestream2_get_byteu(&gb);
914  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
915  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
916  }
917  }
918  } else {
919  for (n = nb_samples / 2; n > 0; n--) {
920  for (channel = 0; channel < avctx->channels; channel++) {
921  int v = bytestream2_get_byteu(&gb);
922  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
923  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
924  }
925  samples += avctx->channels;
926  }
927  }
928  bytestream2_seek(&gb, 0, SEEK_END);
929  break;
930  case AV_CODEC_ID_ADPCM_XA:
931  {
932  int16_t *out0 = samples_p[0];
933  int16_t *out1 = samples_p[1];
934  int samples_per_block = 28 * (3 - avctx->channels) * 4;
935  int sample_offset = 0;
936  while (bytestream2_get_bytes_left(&gb) >= 128) {
937  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
938  &c->status[0], &c->status[1],
939  avctx->channels, sample_offset)) < 0)
940  return ret;
941  bytestream2_skipu(&gb, 128);
942  sample_offset += samples_per_block;
943  }
944  break;
945  }
946  case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
947  for (i=0; i<=st; i++) {
948  c->status[i].step_index = bytestream2_get_le32u(&gb);
949  if (c->status[i].step_index > 88u) {
950  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
951  i, c->status[i].step_index);
952  return AVERROR_INVALIDDATA;
953  }
954  }
955  for (i=0; i<=st; i++)
956  c->status[i].predictor = bytestream2_get_le32u(&gb);
957 
958  for (n = nb_samples >> (1 - st); n > 0; n--) {
959  int byte = bytestream2_get_byteu(&gb);
960  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
961  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
962  }
963  break;
964  case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
965  for (n = nb_samples >> (1 - st); n > 0; n--) {
966  int byte = bytestream2_get_byteu(&gb);
967  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
968  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
969  }
970  break;
971  case AV_CODEC_ID_ADPCM_EA:
972  {
973  int previous_left_sample, previous_right_sample;
974  int current_left_sample, current_right_sample;
975  int next_left_sample, next_right_sample;
976  int coeff1l, coeff2l, coeff1r, coeff2r;
977  int shift_left, shift_right;
978 
979  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
980  each coding 28 stereo samples. */
981 
982  if(avctx->channels != 2)
983  return AVERROR_INVALIDDATA;
984 
985  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
986  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
987  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
988  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
989 
990  for (count1 = 0; count1 < nb_samples / 28; count1++) {
991  int byte = bytestream2_get_byteu(&gb);
992  coeff1l = ea_adpcm_table[ byte >> 4 ];
993  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
994  coeff1r = ea_adpcm_table[ byte & 0x0F];
995  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
996 
997  byte = bytestream2_get_byteu(&gb);
998  shift_left = 20 - (byte >> 4);
999  shift_right = 20 - (byte & 0x0F);
1000 
1001  for (count2 = 0; count2 < 28; count2++) {
1002  byte = bytestream2_get_byteu(&gb);
1003  next_left_sample = sign_extend(byte >> 4, 4) << shift_left;
1004  next_right_sample = sign_extend(byte, 4) << shift_right;
1005 
1006  next_left_sample = (next_left_sample +
1007  (current_left_sample * coeff1l) +
1008  (previous_left_sample * coeff2l) + 0x80) >> 8;
1009  next_right_sample = (next_right_sample +
1010  (current_right_sample * coeff1r) +
1011  (previous_right_sample * coeff2r) + 0x80) >> 8;
1012 
1013  previous_left_sample = current_left_sample;
1014  current_left_sample = av_clip_int16(next_left_sample);
1015  previous_right_sample = current_right_sample;
1016  current_right_sample = av_clip_int16(next_right_sample);
1017  *samples++ = current_left_sample;
1018  *samples++ = current_right_sample;
1019  }
1020  }
1021 
1022  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1023 
1024  break;
1025  }
1026  case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
1027  {
1028  int coeff[2][2], shift[2];
1029 
1030  for(channel = 0; channel < avctx->channels; channel++) {
1031  int byte = bytestream2_get_byteu(&gb);
1032  for (i=0; i<2; i++)
1033  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1034  shift[channel] = 20 - (byte & 0x0F);
1035  }
1036  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1037  int byte[2];
1038 
1039  byte[0] = bytestream2_get_byteu(&gb);
1040  if (st) byte[1] = bytestream2_get_byteu(&gb);
1041  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1042  for(channel = 0; channel < avctx->channels; channel++) {
1043  int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
1044  sample = (sample +
1045  c->status[channel].sample1 * coeff[channel][0] +
1046  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1047  c->status[channel].sample2 = c->status[channel].sample1;
1048  c->status[channel].sample1 = av_clip_int16(sample);
1049  *samples++ = c->status[channel].sample1;
1050  }
1051  }
1052  }
1053  bytestream2_seek(&gb, 0, SEEK_END);
1054  break;
1055  }
1056  case AV_CODEC_ID_ADPCM_EA_R1:
1057  case AV_CODEC_ID_ADPCM_EA_R2:
1058  case AV_CODEC_ID_ADPCM_EA_R3: {
1059  /* channel numbering
1060  2chan: 0=fl, 1=fr
1061  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1062  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1063  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1064  int previous_sample, current_sample, next_sample;
1065  int coeff1, coeff2;
1066  int shift;
1067  unsigned int channel;
1068  uint16_t *samplesC;
1069  int count = 0;
1070  int offsets[6];
1071 
1072  for (channel=0; channel<avctx->channels; channel++)
1073  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1074  bytestream2_get_le32(&gb)) +
1075  (avctx->channels + 1) * 4;
1076 
1077  for (channel=0; channel<avctx->channels; channel++) {
1078  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1079  samplesC = samples_p[channel];
1080 
1081  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1082  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1083  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1084  } else {
1085  current_sample = c->status[channel].predictor;
1086  previous_sample = c->status[channel].prev_sample;
1087  }
1088 
1089  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1090  int byte = bytestream2_get_byte(&gb);
1091  if (byte == 0xEE) { /* only seen in R2 and R3 */
1092  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1093  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1094 
1095  for (count2=0; count2<28; count2++)
1096  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1097  } else {
1098  coeff1 = ea_adpcm_table[ byte >> 4 ];
1099  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1100  shift = 20 - (byte & 0x0F);
1101 
1102  for (count2=0; count2<28; count2++) {
1103  if (count2 & 1)
1104  next_sample = sign_extend(byte, 4) << shift;
1105  else {
1106  byte = bytestream2_get_byte(&gb);
1107  next_sample = sign_extend(byte >> 4, 4) << shift;
1108  }
1109 
1110  next_sample += (current_sample * coeff1) +
1111  (previous_sample * coeff2);
1112  next_sample = av_clip_int16(next_sample >> 8);
1113 
1114  previous_sample = current_sample;
1115  current_sample = next_sample;
1116  *samplesC++ = current_sample;
1117  }
1118  }
1119  }
1120  if (!count) {
1121  count = count1;
1122  } else if (count != count1) {
1123  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1124  count = FFMAX(count, count1);
1125  }
1126 
1127  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1128  c->status[channel].predictor = current_sample;
1129  c->status[channel].prev_sample = previous_sample;
1130  }
1131  }
1132 
1133  frame->nb_samples = count * 28;
1134  bytestream2_seek(&gb, 0, SEEK_END);
1135  break;
1136  }
1137  case AV_CODEC_ID_ADPCM_EA_XAS:
1138  for (channel=0; channel<avctx->channels; channel++) {
1139  int coeff[2][4], shift[4];
1140  int16_t *s = samples_p[channel];
1141  for (n = 0; n < 4; n++, s += 32) {
1142  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1143  for (i=0; i<2; i++)
1144  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1145  s[0] = val & ~0x0F;
1146 
1147  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1148  shift[n] = 20 - (val & 0x0F);
1149  s[1] = val & ~0x0F;
1150  }
1151 
1152  for (m=2; m<32; m+=2) {
1153  s = &samples_p[channel][m];
1154  for (n = 0; n < 4; n++, s += 32) {
1155  int level, pred;
1156  int byte = bytestream2_get_byteu(&gb);
1157 
1158  level = sign_extend(byte >> 4, 4) << shift[n];
1159  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1160  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1161 
1162  level = sign_extend(byte, 4) << shift[n];
1163  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1164  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1165  }
1166  }
1167  }
1168  break;
1169  case AV_CODEC_ID_ADPCM_IMA_AMV:
1170  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1171  c->status[0].step_index = bytestream2_get_le16u(&gb);
1172  bytestream2_skipu(&gb, 4);
1173  if (c->status[0].step_index > 88u) {
1174  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1175  c->status[0].step_index);
1176  return AVERROR_INVALIDDATA;
1177  }
1178 
1179  for (n = nb_samples >> (1 - st); n > 0; n--) {
1180  int v = bytestream2_get_byteu(&gb);
1181 
1182  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1183  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1184  }
1185  break;
1186  case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
1187  for (i = 0; i < avctx->channels; i++) {
1188  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1189  c->status[i].step_index = bytestream2_get_byteu(&gb);
1190  bytestream2_skipu(&gb, 1);
1191  if (c->status[i].step_index > 88u) {
1192  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1193  c->status[i].step_index);
1194  return AVERROR_INVALIDDATA;
1195  }
1196  }
1197 
1198  for (n = nb_samples >> (1 - st); n > 0; n--) {
1199  int v = bytestream2_get_byteu(&gb);
1200 
1201  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1202  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1203  }
1204  break;
1205  case AV_CODEC_ID_ADPCM_CT:
1206  for (n = nb_samples >> (1 - st); n > 0; n--) {
1207  int v = bytestream2_get_byteu(&gb);
1208  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1209  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1210  }
1211  break;
1212  case AV_CODEC_ID_ADPCM_SBPRO_4:
1213  case AV_CODEC_ID_ADPCM_SBPRO_3:
1214  case AV_CODEC_ID_ADPCM_SBPRO_2:
1215  if (!c->status[0].step_index) {
1216  /* the first byte is a raw sample */
1217  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1218  if (st)
1219  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1220  c->status[0].step_index = 1;
1221  nb_samples--;
1222  }
1223  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1224  for (n = nb_samples >> (1 - st); n > 0; n--) {
1225  int byte = bytestream2_get_byteu(&gb);
1226  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1227  byte >> 4, 4, 0);
1228  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1229  byte & 0x0F, 4, 0);
1230  }
1231  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1232  for (n = nb_samples / 3; n > 0; n--) {
1233  int byte = bytestream2_get_byteu(&gb);
1234  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1235  byte >> 5 , 3, 0);
1236  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1237  (byte >> 2) & 0x07, 3, 0);
1238  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1239  byte & 0x03, 2, 0);
1240  }
1241  } else {
1242  for (n = nb_samples >> (2 - st); n > 0; n--) {
1243  int byte = bytestream2_get_byteu(&gb);
1244  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1245  byte >> 6 , 2, 2);
1246  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1247  (byte >> 4) & 0x03, 2, 2);
1248  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1249  (byte >> 2) & 0x03, 2, 2);
1250  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1251  byte & 0x03, 2, 2);
1252  }
1253  }
1254  break;
1255  case AV_CODEC_ID_ADPCM_SWF:
1256  adpcm_swf_decode(avctx, buf, buf_size, samples);
1257  bytestream2_seek(&gb, 0, SEEK_END);
1258  break;
1259  case AV_CODEC_ID_ADPCM_YAMAHA:
1260  for (n = nb_samples >> (1 - st); n > 0; n--) {
1261  int v = bytestream2_get_byteu(&gb);
1262  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1263  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1264  }
1265  break;
1266  case AV_CODEC_ID_ADPCM_AFC:
1267  {
1268  int samples_per_block;
1269  int blocks;
1270 
1271  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1272  samples_per_block = avctx->extradata[0] / 16;
1273  blocks = nb_samples / avctx->extradata[0];
1274  } else {
1275  samples_per_block = nb_samples / 16;
1276  blocks = 1;
1277  }
1278 
1279  for (m = 0; m < blocks; m++) {
1280  for (channel = 0; channel < avctx->channels; channel++) {
1281  int prev1 = c->status[channel].sample1;
1282  int prev2 = c->status[channel].sample2;
1283 
1284  samples = samples_p[channel] + m * 16;
1285  /* Read in every sample for this channel. */
1286  for (i = 0; i < samples_per_block; i++) {
1287  int byte = bytestream2_get_byteu(&gb);
1288  int scale = 1 << (byte >> 4);
1289  int index = byte & 0xf;
1290  int factor1 = ff_adpcm_afc_coeffs[0][index];
1291  int factor2 = ff_adpcm_afc_coeffs[1][index];
1292 
1293  /* Decode 16 samples. */
1294  for (n = 0; n < 16; n++) {
1295  int32_t sampledat;
1296 
1297  if (n & 1) {
1298  sampledat = sign_extend(byte, 4);
1299  } else {
1300  byte = bytestream2_get_byteu(&gb);
1301  sampledat = sign_extend(byte >> 4, 4);
1302  }
1303 
1304  sampledat = ((prev1 * factor1 + prev2 * factor2) +
1305  ((sampledat * scale) << 11)) >> 11;
1306  *samples = av_clip_int16(sampledat);
1307  prev2 = prev1;
1308  prev1 = *samples++;
1309  }
1310  }
1311 
1312  c->status[channel].sample1 = prev1;
1313  c->status[channel].sample2 = prev2;
1314  }
1315  }
1316  bytestream2_seek(&gb, 0, SEEK_END);
1317  break;
1318  }
1319  case AV_CODEC_ID_ADPCM_THP:
1320  {
1321  int table[6][16];
1322  int ch;
1323 
1324  for (i = 0; i < avctx->channels; i++)
1325  for (n = 0; n < 16; n++)
1326  table[i][n] = sign_extend(bytestream2_get_be16u(&gb), 16);
1327 
1328  /* Initialize the previous sample. */
1329  for (i = 0; i < avctx->channels; i++) {
1330  c->status[i].sample1 = sign_extend(bytestream2_get_be16u(&gb), 16);
1331  c->status[i].sample2 = sign_extend(bytestream2_get_be16u(&gb), 16);
1332  }
1333 
1334  for (ch = 0; ch < avctx->channels; ch++) {
1335  samples = samples_p[ch];
1336 
1337  /* Read in every sample for this channel. */
1338  for (i = 0; i < nb_samples / 14; i++) {
1339  int byte = bytestream2_get_byteu(&gb);
1340  int index = (byte >> 4) & 7;
1341  unsigned int exp = byte & 0x0F;
1342  int factor1 = table[ch][index * 2];
1343  int factor2 = table[ch][index * 2 + 1];
1344 
1345  /* Decode 14 samples. */
1346  for (n = 0; n < 14; n++) {
1347  int32_t sampledat;
1348 
1349  if (n & 1) {
1350  sampledat = sign_extend(byte, 4);
1351  } else {
1352  byte = bytestream2_get_byteu(&gb);
1353  sampledat = sign_extend(byte >> 4, 4);
1354  }
1355 
1356  sampledat = ((c->status[ch].sample1 * factor1
1357  + c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
1358  *samples = av_clip_int16(sampledat);
1359  c->status[ch].sample2 = c->status[ch].sample1;
1360  c->status[ch].sample1 = *samples++;
1361  }
1362  }
1363  }
1364  break;
1365  }
1366 
1367  default:
1368  return -1;
1369  }
1370 
1371  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1372  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1373  return AVERROR_INVALIDDATA;
1374  }
1375 
1376  *got_frame_ptr = 1;
1377 
1378  return bytestream2_tell(&gb);
1379 }
1380 
1381 
1382 static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
1383                                                         AV_SAMPLE_FMT_NONE };
1384 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
1385                                                         AV_SAMPLE_FMT_NONE };
1386 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
1387                                                         AV_SAMPLE_FMT_S16P,
1388                                                         AV_SAMPLE_FMT_NONE };
1389 
1390 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1391 AVCodec ff_ ## name_ ## _decoder = { \
1392  .name = #name_, \
1393  .type = AVMEDIA_TYPE_AUDIO, \
1394  .id = id_, \
1395  .priv_data_size = sizeof(ADPCMDecodeContext), \
1396  .init = adpcm_decode_init, \
1397  .decode = adpcm_decode_frame, \
1398  .capabilities = CODEC_CAP_DR1, \
1399  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1400  .sample_fmts = sample_fmts_, \
1401 }
1402 
1403 /* Note: Do not forget to add new entries to the Makefile as well. */
1404 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
1405 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
1406 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
1407 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
1408 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
1409 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
1410 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
1411 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
1412 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
1413 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
1414 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
1415 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
1416 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
1417 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
1418 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
1419 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
1420 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
1421 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
1422 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
1423 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
1424 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
1425 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
1426 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
1427 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
1428 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
1429 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
1430 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo Gamecube THP");
1431 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
1432 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");