vmdav.c
/*
 * Sierra VMD Audio & Video Decoders
 * Copyright (C) 2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD audio & video decoders
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 *
 * The audio decoder, like the video decoder, expects each encoded data
 * chunk to be prepended with the appropriate 16-byte frame information
 * record from the VMD file. It does not require the 0x330-byte VMD file
 * header, but it does need the audio setup parameters passed in through
 * normal libavcodec API means.
 */

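/*
 * Illustrative sketch only (not part of the decoder; variable names are
 * hypothetical): given the contract described above, a caller/demuxer is
 * expected to hand the video decoder the 0x330-byte VMD file header as
 * extradata and to prepend each packet with its 16-byte frame record,
 * roughly like this:
 *
 *     avctx->extradata = av_mallocz(0x330 + FF_INPUT_BUFFER_PADDING_SIZE);
 *     memcpy(avctx->extradata, vmd_file_header, 0x330);
 *     avctx->extradata_size = 0x330;
 *
 *     AVPacket pkt;
 *     av_new_packet(&pkt, 16 + payload_size);
 *     memcpy(pkt.data, frame_record, 16);            // 16-byte frame info record
 *     memcpy(pkt.data + 16, payload, payload_size);  // encoded frame/audio data
 *
 * Here vmd_file_header, frame_record, payload, and payload_size stand in for
 * data read from the .vmd file by a demuxer.
 */
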
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
#include "bytestream.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256

/*
 * Video Decoder
 */

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame prev_frame;

    const unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

    int x_off, y_off;
} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF

static void lz_unpack(const unsigned char *src, int src_len,
                      unsigned char *dest, int dest_len)
{
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_len);
    d = dest;
    d_end = d + dest_len;
    dataleft = bytestream2_get_le32(&gb);
    memset(queue, 0x20, QUEUE_SIZE);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return;
    if (bytestream2_peek_le32(&gb) == 0x56781234) {
        bytestream2_skipu(&gb, 4);
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100; /* no speclen */
    }

    while (dataleft > 0 && bytestream2_get_bytes_left(&gb) > 0) {
        tag = bytestream2_get_byteu(&gb);
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d_end - d < 8 || bytestream2_get_bytes_left(&gb) < 8)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d_end - d < 1 || bytestream2_get_bytes_left(&gb) < 1)
                        return;
                    queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = bytestream2_get_byte(&gb);
                    chainofs |= ((bytestream2_peek_byte(&gb) & 0xF0) << 4);
                    chainlen = (bytestream2_get_byte(&gb) & 0x0F) + 3;
                    if (chainlen == speclen) {
                        chainlen = bytestream2_get_byte(&gb) + 0xF + 3;
                    }
                    if (d_end - d < chainlen)
                        return;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}

static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    unsigned char *pd;
    int i, l;
    unsigned char *dest_end = dest + dest_len;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_size);
    pd = dest;
    if (src_count & 1) {
        if (bytestream2_get_bytes_left(&gb) < 1)
            return 0;
        *pd++ = bytestream2_get_byteu(&gb);
    }

    src_count >>= 1;
    i = 0;
    do {
        if (bytestream2_get_bytes_left(&gb) < 1)
            break;
        l = bytestream2_get_byteu(&gb);
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l)
                return bytestream2_tell(&gb);
            bytestream2_get_bufferu(&gb, pd, l);
            pd += l;
        } else {
            if (dest_end - pd < i || bytestream2_get_bytes_left(&gb) < 2)
                return bytestream2_tell(&gb);
            for (i = 0; i < l; i++) {
                *pd++ = bytestream2_get_byteu(&gb);
                *pd++ = bytestream2_get_byteu(&gb);
            }
            bytestream2_skip(&gb, 2);
        }
        i += l;
    } while (i < src_count);

    return bytestream2_tell(&gb);
}

static void vmd_decode(VmdVideoContext *s, AVFrame *frame)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    GetByteContext gb;

    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;
    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width)
        return;
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height)
        return;

    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {

        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame.data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
         (frame_height != s->avctx->height))) {

        memcpy(frame->data[0], s->prev_frame.data[0],
               s->avctx->height * frame->linesize[0]);
    }

    /* check if there is a new palette */
    bytestream2_init(&gb, s->buf + 16, s->size - 16);
    if (s->buf[15] & 0x02) {
        bytestream2_skip(&gb, 2);
        palette32 = (unsigned int *)s->palette;
        if (bytestream2_get_bytes_left(&gb) >= PALETTE_COUNT * 3) {
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = bytestream2_get_byteu(&gb) * 4;
                g = bytestream2_get_byteu(&gb) * 4;
                b = bytestream2_get_byteu(&gb) * 4;
                palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
                palette32[i] |= palette32[i] >> 6 & 0x30303;
            }
        }
    }
    if (s->size > 0) {
        /* originally UnpackFrame in VAG's code */
        bytestream2_init(&gb, gb.buffer, s->buf + s->size - gb.buffer);
        if (bytestream2_get_bytes_left(&gb) < 1)
            return;
        meth = bytestream2_get_byteu(&gb);
        if (meth & 0x80) {
            lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
                      s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            bytestream2_init(&gb, s->unpack_buffer, s->unpack_buffer_size);
        }

        dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = bytestream2_get_byte(&gb);
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width || bytestream2_get_bytes_left(&gb) < len)
                            return;
                        bytestream2_get_bufferu(&gb, &dp[ofs], len);
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "offset > width (%d > %d)\n",
                           ofs, frame_width);
                    break;
                }
                dp += frame->linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            for (i = 0; i < frame_height; i++) {
                bytestream2_get_buffer(&gb, dp, frame_width);
                dp += frame->linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = bytestream2_get_byte(&gb);
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (bytestream2_get_byte(&gb) == 0xFF)
                            len = rle_unpack(gb.buffer, &dp[ofs],
                                             len, bytestream2_get_bytes_left(&gb),
                                             frame_width - ofs);
                        else
                            bytestream2_get_buffer(&gb, &dp[ofs], len);
                        bytestream2_skip(&gb, len);
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "offset > width (%d > %d)\n",
                           ofs, frame_width);
                }
                dp += frame->linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}

static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n",
               VMD_HEADER_SIZE);
        return -1;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    s->unpack_buffer = av_malloc(s->unpack_buffer_size);
    if (!s->unpack_buffer)
        return -1;

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = (r << 16) | (g << 8) | (b);
    }

    avcodec_get_frame_defaults(&s->prev_frame);

    return 0;
}

static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *got_frame,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;
    AVFrame *frame = data;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return buf_size;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    vmd_decode(s, frame);

    /* make the palette available on the way out */
    memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    av_frame_unref(&s->prev_frame);
    if ((ret = av_frame_ref(&s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    av_frame_unref(&s->prev_frame);
    av_free(s->unpack_buffer);

    return 0;
}


/*
 * Audio Decoder
 */

#define BLOCK_TYPE_AUDIO   1
#define BLOCK_TYPE_INITIAL 2
#define BLOCK_TYPE_SILENCE 3

typedef struct VmdAudioContext {
    int out_bps;
    int chunk_size;
} VmdAudioContext;

static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};

static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
{
    VmdAudioContext *s = avctx->priv_data;

    if (avctx->channels < 1 || avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
        return AVERROR(EINVAL);
    }
    if (avctx->block_align < 1 || avctx->block_align % avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "invalid block align\n");
        return AVERROR(EINVAL);
    }

    avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO :
                                                   AV_CH_LAYOUT_STEREO;

    if (avctx->bits_per_coded_sample == 16)
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    else
        avctx->sample_fmt = AV_SAMPLE_FMT_U8;
    s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);

    s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);

    av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
           "block align = %d, sample rate = %d\n",
           avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
           avctx->sample_rate);

    return 0;
}

static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
                             int channels)
{
    int ch;
    const uint8_t *buf_end = buf + buf_size;
    int predictor[2];
    int st = channels - 1;

    /* decode initial raw sample */
    for (ch = 0; ch < channels; ch++) {
        predictor[ch] = (int16_t)AV_RL16(buf);
        buf += 2;
        *out++ = predictor[ch];
    }

    /* decode DPCM samples */
    ch = 0;
    while (buf < buf_end) {
        uint8_t b = *buf++;
        if (b & 0x80)
            predictor[ch] -= vmdaudio_table[b & 0x7F];
        else
            predictor[ch] += vmdaudio_table[b];
        predictor[ch] = av_clip_int16(predictor[ch]);
        *out++ = predictor[ch];
        ch ^= st;
    }
}

static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end;
    int buf_size = avpkt->size;
    VmdAudioContext *s = avctx->priv_data;
    int block_type, silent_chunks, audio_chunks;
    int ret;
    uint8_t *output_samples_u8;
    int16_t *output_samples_s16;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
        *got_frame_ptr = 0;
        return buf_size;
    }

    block_type = buf[6];
    if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
        av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
        return AVERROR(EINVAL);
    }
    buf += 16;
    buf_size -= 16;

    /* get number of silent chunks */
    silent_chunks = 0;
    if (block_type == BLOCK_TYPE_INITIAL) {
        uint32_t flags;
        if (buf_size < 4) {
            av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
            return AVERROR(EINVAL);
        }
        flags = AV_RB32(buf);
        silent_chunks = av_popcount(flags);
        buf += 4;
        buf_size -= 4;
    } else if (block_type == BLOCK_TYPE_SILENCE) {
        silent_chunks = 1;
        buf_size = 0; // should already be zero but set it just to be sure
    }

    /* ensure output buffer is large enough */
    audio_chunks = buf_size / s->chunk_size;

    /* get output buffer */
    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
                        avctx->channels;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    output_samples_u8  = frame->data[0];
    output_samples_s16 = (int16_t *)frame->data[0];

    /* decode silent chunks */
    if (silent_chunks > 0) {
        int silent_size = avctx->block_align * silent_chunks;
        if (s->out_bps == 2) {
            memset(output_samples_s16, 0x00, silent_size * 2);
            output_samples_s16 += silent_size;
        } else {
            memset(output_samples_u8, 0x80, silent_size);
            output_samples_u8 += silent_size;
        }
    }

    /* decode audio chunks */
    if (audio_chunks > 0) {
        buf_end = buf + buf_size;
        while (buf_end - buf >= s->chunk_size) {
            if (s->out_bps == 2) {
                decode_audio_s16(output_samples_s16, buf, s->chunk_size,
                                 avctx->channels);
                output_samples_s16 += avctx->block_align;
            } else {
                memcpy(output_samples_u8, buf, s->chunk_size);
                output_samples_u8 += avctx->block_align;
            }
            buf += s->chunk_size;
        }
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}


/*
 * Public Data Structures
 */

AVCodec ff_vmdvideo_decoder = {
    .name           = "vmdvideo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    .decode         = vmdvideo_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
};

AVCodec ff_vmdaudio_decoder = {
    .name           = "vmdaudio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_VMDAUDIO,
    .priv_data_size = sizeof(VmdAudioContext),
    .init           = vmdaudio_decode_init,
    .decode         = vmdaudio_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};