ffv1dec.c
1 /*
2  * FFV1 decoder
3  *
4  * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * FF Video Codec 1 (a lossless codec) decoder
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/crc.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/timer.h"
34 #include "avcodec.h"
35 #include "internal.h"
36 #include "get_bits.h"
37 #include "put_bits.h"
38 #include "rangecoder.h"
39 #include "golomb.h"
40 #include "mathops.h"
41 #include "ffv1.h"
42 
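/*
 * Decode one range-coded symbol: a "value is zero" flag (state 0), a
 * unary-coded exponent (states 1..10), the remaining mantissa bits
 * (states 22..31) and, if is_signed, a sign bit (states 11..21).
 */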
43 static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
44  int is_signed)
45 {
46  if (get_rac(c, state + 0))
47  return 0;
48  else {
49  int i, e, a;
50  e = 0;
51  while (get_rac(c, state + 1 + FFMIN(e, 9))) // 1..10
52  e++;
53 
54  a = 1;
55  for (i = e - 1; i >= 0; i--)
56  a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
57 
58  e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
59  return (a ^ e) - e;
60  }
61 }
62 
63 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
64 {
65  return get_symbol_inline(c, state, is_signed);
66 }
67 
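/*
 * Decode one Golomb-Rice coded residual. The Rice parameter k is derived
 * from the running error_sum/count ratio, the decoded value is bias
 * corrected and folded back into the valid sample range, and the adaptive
 * VLC state is updated.
 */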
68 static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
69  int bits)
70 {
71  int k, i, v, ret;
72 
73  i = state->count;
74  k = 0;
75  while (i < state->error_sum) { // FIXME: optimize
76  k++;
77  i += i;
78  }
79 
80  v = get_sr_golomb(gb, k, 12, bits);
81  av_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
82  v, state->bias, state->error_sum, state->drift, state->count, k);
83 
84 #if 0 // JPEG LS
85  if (k == 0 && 2 * state->drift <= -state->count)
86  v ^= (-1);
87 #else
88  v ^= ((2 * state->drift + state->count) >> 31);
89 #endif
90 
91  ret = fold(v + state->bias, bits);
92 
93  update_vlc_state(state, v);
94 
95  return ret;
96 }
97 
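/*
 * Decode one line of samples. The context is derived from quantized
 * gradients of neighbouring samples; in range-coder mode each residual is
 * a single symbol, while in Golomb-Rice mode context 0 switches into a run
 * mode whose run lengths follow ff_log2_run[]. The residual is added to
 * the median predictor modulo 2^bits.
 */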
98 static av_always_inline void decode_line(FFV1Context *s, int w,
99  int16_t *sample[2],
100  int plane_index, int bits)
101 {
102  PlaneContext *const p = &s->plane[plane_index];
103  RangeCoder *const c = &s->c;
104  int x;
105  int run_count = 0;
106  int run_mode = 0;
107  int run_index = s->run_index;
108 
109  for (x = 0; x < w; x++) {
110  int diff, context, sign;
111 
112  context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
113  if (context < 0) {
114  context = -context;
115  sign = 1;
116  } else
117  sign = 0;
118 
119  av_assert2(context < p->context_count);
120 
121  if (s->ac) {
122  diff = get_symbol_inline(c, p->state[context], 1);
123  } else {
124  if (context == 0 && run_mode == 0)
125  run_mode = 1;
126 
127  if (run_mode) {
128  if (run_count == 0 && run_mode == 1) {
129  if (get_bits1(&s->gb)) {
130  run_count = 1 << ff_log2_run[run_index];
131  if (x + run_count <= w)
132  run_index++;
133  } else {
134  if (ff_log2_run[run_index])
135  run_count = get_bits(&s->gb, ff_log2_run[run_index]);
136  else
137  run_count = 0;
138  if (run_index)
139  run_index--;
140  run_mode = 2;
141  }
142  }
143  run_count--;
144  if (run_count < 0) {
145  run_mode = 0;
146  run_count = 0;
147  diff = get_vlc_symbol(&s->gb, &p->vlc_state[context],
148  bits);
149  if (diff >= 0)
150  diff++;
151  } else
152  diff = 0;
153  } else
154  diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
155 
156  av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
157  run_count, run_index, run_mode, x, get_bits_count(&s->gb));
158  }
159 
160  if (sign)
161  diff = -diff;
162 
163  sample[1][x] = (predict(sample[1] + x, sample[0] + x) + diff) &
164  ((1 << bits) - 1);
165  }
166  s->run_index = run_index;
167 }
168 
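/*
 * Decode a single plane. Two padded line buffers are swapped each row so
 * that sample[0] always holds the previous line and sample[1] the current
 * one; results are stored as 8-bit or (optionally shifted) 16-bit pixels.
 */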
169 static void decode_plane(FFV1Context *s, uint8_t *src,
170  int w, int h, int stride, int plane_index)
171 {
172  int x, y;
173  int16_t *sample[2];
174  sample[0] = s->sample_buffer + 3;
175  sample[1] = s->sample_buffer + w + 6 + 3;
176 
177  s->run_index = 0;
178 
179  memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));
180 
181  for (y = 0; y < h; y++) {
182  int16_t *temp = sample[0]; // FIXME: try a normal buffer
183 
184  sample[0] = sample[1];
185  sample[1] = temp;
186 
187  sample[1][-1] = sample[0][0];
188  sample[0][w] = sample[0][w - 1];
189 
190 // { START_TIMER
191  if (s->avctx->bits_per_raw_sample <= 8) {
192  decode_line(s, w, sample, plane_index, 8);
193  for (x = 0; x < w; x++)
194  src[x + stride * y] = sample[1][x];
195  } else {
196  decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
197  if (s->packed_at_lsb) {
198  for (x = 0; x < w; x++) {
199  ((uint16_t*)(src + stride*y))[x] = sample[1][x];
200  }
201  } else {
202  for (x = 0; x < w; x++) {
203  ((uint16_t*)(src + stride*y))[x] = sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
204  }
205  }
206  }
207 // STOP_TIMER("decode-line") }
208  }
209 }
210 
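/*
 * Decode an RGB(A) frame in colorspace 1. Planes are coded in G, B, R, (A)
 * order with one extra bit of range; the inverse colour transform
 * (b -= offset; r -= offset; g -= (b + r) >> 2; b += g; r += g) is applied
 * before packing into a 32-bit pixel (8-bit path) or planar 16-bit b/g/r
 * output (high bit-depth path).
 */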
211 static void decode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3])
212 {
213  int x, y, p;
214  int16_t *sample[4][2];
215  int lbd = s->avctx->bits_per_raw_sample <= 8;
216  int bits = s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
217  int offset = 1 << bits;
218 
219  for (x = 0; x < 4; x++) {
220  sample[x][0] = s->sample_buffer + x * 2 * (w + 6) + 3;
221  sample[x][1] = s->sample_buffer + (x * 2 + 1) * (w + 6) + 3;
222  }
223 
224  s->run_index = 0;
225 
226  memset(s->sample_buffer, 0, 8 * (w + 6) * sizeof(*s->sample_buffer));
227 
228  for (y = 0; y < h; y++) {
229  for (p = 0; p < 3 + s->transparency; p++) {
230  int16_t *temp = sample[p][0]; // FIXME: try a normal buffer
231 
232  sample[p][0] = sample[p][1];
233  sample[p][1] = temp;
234 
235  sample[p][1][-1]= sample[p][0][0 ];
236  sample[p][0][ w]= sample[p][0][w-1];
237  if (lbd)
238  decode_line(s, w, sample[p], (p + 1)/2, 9);
239  else
240  decode_line(s, w, sample[p], (p + 1)/2, bits + 1);
241  }
242  for (x = 0; x < w; x++) {
243  int g = sample[0][1][x];
244  int b = sample[1][1][x];
245  int r = sample[2][1][x];
246  int a = sample[3][1][x];
247 
248  b -= offset;
249  r -= offset;
250  g -= (b + r) >> 2;
251  b += g;
252  r += g;
253 
254  if (lbd)
255  *((uint32_t*)(src[0] + x*4 + stride[0]*y)) = b + (g<<8) + (r<<16) + (a<<24);
256  else {
257  *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b;
258  *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g;
259  *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r;
260  }
261  }
262  }
263 }
264 
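/*
 * Parse a version >= 3 slice header: slice position and size (coded as
 * slice-grid multiples of the frame dimensions), a quantization table
 * index per plane, picture structure and sample aspect ratio.
 */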
265 static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
266 {
267  RangeCoder *c = &fs->c;
268  uint8_t state[CONTEXT_SIZE];
269  unsigned ps, i, context_count;
270  memset(state, 128, sizeof(state));
271 
272  av_assert0(f->version > 2);
273 
274  fs->slice_x = get_symbol(c, state, 0) * f->width ;
275  fs->slice_y = get_symbol(c, state, 0) * f->height;
276  fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
277  fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
278 
279  fs->slice_x /= f->num_h_slices;
280  fs->slice_y /= f->num_v_slices;
281  fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
282  fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
283  if ((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
284  return -1;
285  if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
286  || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
287  return -1;
288 
289  for (i = 0; i < f->plane_count; i++) {
290  PlaneContext * const p = &fs->plane[i];
291  int idx = get_symbol(c, state, 0);
292  if (idx > (unsigned)f->quant_table_count) {
293  av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
294  return -1;
295  }
296  p->quant_table_index = idx;
297  memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
298  context_count = f->context_count[idx];
299 
300  if (p->context_count < context_count) {
301  av_freep(&p->state);
302  av_freep(&p->vlc_state);
303  }
304  p->context_count = context_count;
305  }
306 
307  ps = get_symbol(c, state, 0);
308  if (ps == 1) {
309  f->cur->interlaced_frame = 1;
310  f->cur->top_field_first = 1;
311  } else if (ps == 2) {
312  f->cur->interlaced_frame = 1;
313  f->cur->top_field_first = 0;
314  } else if (ps == 3) {
315  f->cur->interlaced_frame = 0;
316  }
317  f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
318  f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
319 
320  return 0;
321 }
322 
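/*
 * Entry point for one slice worker thread. Initializes/resets the slice
 * state, locates the Golomb-Rice bitstream after the range-coded part when
 * needed, decodes the planes (or the RGB path), and in range-coder mode for
 * version >= 3 checks that decoding consumed the slice up to its trailer.
 */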
323 static int decode_slice(AVCodecContext *c, void *arg)
324 {
325  FFV1Context *fs = *(void **)arg;
326  FFV1Context *f = fs->avctx->priv_data;
327  int width, height, x, y, ret;
328  const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step_minus1 + 1;
329  AVFrame * const p = f->cur;
330 
331  if (f->version > 2) {
332  if (ffv1_init_slice_state(f, fs) < 0)
333  return AVERROR(ENOMEM);
334  if (decode_slice_header(f, fs) < 0) {
335  fs->slice_damaged = 1;
336  return AVERROR_INVALIDDATA;
337  }
338  }
339  if ((ret = ffv1_init_slice_state(f, fs)) < 0)
340  return ret;
341  if (f->cur->key_frame)
342  ffv1_clear_slice_state(f, fs);
343 
344  width = fs->slice_width;
345  height = fs->slice_height;
346  x = fs->slice_x;
347  y = fs->slice_y;
348 
349  if (!fs->ac) {
350  if (f->version == 3 && f->minor_version > 1 || f->version > 3)
351  get_rac(&fs->c, (uint8_t[]) { 129 });
352  fs->ac_byte_count = f->version > 2 || (!x && !y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
353  init_get_bits(&fs->gb,
354  fs->c.bytestream_start + fs->ac_byte_count,
355  (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
356  }
357 
358  av_assert1(width && height);
359  if (f->colorspace == 0) {
360  const int chroma_width = -((-width) >> f->chroma_h_shift);
361  const int chroma_height = -((-height) >> f->chroma_v_shift);
362  const int cx = x >> f->chroma_h_shift;
363  const int cy = y >> f->chroma_v_shift;
364  decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
365 
366  if (f->chroma_planes) {
367  decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
368  decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
369  }
370  if (fs->transparency)
371  decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
372  } else {
373  uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
374  p->data[1] + ps * x + y * p->linesize[1],
375  p->data[2] + ps * x + y * p->linesize[2] };
376  decode_rgb_frame(fs, planes, width, height, p->linesize);
377  }
378  if (fs->ac && f->version > 2) {
379  int v;
380  get_rac(&fs->c, (uint8_t[]) { 129 });
381  v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
382  if (v) {
383  av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
384  fs->slice_damaged = 1;
385  }
386  }
387 
388  emms_c();
389 
390  return 0;
391 }
392 
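/*
 * Read one quantization table: run-length coded level widths for the
 * positive half, mirrored onto the negative half. Returns the number of
 * distinct levels (2 * v - 1) used to size the context space.
 */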
393 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
394 {
395  int v;
396  int i = 0;
397  uint8_t state[CONTEXT_SIZE];
398 
399  memset(state, 128, sizeof(state));
400 
401  for (v = 0; i < 128; v++) {
402  unsigned len = get_symbol(c, state, 0) + 1;
403 
404  if (len > 128 - i)
405  return AVERROR_INVALIDDATA;
406 
407  while (len--) {
408  quant_table[i] = scale * v;
409  i++;
410  }
411  }
412 
413  for (i = 1; i < 128; i++)
414  quant_table[256 - i] = -quant_table[i];
415  quant_table[128] = -quant_table[127];
416 
417  return 2 * v - 1;
418 }
419 
420 static int read_quant_tables(RangeCoder *c,
421  int16_t quant_table[MAX_CONTEXT_INPUTS][256])
422 {
423  int i;
424  int context_count = 1;
425 
426  for (i = 0; i < 5; i++) {
427  context_count *= read_quant_table(c, quant_table[i], context_count);
428  if (context_count > 32768U) {
429  return AVERROR_INVALIDDATA;
430  }
431  }
432  return (context_count + 1) / 2;
433 }
434 
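/*
 * Parse the global header stored in extradata (version >= 2 streams):
 * version, coder type and state transition table, colorspace, bit depth,
 * chroma subsampling, slice grid, quantization tables, optional initial
 * context states and, for version >= 3, an error-correction flag and a CRC
 * over the extradata.
 */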
435 static int read_extra_header(FFV1Context *f)
436 {
437  RangeCoder *const c = &f->c;
438  uint8_t state[CONTEXT_SIZE];
439  int i, j, k, ret;
440  uint8_t state2[32][CONTEXT_SIZE];
441 
442  memset(state2, 128, sizeof(state2));
443  memset(state, 128, sizeof(state));
444 
445  ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
446  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
447 
448  f->version = get_symbol(c, state, 0);
449  if (f->version > 2) {
450  c->bytestream_end -= 4;
451  f->minor_version = get_symbol(c, state, 0);
452  }
453  f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
454  if (f->ac > 1) {
455  for (i = 1; i < 256; i++)
456  f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
457  }
458 
459  f->colorspace = get_symbol(c, state, 0); //YUV cs type
460  f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
461  f->chroma_planes = get_rac(c, state);
462  f->chroma_h_shift = get_symbol(c, state, 0);
463  f->chroma_v_shift = get_symbol(c, state, 0);
464  f->transparency = get_rac(c, state);
465  f->plane_count = 2 + f->transparency;
466  f->num_h_slices = 1 + get_symbol(c, state, 0);
467  f->num_v_slices = 1 + get_symbol(c, state, 0);
468 
469  if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
470  f->num_v_slices > (unsigned)f->height || !f->num_v_slices
471  ) {
472  av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
473  return AVERROR_INVALIDDATA;
474  }
475 
476  f->quant_table_count = get_symbol(c, state, 0);
477  if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
478  return AVERROR_INVALIDDATA;
479 
480  for (i = 0; i < f->quant_table_count; i++) {
481  f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
482  if (f->context_count[i] < 0) {
483  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
484  return AVERROR_INVALIDDATA;
485  }
486  }
487  if ((ret = ffv1_allocate_initial_states(f)) < 0)
488  return ret;
489 
490  for (i = 0; i < f->quant_table_count; i++)
491  if (get_rac(c, state)) {
492  for (j = 0; j < f->context_count[i]; j++)
493  for (k = 0; k < CONTEXT_SIZE; k++) {
494  int pred = j ? f->initial_states[i][j - 1][k] : 128;
495  f->initial_states[i][j][k] =
496  (pred + get_symbol(c, state2[k], 1)) & 0xFF;
497  }
498  }
499 
500  if (f->version > 2) {
501  f->ec = get_symbol(c, state, 0);
502  }
503 
504  if (f->version > 2) {
505  unsigned v;
506  v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0,
507  f->avctx->extradata, f->avctx->extradata_size);
508  if (v) {
509  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
510  return AVERROR_INVALIDDATA;
511  }
512  }
513 
514  return 0;
515 }
516 
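/*
 * Parse the per-frame header. For version < 2 streams the full parameter
 * set is coded here; the pixel format is then chosen from colorspace, bit
 * depth, chroma subsampling and transparency, the slice count is determined
 * (from the bitstream trailers for version >= 3), and per-slice, per-plane
 * context state is set up.
 */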
517 static int read_header(FFV1Context *f)
518 {
519  uint8_t state[CONTEXT_SIZE];
520  int i, j, context_count = -1; //-1 to avoid warning
521  RangeCoder *const c = &f->slice_context[0]->c;
522 
523  memset(state, 128, sizeof(state));
524 
525  if (f->version < 2) {
526  unsigned v= get_symbol(c, state, 0);
527  if (v >= 2) {
528  av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
529  return AVERROR_INVALIDDATA;
530  }
531  f->version = v;
532  f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
533  if (f->ac > 1) {
534  for (i = 1; i < 256; i++)
535  f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
536  }
537 
538  f->colorspace = get_symbol(c, state, 0); //YUV cs type
539 
540  if (f->version > 0)
541  f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
542 
543  f->chroma_planes = get_rac(c, state);
544  f->chroma_h_shift = get_symbol(c, state, 0);
545  f->chroma_v_shift = get_symbol(c, state, 0);
546  f->transparency = get_rac(c, state);
547  f->plane_count = 2 + f->transparency;
548  }
549 
550  if (f->colorspace == 0) {
551  if (!f->transparency && !f->chroma_planes) {
552  if (f->avctx->bits_per_raw_sample <= 8)
553  f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
554  else
555  f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
556  } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
557  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
558  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
559  case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
560  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
561  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
562  case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
563  case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
564  default:
565  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
566  return AVERROR(ENOSYS);
567  }
568  } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
569  switch(16*f->chroma_h_shift + f->chroma_v_shift) {
570  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
571  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
572  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
573  default:
574  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
575  return AVERROR(ENOSYS);
576  }
577  } else if (f->avctx->bits_per_raw_sample == 9) {
578  f->packed_at_lsb = 1;
579  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
580  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
581  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
582  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
583  default:
584  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
585  return AVERROR(ENOSYS);
586  }
587  } else if (f->avctx->bits_per_raw_sample == 10) {
588  f->packed_at_lsb = 1;
589  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
590  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
591  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
592  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
593  default:
594  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
595  return AVERROR(ENOSYS);
596  }
597  } else {
598  switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
599  case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
600  case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
601  case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
602  default:
603  av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
604  return AVERROR(ENOSYS);
605  }
606  }
607  } else if (f->colorspace == 1) {
608  if (f->chroma_h_shift || f->chroma_v_shift) {
610  "chroma subsampling not supported in this colorspace\n");
611  return AVERROR(ENOSYS);
612  }
613  if ( f->avctx->bits_per_raw_sample == 9)
614  f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
615  else if (f->avctx->bits_per_raw_sample == 10)
616  f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
617  else if (f->avctx->bits_per_raw_sample == 12)
618  f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
619  else if (f->avctx->bits_per_raw_sample == 14)
620  f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
621  else
622  if (f->transparency) f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
623  else f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
624  } else {
625  av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
626  return AVERROR(ENOSYS);
627  }
628 
629  av_dlog(f->avctx, "%d %d %d\n",
630  f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
631  if (f->version < 2) {
632  context_count = read_quant_tables(c, f->quant_table);
633  if (context_count < 0) {
634  av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
635  return AVERROR_INVALIDDATA;
636  }
637  } else if (f->version < 3) {
638  f->slice_count = get_symbol(c, state, 0);
639  } else {
640  const uint8_t *p = c->bytestream_end;
641  for (f->slice_count = 0;
642  f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start;
643  f->slice_count++) {
644  int trailer = 3 + 5*!!f->ec;
645  int size = AV_RB24(p-trailer);
646  if (size + trailer > p - c->bytestream_start)
647  break;
648  p -= size + trailer;
649  }
650  }
651  if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
652  av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
653  return AVERROR_INVALIDDATA;
654  }
655 
656  for (j = 0; j < f->slice_count; j++) {
657  FFV1Context *fs = f->slice_context[j];
658  fs->ac = f->ac;
659  fs->packed_at_lsb = f->packed_at_lsb;
660 
661  fs->slice_damaged = 0;
662 
663  if (f->version == 2) {
664  fs->slice_x = get_symbol(c, state, 0) * f->width ;
665  fs->slice_y = get_symbol(c, state, 0) * f->height;
666  fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
667  fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
668 
669  fs->slice_x /= f->num_h_slices;
670  fs->slice_y /= f->num_v_slices;
671  fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
672  fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
673  if ((unsigned)fs->slice_width > f->width ||
674  (unsigned)fs->slice_height > f->height)
675  return AVERROR_INVALIDDATA;
676  if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
677  || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
678  return AVERROR_INVALIDDATA;
679  }
680 
681  for (i = 0; i < f->plane_count; i++) {
682  PlaneContext *const p = &fs->plane[i];
683 
684  if (f->version == 2) {
685  int idx = get_symbol(c, state, 0);
686  if (idx > (unsigned)f->quant_table_count) {
688  "quant_table_index out of range\n");
689  return AVERROR_INVALIDDATA;
690  }
691  p->quant_table_index = idx;
692  memcpy(p->quant_table, f->quant_tables[idx],
693  sizeof(p->quant_table));
694  context_count = f->context_count[idx];
695  } else {
696  memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
697  }
698 
699  if (f->version <= 2) {
700  av_assert0(context_count >= 0);
701  if (p->context_count < context_count) {
702  av_freep(&p->state);
703  av_freep(&p->vlc_state);
704  }
705  p->context_count = context_count;
706  }
707  }
708  }
709  return 0;
710 }
711 
712 static av_cold int decode_init(AVCodecContext *avctx)
713 {
714  FFV1Context *f = avctx->priv_data;
715  int ret;
716 
717  if ((ret = ffv1_common_init(avctx)) < 0)
718  return ret;
719 
720  if (avctx->extradata && (ret = read_extra_header(f)) < 0)
721  return ret;
722 
723  if ((ret = ffv1_init_slice_contexts(f)) < 0)
724  return ret;
725 
726  return 0;
727 }
728 
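/*
 * Top-level frame decoding: read the keyframe flag and (on keyframes) the
 * header, walk the slice trailers from the end of the packet, CRC-check
 * each slice if error correction is enabled, decode all slices in parallel
 * via avctx->execute(), and conceal damaged slices by copying from the
 * previous picture.
 */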
729 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
730 {
731  const uint8_t *buf = avpkt->data;
732  int buf_size = avpkt->size;
733  FFV1Context *f = avctx->priv_data;
734  RangeCoder *const c = &f->slice_context[0]->c;
735  int i, ret;
736  uint8_t keystate = 128;
737  const uint8_t *buf_p;
738  AVFrame *const p = data;
739 
740  f->cur = p;
741 
742  ff_init_range_decoder(c, buf, buf_size);
743  ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
744 
745  p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
746  if (get_rac(c, &keystate)) {
747  p->key_frame = 1;
748  f->key_frame_ok = 0;
749  if ((ret = read_header(f)) < 0)
750  return ret;
751  f->key_frame_ok = 1;
752  } else {
753  if (!f->key_frame_ok) {
754  av_log(avctx, AV_LOG_ERROR,
755  "Cannot decode non-keyframe without valid keyframe\n");
756  return AVERROR_INVALIDDATA;
757  }
758  p->key_frame = 0;
759  }
760 
761  if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0)
762  return ret;
763 
764  if (avctx->debug & FF_DEBUG_PICT_INFO)
765  av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
766  f->version, p->key_frame, f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
767 
768  buf_p = buf + buf_size;
769  for (i = f->slice_count - 1; i >= 0; i--) {
770  FFV1Context *fs = f->slice_context[i];
771  int trailer = 3 + 5*!!f->ec;
772  int v;
773 
774  if (i || f->version > 2) v = AV_RB24(buf_p-trailer) + trailer;
775  else v = buf_p - c->bytestream_start;
776  if (buf_p - c->bytestream_start < v) {
777  av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
778  return AVERROR_INVALIDDATA;
779  }
780  buf_p -= v;
781 
782  if (f->ec) {
783  unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
784  if (crc) {
785  int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
786  av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc);
787  if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
788  av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
789  } else if (ts != AV_NOPTS_VALUE) {
790  av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
791  } else {
792  av_log(f->avctx, AV_LOG_ERROR, "\n");
793  }
794  fs->slice_damaged = 1;
795  }
796  }
797 
798  if (i) {
799  ff_init_range_decoder(&fs->c, buf_p, v);
800  } else
801  fs->c.bytestream_end = (uint8_t *)(buf_p + v);
802 
803  fs->cur = p;
804  }
805 
806  avctx->execute(avctx,
807  decode_slice,
808  &f->slice_context[0],
809  NULL,
810  f->slice_count,
811  sizeof(void*));
812 
813  for (i = f->slice_count - 1; i >= 0; i--) {
814  FFV1Context *fs = f->slice_context[i];
815  int j;
816  if (fs->slice_damaged && f->last_picture.data[0]) {
817  const uint8_t *src[4];
818  uint8_t *dst[4];
819  for (j = 0; j < 4; j++) {
820  int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
821  int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
822  dst[j] = p->data[j] + p->linesize[j]*
823  (fs->slice_y>>sv) + (fs->slice_x>>sh);
824  src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
825  (fs->slice_y>>sv) + (fs->slice_x>>sh);
826  }
827  av_image_copy(dst, p->linesize, (const uint8_t **)src,
828  f->last_picture.linesize,
829  avctx->pix_fmt,
830  fs->slice_width,
831  fs->slice_height);
832  }
833  }
834 
835  f->picture_number++;
836 
837  av_frame_unref(&f->last_picture);
838  if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
839  return ret;
840  f->cur = NULL;
841 
842  *got_frame = 1;
843 
844  return buf_size;
845 }
846 
847 AVCodec ff_ffv1_decoder = {
848  .name = "ffv1",
849  .type = AVMEDIA_TYPE_VIDEO,
850  .id = AV_CODEC_ID_FFV1,
851  .priv_data_size = sizeof(FFV1Context),
852  .init = decode_init,
853  .close = ffv1_close,
854  .decode = decode_frame,
855  .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
856  CODEC_CAP_SLICE_THREADS,
857  .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
858 };