proresdec_lgpl.c
/*
 * Apple ProRes compatible decoder
 *
 * Copyright (c) 2010-2011 Maxim Poliakovski
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * This is a decoder for Apple ProRes 422 SD/HQ/LT/Proxy and ProRes 4444.
 * It is used for storing and editing high definition video data in Apple's Final Cut Pro.
 *
 * @see http://wiki.multimedia.cx/index.php?title=Apple_ProRes
 */

#define LONG_BITSTREAM_READER // some ProRes vlc codes require up to 28 bits to be read at once

#include <stdint.h>

#include "libavutil/intmath.h"
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "proresdata.h"
#include "proresdsp.h"
#include "get_bits.h"

typedef struct {
    const uint8_t *index;            ///< pointer to the data of this slice
    int slice_num;
    int x_pos, y_pos;
    int slice_width;
    int prev_slice_sf;               ///< scalefactor of the previous decoded slice
    DECLARE_ALIGNED(16, int16_t, blocks)[8 * 4 * 64];
    DECLARE_ALIGNED(16, int16_t, qmat_luma_scaled)[64];
    DECLARE_ALIGNED(16, int16_t, qmat_chroma_scaled)[64];
} ProresThreadData;

typedef struct {
    ProresDSPContext dsp;
    AVFrame    *frame;
    ScanTable  scantable;
    int        scantable_type;        ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced

    int        frame_type;            ///< 0 = progressive, 1 = top-field first, 2 = bottom-field first
    int        pic_format;            ///< 2 = 422, 3 = 444
    uint8_t    qmat_luma[64];         ///< dequantization matrix for luma
    uint8_t    qmat_chroma[64];       ///< dequantization matrix for chroma
    int        qmat_changed;          ///< 1 - global quantization matrices changed
    int        total_slices;          ///< total number of slices in a picture
    ProresThreadData *slice_data;
    int        pic_num;
    int        chroma_factor;
    int        mb_chroma_factor;
    int        num_chroma_blocks;     ///< number of chrominance blocks in a macroblock
    int        slice_width_factor;
    int        slice_height_factor;
    int        num_x_mbs;
    int        num_y_mbs;
    int        alpha_info;
} ProresContext;


static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    ctx->total_slices = 0;
    ctx->slice_data   = NULL;

    avctx->bits_per_raw_sample = PRORES_BITS_PER_SAMPLE;
    ff_proresdsp_init(&ctx->dsp, avctx);

    ctx->scantable_type = -1;   // set scantable type to uninitialized
    memset(ctx->qmat_luma, 4, 64);
    memset(ctx->qmat_chroma, 4, 64);

    return 0;
}


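/*
 * Annotation (added for readability, not from the original source): the frame
 * header fields are read at the following byte offsets, as implied by the
 * parsing code in decode_frame_header():
 *   0..1   header size, 2..3 version, 8..9 width, 10..11 height,
 *   12     bits 2-3 frame type, bits 6-7 chroma factor,
 *   14..16 colour primaries / transfer / matrix, 17 (low nibble) alpha info,
 *   19     flags: bit 1 = custom luma qmat follows, bit 0 = custom chroma qmat,
 *   20..   the 64-byte quantization matrices, when present.
 * Bytes not listed here are simply not parsed by this decoder.
 */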
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, version, width, height, flags;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "frame data too small\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    if (version >= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported header version: %d\n", version);
        return AVERROR_INVALIDDATA;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR,
               "picture dimension changed: old: %d x %d, new: %d x %d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_INVALIDDATA;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    if (ctx->frame_type > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported frame type: %d\n", ctx->frame_type);
        return AVERROR_INVALIDDATA;
    }

    ctx->chroma_factor     = (buf[12] >> 6) & 3;
    ctx->mb_chroma_factor  = ctx->chroma_factor + 2;
    ctx->num_chroma_blocks = (1 << ctx->chroma_factor) >> 1;
    switch (ctx->chroma_factor) {
    case 2:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        break;
    case 3:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "unsupported picture format: %d\n", ctx->pic_format);
        return AVERROR_INVALIDDATA;
    }

    if (ctx->scantable_type != ctx->frame_type) {
        if (!ctx->frame_type)
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_progressive_scan);
        else
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_interlaced_scan);
        ctx->scantable_type = ctx->frame_type;
    }

    if (ctx->frame_type) {      /* if interlaced */
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first  = ctx->frame_type & 1;
    } else {
        ctx->frame->interlaced_frame = 0;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];

    ctx->alpha_info = buf[17] & 0xf;
    if (ctx->alpha_info)
        avpriv_report_missing_feature(avctx, "Alpha channel");

    ctx->qmat_changed = 0;
    ptr   = buf + 20;
    flags = buf[19];
    if (flags & 2) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return AVERROR_INVALIDDATA;
        }
        if (memcmp(ctx->qmat_luma, ptr, 64)) {
            memcpy(ctx->qmat_luma, ptr, 64);
            ctx->qmat_changed = 1;
        }
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
        ctx->qmat_changed = 1;
    }

    if (flags & 1) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return -1;
        }
        if (memcmp(ctx->qmat_chroma, ptr, 64)) {
            memcpy(ctx->qmat_chroma, ptr, 64);
            ctx->qmat_changed = 1;
        }
    } else {
        memset(ctx->qmat_chroma, 4, 64);
        ctx->qmat_changed = 1;
    }

    return hdr_size;
}


static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
                                 const int data_size, AVCodecContext *avctx)
{
    int i, hdr_size, pic_data_size, num_slices;
    int slice_width_factor, slice_height_factor;
    int remainder, num_x_slices;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = data_size > 0 ? buf[0] >> 3 : 0;
    if (hdr_size < 8 || hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture header too small\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture data too small\n");
        return AVERROR_INVALIDDATA;
    }

    slice_width_factor  = buf[7] >> 4;
    slice_height_factor = buf[7] & 0xF;
    if (slice_width_factor > 3 || slice_height_factor) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported slice dimension: %d x %d\n",
               1 << slice_width_factor, 1 << slice_height_factor);
        return AVERROR_INVALIDDATA;
    }

    ctx->slice_width_factor  = slice_width_factor;
    ctx->slice_height_factor = slice_height_factor;

    ctx->num_x_mbs = (avctx->width + 15) >> 4;
    ctx->num_y_mbs = (avctx->height +
                      (1 << (4 + ctx->frame->interlaced_frame)) - 1) >>
                     (4 + ctx->frame->interlaced_frame);

    remainder    = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
    num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
                   ((remainder >> 1) & 1) + ((remainder >> 2) & 1);
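    /* Illustrative note (not from the original source): a macroblock row is
     * split into slices of 1 << slice_width_factor MBs, and the leftover MBs
     * at the right edge are covered by progressively narrower slices, one per
     * set bit of the remainder. E.g. with num_x_mbs = 23 and 8-MB slices the
     * row becomes 8 + 8 + 4 + 2 + 1, i.e. num_x_slices = 5. */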

    num_slices = num_x_slices * ctx->num_y_mbs;
    if (num_slices != AV_RB16(buf + 5)) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of slices\n");
        return AVERROR_INVALIDDATA;
    }

    if (ctx->total_slices != num_slices) {
        av_freep(&ctx->slice_data);
        ctx->slice_data = av_malloc((num_slices + 1) * sizeof(ctx->slice_data[0]));
        if (!ctx->slice_data)
            return AVERROR(ENOMEM);
        ctx->total_slices = num_slices;
    }

    if (hdr_size + num_slices * 2 > data_size) {
        av_log(avctx, AV_LOG_ERROR, "slice table too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse slice table allowing quick access to the slice data */
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + num_slices * 2;

    for (i = 0; i < num_slices; i++) {
        ctx->slice_data[i].index         = data_ptr;
        ctx->slice_data[i].prev_slice_sf = 0;
        data_ptr += AV_RB16(index_ptr + i * 2);
    }
    ctx->slice_data[i].index         = data_ptr;
    ctx->slice_data[i].prev_slice_sf = 0;

    if (data_ptr > buf + data_size) {
        av_log(avctx, AV_LOG_ERROR, "out of slice data\n");
        return -1;
    }

    return pic_data_size;
}


/**
 * Read an unsigned rice/exp golomb codeword.
 */
static inline int decode_vlc_codeword(GetBitContext *gb, unsigned codebook)
{
    unsigned int rice_order, exp_order, switch_bits;
    unsigned int buf, code;
    int log, prefix_len, len;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf = GET_CACHE(re, gb);

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  =  codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7;  /* exp golomb code order */

    log = 31 - av_log2(buf); /* count prefix bits (zeroes) */

    if (log < switch_bits) { /* ok, we got a rice code */
        if (!rice_order) {
            /* shortcut for faster decoding of rice codes without remainder */
            code = log;
            LAST_SKIP_BITS(re, gb, log + 1);
        } else {
            prefix_len = log + 1;
            code = (log << rice_order) + NEG_USR32(buf << prefix_len, rice_order);
            LAST_SKIP_BITS(re, gb, prefix_len + rice_order);
        }
    } else { /* otherwise we got an exp golomb code */
        len  = (log << 1) - switch_bits + exp_order + 1;
        code = NEG_USR32(buf, len) - (1 << exp_order) + (switch_bits << rice_order);
        LAST_SKIP_BITS(re, gb, len);
    }

    CLOSE_READER(re, gb);

    return code;
}
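
/*
 * Illustrative example (added annotation, not from the original source): the
 * byte-sized codebook descriptor packs three fields, as read above:
 *   bits 7..5 = Rice order, bits 4..2 = Exp-Golomb order,
 *   bits 1..0 = switch_bits - 1.
 * For a hypothetical descriptor 0x2D (rice_order = 1, exp_order = 3,
 * switch_bits = 2), a bitstream starting with "01..." has one leading zero,
 * which is less than switch_bits, so it is decoded as a Rice code:
 * code = (1 << 1) + the next 1 bit. Longer zero runs switch to Exp-Golomb.
 */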

#define LSB2SIGN(x) (-((x) & 1))
#define TOSIGNED(x) (((x) >> 1) ^ LSB2SIGN(x))
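/* Annotation (not in the original source): TOSIGNED() undoes the usual
 * even/odd signed mapping, i.e. 0, 1, 2, 3, 4, ... -> 0, -1, 1, -2, 2, ... */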

/**
 * Decode DC coefficients for all blocks in a slice.
 */
static inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                    int nblocks)
{
    int16_t prev_dc;
    int     i, sign;
    int16_t delta;
    unsigned int code;

    code   = decode_vlc_codeword(gb, FIRST_DC_CB);
    out[0] = prev_dc = TOSIGNED(code);

    out += 64; /* move to the DC coeff of the next block */
    delta = 3;

    for (i = 1; i < nblocks; i++, out += 64) {
        code = decode_vlc_codeword(gb, ff_prores_dc_codebook[FFMIN(FFABS(delta), 3)]);

        sign     = -(((delta >> 15) & 1) ^ (code & 1));
        delta    = (((code + 1) >> 1) ^ sign) - sign;
        prev_dc += delta;
        out[0]   = prev_dc;
    }
}
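
/*
 * Annotation (not in the original source): the sign-folding above makes the
 * decoded delta's sign depend on the previous delta. Starting from a
 * non-negative previous delta, codes 0, 1, 2, 3, 4 map to deltas
 * 0, -1, +1, -2, +2; a negative previous delta flips the signs of the
 * non-zero values.
 */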


/**
 * Decode AC coefficients for all blocks in a slice.
 */
static inline void decode_ac_coeffs(GetBitContext *gb, int16_t *out,
                                    int blocks_per_slice,
                                    int plane_size_factor,
                                    const uint8_t *scan)
{
    int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index;
    int max_coeffs, bits_left;

    /* set initial prediction values */
    run   = 4;
    level = 2;

    max_coeffs = blocks_per_slice << 6;
    block_mask = blocks_per_slice - 1;

    for (pos = blocks_per_slice - 1; pos < max_coeffs;) {
        run_cb_index = ff_prores_run_to_cb_index[FFMIN(run, 15)];
        lev_cb_index = ff_prores_lev_to_cb_index[FFMIN(level, 9)];

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
            return;

        run = decode_vlc_codeword(gb, ff_prores_ac_codebook[run_cb_index]);

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
            return;

        level = decode_vlc_codeword(gb, ff_prores_ac_codebook[lev_cb_index]) + 1;

        pos += run + 1;
        if (pos >= max_coeffs)
            break;

        sign = get_sbits(gb, 1);
        out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] =
            (level ^ sign) - sign;
    }
}
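
/*
 * Annotation (not in the original source): AC coefficients of a slice are
 * stored interleaved across its blocks, so for a given "pos",
 * pos & block_mask selects the block and pos >> plane_size_factor (i.e.
 * pos / blocks_per_slice) selects the entry of the scan table. The loop
 * starts at blocks_per_slice - 1 because coefficient 0 of every block (the
 * DC) has already been decoded by decode_dc_coeffs().
 */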


/**
 * Decode a slice plane (luma or chroma).
 */
static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
                               const uint8_t *buf,
                               int data_size, uint16_t *out_ptr,
                               int linesize, int mbs_per_slice,
                               int blocks_per_mb, int plane_size_factor,
                               const int16_t *qmat, int is_chroma)
{
    GetBitContext gb;
    int16_t *block_ptr;
    int mb_num, blocks_per_slice;

    blocks_per_slice = mbs_per_slice * blocks_per_mb;

    memset(td->blocks, 0, 8 * 4 * 64 * sizeof(*td->blocks));

    init_get_bits(&gb, buf, data_size << 3);

    decode_dc_coeffs(&gb, td->blocks, blocks_per_slice);

    decode_ac_coeffs(&gb, td->blocks, blocks_per_slice,
                     plane_size_factor, ctx->scantable.permutated);

    /* inverse quantization, inverse transform and output */
    block_ptr = td->blocks;

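    /* Annotation (not in the original source): each macroblock is made of
     * 8x8 blocks. The luma loop below writes them in the order top-left,
     * top-right, bottom-left, bottom-right, while the chroma loop writes
     * column-wise (top-left, bottom-left, then the right column when present,
     * i.e. for 4:4:4); that ordering is the only difference between the two
     * branches. */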
    if (!is_chroma) {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    } else {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    }
}


static int decode_slice(AVCodecContext *avctx, void *tdata)
{
    ProresThreadData *td = tdata;
    ProresContext *ctx = avctx->priv_data;
    int mb_x_pos  = td->x_pos;
    int mb_y_pos  = td->y_pos;
    int pic_num   = ctx->pic_num;
    int slice_num = td->slice_num;
    int mbs_per_slice = td->slice_width;
    const uint8_t *buf;
    uint8_t *y_data, *u_data, *v_data;
    AVFrame *pic = ctx->frame;
    int i, sf, slice_width_factor;
    int slice_data_size, hdr_size, y_data_size, u_data_size, v_data_size;
    int y_linesize, u_linesize, v_linesize;

    buf             = ctx->slice_data[slice_num].index;
    slice_data_size = ctx->slice_data[slice_num + 1].index - buf;

    slice_width_factor = av_log2(mbs_per_slice);

    y_data     = pic->data[0];
    u_data     = pic->data[1];
    v_data     = pic->data[2];
    y_linesize = pic->linesize[0];
    u_linesize = pic->linesize[1];
    v_linesize = pic->linesize[2];

    if (pic->interlaced_frame) {
        if (!(pic_num ^ pic->top_field_first)) {
            y_data += y_linesize;
            u_data += u_linesize;
            v_data += v_linesize;
        }
        y_linesize <<= 1;
        u_linesize <<= 1;
        v_linesize <<= 1;
    }

    if (slice_data_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "slice data too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse slice header */
    hdr_size    = buf[0] >> 3;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) :
                  slice_data_size - y_data_size - u_data_size - hdr_size;

    if (hdr_size + y_data_size + u_data_size + v_data_size > slice_data_size ||
        v_data_size < 0 || hdr_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "invalid data size\n");
        return AVERROR_INVALIDDATA;
    }

    sf = av_clip(buf[1], 1, 224);
    sf = sf > 128 ? (sf - 96) << 2 : sf;
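    /* Annotation (not in the original source): scale factors 1..128 are used
     * as-is, while encoded values 129..224 map to 132, 136, ..., 512 in steps
     * of four. */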

    /* scale the quantization matrices according to the slice's scale factor */
    /* TODO: this can be SIMD-optimized a lot */
    if (ctx->qmat_changed || sf != td->prev_slice_sf) {
        td->prev_slice_sf = sf;
        for (i = 0; i < 64; i++) {
            td->qmat_luma_scaled[ctx->dsp.idct_permutation[i]]   = ctx->qmat_luma[i]   * sf;
            td->qmat_chroma_scaled[ctx->dsp.idct_permutation[i]] = ctx->qmat_chroma[i] * sf;
        }
    }

    /* decode luma plane */
    decode_slice_plane(ctx, td, buf + hdr_size, y_data_size,
                       (uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
                                    (mb_x_pos << 5)), y_linesize,
                       mbs_per_slice, 4, slice_width_factor + 2,
                       td->qmat_luma_scaled, 0);

    /* decode U chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size, u_data_size,
                       (uint16_t*) (u_data + (mb_y_pos << 4) * u_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       td->qmat_chroma_scaled, 1);

    /* decode V chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size + u_data_size,
                       v_data_size,
                       (uint16_t*) (v_data + (mb_y_pos << 4) * v_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       td->qmat_chroma_scaled, 1);

    return 0;
}


static int decode_picture(ProresContext *ctx, int pic_num,
                          AVCodecContext *avctx)
{
    int slice_num, slice_width, x_pos, y_pos;

    slice_num = 0;

    ctx->pic_num = pic_num;
    for (y_pos = 0; y_pos < ctx->num_y_mbs; y_pos++) {
        slice_width = 1 << ctx->slice_width_factor;

        for (x_pos = 0; x_pos < ctx->num_x_mbs && slice_width;
             x_pos += slice_width) {
            while (ctx->num_x_mbs - x_pos < slice_width)
                slice_width >>= 1;

            ctx->slice_data[slice_num].slice_num   = slice_num;
            ctx->slice_data[slice_num].x_pos       = x_pos;
            ctx->slice_data[slice_num].y_pos       = y_pos;
            ctx->slice_data[slice_num].slice_width = slice_width;

            slice_num++;
        }
    }

    return avctx->execute(avctx, decode_slice,
                          ctx->slice_data, NULL, slice_num,
                          sizeof(ctx->slice_data[0]));
}


#define MOVE_DATA_PTR(nbytes) buf += (nbytes); buf_size -= (nbytes)

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int frame_hdr_size, pic_num, pic_data_size;

    ctx->frame = data;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;

    /* check frame atom container */
    if (buf_size < 28 || buf_size < AV_RB32(buf) ||
        AV_RB32(buf + 4) != FRAME_ID) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
        return AVERROR_INVALIDDATA;
    }

    MOVE_DATA_PTR(8);

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return AVERROR_INVALIDDATA;

    MOVE_DATA_PTR(frame_hdr_size);

    if (ff_get_buffer(avctx, ctx->frame, 0) < 0)
        return -1;

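    /* Annotation (not in the original source): a progressive frame carries a
     * single picture, while an interlaced frame carries two field pictures
     * back to back, so this loop runs interlaced_frame + 1 times. */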
    for (pic_num = 0; ctx->frame->interlaced_frame - pic_num + 1; pic_num++) {
        pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
        if (pic_data_size < 0)
            return AVERROR_INVALIDDATA;

        if (decode_picture(ctx, pic_num, avctx))
            return -1;

        MOVE_DATA_PTR(pic_data_size);
    }

    ctx->frame = NULL;
    *got_frame = 1;

    return avpkt->size;
}


static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slice_data);

    return 0;
}


AVCodec ff_prores_lgpl_decoder = {
    .name           = "prores_lgpl",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)")
};