1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/imgutils.h"
31 #include "libavutil/opt.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "dsputil.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "mpegvideo.h"
39 #include "h264.h"
40 #include "h264data.h"
41 #include "h264chroma.h"
42 #include "h264_mvpred.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "rectangle.h"
46 #include "svq3.h"
47 #include "thread.h"
48 #include "vdpau_internal.h"
49 #include "libavutil/avassert.h"
50 
51 // #undef NDEBUG
52 #include <assert.h>
53 
54 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
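/* Coefficient buffer size per macroblock for each chroma_format_idc
 * (0 = monochrome, 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4):
 * 256 luma samples plus 2 * 0/64/128/256 chroma samples. */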
55 
56 static const uint8_t rem6[QP_MAX_NUM + 1] = {
57  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
58  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
59  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
60  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
61  0, 1, 2, 3,
62 };
63 
64 static const uint8_t div6[QP_MAX_NUM + 1] = {
65  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
66  3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
67  7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10,
68  10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13, 13, 13, 13,
69  14,14,14,14,
70 };
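/* rem6[q] == q % 6 and div6[q] == q / 6 for every legal QP value.
 * init_dequant4_coeff_table() and init_dequant8_coeff_table() below use
 * them to split QP into a scale-table index (QP % 6) and a left shift
 * (QP / 6); e.g. QP = 29 yields rem6[29] = 5 and div6[29] = 4. */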
71 
72 static const enum AVPixelFormat h264_hwaccel_pixfmt_list_420[] = {
73 #if CONFIG_H264_DXVA2_HWACCEL
74  AV_PIX_FMT_DXVA2_VLD,
75 #endif
76 #if CONFIG_H264_VAAPI_HWACCEL
77  AV_PIX_FMT_VAAPI_VLD,
78 #endif
79 #if CONFIG_H264_VDA_HWACCEL
80  AV_PIX_FMT_VDA_VLD,
81 #endif
82 #if CONFIG_H264_VDPAU_HWACCEL
83  AV_PIX_FMT_VDPAU,
84 #endif
85  AV_PIX_FMT_YUV420P,
86  AV_PIX_FMT_NONE
87 };
88 
89 static const enum AVPixelFormat h264_hwaccel_pixfmt_list_jpeg_420[] = {
90 #if CONFIG_H264_DXVA2_HWACCEL
91  AV_PIX_FMT_DXVA2_VLD,
92 #endif
93 #if CONFIG_H264_VAAPI_HWACCEL
94  AV_PIX_FMT_VAAPI_VLD,
95 #endif
96 #if CONFIG_H264_VDA_HWACCEL
97  AV_PIX_FMT_VDA_VLD,
98 #endif
99 #if CONFIG_H264_VDPAU_HWACCEL
100  AV_PIX_FMT_VDPAU,
101 #endif
102  AV_PIX_FMT_YUVJ420P,
103  AV_PIX_FMT_NONE
104 };
105 
106 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
107 {
108  H264Context *h = avctx->priv_data;
109  return h ? h->sps.num_reorder_frames : 0;
110 }
111 
112 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
113  int (*mv)[2][4][2],
114  int mb_x, int mb_y, int mb_intra, int mb_skipped)
115 {
116  H264Context *h = opaque;
117 
118  h->mb_x = mb_x;
119  h->mb_y = mb_y;
120  h->mb_xy = mb_x + mb_y * h->mb_stride;
121  memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
122  av_assert1(ref >= 0);
123  /* FIXME: It is possible albeit uncommon that slice references
124  * differ between slices. We take the easy approach and ignore
125  * it for now. If this turns out to have any relevance in
126  * practice then correct remapping should be added. */
127  if (ref >= h->ref_count[0])
128  ref = 0;
129  if (!h->ref_list[0][ref].f.data[0]) {
130  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
131  ref = 0;
132  }
133  if ((h->ref_list[0][ref].reference&3) != 3) {
134  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
135  return;
136  }
137  fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
138  2, 2, 2, ref, 1);
139  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
140  fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
141  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
142  h->mb_mbaff =
143  h->mb_field_decoding_flag = 0;
144  ff_h264_hl_decode_mb(h);
145 }
146 
147 static void draw_horiz_band(H264Context *h, int y, int height)
148 {
149  AVCodecContext *avctx = h->avctx;
150  Picture *cur = &h->cur_pic;
151  Picture *last = h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL;
152  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
153  int vshift = desc->log2_chroma_h;
154  const int field_pic = h->picture_structure != PICT_FRAME;
155  if (field_pic) {
156  height <<= 1;
157  y <<= 1;
158  }
159 
160  height = FFMIN(height, avctx->height - y);
161 
162  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
163  return;
164 
165  if (avctx->draw_horiz_band) {
166  AVFrame *src;
167  int offset[AV_NUM_DATA_POINTERS];
168  int i;
169 
170  if (cur->f.pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
171  (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
172  src = &cur->f;
173  else if (last)
174  src = &last->f;
175  else
176  return;
177 
178  offset[0] = y * src->linesize[0];
179  offset[1] =
180  offset[2] = (y >> vshift) * src->linesize[1];
181  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
182  offset[i] = 0;
183 
184  emms_c();
185 
186  avctx->draw_horiz_band(avctx, src, offset,
187  y, h->picture_structure, height);
188  }
189 }
190 
191 static void unref_picture(H264Context *h, Picture *pic)
192 {
193  int off = offsetof(Picture, tf) + sizeof(pic->tf);
194  int i;
195 
196  if (!pic->f.data[0])
197  return;
198 
199  ff_thread_release_buffer(h->avctx, &pic->tf);
200  av_buffer_unref(&pic->hwaccel_priv_buf);
201 
202  av_buffer_unref(&pic->qscale_table_buf);
203  av_buffer_unref(&pic->mb_type_buf);
204  for (i = 0; i < 2; i++) {
205  av_buffer_unref(&pic->motion_val_buf[i]);
206  av_buffer_unref(&pic->ref_index_buf[i]);
207  }
208 
209  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
210 }
211 
212 static void release_unused_pictures(H264Context *h, int remove_current)
213 {
214  int i;
215 
216  /* release non reference frames */
217  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
218  if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
219  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
220  unref_picture(h, &h->DPB[i]);
221  }
222  }
223 }
224 
225 static int ref_picture(H264Context *h, Picture *dst, Picture *src)
226 {
227  int ret, i;
228 
229  av_assert0(!dst->f.buf[0]);
230  av_assert0(src->f.buf[0]);
231 
232  src->tf.f = &src->f;
233  dst->tf.f = &dst->f;
234  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
235  if (ret < 0)
236  goto fail;
237 
238 
239  dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
240  dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
241  if (!dst->qscale_table_buf || !dst->mb_type_buf)
242  goto fail;
243  dst->qscale_table = src->qscale_table;
244  dst->mb_type = src->mb_type;
245 
246  for (i = 0; i < 2; i ++) {
247  dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
248  dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
249  if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
250  goto fail;
251  dst->motion_val[i] = src->motion_val[i];
252  dst->ref_index[i] = src->ref_index[i];
253  }
254 
255  if (src->hwaccel_picture_private) {
256  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
257  if (!dst->hwaccel_priv_buf)
258  goto fail;
259  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
260  }
261 
262  for (i = 0; i < 2; i++)
263  dst->field_poc[i] = src->field_poc[i];
264 
265  memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
266  memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
267 
268  dst->poc = src->poc;
269  dst->frame_num = src->frame_num;
270  dst->mmco_reset = src->mmco_reset;
271  dst->pic_id = src->pic_id;
272  dst->long_ref = src->long_ref;
273  dst->mbaff = src->mbaff;
274  dst->field_picture = src->field_picture;
275  dst->needs_realloc = src->needs_realloc;
276  dst->reference = src->reference;
277  dst->sync = src->sync;
278 
279  return 0;
280 fail:
281  unref_picture(h, dst);
282  return ret;
283 }
284 
285 
286 static int alloc_scratch_buffers(H264Context *h, int linesize)
287 {
288  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
289 
290  if (h->bipred_scratchpad)
291  return 0;
292 
293  h->bipred_scratchpad = av_malloc(16 * 6 * alloc_size);
294  // edge emu needs blocksize + filter length - 1
295  // (= 21x21 for h264)
296  h->edge_emu_buffer = av_mallocz(alloc_size * 2 * 21);
297  h->me.scratchpad = av_mallocz(alloc_size * 2 * 16 * 2);
298 
299  if (!h->bipred_scratchpad || !h->edge_emu_buffer || !h->me.scratchpad) {
300  av_freep(&h->bipred_scratchpad);
301  av_freep(&h->edge_emu_buffer);
302  av_freep(&h->me.scratchpad);
303  return AVERROR(ENOMEM);
304  }
305 
306  h->me.temp = h->me.scratchpad;
307 
308  return 0;
309 }
310 
311 static int init_table_pools(H264Context *h)
312 {
313  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
314  const int mb_array_size = h->mb_stride * h->mb_height;
315  const int b4_stride = h->mb_width * 4 + 1;
316  const int b4_array_size = b4_stride * h->mb_height * 4;
317 
318  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
319  av_buffer_allocz);
320  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
321  sizeof(uint32_t), av_buffer_allocz);
322  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
323  sizeof(int16_t), av_buffer_allocz);
324  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
325 
326  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
327  !h->ref_index_pool) {
328  av_buffer_pool_uninit(&h->qscale_table_pool);
329  av_buffer_pool_uninit(&h->mb_type_pool);
330  av_buffer_pool_uninit(&h->motion_val_pool);
331  av_buffer_pool_uninit(&h->ref_index_pool);
332  return AVERROR(ENOMEM);
333  }
334 
335  return 0;
336 }
337 
338 static int alloc_picture(H264Context *h, Picture *pic)
339 {
340  int i, ret = 0;
341 
342  av_assert0(!pic->f.data[0]);
343 
344  pic->tf.f = &pic->f;
345  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
346  AV_GET_BUFFER_FLAG_REF : 0);
347  if (ret < 0)
348  goto fail;
349 
350  h->linesize = pic->f.linesize[0];
351  h->uvlinesize = pic->f.linesize[1];
352 
353  if (h->avctx->hwaccel) {
354  const AVHWAccel *hwaccel = h->avctx->hwaccel;
355  av_assert0(!pic->hwaccel_picture_private);
356  if (hwaccel->priv_data_size) {
357  pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
358  if (!pic->hwaccel_priv_buf)
359  return AVERROR(ENOMEM);
360  pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
361  }
362  }
363 
364  if (!h->qscale_table_pool) {
365  ret = init_table_pools(h);
366  if (ret < 0)
367  goto fail;
368  }
369 
370  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
371  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
372  if (!pic->qscale_table_buf || !pic->mb_type_buf)
373  goto fail;
374 
375  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
376  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
377 
378  for (i = 0; i < 2; i++) {
379  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
380  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
381  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
382  goto fail;
383 
384  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
385  pic->ref_index[i] = pic->ref_index_buf[i]->data;
386  }
387 
388  return 0;
389 fail:
390  unref_picture(h, pic);
391  return (ret < 0) ? ret : AVERROR(ENOMEM);
392 }
393 
394 static inline int pic_is_unused(H264Context *h, Picture *pic)
395 {
396  if (pic->f.data[0] == NULL)
397  return 1;
398  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
399  return 1;
400  return 0;
401 }
402 
403 static int find_unused_picture(H264Context *h)
404 {
405  int i;
406 
407  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
408  if (pic_is_unused(h, &h->DPB[i]))
409  break;
410  }
411  if (i == MAX_PICTURE_COUNT)
412  return AVERROR_INVALIDDATA;
413 
414  if (h->DPB[i].needs_realloc) {
415  h->DPB[i].needs_realloc = 0;
416  unref_picture(h, &h->DPB[i]);
417  }
418 
419  return i;
420 }
421 
422 /**
423  * Check if the top & left blocks are available if needed and
424  * change the dc mode so it only uses the available blocks.
425  */
426 static int check_intra4x4_pred_mode(H264Context *h)
427 {
428  static const int8_t top[12] = {
429  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
430  };
431  static const int8_t left[12] = {
432  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
433  };
434  int i;
435 
436  if (!(h->top_samples_available & 0x8000)) {
437  for (i = 0; i < 4; i++) {
438  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
439  if (status < 0) {
440  av_log(h->avctx, AV_LOG_ERROR,
441  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
442  status, h->mb_x, h->mb_y);
443  return -1;
444  } else if (status) {
445  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
446  }
447  }
448  }
449 
450  if ((h->left_samples_available & 0x8888) != 0x8888) {
451  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
452  for (i = 0; i < 4; i++)
453  if (!(h->left_samples_available & mask[i])) {
454  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
455  if (status < 0) {
456  av_log(h->avctx, AV_LOG_ERROR,
457  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
458  status, h->mb_x, h->mb_y);
459  return -1;
460  } else if (status) {
461  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
462  }
463  }
464  }
465 
466  return 0;
467 } // FIXME cleanup like ff_h264_check_intra_pred_mode
468 
469 /**
470  * Check if the top & left blocks are available if needed and
471  * change the dc mode so it only uses the available blocks.
472  */
473 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
474 {
475  static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 };
476  static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
477 
478  if (mode > 6U) {
479  av_log(h->avctx, AV_LOG_ERROR,
480  "out of range intra chroma pred mode at %d %d\n",
481  h->mb_x, h->mb_y);
482  return -1;
483  }
484 
485  if (!(h->top_samples_available & 0x8000)) {
486  mode = top[mode];
487  if (mode < 0) {
488  av_log(h->avctx, AV_LOG_ERROR,
489  "top block unavailable for requested intra mode at %d %d\n",
490  h->mb_x, h->mb_y);
491  return -1;
492  }
493  }
494 
495  if ((h->left_samples_available & 0x8080) != 0x8080) {
496  mode = left[mode];
497  if (is_chroma && (h->left_samples_available & 0x8080)) {
498  // mad cow disease mode, aka MBAFF + constrained_intra_pred
499  mode = ALZHEIMER_DC_L0T_PRED8x8 +
500  (!(h->left_samples_available & 0x8000)) +
501  2 * (mode == DC_128_PRED8x8);
502  }
503  if (mode < 0) {
504  av_log(h->avctx, AV_LOG_ERROR,
505  "left block unavailable for requested intra mode at %d %d\n",
506  h->mb_x, h->mb_y);
507  return -1;
508  }
509  }
510 
511  return mode;
512 }
513 
514 const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
515  int *dst_length, int *consumed, int length)
516 {
517  int i, si, di;
518  uint8_t *dst;
519  int bufidx;
520 
521  // src[0]&0x80; // forbidden bit
522  h->nal_ref_idc = src[0] >> 5;
523  h->nal_unit_type = src[0] & 0x1F;
524 
525  src++;
526  length--;
527 
528 #define STARTCODE_TEST \
529  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
530  if (src[i + 2] != 3) { \
531  /* startcode, so we must be past the end */ \
532  length = i; \
533  } \
534  break; \
535  }
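/* STARTCODE_TEST fires on the byte pattern 00 00 0x with x <= 3: for
 * x < 3 this is a (possibly damaged) start code, so the NAL unit must
 * end before it; for x == 3 it is an emulation_prevention_three_byte,
 * which is unescaped by the copy loop further down. */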
536 #if HAVE_FAST_UNALIGNED
537 #define FIND_FIRST_ZERO \
538  if (i > 0 && !src[i]) \
539  i--; \
540  while (src[i]) \
541  i++
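/* The loops below use a variant of the classic branch-free zero-byte
 * screen (v - 0x01..01) & ~v & 0x80..80: a high bit in the result can
 * only appear where a lane of v was zero, so whole 8- or 4-byte blocks
 * that cannot contain the start of a 00 00 sequence are skipped with a
 * single test. FIND_FIRST_ZERO then locates the exact zero byte before
 * STARTCODE_TEST inspects the pattern. */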
542 #if HAVE_FAST_64BIT
543  for (i = 0; i + 1 < length; i += 9) {
544  if (!((~AV_RN64A(src + i) &
545  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
546  0x8000800080008080ULL))
547  continue;
548  FIND_FIRST_ZERO;
549  STARTCODE_TEST;
550  i -= 7;
551  }
552 #else
553  for (i = 0; i + 1 < length; i += 5) {
554  if (!((~AV_RN32A(src + i) &
555  (AV_RN32A(src + i) - 0x01000101U)) &
556  0x80008080U))
557  continue;
558  FIND_FIRST_ZERO;
559  STARTCODE_TEST;
560  i -= 3;
561  }
562 #endif
563 #else
564  for (i = 0; i + 1 < length; i += 2) {
565  if (src[i])
566  continue;
567  if (i > 0 && src[i - 1] == 0)
568  i--;
569  STARTCODE_TEST;
570  }
571 #endif
572 
573  // use second escape buffer for inter data
574  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
575 
576  si = h->rbsp_buffer_size[bufidx];
577  av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
578  dst = h->rbsp_buffer[bufidx];
579 
580  if (dst == NULL)
581  return NULL;
582 
583  if (i >= length - 1) { // no escaped 0
584  *dst_length = length;
585  *consumed = length + 1; // +1 for the header
586  if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
587  return src;
588  } else {
589  memcpy(dst, src, length);
590  return dst;
591  }
592  }
593 
594  memcpy(dst, src, i);
595  si = di = i;
596  while (si + 2 < length) {
597  // remove escapes (very rare 1:2^22)
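 // e.g. the escaped RBSP bytes 00 00 03 00 in the bitstream become
 // 00 00 00 after this loop; the 03 (emulation prevention byte) only
 // exists so that raw NAL data never contains a 00 00 0x start code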
598  if (src[si + 2] > 3) {
599  dst[di++] = src[si++];
600  dst[di++] = src[si++];
601  } else if (src[si] == 0 && src[si + 1] == 0) {
602  if (src[si + 2] == 3) { // escape
603  dst[di++] = 0;
604  dst[di++] = 0;
605  si += 3;
606  continue;
607  } else // next start code
608  goto nsc;
609  }
610 
611  dst[di++] = src[si++];
612  }
613  while (si < length)
614  dst[di++] = src[si++];
615 nsc:
616 
617  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
618 
619  *dst_length = di;
620  *consumed = si + 1; // +1 for the header
621  /* FIXME store exact number of bits in the getbitcontext
622  * (it is needed for decoding) */
623  return dst;
624 }
625 
626 /**
627  * Identify the exact end of the bitstream
628  * @return the length of the trailing bits, or 0 if damaged
629  */
630 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
631 {
632  int v = *src;
633  int r;
634 
635  tprintf(h->avctx, "rbsp trailing %X\n", v);
636 
637  for (r = 1; r < 9; r++) {
638  if (v & 1)
639  return r;
640  v >>= 1;
641  }
642  return 0;
643 }
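/* Example: a final byte of 0x80 (stop bit in the MSB, seven alignment
 * zeros after it) gives 8 trailing bits, while 0x01 gives 1; a final
 * byte of 0x00 has no stop bit at all and reports damage (0). */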
644 
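/* The +3 rows below (filter_height_down) account for the 6-tap luma
 * interpolation filter: a fractional-pel MV needs up to 3 extra sample
 * rows past the nominal block bottom. */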
645 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
646  int height, int y_offset, int list)
647 {
648  int raw_my = h->mv_cache[list][scan8[n]][1];
649  int filter_height_down = (raw_my & 3) ? 3 : 0;
650  int full_my = (raw_my >> 2) + y_offset;
651  int bottom = full_my + filter_height_down + height;
652 
653  av_assert2(height >= 0);
654 
655  return FFMAX(0, bottom);
656 }
657 
658 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
659  int height, int y_offset, int list0,
660  int list1, int *nrefs)
661 {
662  int my;
663 
664  y_offset += 16 * (h->mb_y >> MB_FIELD(h));
665 
666  if (list0) {
667  int ref_n = h->ref_cache[0][scan8[n]];
668  Picture *ref = &h->ref_list[0][ref_n];
669 
670  // Error resilience puts the current picture in the ref list.
671  // Don't try to wait on these as it will cause a deadlock.
672  // Fields can wait on each other, though.
673  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
674  (ref->reference & 3) != h->picture_structure) {
675  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
676  if (refs[0][ref_n] < 0)
677  nrefs[0] += 1;
678  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
679  }
680  }
681 
682  if (list1) {
683  int ref_n = h->ref_cache[1][scan8[n]];
684  Picture *ref = &h->ref_list[1][ref_n];
685 
686  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
687  (ref->reference & 3) != h->picture_structure) {
688  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
689  if (refs[1][ref_n] < 0)
690  nrefs[1] += 1;
691  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
692  }
693  }
694 }
695 
696 /**
697  * Wait until all reference frames are available for MC operations.
698  *
699  * @param h the H264 context
700  */
701 static void await_references(H264Context *h)
702 {
703  const int mb_xy = h->mb_xy;
704  const int mb_type = h->cur_pic.mb_type[mb_xy];
705  int refs[2][48];
706  int nrefs[2] = { 0 };
707  int ref, list;
708 
709  memset(refs, -1, sizeof(refs));
710 
711  if (IS_16X16(mb_type)) {
712  get_lowest_part_y(h, refs, 0, 16, 0,
713  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
714  } else if (IS_16X8(mb_type)) {
715  get_lowest_part_y(h, refs, 0, 8, 0,
716  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
717  get_lowest_part_y(h, refs, 8, 8, 8,
718  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
719  } else if (IS_8X16(mb_type)) {
720  get_lowest_part_y(h, refs, 0, 16, 0,
721  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
722  get_lowest_part_y(h, refs, 4, 16, 0,
723  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
724  } else {
725  int i;
726 
727  av_assert2(IS_8X8(mb_type));
728 
729  for (i = 0; i < 4; i++) {
730  const int sub_mb_type = h->sub_mb_type[i];
731  const int n = 4 * i;
732  int y_offset = (i & 2) << 2;
733 
734  if (IS_SUB_8X8(sub_mb_type)) {
735  get_lowest_part_y(h, refs, n, 8, y_offset,
736  IS_DIR(sub_mb_type, 0, 0),
737  IS_DIR(sub_mb_type, 0, 1),
738  nrefs);
739  } else if (IS_SUB_8X4(sub_mb_type)) {
740  get_lowest_part_y(h, refs, n, 4, y_offset,
741  IS_DIR(sub_mb_type, 0, 0),
742  IS_DIR(sub_mb_type, 0, 1),
743  nrefs);
744  get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
745  IS_DIR(sub_mb_type, 0, 0),
746  IS_DIR(sub_mb_type, 0, 1),
747  nrefs);
748  } else if (IS_SUB_4X8(sub_mb_type)) {
749  get_lowest_part_y(h, refs, n, 8, y_offset,
750  IS_DIR(sub_mb_type, 0, 0),
751  IS_DIR(sub_mb_type, 0, 1),
752  nrefs);
753  get_lowest_part_y(h, refs, n + 1, 8, y_offset,
754  IS_DIR(sub_mb_type, 0, 0),
755  IS_DIR(sub_mb_type, 0, 1),
756  nrefs);
757  } else {
758  int j;
759  av_assert2(IS_SUB_4X4(sub_mb_type));
760  for (j = 0; j < 4; j++) {
761  int sub_y_offset = y_offset + 2 * (j & 2);
762  get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
763  IS_DIR(sub_mb_type, 0, 0),
764  IS_DIR(sub_mb_type, 0, 1),
765  nrefs);
766  }
767  }
768  }
769  }
770 
771  for (list = h->list_count - 1; list >= 0; list--)
772  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
773  int row = refs[list][ref];
774  if (row >= 0) {
775  Picture *ref_pic = &h->ref_list[list][ref];
776  int ref_field = ref_pic->reference - 1;
777  int ref_field_picture = ref_pic->field_picture;
778  int pic_height = 16 * h->mb_height >> ref_field_picture;
779 
780  row <<= MB_MBAFF(h);
781  nrefs[list]--;
782 
783  if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
784  ff_thread_await_progress(&ref_pic->tf,
785  FFMIN((row >> 1) - !(row & 1),
786  pic_height - 1),
787  1);
788  ff_thread_await_progress(&ref_pic->tf,
789  FFMIN((row >> 1), pic_height - 1),
790  0);
791  } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
792  ff_thread_await_progress(&ref_pic->tf,
793  FFMIN(row * 2 + ref_field,
794  pic_height - 1),
795  0);
796  } else if (FIELD_PICTURE(h)) {
797  ff_thread_await_progress(&ref_pic->tf,
798  FFMIN(row, pic_height - 1),
799  ref_field);
800  } else {
801  ff_thread_await_progress(&ref_pic->tf,
802  FFMIN(row, pic_height - 1),
803  0);
804  }
805  }
806  }
807 }
808 
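/* Motion compensation for one partition from a single reference list:
 * reads the MV from mv_cache, splits it into integer and fractional
 * parts, falls back to edge emulation when the prediction block reaches
 * outside the picture, then runs the qpel and chroma MC functions. */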
809 static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
810  int n, int square, int height,
811  int delta, int list,
812  uint8_t *dest_y, uint8_t *dest_cb,
813  uint8_t *dest_cr,
814  int src_x_offset, int src_y_offset,
815  qpel_mc_func *qpix_op,
816  h264_chroma_mc_func chroma_op,
817  int pixel_shift, int chroma_idc)
818 {
819  const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
820  int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
821  const int luma_xy = (mx & 3) + ((my & 3) << 2);
822  int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
823  uint8_t *src_y = pic->f.data[0] + offset;
824  uint8_t *src_cb, *src_cr;
825  int extra_width = 0;
826  int extra_height = 0;
827  int emu = 0;
828  const int full_mx = mx >> 2;
829  const int full_my = my >> 2;
830  const int pic_width = 16 * h->mb_width;
831  const int pic_height = 16 * h->mb_height >> MB_FIELD(h);
832  int ysh;
833 
834  if (mx & 7)
835  extra_width -= 3;
836  if (my & 7)
837  extra_height -= 3;
838 
839  if (full_mx < 0 - extra_width ||
840  full_my < 0 - extra_height ||
841  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
842  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
843  h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
844  src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
845  h->mb_linesize,
846  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
847  full_my - 2, pic_width, pic_height);
848  src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
849  emu = 1;
850  }
851 
852  qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
853  if (!square)
854  qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
855 
856  if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
857  return;
858 
859  if (chroma_idc == 3 /* yuv444 */) {
860  src_cb = pic->f.data[1] + offset;
861  if (emu) {
862  h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
863  src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
864  h->mb_linesize,
865  16 + 5, 16 + 5 /*FIXME*/,
866  full_mx - 2, full_my - 2,
867  pic_width, pic_height);
868  src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
869  }
870  qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
871  if (!square)
872  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
873 
874  src_cr = pic->f.data[2] + offset;
875  if (emu) {
876  h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
877  src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
878  h->mb_linesize,
879  16 + 5, 16 + 5 /*FIXME*/,
880  full_mx - 2, full_my - 2,
881  pic_width, pic_height);
882  src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
883  }
884  qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
885  if (!square)
886  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
887  return;
888  }
889 
890  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
891  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
892  // chroma offset when predicting from a field of opposite parity
893  my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
894  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
895  }
896 
897  src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
898  (my >> ysh) * h->mb_uvlinesize;
899  src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
900  (my >> ysh) * h->mb_uvlinesize;
901 
902  if (emu) {
903  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, h->mb_uvlinesize,
904  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
905  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
906  src_cb = h->edge_emu_buffer;
907  }
908  chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
909  height >> (chroma_idc == 1 /* yuv420 */),
910  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
911 
912  if (emu) {
913  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, h->mb_uvlinesize,
914  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
915  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
916  src_cr = h->edge_emu_buffer;
917  }
918  chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
919  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
920 }
921 
922 static av_always_inline void mc_part_std(H264Context *h, int n, int square,
923  int height, int delta,
924  uint8_t *dest_y, uint8_t *dest_cb,
925  uint8_t *dest_cr,
926  int x_offset, int y_offset,
927  qpel_mc_func *qpix_put,
928  h264_chroma_mc_func chroma_put,
929  qpel_mc_func *qpix_avg,
930  h264_chroma_mc_func chroma_avg,
931  int list0, int list1,
932  int pixel_shift, int chroma_idc)
933 {
934  qpel_mc_func *qpix_op = qpix_put;
935  h264_chroma_mc_func chroma_op = chroma_put;
936 
937  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
938  if (chroma_idc == 3 /* yuv444 */) {
939  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
940  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
941  } else if (chroma_idc == 2 /* yuv422 */) {
942  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
943  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
944  } else { /* yuv420 */
945  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
946  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
947  }
948  x_offset += 8 * h->mb_x;
949  y_offset += 8 * (h->mb_y >> MB_FIELD(h));
950 
951  if (list0) {
952  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
953  mc_dir_part(h, ref, n, square, height, delta, 0,
954  dest_y, dest_cb, dest_cr, x_offset, y_offset,
955  qpix_op, chroma_op, pixel_shift, chroma_idc);
956 
957  qpix_op = qpix_avg;
958  chroma_op = chroma_avg;
959  }
960 
961  if (list1) {
962  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
963  mc_dir_part(h, ref, n, square, height, delta, 1,
964  dest_y, dest_cb, dest_cr, x_offset, y_offset,
965  qpix_op, chroma_op, pixel_shift, chroma_idc);
966  }
967 }
968 
969 static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
970  int height, int delta,
971  uint8_t *dest_y, uint8_t *dest_cb,
972  uint8_t *dest_cr,
973  int x_offset, int y_offset,
974  qpel_mc_func *qpix_put,
975  h264_chroma_mc_func chroma_put,
976  h264_weight_func luma_weight_op,
977  h264_weight_func chroma_weight_op,
978  h264_biweight_func luma_weight_avg,
979  h264_biweight_func chroma_weight_avg,
980  int list0, int list1,
981  int pixel_shift, int chroma_idc)
982 {
983  int chroma_height;
984 
985  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
986  if (chroma_idc == 3 /* yuv444 */) {
987  chroma_height = height;
988  chroma_weight_avg = luma_weight_avg;
989  chroma_weight_op = luma_weight_op;
990  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
991  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
992  } else if (chroma_idc == 2 /* yuv422 */) {
993  chroma_height = height;
994  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
995  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
996  } else { /* yuv420 */
997  chroma_height = height >> 1;
998  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
999  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
1000  }
1001  x_offset += 8 * h->mb_x;
1002  y_offset += 8 * (h->mb_y >> MB_FIELD(h));
1003 
1004  if (list0 && list1) {
1005  /* don't optimize for luma-only case, since B-frames usually
1006  * use implicit weights => chroma too. */
1007  uint8_t *tmp_cb = h->bipred_scratchpad;
1008  uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
1009  uint8_t *tmp_y = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
1010  int refn0 = h->ref_cache[0][scan8[n]];
1011  int refn1 = h->ref_cache[1][scan8[n]];
1012 
1013  mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
1014  dest_y, dest_cb, dest_cr,
1015  x_offset, y_offset, qpix_put, chroma_put,
1016  pixel_shift, chroma_idc);
1017  mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
1018  tmp_y, tmp_cb, tmp_cr,
1019  x_offset, y_offset, qpix_put, chroma_put,
1020  pixel_shift, chroma_idc);
1021 
1022  if (h->use_weight == 2) {
1023  int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1];
1024  int weight1 = 64 - weight0;
1025  luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
1026  height, 5, weight0, weight1, 0);
1027  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
1028  chroma_height, 5, weight0, weight1, 0);
1029  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
1030  chroma_height, 5, weight0, weight1, 0);
1031  } else {
1032  luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
1033  h->luma_log2_weight_denom,
1034  h->luma_weight[refn0][0][0],
1035  h->luma_weight[refn1][1][0],
1036  h->luma_weight[refn0][0][1] +
1037  h->luma_weight[refn1][1][1]);
1038  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
1039  h->chroma_log2_weight_denom,
1040  h->chroma_weight[refn0][0][0][0],
1041  h->chroma_weight[refn1][1][0][0],
1042  h->chroma_weight[refn0][0][0][1] +
1043  h->chroma_weight[refn1][1][0][1]);
1044  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
1045  h->chroma_log2_weight_denom,
1046  h->chroma_weight[refn0][0][1][0],
1047  h->chroma_weight[refn1][1][1][0],
1048  h->chroma_weight[refn0][0][1][1] +
1049  h->chroma_weight[refn1][1][1][1]);
1050  }
1051  } else {
1052  int list = list1 ? 1 : 0;
1053  int refn = h->ref_cache[list][scan8[n]];
1054  Picture *ref = &h->ref_list[list][refn];
1055  mc_dir_part(h, ref, n, square, height, delta, list,
1056  dest_y, dest_cb, dest_cr, x_offset, y_offset,
1057  qpix_put, chroma_put, pixel_shift, chroma_idc);
1058 
1059  luma_weight_op(dest_y, h->mb_linesize, height,
1060  h->luma_log2_weight_denom,
1061  h->luma_weight[refn][list][0],
1062  h->luma_weight[refn][list][1]);
1063  if (h->use_weight_chroma) {
1064  chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
1065  h->chroma_log2_weight_denom,
1066  h->chroma_weight[refn][list][0][0],
1067  h->chroma_weight[refn][list][0][1]);
1068  chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
1069  h->chroma_log2_weight_denom,
1070  h->chroma_weight[refn][list][1][0],
1071  h->chroma_weight[refn][list][1][1]);
1072  }
1073  }
1074 }
1075 
1076 static av_always_inline void prefetch_motion(H264Context *h, int list,
1077  int pixel_shift, int chroma_idc)
1078 {
1079  /* fetch pixels for estimated mv 4 macroblocks ahead
1080  * optimized for 64byte cache lines */
1081  const int refn = h->ref_cache[list][scan8[0]];
1082  if (refn >= 0) {
1083  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
1084  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
1085  uint8_t **src = h->ref_list[list][refn].f.data;
1086  int off = (mx << pixel_shift) +
1087  (my + (h->mb_x & 3) * 4) * h->mb_linesize +
1088  (64 << pixel_shift);
1089  h->vdsp.prefetch(src[0] + off, h->linesize, 4);
1090  if (chroma_idc == 3 /* yuv444 */) {
1091  h->vdsp.prefetch(src[1] + off, h->linesize, 4);
1092  h->vdsp.prefetch(src[2] + off, h->linesize, 4);
1093  } else {
1094  off = (((mx >> 1) + 64) << pixel_shift) + ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize;
1095  h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1096  }
1097  }
1098 }
1099 
1100 static void free_tables(H264Context *h, int free_rbsp)
1101 {
1102  int i;
1103  H264Context *hx;
1104 
1105  av_freep(&h->intra4x4_pred_mode);
1106  av_freep(&h->chroma_pred_mode_table);
1107  av_freep(&h->cbp_table);
1108  av_freep(&h->mvd_table[0]);
1109  av_freep(&h->mvd_table[1]);
1110  av_freep(&h->direct_table);
1111  av_freep(&h->non_zero_count);
1112  av_freep(&h->slice_table_base);
1113  h->slice_table = NULL;
1114  av_freep(&h->list_counts);
1115 
1116  av_freep(&h->mb2b_xy);
1117  av_freep(&h->mb2br_xy);
1118 
1119  for (i = 0; i < 3; i++)
1120  av_freep(&h->visualization_buffer[i]);
1121 
1122  av_buffer_pool_uninit(&h->qscale_table_pool);
1123  av_buffer_pool_uninit(&h->mb_type_pool);
1124  av_buffer_pool_uninit(&h->motion_val_pool);
1125  av_buffer_pool_uninit(&h->ref_index_pool);
1126 
1127  if (free_rbsp && h->DPB) {
1128  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1129  unref_picture(h, &h->DPB[i]);
1130  av_freep(&h->DPB);
1131  } else if (h->DPB) {
1132  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1133  h->DPB[i].needs_realloc = 1;
1134  }
1135 
1136  h->cur_pic_ptr = NULL;
1137 
1138  for (i = 0; i < MAX_THREADS; i++) {
1139  hx = h->thread_context[i];
1140  if (!hx)
1141  continue;
1142  av_freep(&hx->top_borders[1]);
1143  av_freep(&hx->top_borders[0]);
1144  av_freep(&hx->bipred_scratchpad);
1145  av_freep(&hx->edge_emu_buffer);
1146  av_freep(&hx->dc_val_base);
1147  av_freep(&hx->me.scratchpad);
1148  av_freep(&hx->er.mb_index2xy);
1149  av_freep(&hx->er.error_status_table);
1150  av_freep(&hx->er.er_temp_buffer);
1151  av_freep(&hx->er.mbintra_table);
1152  av_freep(&hx->er.mbskip_table);
1153 
1154  if (free_rbsp) {
1155  av_freep(&hx->rbsp_buffer[1]);
1156  av_freep(&hx->rbsp_buffer[0]);
1157  hx->rbsp_buffer_size[0] = 0;
1158  hx->rbsp_buffer_size[1] = 0;
1159  }
1160  if (i)
1161  av_freep(&h->thread_context[i]);
1162  }
1163 }
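/* Dequantization tables: for each QP the effective coefficient is
 * dequant_coeff_init[QP % 6][...] * scaling_matrix[x] << (QP / 6), i.e.
 * the scale pattern repeats every 6 QP steps and doubles once per cycle.
 * The (x >> 3) | ((x & 7) << 3) index below stores the 8x8 matrix
 * transposed. */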
1164 
1165 static void init_dequant8_coeff_table(H264Context *h)
1166 {
1167  int i, j, q, x;
1168  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1169 
1170  for (i = 0; i < 6; i++) {
1171  h->dequant8_coeff[i] = h->dequant8_buffer[i];
1172  for (j = 0; j < i; j++)
1173  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
1174  64 * sizeof(uint8_t))) {
1175  h->dequant8_coeff[i] = h->dequant8_buffer[j];
1176  break;
1177  }
1178  if (j < i)
1179  continue;
1180 
1181  for (q = 0; q < max_qp + 1; q++) {
1182  int shift = div6[q];
1183  int idx = rem6[q];
1184  for (x = 0; x < 64; x++)
1185  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
1186  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
1187  h->pps.scaling_matrix8[i][x]) << shift;
1188  }
1189  }
1190 }
1191 
1192 static void init_dequant4_coeff_table(H264Context *h)
1193 {
1194  int i, j, q, x;
1195  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1196  for (i = 0; i < 6; i++) {
1197  h->dequant4_coeff[i] = h->dequant4_buffer[i];
1198  for (j = 0; j < i; j++)
1199  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
1200  16 * sizeof(uint8_t))) {
1201  h->dequant4_coeff[i] = h->dequant4_buffer[j];
1202  break;
1203  }
1204  if (j < i)
1205  continue;
1206 
1207  for (q = 0; q < max_qp + 1; q++) {
1208  int shift = div6[q] + 2;
1209  int idx = rem6[q];
1210  for (x = 0; x < 16; x++)
1211  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
1212  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
1213  h->pps.scaling_matrix4[i][x]) << shift;
1214  }
1215  }
1216 }
1217 
1218 static void init_dequant_tables(H264Context *h)
1219 {
1220  int i, x;
1221  init_dequant4_coeff_table(h);
1222  if (h->pps.transform_8x8_mode)
1223  init_dequant8_coeff_table(h);
1224  if (h->sps.transform_bypass) {
1225  for (i = 0; i < 6; i++)
1226  for (x = 0; x < 16; x++)
1227  h->dequant4_coeff[i][0][x] = 1 << 6;
1228  if (h->pps.transform_8x8_mode)
1229  for (i = 0; i < 6; i++)
1230  for (x = 0; x < 64; x++)
1231  h->dequant8_coeff[i][0][x] = 1 << 6;
1232  }
1233 }
1234 
1235 int ff_h264_alloc_tables(H264Context *h)
1236 {
1237  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
1238  const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
1239  int x, y, i;
1240 
1241  FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
1242  row_mb_num * 8 * sizeof(uint8_t), fail)
1243  FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
1244  big_mb_num * 48 * sizeof(uint8_t), fail)
1245  FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
1246  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
1247  FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
1248  big_mb_num * sizeof(uint16_t), fail)
1249  FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
1250  big_mb_num * sizeof(uint8_t), fail)
1251  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
1252  16 * row_mb_num * sizeof(uint8_t), fail);
1253  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
1254  16 * row_mb_num * sizeof(uint8_t), fail);
1255  FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
1256  4 * big_mb_num * sizeof(uint8_t), fail);
1257  FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
1258  big_mb_num * sizeof(uint8_t), fail)
1259 
1260  memset(h->slice_table_base, -1,
1261  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
1262  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
1263 
1264  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
1265  big_mb_num * sizeof(uint32_t), fail);
1266  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
1267  big_mb_num * sizeof(uint32_t), fail);
1268  for (y = 0; y < h->mb_height; y++)
1269  for (x = 0; x < h->mb_width; x++) {
1270  const int mb_xy = x + y * h->mb_stride;
1271  const int b_xy = 4 * x + 4 * y * h->b_stride;
1272 
1273  h->mb2b_xy[mb_xy] = b_xy;
1274  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
1275  }
1276 
1277  if (!h->dequant4_coeff[0])
1278  init_dequant_tables(h);
1279 
1280  if (!h->DPB) {
1281  h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
1282  if (!h->DPB)
1283  return AVERROR(ENOMEM);
1284  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1285  avcodec_get_frame_defaults(&h->DPB[i].f);
1286  avcodec_get_frame_defaults(&h->cur_pic.f);
1287  }
1288 
1289  return 0;
1290 
1291 fail:
1292  free_tables(h, 1);
1293  return -1;
1294 }
1295 
1296 /**
1297  * Mimic alloc_tables(), but for every context thread.
1298  */
1299 static void clone_tables(H264Context *dst, H264Context *src, int i)
1300 {
1301  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * src->mb_stride;
1302  dst->non_zero_count = src->non_zero_count;
1303  dst->slice_table = src->slice_table;
1304  dst->cbp_table = src->cbp_table;
1305  dst->mb2b_xy = src->mb2b_xy;
1306  dst->mb2br_xy = src->mb2br_xy;
1307  dst->chroma_pred_mode_table = src->chroma_pred_mode_table;
1308  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * src->mb_stride;
1309  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * src->mb_stride;
1310  dst->direct_table = src->direct_table;
1311  dst->list_counts = src->list_counts;
1312  dst->DPB = src->DPB;
1313  dst->cur_pic_ptr = src->cur_pic_ptr;
1314  dst->cur_pic = src->cur_pic;
1315  dst->bipred_scratchpad = NULL;
1316  dst->edge_emu_buffer = NULL;
1317  dst->me.scratchpad = NULL;
1318  ff_h264_pred_init(&dst->hpc, src->avctx->codec_id, src->sps.bit_depth_luma,
1319  src->sps.chroma_format_idc);
1320 }
1321 
1322 /**
1323  * Init context
1324  * Allocate buffers which are not shared amongst multiple threads.
1325  */
1326 static int context_init(H264Context *h)
1327 {
1328  ERContext *er = &h->er;
1329  int mb_array_size = h->mb_height * h->mb_stride;
1330  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
1331  int c_size = h->mb_stride * (h->mb_height + 1);
1332  int yc_size = y_size + 2 * c_size;
1333  int x, y, i;
1334 
1335  FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[0],
1336  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1337  FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[1],
1338  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1339 
1340  h->ref_cache[0][scan8[5] + 1] =
1341  h->ref_cache[0][scan8[7] + 1] =
1342  h->ref_cache[0][scan8[13] + 1] =
1343  h->ref_cache[1][scan8[5] + 1] =
1344  h->ref_cache[1][scan8[7] + 1] =
1345  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
1346 
1347  if (CONFIG_ERROR_RESILIENCE) {
1348  /* init ER */
1349  er->avctx = h->avctx;
1350  er->dsp = &h->dsp;
1351  er->decode_mb = h264_er_decode_mb;
1352  er->opaque = h;
1353  er->quarter_sample = 1;
1354 
1355  er->mb_num = h->mb_num;
1356  er->mb_width = h->mb_width;
1357  er->mb_height = h->mb_height;
1358  er->mb_stride = h->mb_stride;
1359  er->b8_stride = h->mb_width * 2 + 1;
1360 
1361  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int),
1362  fail); // error resilience code looks cleaner with this
1363  for (y = 0; y < h->mb_height; y++)
1364  for (x = 0; x < h->mb_width; x++)
1365  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
1366 
1367  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
1368  h->mb_stride + h->mb_width;
1369 
1370  FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
1371  mb_array_size * sizeof(uint8_t), fail);
1372 
1373  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
1374  memset(er->mbintra_table, 1, mb_array_size);
1375 
1376  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
1377 
1378  FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer, h->mb_height * h->mb_stride,
1379  fail);
1380 
1381  FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail);
1382  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
1383  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
1384  er->dc_val[2] = er->dc_val[1] + c_size;
1385  for (i = 0; i < yc_size; i++)
1386  h->dc_val_base[i] = 1024;
1387  }
1388 
1389  return 0;
1390 
1391 fail:
1392  return -1; // free_tables will clean up for us
1393 }
1394 
1395 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1396  int parse_extradata);
1397 
1398 int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
1399 {
1400  AVCodecContext *avctx = h->avctx;
1401 
1402  if (!buf || size <= 0)
1403  return -1;
1404 
1405  if (buf[0] == 1) {
1406  int i, cnt, nalsize;
1407  const unsigned char *p = buf;
1408 
1409  h->is_avc = 1;
1410 
1411  if (size < 7) {
1412  av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
1413  return -1;
1414  }
1415  /* sps and pps in the avcC always have length coded with 2 bytes,
1416  * so put a fake nal_length_size = 2 while parsing them */
1417  h->nal_length_size = 2;
1418  // Decode sps from avcC
1419  cnt = *(p + 5) & 0x1f; // Number of sps
1420  p += 6;
1421  for (i = 0; i < cnt; i++) {
1422  nalsize = AV_RB16(p) + 2;
1423  if(nalsize > size - (p-buf))
1424  return -1;
1425  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1426  av_log(avctx, AV_LOG_ERROR,
1427  "Decoding sps %d from avcC failed\n", i);
1428  return -1;
1429  }
1430  p += nalsize;
1431  }
1432  // Decode pps from avcC
1433  cnt = *(p++); // Number of pps
1434  for (i = 0; i < cnt; i++) {
1435  nalsize = AV_RB16(p) + 2;
1436  if(nalsize > size - (p-buf))
1437  return -1;
1438  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1439  av_log(avctx, AV_LOG_ERROR,
1440  "Decoding pps %d from avcC failed\n", i);
1441  return -1;
1442  }
1443  p += nalsize;
1444  }
1445  // Now store right nal length size, that will be used to parse all other nals
1446  h->nal_length_size = (buf[4] & 0x03) + 1;
1447  } else {
1448  h->is_avc = 0;
1449  if (decode_nal_units(h, buf, size, 1) < 0)
1450  return -1;
1451  }
1452  return size;
1453 }
1454 
1455 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
1456 {
1457  H264Context *h = avctx->priv_data;
1458  int i;
1459 
1460  h->avctx = avctx;
1461 
1462  h->bit_depth_luma = 8;
1463  h->chroma_format_idc = 1;
1464 
1465  h->avctx->bits_per_raw_sample = 8;
1466  h->cur_chroma_format_idc = 1;
1467 
1468  ff_h264dsp_init(&h->h264dsp, 8, 1);
1469  av_assert0(h->sps.bit_depth_chroma == 0);
1470  ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
1471  ff_h264qpel_init(&h->h264qpel, 8);
1472  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
1473 
1474  h->dequant_coeff_pps = -1;
1475 
1476  /* needed so that IDCT permutation is known early */
1477  if (CONFIG_ERROR_RESILIENCE)
1478  ff_dsputil_init(&h->dsp, h->avctx);
1479  ff_videodsp_init(&h->vdsp, 8);
1480 
1481  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
1482  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
1483 
1484  h->picture_structure = PICT_FRAME;
1485  h->slice_context_count = 1;
1486  h->workaround_bugs = avctx->workaround_bugs;
1487  h->flags = avctx->flags;
1488 
1489  /* set defaults */
1490  // s->decode_mb = ff_h263_decode_mb;
1491  if (!avctx->has_b_frames)
1492  h->low_delay = 1;
1493 
1494  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
1495 
1496  ff_h264_decode_init_vlc();
1497 
1498  h->pixel_shift = 0;
1499  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1500 
1501  h->thread_context[0] = h;
1502  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1503  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1504  h->last_pocs[i] = INT_MIN;
1505  h->prev_poc_msb = 1 << 16;
1506  h->prev_frame_num = -1;
1507  h->x264_build = -1;
1508  ff_h264_reset_sei(h);
1509  if (avctx->codec_id == AV_CODEC_ID_H264) {
1510  if (avctx->ticks_per_frame == 1) {
1511  if(h->avctx->time_base.den < INT_MAX/2) {
1512  h->avctx->time_base.den *= 2;
1513  } else
1514  h->avctx->time_base.num /= 2;
1515  }
1516  avctx->ticks_per_frame = 2;
1517  }
1518 
1519  if (avctx->extradata_size > 0 && avctx->extradata &&
1520  ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size) < 0) {
1521  ff_h264_free_context(h);
1522  return -1;
1523  }
1524 
1525  if (h->sps.bitstream_restriction_flag &&
1526  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
1527  h->avctx->has_b_frames = h->sps.num_reorder_frames;
1528  h->low_delay = 0;
1529  }
1530 
1531  ff_init_cabac_states();
1532  avctx->internal->allocate_progress = 1;
1533 
1534  return 0;
1535 }
1536 
1537 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
1538 #undef REBASE_PICTURE
1539 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1540  ((pic && pic >= old_ctx->DPB && \
1541  pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
1542  &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
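/* REBASE_PICTURE translates a Picture pointer that refers into the old
 * (source) thread context's DPB array into a pointer to the Picture at
 * the same index in the new context's DPB; pointers outside the DPB map
 * to NULL. */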
1543 
1544 static void copy_picture_range(Picture **to, Picture **from, int count,
1545  H264Context *new_base,
1546  H264Context *old_base)
1547 {
1548  int i;
1549 
1550  for (i = 0; i < count; i++) {
1551  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1552  IN_RANGE(from[i], old_base->DPB,
1553  sizeof(Picture) * MAX_PICTURE_COUNT) ||
1554  !from[i]));
1555  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1556  }
1557 }
1558 
1559 static void copy_parameter_set(void **to, void **from, int count, int size)
1560 {
1561  int i;
1562 
1563  for (i = 0; i < count; i++) {
1564  if (to[i] && !from[i])
1565  av_freep(&to[i]);
1566  else if (from[i] && !to[i])
1567  to[i] = av_malloc(size);
1568 
1569  if (from[i])
1570  memcpy(to[i], from[i], size);
1571  }
1572 }
1573 
1574 static int decode_init_thread_copy(AVCodecContext *avctx)
1575 {
1576  H264Context *h = avctx->priv_data;
1577 
1578  if (!avctx->internal->is_copy)
1579  return 0;
1580  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1581  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1582 
1583  h->context_initialized = 0;
1584 
1585  return 0;
1586 }
1587 
1588 #define copy_fields(to, from, start_field, end_field) \
1589  memcpy(&to->start_field, &from->start_field, \
1590  (char *)&to->end_field - (char *)&to->start_field)
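/* copy_fields() copies the byte range [start_field, end_field) in one
 * memcpy, so it only works for fields that are laid out contiguously,
 * in declaration order, inside H264Context. */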
1591 
1592 static int h264_slice_header_init(H264Context *, int);
1593 
1595 
1596 static int decode_update_thread_context(AVCodecContext *dst,
1597  const AVCodecContext *src)
1598 {
1599  H264Context *h = dst->priv_data, *h1 = src->priv_data;
1600  int inited = h->context_initialized, err = 0;
1601  int context_reinitialized = 0;
1602  int i, ret;
1603 
1604  if (dst == src)
1605  return 0;
1606 
1607  if (inited &&
1608  (h->width != h1->width ||
1609  h->height != h1->height ||
1610  h->mb_width != h1->mb_width ||
1611  h->mb_height != h1->mb_height ||
1612  h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
1613  h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
1614  h->sps.colorspace != h1->sps.colorspace)) {
1615 
1616  /* set bits_per_raw_sample to the previous value. the check for changed
1617  * bit depth in h264_set_parameter_from_sps() uses it and sets it to
1618  * the current value */
1619  h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
1620 
1622 
1623  h->width = h1->width;
1624  h->height = h1->height;
1625  h->mb_height = h1->mb_height;
1626  h->mb_width = h1->mb_width;
1627  h->mb_num = h1->mb_num;
1628  h->mb_stride = h1->mb_stride;
1629  h->b_stride = h1->b_stride;
1630  // SPS/PPS
1631  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1632  MAX_SPS_COUNT, sizeof(SPS));
1633  h->sps = h1->sps;
1634  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1635  MAX_PPS_COUNT, sizeof(PPS));
1636  h->pps = h1->pps;
1637 
1638  if ((err = h264_slice_header_init(h, 1)) < 0) {
1639  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
1640  return err;
1641  }
1642  context_reinitialized = 1;
1643 
1644 #if 0
1646  //Note we set context_reinitialized which will cause h264_set_parameter_from_sps to be reexecuted
1647  h->cur_chroma_format_idc = h1->cur_chroma_format_idc;
1648 #endif
1649  }
1650  /* update linesize on resize for h264. The h264 decoder doesn't
1651  * necessarily call ff_MPV_frame_start in the new thread */
1652  h->linesize = h1->linesize;
1653  h->uvlinesize = h1->uvlinesize;
1654 
1655  /* copy block_offset since frame_start may not be called */
1656  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
1657 
1658  if (!inited) {
1659  for (i = 0; i < MAX_SPS_COUNT; i++)
1660  av_freep(h->sps_buffers + i);
1661 
1662  for (i = 0; i < MAX_PPS_COUNT; i++)
1663  av_freep(h->pps_buffers + i);
1664 
1665  memcpy(h, h1, offsetof(H264Context, intra_pcm_ptr));
1666  memcpy(&h->cabac, &h1->cabac,
1667  sizeof(H264Context) - offsetof(H264Context, cabac));
1668  av_assert0((void*)&h->cabac == &h->mb_padding + 1);
1669 
1670  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1671  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1672 
1673  memset(&h->er, 0, sizeof(h->er));
1674  memset(&h->me, 0, sizeof(h->me));
1675  memset(&h->mb, 0, sizeof(h->mb));
1676  memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
1677  memset(&h->mb_padding, 0, sizeof(h->mb_padding));
1678 
1679  h->avctx = dst;
1680  h->DPB = NULL;
1681  h->qscale_table_pool = NULL;
1682  h->mb_type_pool = NULL;
1683  h->ref_index_pool = NULL;
1684  h->motion_val_pool = NULL;
1685 
1686  if (h1->context_initialized) {
1687  h->context_initialized = 0;
1688 
1689  memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1690  avcodec_get_frame_defaults(&h->cur_pic.f);
1691  h->cur_pic.tf.f = &h->cur_pic.f;
1692 
1693  if (ff_h264_alloc_tables(h) < 0) {
1694  av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
1695  return AVERROR(ENOMEM);
1696  }
1697  context_init(h);
1698  }
1699 
1700  for (i = 0; i < 2; i++) {
1701  h->rbsp_buffer[i] = NULL;
1702  h->rbsp_buffer_size[i] = 0;
1703  }
1704  h->bipred_scratchpad = NULL;
1705  h->edge_emu_buffer = NULL;
1706 
1707  h->thread_context[0] = h;
1708  h->context_initialized = h1->context_initialized;
1709  }
1710 
1711  h->avctx->coded_height = h1->avctx->coded_height;
1712  h->avctx->coded_width = h1->avctx->coded_width;
1713  h->avctx->width = h1->avctx->width;
1714  h->avctx->height = h1->avctx->height;
1715  h->coded_picture_number = h1->coded_picture_number;
1716  h->first_field = h1->first_field;
1717  h->picture_structure = h1->picture_structure;
1718  h->qscale = h1->qscale;
1719  h->droppable = h1->droppable;
1720  h->data_partitioning = h1->data_partitioning;
1721  h->low_delay = h1->low_delay;
1722 
1723  for (i = 0; h->DPB && i < MAX_PICTURE_COUNT; i++) {
1724  unref_picture(h, &h->DPB[i]);
1725  if (h1->DPB[i].f.data[0] &&
1726  (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
1727  return ret;
1728  }
1729 
1730  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
1731  unref_picture(h, &h->cur_pic);
1732  if (h1->cur_pic.f.buf[0] && (ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
1733  return ret;
1734 
1735  h->workaround_bugs = h1->workaround_bugs;
1736  h->low_delay = h1->low_delay;
1737  h->droppable = h1->droppable;
1738 
1739  // extradata/NAL handling
1740  h->is_avc = h1->is_avc;
1741 
1742  // SPS/PPS
1743  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1744  MAX_SPS_COUNT, sizeof(SPS));
1745  h->sps = h1->sps;
1746  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1747  MAX_PPS_COUNT, sizeof(PPS));
1748  h->pps = h1->pps;
1749 
1750  // Dequantization matrices
1751  // FIXME these are big - can they be only copied when PPS changes?
1752  copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1753 
1754  for (i = 0; i < 6; i++)
1755  h->dequant4_coeff[i] = h->dequant4_buffer[0] +
1756  (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
1757 
1758  for (i = 0; i < 6; i++)
1759  h->dequant8_coeff[i] = h->dequant8_buffer[0] +
1760  (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
1761 
1762  h->dequant_coeff_pps = h1->dequant_coeff_pps;
1763 
1764  // POC timing
1765  copy_fields(h, h1, poc_lsb, redundant_pic_count);
1766 
1767  // reference lists
1768  copy_fields(h, h1, short_ref, cabac_init_idc);
1769 
1770  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
1771  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
1772  copy_picture_range(h->delayed_pic, h1->delayed_pic,
1773  MAX_DELAYED_PIC_COUNT + 2, h, h1);
1774 
1775  h->sync = h1->sync;
1776 
1777  if (context_reinitialized)
1778  h264_set_parameter_from_sps(h);
1779 
1780  if (!h->cur_pic_ptr)
1781  return 0;
1782 
1783  if (!h->droppable) {
1784  err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
1785  h->prev_poc_msb = h->poc_msb;
1786  h->prev_poc_lsb = h->poc_lsb;
1787  }
1788  h->prev_frame_num_offset = h->frame_num_offset;
1789  h->prev_frame_num = h->frame_num;
1790  h->outputed_poc = h->next_outputed_poc;
1791 
1792  return err;
1793 }
1794 
1795 int ff_h264_frame_start(H264Context *h)
1796 {
1797  Picture *pic;
1798  int i, ret;
1799  const int pixel_shift = h->pixel_shift;
1800  int c[4] = {
1801  1<<(h->sps.bit_depth_luma-1),
1802  1<<(h->sps.bit_depth_chroma-1),
1803  1<<(h->sps.bit_depth_chroma-1),
1804  -1
1805  };
1806 
1807  if (!ff_thread_can_start_frame(h->avctx)) {
1808  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1809  return -1;
1810  }
1811 
1812  release_unused_pictures(h, 1);
1813  h->cur_pic_ptr = NULL;
1814 
1815  i = find_unused_picture(h);
1816  if (i < 0) {
1817  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1818  return i;
1819  }
1820  pic = &h->DPB[i];
1821 
1822  pic->reference = h->droppable ? 0 : h->picture_structure;
1823  pic->f.coded_picture_number = h->coded_picture_number++;
1824  pic->field_picture = h->picture_structure != PICT_FRAME;
1825 
1826  /*
1827  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
1828  * in later.
1829  * See decode_nal_units().
1830  */
1831  pic->f.key_frame = 0;
1832  pic->sync = 0;
1833  pic->mmco_reset = 0;
1834 
1835  if ((ret = alloc_picture(h, pic)) < 0)
1836  return ret;
1837  if(!h->sync && !h->avctx->hwaccel &&
1838  !(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU))
1839  avpriv_color_frame(&pic->f, c);
1840 
1841  h->cur_pic_ptr = pic;
1842  unref_picture(h, &h->cur_pic);
1843  if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
1844  return ret;
1845 
1846  if (CONFIG_ERROR_RESILIENCE) {
1847  ff_er_frame_start(&h->er);
1848  h->er.last_pic =
1849  h->er.next_pic = NULL;
1850  }
1851 
1852  assert(h->linesize && h->uvlinesize);
1853 
1854  for (i = 0; i < 16; i++) {
1855  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1856  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1857  }
1858  for (i = 0; i < 16; i++) {
1859  h->block_offset[16 + i] =
1860  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1861  h->block_offset[48 + 16 + i] =
1862  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1863  }
1864 
1865  // s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
1866  // h->cur_pic.reference /* || h->contains_intra */ || 1;
1867 
1868  /* We mark the current picture as non-reference after allocating it, so
1869  * that if we break out due to an error it can be released automatically
1870  * in the next ff_MPV_frame_start().
1871  */
1872  h->cur_pic_ptr->reference = 0;
1873 
1874  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
1875 
1876  h->next_output_pic = NULL;
1877 
1878  assert(h->cur_pic_ptr->long_ref == 0);
1879 
1880  return 0;
1881 }
1882 
1883 /**
1884  * Run setup operations that must be run after slice header decoding.
1885  * This includes finding the next displayed frame.
1886  *
1887  * @param h h264 master context
1888  * @param setup_finished enough NALs have been read that we can call
1889  * ff_thread_finish_setup()
1890  */
1891 static void decode_postinit(H264Context *h, int setup_finished)
1892 {
1893  Picture *out = h->cur_pic_ptr;
1894  Picture *cur = h->cur_pic_ptr;
1895  int i, pics, out_of_order, out_idx;
1896 
1897  h->cur_pic_ptr->f.pict_type = h->pict_type;
1898 
1899  if (h->next_output_pic)
1900  return;
1901 
1902  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
1903  /* FIXME: if we have two PAFF fields in one packet, we can't start
1904  * the next thread here. If we have one field per packet, we can.
1905  * The check in decode_nal_units() is not good enough to find this
1906  * yet, so we assume the worst for now. */
1907  // if (setup_finished)
1908  // ff_thread_finish_setup(h->avctx);
1909  return;
1910  }
1911 
1912  cur->f.interlaced_frame = 0;
1913  cur->f.repeat_pict = 0;
1914 
1915  /* Signal interlacing information externally. */
1916  /* Prioritize picture timing SEI information over used
1917  * decoding process if it exists. */
1918 
1919  if (h->sps.pic_struct_present_flag) {
1920  switch (h->sei_pic_struct) {
1921  case SEI_PIC_STRUCT_FRAME:
1922  break;
1923  case SEI_PIC_STRUCT_TOP_FIELD:
1924  case SEI_PIC_STRUCT_BOTTOM_FIELD:
1925  cur->f.interlaced_frame = 1;
1926  break;
1927  case SEI_PIC_STRUCT_TOP_BOTTOM:
1928  case SEI_PIC_STRUCT_BOTTOM_TOP:
1929  if (FIELD_OR_MBAFF_PICTURE(h))
1930  cur->f.interlaced_frame = 1;
1931  else
1932  // try to flag soft telecine progressive
1933  cur->f.interlaced_frame = h->prev_interlaced_frame;
1934  break;
1935  case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
1936  case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
1937  /* Signal the possibility of telecined film externally
1938  * (pic_struct 5,6). From these hints, let the applications
1939  * decide if they apply deinterlacing. */
1940  cur->f.repeat_pict = 1;
1941  break;
1942  case SEI_PIC_STRUCT_FRAME_DOUBLING:
1943  cur->f.repeat_pict = 2;
1944  break;
1945  case SEI_PIC_STRUCT_FRAME_TRIPLING:
1946  cur->f.repeat_pict = 4;
1947  break;
1948  }
1949 
1950  if ((h->sei_ct_type & 3) &&
1951  h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
1952  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
1953  } else {
1954  /* Derive interlacing flag from used decoding process. */
1955  cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
1956  }
1957  h->prev_interlaced_frame = cur->f.interlaced_frame;
1958 
1959  if (cur->field_poc[0] != cur->field_poc[1]) {
1960  /* Derive top_field_first from field pocs. */
1961  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
1962  } else {
1963  if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
1964  /* Use picture timing SEI information. Even if it is
1965  * information from a past frame, better than nothing. */
1966  if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
1967  h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1968  cur->f.top_field_first = 1;
1969  else
1970  cur->f.top_field_first = 0;
1971  } else {
1972  /* Most likely progressive */
1973  cur->f.top_field_first = 0;
1974  }
1975  }
1976 
1977  cur->mmco_reset = h->mmco_reset;
1978  h->mmco_reset = 0;
1979  // FIXME do something with unavailable reference frames
1980 
1981  /* Sort B-frames into display order */
1982 
1986  h->low_delay = 0;
1987  }
1988 
1992  h->low_delay = 0;
1993  }
1994 
1995  for (i = 0; 1; i++) {
1996  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
1997  if(i)
1998  h->last_pocs[i-1] = cur->poc;
1999  break;
2000  } else if(i) {
2001  h->last_pocs[i-1]= h->last_pocs[i];
2002  }
2003  }
2004  out_of_order = MAX_DELAYED_PIC_COUNT - i;
2005  if( cur->f.pict_type == AV_PICTURE_TYPE_B
2006  || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
2007  out_of_order = FFMAX(out_of_order, 1);
2008  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
2009  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
2010  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
2011  h->last_pocs[i] = INT_MIN;
2012  h->last_pocs[0] = cur->poc;
2013  cur->mmco_reset = 1;
2014  } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
2015  av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
2016  h->avctx->has_b_frames = out_of_order;
2017  h->low_delay = 0;
2018  }
2019 
2020  pics = 0;
2021  while (h->delayed_pic[pics])
2022  pics++;
2023 
2024  av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
2025 
2026  h->delayed_pic[pics++] = cur;
2027  if (cur->reference == 0)
2028  cur->reference = DELAYED_PIC_REF;
2029 
2030  out = h->delayed_pic[0];
2031  out_idx = 0;
2032  for (i = 1; h->delayed_pic[i] &&
2033  !h->delayed_pic[i]->f.key_frame &&
2034  !h->delayed_pic[i]->mmco_reset;
2035  i++)
2036  if (h->delayed_pic[i]->poc < out->poc) {
2037  out = h->delayed_pic[i];
2038  out_idx = i;
2039  }
2040  if (h->avctx->has_b_frames == 0 &&
2041  (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
2042  h->next_outputed_poc = INT_MIN;
2043  out_of_order = out->poc < h->next_outputed_poc;
2044 
2045  if (out_of_order || pics > h->avctx->has_b_frames) {
2046  out->reference &= ~DELAYED_PIC_REF;
2047  // for frame threading, the owner must be the second field's thread or
2048  // else the first thread can release the picture and reuse it unsafely
2049  for (i = out_idx; h->delayed_pic[i]; i++)
2050  h->delayed_pic[i] = h->delayed_pic[i + 1];
2051  }
2052  if (!out_of_order && pics > h->avctx->has_b_frames) {
2053  h->next_output_pic = out;
2054  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
2055  h->next_outputed_poc = INT_MIN;
2056  } else
2057  h->next_outputed_poc = out->poc;
2058  } else {
2059  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
2060  }
2061 
2062  if (h->next_output_pic && h->next_output_pic->sync) {
2063  h->sync |= 2;
2064  }
2065 
2066  if (setup_finished && !h->avctx->hwaccel)
2067  ff_thread_finish_setup(h->avctx);
2068 }
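/* Illustrative example (not part of the original source): assume coded
 * order I P B B with POCs 0, 6, 2, 4 and avctx->has_b_frames == 1. The
 * P picture (POC 6) waits in delayed_pic[] while the B pictures arrive;
 * once pics > has_b_frames, the smallest-POC entry becomes
 * next_output_pic, so pictures leave in display order 0, 2, 4, 6. If an
 * output POC ever falls below next_outputed_poc, the stream is treated
 * as out of order and has_b_frames is grown above to compensate. */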
2069 
2070 static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,
2071  uint8_t *src_cb, uint8_t *src_cr,
2072  int linesize, int uvlinesize,
2073  int simple)
2074 {
2075  uint8_t *top_border;
2076  int top_idx = 1;
2077  const int pixel_shift = h->pixel_shift;
2078  int chroma444 = CHROMA444(h);
2079  int chroma422 = CHROMA422(h);
2080 
2081  src_y -= linesize;
2082  src_cb -= uvlinesize;
2083  src_cr -= uvlinesize;
2084 
2085  if (!simple && FRAME_MBAFF(h)) {
2086  if (h->mb_y & 1) {
2087  if (!MB_MBAFF(h)) {
2088  top_border = h->top_borders[0][h->mb_x];
2089  AV_COPY128(top_border, src_y + 15 * linesize);
2090  if (pixel_shift)
2091  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
2092  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2093  if (chroma444) {
2094  if (pixel_shift) {
2095  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
2096  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
2097  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
2098  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
2099  } else {
2100  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
2101  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
2102  }
2103  } else if (chroma422) {
2104  if (pixel_shift) {
2105  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
2106  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
2107  } else {
2108  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
2109  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
2110  }
2111  } else {
2112  if (pixel_shift) {
2113  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
2114  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
2115  } else {
2116  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
2117  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
2118  }
2119  }
2120  }
2121  }
2122  } else if (MB_MBAFF(h)) {
2123  top_idx = 0;
2124  } else
2125  return;
2126  }
2127 
2128  top_border = h->top_borders[top_idx][h->mb_x];
2129  /* There are two lines saved, the line above the top macroblock
2130  * of a pair, and the line above the bottom macroblock. */
2131  AV_COPY128(top_border, src_y + 16 * linesize);
2132  if (pixel_shift)
2133  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
2134 
2135  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2136  if (chroma444) {
2137  if (pixel_shift) {
2138  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
2139  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
2140  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
2141  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
2142  } else {
2143  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
2144  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
2145  }
2146  } else if (chroma422) {
2147  if (pixel_shift) {
2148  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
2149  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
2150  } else {
2151  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
2152  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
2153  }
2154  } else {
2155  if (pixel_shift) {
2156  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
2157  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
2158  } else {
2159  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
2160  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
2161  }
2162  }
2163  }
2164 }
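/* Clarifying note: the function above saves the bottom line of the
 * just-decoded macroblock into h->top_borders so the macroblock row
 * below can still use unfiltered samples for intra prediction after
 * deblocking has overwritten the frame. The chroma source offsets differ
 * by format: line 15/16 for 4:4:4 and 4:2:2 (chroma height 16) versus
 * line 7/8 for 4:2:0 (chroma height 8). */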
2165 
2166 static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y,
2167  uint8_t *src_cb, uint8_t *src_cr,
2168  int linesize, int uvlinesize,
2169  int xchg, int chroma444,
2170  int simple, int pixel_shift)
2171 {
2172  int deblock_topleft;
2173  int deblock_top;
2174  int top_idx = 1;
2175  uint8_t *top_border_m1;
2176  uint8_t *top_border;
2177 
2178  if (!simple && FRAME_MBAFF(h)) {
2179  if (h->mb_y & 1) {
2180  if (!MB_MBAFF(h))
2181  return;
2182  } else {
2183  top_idx = MB_MBAFF(h) ? 0 : 1;
2184  }
2185  }
2186 
2187  if (h->deblocking_filter == 2) {
2188  deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
2189  deblock_top = h->top_type;
2190  } else {
2191  deblock_topleft = (h->mb_x > 0);
2192  deblock_top = (h->mb_y > !!MB_FIELD(h));
2193  }
2194 
2195  src_y -= linesize + 1 + pixel_shift;
2196  src_cb -= uvlinesize + 1 + pixel_shift;
2197  src_cr -= uvlinesize + 1 + pixel_shift;
2198 
2199  top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
2200  top_border = h->top_borders[top_idx][h->mb_x];
2201 
2202 #define XCHG(a, b, xchg) \
2203  if (pixel_shift) { \
2204  if (xchg) { \
2205  AV_SWAP64(b + 0, a + 0); \
2206  AV_SWAP64(b + 8, a + 8); \
2207  } else { \
2208  AV_COPY128(b, a); \
2209  } \
2210  } else if (xchg) \
2211  AV_SWAP64(b, a); \
2212  else \
2213  AV_COPY64(b, a);
2214 
2215  if (deblock_top) {
2216  if (deblock_topleft) {
2217  XCHG(top_border_m1 + (8 << pixel_shift),
2218  src_y - (7 << pixel_shift), 1);
2219  }
2220  XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
2221  XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
2222  if (h->mb_x + 1 < h->mb_width) {
2223  XCHG(h->top_borders[top_idx][h->mb_x + 1],
2224  src_y + (17 << pixel_shift), 1);
2225  }
2226  }
2227  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2228  if (chroma444) {
2229  if (deblock_topleft) {
2230  XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2231  XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2232  }
2233  XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
2234  XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
2235  XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
2236  XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
2237  if (h->mb_x + 1 < h->mb_width) {
2238  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
2239  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
2240  }
2241  } else {
2242  if (deblock_top) {
2243  if (deblock_topleft) {
2244  XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2245  XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2246  }
2247  XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
2248  XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
2249  }
2250  }
2251  }
2252 }
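/* Clarifying note: XCHG() swaps when its xchg argument is nonzero and
 * copies otherwise. Exchanging the saved unfiltered border with the row
 * above the current macroblock around the deblocking/prediction steps
 * lets intra prediction read unfiltered neighbour samples while the
 * deblocked result is preserved in the picture, without needing a second
 * full line buffer. */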
2253 
2254 static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
2255  int index)
2256 {
2257  if (high_bit_depth) {
2258  return AV_RN32A(((int32_t *)mb) + index);
2259  } else
2260  return AV_RN16A(mb + index);
2261 }
2262 
2263 static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
2264  int index, int value)
2265 {
2266  if (high_bit_depth) {
2267  AV_WN32A(((int32_t *)mb) + index, value);
2268  } else
2269  AV_WN16A(mb + index, value);
2270 }
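/* Clarifying note: pixel_shift doubles as a high-bit-depth flag in this
 * file. Residual coefficients in h->mb are stored as int16_t for 8-bit
 * content and as int32_t when bit_depth_luma > 8, which is why byte
 * offsets are scaled by (1 << pixel_shift); dctcoef_get()/dctcoef_set()
 * hide the two layouts behind one accessor. */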
2271 
2272 static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,
2273  int mb_type, int is_h264,
2274  int simple,
2275  int transform_bypass,
2276  int pixel_shift,
2277  int *block_offset,
2278  int linesize,
2279  uint8_t *dest_y, int p)
2280 {
2281  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
2282  void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
2283  int i;
2284  int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1];
2285  block_offset += 16 * p;
2286  if (IS_INTRA4x4(mb_type)) {
2287  if (IS_8x8DCT(mb_type)) {
2288  if (transform_bypass) {
2289  idct_dc_add =
2290  idct_add = h->h264dsp.h264_add_pixels8_clear;
2291  } else {
2292  idct_dc_add = h->h264dsp.h264_idct8_dc_add;
2293  idct_add = h->h264dsp.h264_idct8_add;
2294  }
2295  for (i = 0; i < 16; i += 4) {
2296  uint8_t *const ptr = dest_y + block_offset[i];
2297  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
2298  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2299  h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2300  } else {
2301  const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2302  h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
2303  (h->topright_samples_available << i) & 0x4000, linesize);
2304  if (nnz) {
2305  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2306  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2307  else
2308  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2309  }
2310  }
2311  }
2312  } else {
2313  if (transform_bypass) {
2314  idct_dc_add =
2315  idct_add = h->h264dsp.h264_add_pixels4_clear;
2316  } else {
2317  idct_dc_add = h->h264dsp.h264_idct_dc_add;
2318  idct_add = h->h264dsp.h264_idct_add;
2319  }
2320  for (i = 0; i < 16; i++) {
2321  uint8_t *const ptr = dest_y + block_offset[i];
2322  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
2323 
2324  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2325  h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2326  } else {
2327  uint8_t *topright;
2328  int nnz, tr;
2329  uint64_t tr_high;
2330  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
2331  const int topright_avail = (h->topright_samples_available << i) & 0x8000;
2332  av_assert2(h->mb_y || linesize <= block_offset[i]);
2333  if (!topright_avail) {
2334  if (pixel_shift) {
2335  tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
2336  topright = (uint8_t *)&tr_high;
2337  } else {
2338  tr = ptr[3 - linesize] * 0x01010101u;
2339  topright = (uint8_t *)&tr;
2340  }
2341  } else
2342  topright = ptr + (4 << pixel_shift) - linesize;
2343  } else
2344  topright = NULL;
2345 
2346  h->hpc.pred4x4[dir](ptr, topright, linesize);
2347  nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2348  if (nnz) {
2349  if (is_h264) {
2350  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2351  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2352  else
2353  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2354  } else if (CONFIG_SVQ3_DECODER)
2355  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
2356  }
2357  }
2358  }
2359  }
2360  } else {
2361  h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
2362  if (is_h264) {
2363  if (h->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
2364  if (!transform_bypass)
2365  h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
2366  h->mb_luma_dc[p],
2367  h->dequant4_coeff[p][qscale][0]);
2368  else {
2369  static const uint8_t dc_mapping[16] = {
2370  0 * 16, 1 * 16, 4 * 16, 5 * 16,
2371  2 * 16, 3 * 16, 6 * 16, 7 * 16,
2372  8 * 16, 9 * 16, 12 * 16, 13 * 16,
2373  10 * 16, 11 * 16, 14 * 16, 15 * 16 };
2374  for (i = 0; i < 16; i++)
2375  dctcoef_set(h->mb + (p * 256 << pixel_shift),
2376  pixel_shift, dc_mapping[i],
2377  dctcoef_get(h->mb_luma_dc[p],
2378  pixel_shift, i));
2379  }
2380  }
2381  } else if (CONFIG_SVQ3_DECODER)
2382  ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
2383  h->mb_luma_dc[p], qscale);
2384  }
2385 }
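/* Clarifying note: the nnz == 1 checks above implement a DC-only
 * shortcut. When a block's single nonzero coefficient is the DC term,
 * idct_dc_add() merely adds a constant to the block, which is much
 * cheaper than the full inverse transform done by idct_add(). */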
2386 
2387 static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
2388  int is_h264, int simple,
2389  int transform_bypass,
2390  int pixel_shift,
2391  int *block_offset,
2392  int linesize,
2393  uint8_t *dest_y, int p)
2394 {
2395  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
2396  int i;
2397  block_offset += 16 * p;
2398  if (!IS_INTRA4x4(mb_type)) {
2399  if (is_h264) {
2400  if (IS_INTRA16x16(mb_type)) {
2401  if (transform_bypass) {
2402  if (h->sps.profile_idc == 244 &&
2403  (h->intra16x16_pred_mode == VERT_PRED8x8 ||
2404  h->intra16x16_pred_mode == HOR_PRED8x8)) {
2405  h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
2406  h->mb + (p * 256 << pixel_shift),
2407  linesize);
2408  } else {
2409  for (i = 0; i < 16; i++)
2410  if (h->non_zero_count_cache[scan8[i + p * 16]] ||
2411  dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2412  h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
2413  h->mb + (i * 16 + p * 256 << pixel_shift),
2414  linesize);
2415  }
2416  } else {
2417  h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
2418  h->mb + (p * 256 << pixel_shift),
2419  linesize,
2420  h->non_zero_count_cache + p * 5 * 8);
2421  }
2422  } else if (h->cbp & 15) {
2423  if (transform_bypass) {
2424  const int di = IS_8x8DCT(mb_type) ? 4 : 1;
2425  idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
2426  : h->h264dsp.h264_add_pixels4_clear;
2427  for (i = 0; i < 16; i += di)
2428  if (h->non_zero_count_cache[scan8[i + p * 16]])
2429  idct_add(dest_y + block_offset[i],
2430  h->mb + (i * 16 + p * 256 << pixel_shift),
2431  linesize);
2432  } else {
2433  if (IS_8x8DCT(mb_type))
2434  h->h264dsp.h264_idct8_add4(dest_y, block_offset,
2435  h->mb + (p * 256 << pixel_shift),
2436  linesize,
2437  h->non_zero_count_cache + p * 5 * 8);
2438  else
2439  h->h264dsp.h264_idct_add16(dest_y, block_offset,
2440  h->mb + (p * 256 << pixel_shift),
2441  linesize,
2442  h->non_zero_count_cache + p * 5 * 8);
2443  }
2444  }
2445  } else if (CONFIG_SVQ3_DECODER) {
2446  for (i = 0; i < 16; i++)
2447  if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
2448  // FIXME benchmark weird rule, & below
2449  uint8_t *const ptr = dest_y + block_offset[i];
2450  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
2451  h->qscale, IS_INTRA(mb_type) ? 1 : 0);
2452  }
2453  }
2454  }
2455 }
2456 
2457 #define BITS 8
2458 #define SIMPLE 1
2459 #include "h264_mb_template.c"
2460 
2461 #undef BITS
2462 #define BITS 16
2463 #include "h264_mb_template.c"
2464 
2465 #undef SIMPLE
2466 #define SIMPLE 0
2467 #include "h264_mb_template.c"
2468 
2469 void ff_h264_hl_decode_mb(H264Context *h)
2470 {
2471  const int mb_xy = h->mb_xy;
2472  const int mb_type = h->cur_pic.mb_type[mb_xy];
2473  int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
2474 
2475  if (CHROMA444(h)) {
2476  if (is_complex || h->pixel_shift)
2477  hl_decode_mb_444_complex(h);
2478  else
2479  hl_decode_mb_444_simple_8(h);
2480  } else if (is_complex) {
2481  hl_decode_mb_complex(h);
2482  } else if (h->pixel_shift) {
2483  hl_decode_mb_simple_16(h);
2484  } else
2485  hl_decode_mb_simple_8(h);
2486 }
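/* Clarifying note: the three inclusions of h264_mb_template.c above
 * stamp out specialized macroblock decoders: hl_decode_mb_simple_8
 * (8-bit), hl_decode_mb_simple_16 (high bit depth) and the _complex and
 * _444 variants for MBAFF, gray, intra-PCM and other rare paths.
 * ff_h264_hl_decode_mb() dispatches to the cheapest variant that can
 * handle the current macroblock. */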
2487 
2488 static int pred_weight_table(H264Context *h)
2489 {
2490  int list, i;
2491  int luma_def, chroma_def;
2492 
2493  h->use_weight = 0;
2494  h->use_weight_chroma = 0;
2495  h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
2496  if (h->sps.chroma_format_idc)
2497  h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
2498  luma_def = 1 << h->luma_log2_weight_denom;
2499  chroma_def = 1 << h->chroma_log2_weight_denom;
2500 
2501  for (list = 0; list < 2; list++) {
2502  h->luma_weight_flag[list] = 0;
2503  h->chroma_weight_flag[list] = 0;
2504  for (i = 0; i < h->ref_count[list]; i++) {
2505  int luma_weight_flag, chroma_weight_flag;
2506 
2507  luma_weight_flag = get_bits1(&h->gb);
2508  if (luma_weight_flag) {
2509  h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
2510  h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
2511  if (h->luma_weight[i][list][0] != luma_def ||
2512  h->luma_weight[i][list][1] != 0) {
2513  h->use_weight = 1;
2514  h->luma_weight_flag[list] = 1;
2515  }
2516  } else {
2517  h->luma_weight[i][list][0] = luma_def;
2518  h->luma_weight[i][list][1] = 0;
2519  }
2520 
2521  if (h->sps.chroma_format_idc) {
2522  chroma_weight_flag = get_bits1(&h->gb);
2523  if (chroma_weight_flag) {
2524  int j;
2525  for (j = 0; j < 2; j++) {
2526  h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
2527  h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
2528  if (h->chroma_weight[i][list][j][0] != chroma_def ||
2529  h->chroma_weight[i][list][j][1] != 0) {
2530  h->use_weight_chroma = 1;
2531  h->chroma_weight_flag[list] = 1;
2532  }
2533  }
2534  } else {
2535  int j;
2536  for (j = 0; j < 2; j++) {
2537  h->chroma_weight[i][list][j][0] = chroma_def;
2538  h->chroma_weight[i][list][j][1] = 0;
2539  }
2540  }
2541  }
2542  }
2543  if (h->slice_type_nos != AV_PICTURE_TYPE_B)
2544  break;
2545  }
2546  h->use_weight = h->use_weight || h->use_weight_chroma;
2547  return 0;
2548 }
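/* For reference (simplified from the spec, not a literal quote):
 * explicit weighted prediction applies, per reference,
 *   pred = ((sample * weight) >> log2_weight_denom) + offset
 * with rounding, where luma_weight[i][list][0] is the weight and
 * luma_weight[i][list][1] the offset parsed above. With the default
 * weight (1 << denom) and offset 0 this reduces to plain prediction,
 * which is why use_weight is only set for non-default pairs. */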
2549 
2550 /**
2551  * Initialize implicit_weight table.
2552  * @param field 0/1 initialize the weight for interlaced MBAFF
2553  * -1 initializes the rest
2554  */
2555 static void implicit_weight_table(H264Context *h, int field)
2556 {
2557  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2558 
2559  for (i = 0; i < 2; i++) {
2560  h->luma_weight_flag[i] = 0;
2561  h->chroma_weight_flag[i] = 0;
2562  }
2563 
2564  if (field < 0) {
2565  if (h->picture_structure == PICT_FRAME) {
2566  cur_poc = h->cur_pic_ptr->poc;
2567  } else {
2568  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
2569  }
2570  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
2571  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2572  h->use_weight = 0;
2573  h->use_weight_chroma = 0;
2574  return;
2575  }
2576  ref_start = 0;
2577  ref_count0 = h->ref_count[0];
2578  ref_count1 = h->ref_count[1];
2579  } else {
2580  cur_poc = h->cur_pic_ptr->field_poc[field];
2581  ref_start = 16;
2582  ref_count0 = 16 + 2 * h->ref_count[0];
2583  ref_count1 = 16 + 2 * h->ref_count[1];
2584  }
2585 
2586  h->use_weight = 2;
2587  h->use_weight_chroma = 2;
2588  h->luma_log2_weight_denom = 5;
2589  h->chroma_log2_weight_denom = 5;
2590 
2591  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2592  int poc0 = h->ref_list[0][ref0].poc;
2593  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
2594  int w = 32;
2595  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2596  int poc1 = h->ref_list[1][ref1].poc;
2597  int td = av_clip(poc1 - poc0, -128, 127);
2598  if (td) {
2599  int tb = av_clip(cur_poc - poc0, -128, 127);
2600  int tx = (16384 + (FFABS(td) >> 1)) / td;
2601  int dist_scale_factor = (tb * tx + 32) >> 8;
2602  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2603  w = 64 - dist_scale_factor;
2604  }
2605  }
2606  if (field < 0) {
2607  h->implicit_weight[ref0][ref1][0] =
2608  h->implicit_weight[ref0][ref1][1] = w;
2609  } else {
2610  h->implicit_weight[ref0][ref1][field] = w;
2611  }
2612  }
2613  }
2614 }
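/* Worked example (illustrative): for two short-term references the
 * weights follow the POC distances computed above,
 *   td = av_clip(poc1 - poc0, -128, 127), tb = av_clip(cur - poc0, ...),
 *   tx = (16384 + |td|/2) / td, dist_scale_factor = (tb * tx + 32) >> 8,
 *   w  = 64 - dist_scale_factor.
 * With poc0 = 0, poc1 = 4 and cur_poc = 2 (a B-frame halfway between):
 * td = 4, tb = 2, tx = 4096, dist_scale_factor = 32, so w = 32 and both
 * references are averaged equally against the denominator of 64; a
 * picture closer in POC to one reference gives that reference the larger
 * share. The fallback w = 32 (long-term refs or an out-of-range scale
 * factor) is likewise an even average. */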
2615 
2616 /**
2617  * instantaneous decoder refresh.
2618  */
2619 static void idr(H264Context *h)
2620 {
2621  int i;
2622  ff_h264_remove_all_refs(h);
2623  h->prev_frame_num = 0;
2624  h->prev_frame_num_offset = 0;
2625  h->prev_poc_msb = 1<<16;
2626  h->prev_poc_lsb = 0;
2627  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2628  h->last_pocs[i] = INT_MIN;
2629 }
2630 
2631 /* forget old pics after a seek */
2632 static void flush_change(H264Context *h)
2633 {
2634  int i, j;
2635 
2636  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2637  h->prev_interlaced_frame = 1;
2638  idr(h);
2639 
2640  h->prev_frame_num = -1;
2641  if (h->cur_pic_ptr) {
2642  h->cur_pic_ptr->reference = 0;
2643  for (j=i=0; h->delayed_pic[i]; i++)
2644  if (h->delayed_pic[i] != h->cur_pic_ptr)
2645  h->delayed_pic[j++] = h->delayed_pic[i];
2646  h->delayed_pic[j] = NULL;
2647  }
2648  h->first_field = 0;
2649  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2650  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2651  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2652  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2653  ff_h264_reset_sei(h);
2654  h->recovery_frame= -1;
2655  h->sync= 0;
2656  h->list_count = 0;
2657  h->current_slice = 0;
2658 }
2659 
2660 /* forget old pics after a seek */
2661 static void flush_dpb(AVCodecContext *avctx)
2662 {
2663  H264Context *h = avctx->priv_data;
2664  int i;
2665 
2666  for (i = 0; i <= MAX_DELAYED_PIC_COUNT; i++) {
2667  if (h->delayed_pic[i])
2668  h->delayed_pic[i]->reference = 0;
2669  h->delayed_pic[i] = NULL;
2670  }
2671 
2672  flush_change(h);
2673 
2674  if (h->DPB)
2675  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2676  unref_picture(h, &h->DPB[i]);
2677  h->cur_pic_ptr = NULL;
2678  unref_picture(h, &h->cur_pic);
2679 
2680  h->mb_x = h->mb_y = 0;
2681 
2682  h->parse_context.state = -1;
2683  h->parse_context.frame_start_found = 0;
2684  h->parse_context.overread = 0;
2685  h->parse_context.overread_index = 0;
2686  h->parse_context.index = 0;
2687  h->parse_context.last_index = 0;
2688 }
2689 
2690 static int init_poc(H264Context *h)
2691 {
2692  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
2693  int field_poc[2];
2694  Picture *cur = h->cur_pic_ptr;
2695 
2696  h->frame_num_offset = h->prev_frame_num_offset;
2697  if (h->frame_num < h->prev_frame_num)
2698  h->frame_num_offset += max_frame_num;
2699 
2700  if (h->sps.poc_type == 0) {
2701  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
2702 
2703  if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
2704  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
2705  else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
2706  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
2707  else
2708  h->poc_msb = h->prev_poc_msb;
2709  field_poc[0] =
2710  field_poc[1] = h->poc_msb + h->poc_lsb;
2711  if (h->picture_structure == PICT_FRAME)
2712  field_poc[1] += h->delta_poc_bottom;
2713  } else if (h->sps.poc_type == 1) {
2714  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2715  int i;
2716 
2717  if (h->sps.poc_cycle_length != 0)
2718  abs_frame_num = h->frame_num_offset + h->frame_num;
2719  else
2720  abs_frame_num = 0;
2721 
2722  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
2723  abs_frame_num--;
2724 
2725  expected_delta_per_poc_cycle = 0;
2726  for (i = 0; i < h->sps.poc_cycle_length; i++)
2727  // FIXME integrate during sps parse
2728  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
2729 
2730  if (abs_frame_num > 0) {
2731  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
2732  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
2733 
2734  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2735  for (i = 0; i <= frame_num_in_poc_cycle; i++)
2736  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
2737  } else
2738  expectedpoc = 0;
2739 
2740  if (h->nal_ref_idc == 0)
2741  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
2742 
2743  field_poc[0] = expectedpoc + h->delta_poc[0];
2744  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
2745 
2746  if (h->picture_structure == PICT_FRAME)
2747  field_poc[1] += h->delta_poc[1];
2748  } else {
2749  int poc = 2 * (h->frame_num_offset + h->frame_num);
2750 
2751  if (!h->nal_ref_idc)
2752  poc--;
2753 
2754  field_poc[0] = poc;
2755  field_poc[1] = poc;
2756  }
2757 
2758  if (h->picture_structure != PICT_BOTTOM_FIELD)
2759  h->cur_pic_ptr->field_poc[0] = field_poc[0];
2760  if (h->picture_structure != PICT_TOP_FIELD)
2761  h->cur_pic_ptr->field_poc[1] = field_poc[1];
2762  cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]);
2763 
2764  return 0;
2765 }
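/* Worked example (illustrative) for poc_type 0: with log2_max_poc_lsb
 * == 4 the lsb wraps modulo 16. If the previous picture had poc_msb 0
 * and poc_lsb 14 and this slice signals poc_lsb 2, then
 * 14 - 2 >= 16 / 2 selects the first branch, poc_msb becomes 16 and the
 * reconstructed POC is 16 + 2 = 18: counting continues across the wrap
 * instead of jumping backwards. */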
2766 
2767 /**
2768  * initialize scan tables
2769  */
2770 static void init_scan_tables(H264Context *h)
2771 {
2772  int i;
2773  for (i = 0; i < 16; i++) {
2774 #define T(x) (x >> 2) | ((x << 2) & 0xF)
2775  h->zigzag_scan[i] = T(zigzag_scan[i]);
2776  h->field_scan[i] = T(field_scan[i]);
2777 #undef T
2778  }
2779  for (i = 0; i < 64; i++) {
2780 #define T(x) (x >> 3) | ((x & 7) << 3)
2781  h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
2782  h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]);
2783  h->field_scan8x8[i] = T(field_scan8x8[i]);
2784  h->field_scan8x8_cavlc[i] = T(field_scan8x8_cavlc[i]);
2785 #undef T
2786  }
2787  if (h->sps.transform_bypass) { // FIXME same ugly
2788  memcpy(h->zigzag_scan_q0 , zigzag_scan , sizeof(h->zigzag_scan_q0 ));
2789  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
2790  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
2791  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
2792  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
2793  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
2794  } else {
2795  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
2796  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
2797  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
2798  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
2799  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
2800  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
2801  }
2802 }
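/* Clarifying note: T(x) transposes a scan position. In the 4x4 case
 * x = 4 * row + col, so (x >> 2) is the row and ((x << 2) & 0xF) equals
 * 4 * col, giving T(x) = 4 * col + row; the 8x8 variant does the same
 * with a shift of 3. The tables thus hold the standard zigzag/field
 * scans re-expressed for the transposed coefficient layout used
 * internally. */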
2803 
2804 static int field_end(H264Context *h, int in_setup)
2805 {
2806  AVCodecContext *const avctx = h->avctx;
2807  int err = 0;
2808  h->mb_y = 0;
2809 
2810  if (CONFIG_H264_VDPAU_DECODER &&
2811  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
2812  ff_vdpau_h264_set_reference_frames(h);
2813 
2814  if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
2815  if (!h->droppable) {
2816  ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
2817  h->prev_poc_msb = h->poc_msb;
2818  h->prev_poc_lsb = h->poc_lsb;
2819  }
2820  h->prev_frame_num_offset = h->frame_num_offset;
2821  h->prev_frame_num = h->frame_num;
2822  h->outputed_poc = h->next_outputed_poc;
2823  }
2824 
2825  if (avctx->hwaccel) {
2826  if (avctx->hwaccel->end_frame(avctx) < 0)
2827  av_log(avctx, AV_LOG_ERROR,
2828  "hardware accelerator failed to decode picture\n");
2829  }
2830 
2831  if (CONFIG_H264_VDPAU_DECODER &&
2832  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
2833  ff_vdpau_h264_picture_complete(h);
2834 
2835  /*
2836  * FIXME: Error handling code does not seem to support interlaced
2837  * when slices span multiple rows
2838  * The ff_er_add_slice calls don't work right for bottom
2839  * fields; they cause massive erroneous error concealing
2840  * Error marking covers both fields (top and bottom).
2841  * This causes a mismatched s->error_count
2842  * and a bad error table. Further, the error count goes to
2843  * INT_MAX when called for bottom field, because mb_y is
2844  * past the end by one (caller's fault) and resync_mb_y != 0
2845  * causes problems for the first MB line, too.
2846  */
2847  if (CONFIG_ERROR_RESILIENCE &&
2848  !FIELD_PICTURE(h) && h->current_slice && !h->sps.new) {
2849  h->er.cur_pic = h->cur_pic_ptr;
2850  ff_er_frame_end(&h->er);
2851  }
2852  if (!in_setup && !h->droppable)
2853  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2854  h->picture_structure == PICT_BOTTOM_FIELD);
2855  emms_c();
2856 
2857  h->current_slice = 0;
2858 
2859  return err;
2860 }
2861 
2862 /**
2863  * Replicate H264 "master" context to thread contexts.
2864  */
2865 static int clone_slice(H264Context *dst, H264Context *src)
2866 {
2867  memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
2868  dst->cur_pic_ptr = src->cur_pic_ptr;
2869  dst->cur_pic = src->cur_pic;
2870  dst->linesize = src->linesize;
2871  dst->uvlinesize = src->uvlinesize;
2872  dst->first_field = src->first_field;
2873 
2874  dst->prev_poc_msb = src->prev_poc_msb;
2875  dst->prev_poc_lsb = src->prev_poc_lsb;
2876  dst->prev_frame_num_offset = src->prev_frame_num_offset;
2877  dst->prev_frame_num = src->prev_frame_num;
2878  dst->short_ref_count = src->short_ref_count;
2879 
2880  memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
2881  memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
2882  memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
2883 
2884  memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
2885  memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
2886 
2887  return 0;
2888 }
2889 
2890 /**
2891  * Compute profile from profile_idc and constraint_set?_flags.
2892  *
2893  * @param sps SPS
2894  *
2895  * @return profile as defined by FF_PROFILE_H264_*
2896  */
2897 int ff_h264_get_profile(SPS *sps)
2898 {
2899  int profile = sps->profile_idc;
2900 
2901  switch (sps->profile_idc) {
2902  case FF_PROFILE_H264_BASELINE:
2903  // constraint_set1_flag set to 1
2904  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
2905  break;
2906  case FF_PROFILE_H264_HIGH_10:
2907  case FF_PROFILE_H264_HIGH_422:
2908  case FF_PROFILE_H264_HIGH_444:
2909  // constraint_set3_flag set to 1
2910  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
2911  break;
2912  }
2913 
2914  return profile;
2915 }
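/* Example (illustrative): profile_idc 66 (Baseline) with
 * constraint_set1_flag set yields FF_PROFILE_H264_CONSTRAINED_BASELINE,
 * and profile_idc 110 (High 10) with constraint_set3_flag set yields
 * FF_PROFILE_H264_HIGH_10_INTRA; without these flags the raw
 * profile_idc value is returned unchanged. */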
2916 
2917 static int h264_set_parameter_from_sps(H264Context *h)
2918 {
2919  if (h->flags & CODEC_FLAG_LOW_DELAY ||
2920  (h->sps.bitstream_restriction_flag &&
2921  !h->sps.num_reorder_frames)) {
2922  if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
2923  av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
2924  "Reenabling low delay requires a codec flush.\n");
2925  else
2926  h->low_delay = 1;
2927  }
2928 
2929  if (h->avctx->has_b_frames < 2)
2930  h->avctx->has_b_frames = !h->low_delay;
2931 
2932  if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
2933  avpriv_request_sample(h->avctx,
2934  "Different chroma and luma bit depth");
2935  return AVERROR_PATCHWELCOME;
2936  }
2937 
2938  if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
2939  h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
2940  if (h->avctx->codec &&
2941  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU &&
2942  (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
2943  av_log(h->avctx, AV_LOG_ERROR,
2944  "VDPAU decoding does not support video colorspace.\n");
2945  return AVERROR_INVALIDDATA;
2946  }
2947  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 &&
2948  h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13) {
2949  h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
2950  h->cur_chroma_format_idc = h->sps.chroma_format_idc;
2951  h->pixel_shift = h->sps.bit_depth_luma > 8;
2952 
2953  ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
2954  h->sps.chroma_format_idc);
2955  ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
2956  ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
2957  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma,
2958  h->sps.chroma_format_idc);
2959 
2960  if (CONFIG_ERROR_RESILIENCE)
2961  ff_dsputil_init(&h->dsp, h->avctx);
2962  ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma);
2963  } else {
2964  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
2965  h->sps.bit_depth_luma);
2966  return AVERROR_INVALIDDATA;
2967  }
2968  }
2969  return 0;
2970 }
2971 
2972 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
2973 {
2974  switch (h->sps.bit_depth_luma) {
2975  case 9:
2976  if (CHROMA444(h)) {
2977  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2978  return AV_PIX_FMT_GBRP9;
2979  } else
2980  return AV_PIX_FMT_YUV444P9;
2981  } else if (CHROMA422(h))
2982  return AV_PIX_FMT_YUV422P9;
2983  else
2984  return AV_PIX_FMT_YUV420P9;
2985  break;
2986  case 10:
2987  if (CHROMA444(h)) {
2988  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2989  return AV_PIX_FMT_GBRP10;
2990  } else
2991  return AV_PIX_FMT_YUV444P10;
2992  } else if (CHROMA422(h))
2993  return AV_PIX_FMT_YUV422P10;
2994  else
2995  return AV_PIX_FMT_YUV420P10;
2996  break;
2997  case 12:
2998  if (CHROMA444(h)) {
2999  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3000  return AV_PIX_FMT_GBRP12;
3001  } else
3002  return AV_PIX_FMT_YUV444P12;
3003  } else if (CHROMA422(h))
3004  return AV_PIX_FMT_YUV422P12;
3005  else
3006  return AV_PIX_FMT_YUV420P12;
3007  break;
3008  case 14:
3009  if (CHROMA444(h)) {
3010  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3011  return AV_PIX_FMT_GBRP14;
3012  } else
3013  return AV_PIX_FMT_YUV444P14;
3014  } else if (CHROMA422(h))
3015  return AV_PIX_FMT_YUV422P14;
3016  else
3017  return AV_PIX_FMT_YUV420P14;
3018  break;
3019  case 8:
3020  if (CHROMA444(h)) {
3021  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3022  av_log(h->avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
3023  return AV_PIX_FMT_GBR24P;
3024  } else if (h->avctx->colorspace == AVCOL_SPC_YCGCO) {
3025  av_log(h->avctx, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
3026  }
3027  return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P
3028  : AV_PIX_FMT_YUV444P;
3029  } else if (CHROMA422(h)) {
3030  return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
3031  : AV_PIX_FMT_YUV422P;
3032  } else {
3033  int i;
3034  const enum AVPixelFormat * fmt = h->avctx->codec->pix_fmts ?
3035  h->avctx->codec->pix_fmts :
3036  h->avctx->color_range == AVCOL_RANGE_JPEG ?
3037  h264_hwaccel_pixfmt_list_jpeg_420 :
3038  h264_hwaccel_pixfmt_list_420;
3039 
3040  for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
3041  if (fmt[i] == h->avctx->pix_fmt && !force_callback)
3042  return fmt[i];
3043  return ff_thread_get_format(h->avctx, fmt);
3044  }
3045  break;
3046  default:
3047  av_log(h->avctx, AV_LOG_ERROR,
3048  "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
3049  return AVERROR_INVALIDDATA;
3050  }
3051 }
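/* Summary (derived from the switch above, not from the original
 * comments): bit depths 9, 10, 12 and 14 map to
 * AV_PIX_FMT_YUV{420,422,444}P<depth> (or GBRP<depth> for RGB 4:4:4);
 * bit depth 8 maps to YUV420P/YUV422P/YUV444P, their YUVJ variants for
 * full-range streams, or GBR24P for RGB. Only the 8-bit 4:2:0 case
 * consults ff_thread_get_format(), which is where hardware accelerators
 * get a chance to claim the stream. */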
3052 
3053 /* export coded and cropped frame dimensions to AVCodecContext */
3054 static int init_dimensions(H264Context *h)
3055 {
3056  int width = h->width - (h->sps.crop_right + h->sps.crop_left);
3057  int height = h->height - (h->sps.crop_top + h->sps.crop_bottom);
3058 
3059  /* handle container cropping */
3060  if (!h->sps.crop &&
3061  FFALIGN(h->avctx->width, 16) == h->width &&
3062  FFALIGN(h->avctx->height, 16) == h->height) {
3063  width = h->avctx->width;
3064  height = h->avctx->height;
3065  }
3066 
3067  if (width <= 0 || height <= 0) {
3068  av_log(h->avctx, AV_LOG_ERROR, "Invalid cropped dimensions: %dx%d.\n",
3069  width, height);
3070  if (h->avctx->err_recognition & AV_EF_EXPLODE)
3071  return AVERROR_INVALIDDATA;
3072 
3073  av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
3074  h->sps.crop_bottom = h->sps.crop_top = h->sps.crop_right = h->sps.crop_left = 0;
3075  h->sps.crop = 0;
3076 
3077  width = h->width;
3078  height = h->height;
3079  }
3080 
3081  h->avctx->coded_width = h->width;
3082  h->avctx->coded_height = h->height;
3083  h->avctx->width = width;
3084  h->avctx->height = height;
3085 
3086  return 0;
3087 }
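/* Example (illustrative): a 1920x1080 stream is coded as 120x68
 * macroblocks, i.e. coded_width x coded_height = 1920x1088, with the
 * SPS cropping covering the 8 extra rows; avctx->width/height become
 * 1920x1080 while the coded size stays 1920x1088. The container branch
 * above keeps dimensions the container already set whenever the SPS
 * itself carries no cropping. */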
3088 
3089 static int h264_slice_header_init(H264Context *h, int reinit)
3090 {
3091  int nb_slices = (HAVE_THREADS &&
3092  h->avctx->active_thread_type & FF_THREAD_SLICE) ?
3093  h->avctx->thread_count : 1;
3094  int i;
3095 
3096  h->avctx->sample_aspect_ratio = h->sps.sar;
3097  av_assert0(h->avctx->sample_aspect_ratio.den);
3098  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
3099  &h->chroma_x_shift, &h->chroma_y_shift);
3100 
3101  if (h->sps.timing_info_present_flag) {
3102  int64_t den = h->sps.time_scale;
3103  if (h->x264_build < 44U)
3104  den *= 2;
3105  av_reduce(&h->avctx->time_base.num, &h->avctx->time_base.den,
3106  h->sps.num_units_in_tick, den, 1 << 30);
3107  }
3108 
3109  h->avctx->hwaccel = ff_find_hwaccel(h->avctx->codec->id, h->avctx->pix_fmt);
3110 
3111  if (reinit)
3112  free_tables(h, 0);
3113  h->first_field = 0;
3114  h->prev_interlaced_frame = 1;
3115 
3116  init_scan_tables(h);
3117  if (ff_h264_alloc_tables(h) < 0) {
3119  "Could not allocate memory for h264\n");
3120  return AVERROR(ENOMEM);
3121  }
3122 
3123  if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
3124  int max_slices;
3125  if (h->mb_height)
3126  max_slices = FFMIN(MAX_THREADS, h->mb_height);
3127  else
3128  max_slices = MAX_THREADS;
3129  av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
3130  " reducing to %d\n", nb_slices, max_slices);
3131  nb_slices = max_slices;
3132  }
3133  h->slice_context_count = nb_slices;
3134 
3135  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
3136  if (context_init(h) < 0) {
3137  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
3138  return -1;
3139  }
3140  } else {
3141  for (i = 1; i < h->slice_context_count; i++) {
3142  H264Context *c;
3143  c = h->thread_context[i] = av_mallocz(sizeof(H264Context));
3144  c->avctx = h->avctx;
3145  if (CONFIG_ERROR_RESILIENCE) {
3146  c->dsp = h->dsp;
3147  }
3148  c->vdsp = h->vdsp;
3149  c->h264dsp = h->h264dsp;
3150  c->h264qpel = h->h264qpel;
3151  c->h264chroma = h->h264chroma;
3152  c->sps = h->sps;
3153  c->pps = h->pps;
3154  c->pixel_shift = h->pixel_shift;
3155  c->cur_chroma_format_idc = h->cur_chroma_format_idc;
3156  c->width = h->width;
3157  c->height = h->height;
3158  c->linesize = h->linesize;
3159  c->uvlinesize = h->uvlinesize;
3160  c->chroma_x_shift = h->chroma_x_shift;
3161  c->chroma_y_shift = h->chroma_y_shift;
3162  c->qscale = h->qscale;
3163  c->droppable = h->droppable;
3164  c->data_partitioning = h->data_partitioning;
3165  c->low_delay = h->low_delay;
3166  c->mb_width = h->mb_width;
3167  c->mb_height = h->mb_height;
3168  c->mb_stride = h->mb_stride;
3169  c->mb_num = h->mb_num;
3170  c->flags = h->flags;
3171  c->mb_field_decoding_flag = h->mb_field_decoding_flag;
3172  c->pict_type = h->pict_type;
3173 
3174  init_scan_tables(c);
3175  clone_tables(c, h, i);
3176  c->context_initialized = 1;
3177  }
3178 
3179  for (i = 0; i < h->slice_context_count; i++)
3180  if (context_init(h->thread_context[i]) < 0) {
3181  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
3182  return -1;
3183  }
3184  }
3185 
3186  h->context_initialized = 1;
3187 
3188  return 0;
3189 }
3190 
3191 /**
3192  * Decode a slice header.
3193  * This will also call ff_MPV_common_init() and frame_start() as needed.
3194  *
3195  * @param h h264context
3196  * @param h0 h264 master context (differs from 'h' when doing sliced based
3197  * parallel decoding)
3198  *
3199  * @return 0 if okay, <0 if an error occurred, 1 if decoding must not be multithreaded
3200  */
3201 static int decode_slice_header(H264Context *h, H264Context *h0)
3202 {
3203  unsigned int first_mb_in_slice;
3204  unsigned int pps_id;
3205  int num_ref_idx_active_override_flag, ret;
3206  unsigned int slice_type, tmp, i, j;
3207  int last_pic_structure, last_pic_droppable;
3208  int must_reinit;
3209  int needs_reinit = 0;
3210 
3213 
3214  first_mb_in_slice = get_ue_golomb_long(&h->gb);
3215 
3216  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
3217  if (h0->current_slice && FIELD_PICTURE(h)) {
3218  field_end(h, 1);
3219  }
3220 
3221  h0->current_slice = 0;
3222  if (!h0->first_field) {
3223  if (h->cur_pic_ptr && !h->droppable) {
3224  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
3225  h->picture_structure == PICT_BOTTOM_FIELD);
3226  }
3227  h->cur_pic_ptr = NULL;
3228  }
3229  }
3230 
3231  slice_type = get_ue_golomb_31(&h->gb);
3232  if (slice_type > 9) {
3233  av_log(h->avctx, AV_LOG_ERROR,
3234  "slice type too large (%d) at %d %d\n",
3235  slice_type, h->mb_x, h->mb_y);
3236  return -1;
3237  }
3238  if (slice_type > 4) {
3239  slice_type -= 5;
3240  h->slice_type_fixed = 1;
3241  } else
3242  h->slice_type_fixed = 0;
3243 
3244  slice_type = golomb_to_pict_type[slice_type];
3245  h->slice_type = slice_type;
3246  h->slice_type_nos = slice_type & 3;
3247 
3248  // to make a few old functions happy, it's wrong though
3249  h->pict_type = h->slice_type;
3250 
3251  pps_id = get_ue_golomb(&h->gb);
3252  if (pps_id >= MAX_PPS_COUNT) {
3253  av_log(h->avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id);
3254  return -1;
3255  }
3256  if (!h0->pps_buffers[pps_id]) {
3257  av_log(h->avctx, AV_LOG_ERROR,
3258  "non-existing PPS %u referenced\n",
3259  pps_id);
3260  return -1;
3261  }
3262  h->pps = *h0->pps_buffers[pps_id];
3263 
3264  if (!h0->sps_buffers[h->pps.sps_id]) {
3265  av_log(h->avctx, AV_LOG_ERROR,
3266  "non-existing SPS %u referenced\n",
3267  h->pps.sps_id);
3268  return -1;
3269  }
3270 
3271  if (h->pps.sps_id != h->current_sps_id ||
3272  h0->sps_buffers[h->pps.sps_id]->new) {
3273  h0->sps_buffers[h->pps.sps_id]->new = 0;
3274 
3275  h->current_sps_id = h->pps.sps_id;
3276  h->sps = *h0->sps_buffers[h->pps.sps_id];
3277 
3278  if (h->mb_width != h->sps.mb_width ||
3279  h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) ||
3280  h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
3281  h->cur_chroma_format_idc != h->sps.chroma_format_idc
3282  )
3283  needs_reinit = 1;
3284 
3285  if (h->bit_depth_luma != h->sps.bit_depth_luma ||
3286  h->chroma_format_idc != h->sps.chroma_format_idc) {
3287  h->bit_depth_luma = h->sps.bit_depth_luma;
3288  h->chroma_format_idc = h->sps.chroma_format_idc;
3289  needs_reinit = 1;
3290  }
3291  if ((ret = h264_set_parameter_from_sps(h)) < 0)
3292  return ret;
3293  }
3294 
3295  h->avctx->profile = ff_h264_get_profile(&h->sps);
3296  h->avctx->level = h->sps.level_idc;
3297  h->avctx->refs = h->sps.ref_frame_count;
3298 
3299  must_reinit = (h->context_initialized &&
3300  ( 16*h->sps.mb_width != h->avctx->coded_width
3301  || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
3302  || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
3303  || h->cur_chroma_format_idc != h->sps.chroma_format_idc
3304  || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)));
3305  if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
3306  must_reinit = 1;
3307 
3308  h->mb_width = h->sps.mb_width;
3309  h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
3310  h->mb_num = h->mb_width * h->mb_height;
3311  h->mb_stride = h->mb_width + 1;
3312 
3313  h->b_stride = h->mb_width * 4;
3314 
3315  h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
3316 
3317  h->width = 16 * h->mb_width;
3318  h->height = 16 * h->mb_height;
3319 
3320  ret = init_dimensions(h);
3321  if (ret < 0)
3322  return ret;
3323 
3323 
3324  if (h->sps.video_signal_type_present_flag) {
3325  h->avctx->color_range = h->sps.full_range > 0 ? AVCOL_RANGE_JPEG
3326  : AVCOL_RANGE_MPEG;
3327  if (h->sps.colour_description_present_flag) {
3328  if (h->avctx->colorspace != h->sps.colorspace)
3329  needs_reinit = 1;
3330  h->avctx->color_primaries = h->sps.color_primaries;
3331  h->avctx->color_trc = h->sps.color_trc;
3332  h->avctx->colorspace = h->sps.colorspace;
3333  }
3334  }
3335 
3336  if (h->context_initialized &&
3337  (h->width != h->avctx->coded_width ||
3338  h->height != h->avctx->coded_height ||
3339  must_reinit ||
3340  needs_reinit)) {
3341 
3342  if (h != h0) {
3343  av_log(h->avctx, AV_LOG_ERROR, "changing width/height on "
3344  "slice %d\n", h0->current_slice + 1);
3345  return AVERROR_INVALIDDATA;
3346  }
3347 
3348  flush_change(h);
3349 
3350  if ((ret = get_pixel_format(h, 1)) < 0)
3351  return ret;
3352  h->avctx->pix_fmt = ret;
3353 
3354  av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
3355  "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
3356 
3357  if ((ret = h264_slice_header_init(h, 1)) < 0) {
3359  "h264_slice_header_init() failed\n");
3360  return ret;
3361  }
3362  }
3363  if (!h->context_initialized) {
3364  if (h != h0) {
3366  "Cannot (re-)initialize context during parallel decoding.\n");
3367  return -1;
3368  }
3369 
3370  if ((ret = get_pixel_format(h, 1)) < 0)
3371  return ret;
3372  h->avctx->pix_fmt = ret;
3373 
3374  if ((ret = h264_slice_header_init(h, 0)) < 0) {
3376  "h264_slice_header_init() failed\n");
3377  return ret;
3378  }
3379  }
3380 
3381  if (h == h0 && h->dequant_coeff_pps != pps_id) {
3382  h->dequant_coeff_pps = pps_id;
3383  init_dequant_tables(h);
3384  }
3385 
3386  h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
3387 
3388  h->mb_mbaff = 0;
3389  h->mb_aff_frame = 0;
3390  last_pic_structure = h0->picture_structure;
3391  last_pic_droppable = h0->droppable;
3392  h->droppable = h->nal_ref_idc == 0;
3393  if (h->sps.frame_mbs_only_flag) {
3394  h->picture_structure = PICT_FRAME;
3395  } else {
3396  if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
3397  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
3398  return -1;
3399  }
3400  if (get_bits1(&h->gb)) { // field_pic_flag
3401  h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag
3402  } else {
3403  h->picture_structure = PICT_FRAME;
3404  h->mb_aff_frame = h->sps.mb_aff;
3405  }
3406  }
3407  h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
3408 
3409  if (h0->current_slice != 0) {
3410  if (last_pic_structure != h->picture_structure ||
3411  last_pic_droppable != h->droppable) {
3412  av_log(h->avctx, AV_LOG_ERROR,
3413  "Changing field mode (%d -> %d) between slices is not allowed\n",
3414  last_pic_structure, h->picture_structure);
3415  h->picture_structure = last_pic_structure;
3416  h->droppable = last_pic_droppable;
3417  return AVERROR_INVALIDDATA;
3418  } else if (!h0->cur_pic_ptr) {
3419  av_log(h->avctx, AV_LOG_ERROR,
3420  "unset cur_pic_ptr on %d. slice\n",
3421  h0->current_slice + 1);
3422  return AVERROR_INVALIDDATA;
3423  }
3424  } else {
3425  /* Shorten frame num gaps so we don't have to allocate reference
3426  * frames just to throw them away */
3427  if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
3428  int unwrap_prev_frame_num = h->prev_frame_num;
3429  int max_frame_num = 1 << h->sps.log2_max_frame_num;
3430 
3431  if (unwrap_prev_frame_num > h->frame_num)
3432  unwrap_prev_frame_num -= max_frame_num;
3433 
3434  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
3435  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
3436  if (unwrap_prev_frame_num < 0)
3437  unwrap_prev_frame_num += max_frame_num;
3438 
3439  h->prev_frame_num = unwrap_prev_frame_num;
3440  }
3441  }
3442 
3443  /* See if we have a decoded first field looking for a pair...
3444  * Here, we're using that to see if we should mark previously
3445  * decode frames as "finished".
3446  * We have to do that before the "dummy" in-between frame allocation,
3447  * since that can modify h->cur_pic_ptr. */
3448  if (h0->first_field) {
3449  assert(h0->cur_pic_ptr);
3450  assert(h0->cur_pic_ptr->f.data[0]);
3451  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3452 
3453  /* Mark old field/frame as completed */
3454  if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
3455  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3456  last_pic_structure == PICT_BOTTOM_FIELD);
3457  }
3458 
3459  /* figure out if we have a complementary field pair */
3460  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3461  /* Previous field is unmatched. Don't display it, but let it
3462  * remain for reference if marked as such. */
3463  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3464  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3465  last_pic_structure == PICT_TOP_FIELD);
3466  }
3467  } else {
3468  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3469  /* This and previous field were reference, but had
3470  * different frame_nums. Consider this field first in
3471  * pair. Throw away previous field except for reference
3472  * purposes. */
3473  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3474  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3475  last_pic_structure == PICT_TOP_FIELD);
3476  }
3477  } else {
3478  /* Second field in complementary pair */
3479  if (!((last_pic_structure == PICT_TOP_FIELD &&
3480  h->picture_structure == PICT_BOTTOM_FIELD) ||
3481  (last_pic_structure == PICT_BOTTOM_FIELD &&
3482  h->picture_structure == PICT_TOP_FIELD))) {
3483  av_log(h->avctx, AV_LOG_ERROR,
3484  "Invalid field mode combination %d/%d\n",
3485  last_pic_structure, h->picture_structure);
3486  h->picture_structure = last_pic_structure;
3487  h->droppable = last_pic_droppable;
3488  return AVERROR_INVALIDDATA;
3489  } else if (last_pic_droppable != h->droppable) {
3490  avpriv_request_sample(h->avctx,
3491  "Found reference and non-reference fields in the same frame, which");
3492  h->picture_structure = last_pic_structure;
3493  h->droppable = last_pic_droppable;
3494  return AVERROR_PATCHWELCOME;
3495  }
3496  }
3497  }
3498  }
3499 
3500  while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
3501  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
3502  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
3503  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
3504  h->frame_num, h->prev_frame_num);
3505  if (!h->sps.gaps_in_frame_num_allowed_flag)
3506  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
3507  h->last_pocs[i] = INT_MIN;
3508  if (h264_frame_start(h) < 0)
3509  return -1;
3510  h->prev_frame_num++;
3511  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
3512  h->cur_pic_ptr->frame_num = h->prev_frame_num;
3513  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
3514  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
3515  if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
3516  (h->avctx->err_recognition & AV_EF_EXPLODE))
3517  return ret;
3518  if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
3519  (h->avctx->err_recognition & AV_EF_EXPLODE))
3520  return AVERROR_INVALIDDATA;
3521  /* Error concealment: if a ref is missing, copy the previous ref in its place.
3522  * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions
3523  * about there being no actual duplicates.
3524  * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're
3525  * concealing a lost frame, this probably isn't noticeable by comparison, but it should
3526  * be fixed. */
3527  if (h->short_ref_count) {
3528  if (prev) {
3529  av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
3530  (const uint8_t **)prev->f.data, prev->f.linesize,
3531  h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16);
3532  h->short_ref[0]->poc = prev->poc + 2;
3533  }
3534  h->short_ref[0]->frame_num = h->prev_frame_num;
3535  }
3536  }
3537 
3538  /* See if we have a decoded first field looking for a pair...
3539  * We're using that to see whether to continue decoding in that
3540  * frame, or to allocate a new one. */
3541  if (h0->first_field) {
3542  assert(h0->cur_pic_ptr);
3543  assert(h0->cur_pic_ptr->f.data[0]);
3544  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3545 
3546  /* figure out if we have a complementary field pair */
3547  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3548  /* Previous field is unmatched. Don't display it, but let it
3549  * remain for reference if marked as such. */
3550  h0->cur_pic_ptr = NULL;
3551  h0->first_field = FIELD_PICTURE(h);
3552  } else {
3553  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3554  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3555  h0->picture_structure == PICT_BOTTOM_FIELD);
3556  /* This and the previous field had different frame_nums.
3557  * Consider this field first in pair. Throw away previous
3558  * one except for reference purposes. */
3559  h0->first_field = 1;
3560  h0->cur_pic_ptr = NULL;
3561  } else {
3562  /* Second field in complementary pair */
3563  h0->first_field = 0;
3564  }
3565  }
3566  } else {
3567  /* Frame or first field in a potentially complementary pair */
3568  h0->first_field = FIELD_PICTURE(h);
3569  }
3570 
3571  if (!FIELD_PICTURE(h) || h0->first_field) {
3572  if (h264_frame_start(h) < 0) {
3573  h0->first_field = 0;
3574  return -1;
3575  }
3576  } else {
3577  release_unused_pictures(h, 0);
3578  }
3579  /* Some macroblocks can be accessed before they're available in case
3580  * of lost slices, MBAFF or threading. */
3581  if (FIELD_PICTURE(h)) {
3582  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
3583  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
3584  } else {
3585  memset(h->slice_table, -1,
3586  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
3587  }
3588  h0->last_slice_type = -1;
3589  }
3590  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
3591  return ret;
3592 
3593  /* can't be in alloc_tables because linesize isn't known there.
3594  * FIXME: redo bipred weight to not require extra buffer? */
3595  for (i = 0; i < h->slice_context_count; i++)
3596  if (h->thread_context[i]) {
3597  ret = alloc_scratch_buffers(h->thread_context[i], h->linesize);
3598  if (ret < 0)
3599  return ret;
3600  }
3601 
3602  h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
3603 
3604  av_assert1(h->mb_num == h->mb_width * h->mb_height);
3605  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
3606  first_mb_in_slice >= h->mb_num) {
3607  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
3608  return -1;
3609  }
3610  h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
3611  h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h);
3612  if (h->picture_structure == PICT_BOTTOM_FIELD)
3613  h->resync_mb_y = h->mb_y = h->mb_y + 1;
3614  av_assert1(h->mb_y < h->mb_height);
3615 
3616  if (h->picture_structure == PICT_FRAME) {
3617  h->curr_pic_num = h->frame_num;
3618  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3619  } else {
3620  h->curr_pic_num = 2 * h->frame_num + 1;
3621  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3622  }
3623 
3624  if (h->nal_unit_type == NAL_IDR_SLICE)
3625  get_ue_golomb(&h->gb); /* idr_pic_id */
3626 
3627  if (h->sps.poc_type == 0) {
3628  h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
3629 
3630  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3631  h->delta_poc_bottom = get_se_golomb(&h->gb);
3632  }
3633 
3634  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3635  h->delta_poc[0] = get_se_golomb(&h->gb);
3636 
3637  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3638  h->delta_poc[1] = get_se_golomb(&h->gb);
3639  }
3640 
3641  init_poc(h);
3642 
3643  if (h->pps.redundant_pic_cnt_present)
3644  h->redundant_pic_count = get_ue_golomb(&h->gb);
3645 
3646  // set defaults, might be overridden a few lines later
3647  h->ref_count[0] = h->pps.ref_count[0];
3648  h->ref_count[1] = h->pps.ref_count[1];
3649 
3650  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3651  unsigned max[2];
3652  max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
3653 
3654  if (h->slice_type_nos == AV_PICTURE_TYPE_B)
3655  h->direct_spatial_mv_pred = get_bits1(&h->gb);
3656  num_ref_idx_active_override_flag = get_bits1(&h->gb);
3657 
3658  if (num_ref_idx_active_override_flag) {
3659  h->ref_count[0] = get_ue_golomb(&h->gb) + 1;
3660  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3661  h->ref_count[1] = get_ue_golomb(&h->gb) + 1;
3662  } else
3663  // full range is spec-ok in this case, even for frames
3664  h->ref_count[1] = 1;
3665  }
3666 
3667  if (h->ref_count[0]-1 > max[0] || h->ref_count[1]-1 > max[1]){
3668  av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", h->ref_count[0]-1, max[0], h->ref_count[1]-1, max[1]);
3669  h->ref_count[0] = h->ref_count[1] = 0;
3670  return AVERROR_INVALIDDATA;
3671  }
3672 
3673  if (h->slice_type_nos == AV_PICTURE_TYPE_B)
3674  h->list_count = 2;
3675  else
3676  h->list_count = 1;
3677  } else {
3678  h->list_count = 0;
3679  h->ref_count[0] = h->ref_count[1] = 0;
3680  }
3681  if (slice_type != AV_PICTURE_TYPE_I &&
3682  (h0->current_slice == 0 ||
3683  slice_type != h0->last_slice_type ||
3684  memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
3685  ff_h264_fill_default_ref_list(h);
3686  }
3687 
3688  if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
3689  ff_h264_decode_ref_pic_list_reordering(h) < 0) {
3690  h->ref_count[1] = h->ref_count[0] = 0;
3691  return -1;
3692  }
3693 
3694  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3695  (h->pps.weighted_bipred_idc == 1 &&
3696  h->slice_type_nos == AV_PICTURE_TYPE_B))
3697  pred_weight_table(h);
3698  else if (h->pps.weighted_bipred_idc == 2 &&
3699  h->slice_type_nos == AV_PICTURE_TYPE_B) {
3700  implicit_weight_table(h, -1);
3701  } else {
3702  h->use_weight = 0;
3703  for (i = 0; i < 2; i++) {
3704  h->luma_weight_flag[i] = 0;
3705  h->chroma_weight_flag[i] = 0;
3706  }
3707  }
3708 
3709  // If frame-mt is enabled, only update mmco tables for the first slice
3710  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3711  // or h->mmco, which will cause ref list mix-ups and decoding errors
3712  // further down the line. This may break decoding if the first slice is
3713  // corrupt, thus we only do this if frame-mt is enabled.
3714  if (h->nal_ref_idc &&
3715  ff_h264_decode_ref_pic_marking(h0, &h->gb,
3716  !(h->avctx->active_thread_type & FF_THREAD_FRAME) ||
3717  h0->current_slice == 0) < 0 &&
3718  (h->avctx->err_recognition & AV_EF_EXPLODE))
3719  return AVERROR_INVALIDDATA;
3720 
3721  if (FRAME_MBAFF(h)) {
3722  ff_h264_fill_mbaff_ref_list(h);
3723 
3724  if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) {
3725  implicit_weight_table(h, 0);
3726  implicit_weight_table(h, 1);
3727  }
3728  }
3729 
3730  if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred)
3731  ff_h264_direct_dist_scale_factor(h);
3732  ff_h264_direct_ref_list_init(h);
3733 
3734  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3735  tmp = get_ue_golomb_31(&h->gb);
3736  if (tmp > 2) {
3737  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3738  return -1;
3739  }
3740  h->cabac_init_idc = tmp;
3741  }
3742 
3743  h->last_qscale_diff = 0;
3744  tmp = h->pps.init_qp + get_se_golomb(&h->gb);
3745  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3746  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3747  return -1;
3748  }
3749  h->qscale = tmp;
3750  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
3751  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
3752  // FIXME qscale / qp ... stuff
3753  if (h->slice_type == AV_PICTURE_TYPE_SP)
3754  get_bits1(&h->gb); /* sp_for_switch_flag */
3755  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3756  h->slice_type == AV_PICTURE_TYPE_SI)
3757  get_se_golomb(&h->gb); /* slice_qs_delta */
3758 
3759  h->deblocking_filter = 1;
3760  h->slice_alpha_c0_offset = 52;
3761  h->slice_beta_offset = 52;
3762  if (h->pps.deblocking_filter_parameters_present) {
3763  tmp = get_ue_golomb_31(&h->gb);
3764  if (tmp > 2) {
3765  av_log(h->avctx, AV_LOG_ERROR,
3766  "deblocking_filter_idc %u out of range\n", tmp);
3767  return -1;
3768  }
3769  h->deblocking_filter = tmp;
3770  if (h->deblocking_filter < 2)
3771  h->deblocking_filter ^= 1; // 1<->0
3772 
3773  if (h->deblocking_filter) {
3774  h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1;
3775  h->slice_beta_offset += get_se_golomb(&h->gb) << 1;
3776  if (h->slice_alpha_c0_offset > 104U ||
3777  h->slice_beta_offset > 104U) {
3778  av_log(h->avctx, AV_LOG_ERROR,
3779  "deblocking filter parameters %d %d out of range\n",
3780  h->slice_alpha_c0_offset, h->slice_beta_offset);
3781  return -1;
3782  }
3783  }
3784  }
3785 
3786  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3787  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
3788  h->slice_type_nos != AV_PICTURE_TYPE_I) ||
3789  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
3790  h->slice_type_nos == AV_PICTURE_TYPE_B) ||
3791  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
3792  h->nal_ref_idc == 0))
3793  h->deblocking_filter = 0;
3794 
3795  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3796  if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
3797  /* Cheat slightly for speed:
3798  * Do not bother to deblock across slices. */
3799  h->deblocking_filter = 2;
3800  } else {
3801  h0->max_contexts = 1;
3802  if (!h0->single_decode_warning) {
3803  av_log(h->avctx, AV_LOG_INFO,
3804  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3805  h0->single_decode_warning = 1;
3806  }
3807  if (h != h0) {
3809  "Deblocking switched inside frame.\n");
3810  return 1;
3811  }
3812  }
3813  }
3814  h->qp_thresh = 15 + 52 -
3815  FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) -
3816  FFMAX3(0,
3817  h->pps.chroma_qp_index_offset[0],
3818  h->pps.chroma_qp_index_offset[1]) +
3819  6 * (h->sps.bit_depth_luma - 8);
3820 
3821  h0->last_slice_type = slice_type;
3822  memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count));
3823  h->slice_num = ++h0->current_slice;
3824 
3825  if (h->slice_num)
3826  h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y;
3827  if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y
3828  && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y
3829  && h->slice_num >= MAX_SLICES) {
3830  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
3831  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES);
3832  }
3833 
3834  for (j = 0; j < 2; j++) {
3835  int id_list[16];
3836  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3837  for (i = 0; i < 16; i++) {
3838  id_list[i] = 60;
3839  if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
3840  int k;
3841  AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
3842  for (k = 0; k < h->short_ref_count; k++)
3843  if (h->short_ref[k]->f.buf[0]->buffer == buf) {
3844  id_list[i] = k;
3845  break;
3846  }
3847  for (k = 0; k < h->long_ref_count; k++)
3848  if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
3849  id_list[i] = h->short_ref_count + k;
3850  break;
3851  }
3852  }
3853  }
3854 
3855  ref2frm[0] =
3856  ref2frm[1] = -1;
3857  for (i = 0; i < 16; i++)
3858  ref2frm[i + 2] = 4 * id_list[i] +
3859  (h->ref_list[j][i].reference & 3);
3860  ref2frm[18 + 0] =
3861  ref2frm[18 + 1] = -1;
3862  for (i = 16; i < 48; i++)
3863  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3864  (h->ref_list[j][i].reference & 3);
3865  }
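 /* ref2frm maps per-slice reference indices to globally comparable IDs
  * (4 * ref-list slot + 2-bit reference flags) so the loop filter can
  * compare references across slice boundaries. Entries 0/1 (and 18/19 for
  * MBAFF field references) are -1 guards for ref_index values -2/-1, which
  * is why lookups later add an offset of 2 (or 20 for MBAFF). */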
3866 
3867  if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
3868  if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
3869 
3870  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
3871  av_log(h->avctx, AV_LOG_DEBUG,
3872  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3873  h->slice_num,
3874  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3875  first_mb_in_slice,
3876  av_get_picture_type_char(h->slice_type),
3877  h->slice_type_fixed ? " fix" : "",
3878  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3879  pps_id, h->frame_num,
3880  h->cur_pic_ptr->field_poc[0],
3881  h->cur_pic_ptr->field_poc[1],
3882  h->ref_count[0], h->ref_count[1],
3883  h->qscale,
3884  h->deblocking_filter,
3885  h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26,
3886  h->use_weight,
3887  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3888  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3889  }
3890 
3891  return 0;
3892 }
3893 
3894 int ff_h264_get_slice_type(const H264Context *h)
3895 {
3896  switch (h->slice_type) {
3897  case AV_PICTURE_TYPE_P:
3898  return 0;
3899  case AV_PICTURE_TYPE_B:
3900  return 1;
3901  case AV_PICTURE_TYPE_I:
3902  return 2;
3903  case AV_PICTURE_TYPE_SP:
3904  return 3;
3905  case AV_PICTURE_TYPE_SI:
3906  return 4;
3907  default:
3908  return -1;
3909  }
3910 }
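 /* The returned values match slice_type % 5 from the spec (Table 7-6):
  * 0 = P, 1 = B, 2 = I, 3 = SP, 4 = SI. */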
3911 
3912 static av_always_inline void fill_filter_caches_inter(H264Context *h,
3913  int mb_type, int top_xy,
3914  int left_xy[LEFT_MBS],
3915  int top_type,
3916  int left_type[LEFT_MBS],
3917  int mb_xy, int list)
3918 {
3919  int b_stride = h->b_stride;
3920  int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];
3921  int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
3922  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
3923  if (USES_LIST(top_type, list)) {
3924  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
3925  const int b8_xy = 4 * top_xy + 2;
3926  int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3927  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
3928  ref_cache[0 - 1 * 8] =
3929  ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
3930  ref_cache[2 - 1 * 8] =
3931  ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
3932  } else {
3933  AV_ZERO128(mv_dst - 1 * 8);
3934  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3935  }
3936 
3937  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
3938  if (USES_LIST(left_type[LTOP], list)) {
3939  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
3940  const int b8_xy = 4 * left_xy[LTOP] + 1;
3941  int (*ref2frm)[64] =(void*)( h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3942  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
3943  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
3944  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
3945  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
3946  ref_cache[-1 + 0] =
3947  ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
3948  ref_cache[-1 + 16] =
3949  ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
3950  } else {
3951  AV_ZERO32(mv_dst - 1 + 0);
3952  AV_ZERO32(mv_dst - 1 + 8);
3953  AV_ZERO32(mv_dst - 1 + 16);
3954  AV_ZERO32(mv_dst - 1 + 24);
3955  ref_cache[-1 + 0] =
3956  ref_cache[-1 + 8] =
3957  ref_cache[-1 + 16] =
3958  ref_cache[-1 + 24] = LIST_NOT_USED;
3959  }
3960  }
3961  }
3962 
3963  if (!USES_LIST(mb_type, list)) {
3964  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
3965  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3966  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3967  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3968  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3969  return;
3970  }
3971 
3972  {
3973  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
3974  int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3975  uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
3976  uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
3977  AV_WN32A(&ref_cache[0 * 8], ref01);
3978  AV_WN32A(&ref_cache[1 * 8], ref01);
3979  AV_WN32A(&ref_cache[2 * 8], ref23);
3980  AV_WN32A(&ref_cache[3 * 8], ref23);
3981  }
3982 
3983  {
3984  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
3985  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
3986  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
3987  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
3988  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
3989  }
3990 }
3991 
3992 /**
3993  * Fill the loop-filter caches for the current macroblock.
3994  * @return non-zero if the loop filter can be skipped
3995  */
3996 static int fill_filter_caches(H264Context *h, int mb_type)
3997 {
3998  const int mb_xy = h->mb_xy;
3999  int top_xy, left_xy[LEFT_MBS];
4000  int top_type, left_type[LEFT_MBS];
4001  uint8_t *nnz;
4002  uint8_t *nnz_cache;
4003 
4004  top_xy = mb_xy - (h->mb_stride << MB_FIELD(h));
4005 
4006  /* Wow, what a mess, why didn't they simplify the interlacing & intra
4007  * stuff, I can't imagine that these complex rules are worth it. */
4008 
4009  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
4010  if (FRAME_MBAFF(h)) {
4011  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
4012  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
4013  if (h->mb_y & 1) {
4014  if (left_mb_field_flag != curr_mb_field_flag)
4015  left_xy[LTOP] -= h->mb_stride;
4016  } else {
4017  if (curr_mb_field_flag)
4018  top_xy += h->mb_stride &
4019  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
4020  if (left_mb_field_flag != curr_mb_field_flag)
4021  left_xy[LBOT] += h->mb_stride;
4022  }
4023  }
4024 
4025  h->top_mb_xy = top_xy;
4026  h->left_mb_xy[LTOP] = left_xy[LTOP];
4027  h->left_mb_xy[LBOT] = left_xy[LBOT];
4028  {
4029  /* For sufficiently low qp, filtering wouldn't do anything.
4030  * This is a conservative estimate: could also check beta_offset
4031  * and more accurate chroma_qp. */
4032  int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
4033  int qp = h->cur_pic.qscale_table[mb_xy];
4034  if (qp <= qp_thresh &&
4035  (left_xy[LTOP] < 0 ||
4036  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
4037  (top_xy < 0 ||
4038  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
4039  if (!FRAME_MBAFF(h))
4040  return 1;
4041  if ((left_xy[LTOP] < 0 ||
4042  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
4043  (top_xy < h->mb_stride ||
4044  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
4045  return 1;
4046  }
4047  }
4048 
4049  top_type = h->cur_pic.mb_type[top_xy];
4050  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
4051  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
4052  if (h->deblocking_filter == 2) {
4053  if (h->slice_table[top_xy] != h->slice_num)
4054  top_type = 0;
4055  if (h->slice_table[left_xy[LBOT]] != h->slice_num)
4056  left_type[LTOP] = left_type[LBOT] = 0;
4057  } else {
4058  if (h->slice_table[top_xy] == 0xFFFF)
4059  top_type = 0;
4060  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
4061  left_type[LTOP] = left_type[LBOT] = 0;
4062  }
4063  h->top_type = top_type;
4064  h->left_type[LTOP] = left_type[LTOP];
4065  h->left_type[LBOT] = left_type[LBOT];
4066 
4067  if (IS_INTRA(mb_type))
4068  return 0;
4069 
4070  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
4071  top_type, left_type, mb_xy, 0);
4072  if (h->list_count == 2)
4073  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
4074  top_type, left_type, mb_xy, 1);
4075 
4076  nnz = h->non_zero_count[mb_xy];
4077  nnz_cache = h->non_zero_count_cache;
4078  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
4079  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
4080  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
4081  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
4082  h->cbp = h->cbp_table[mb_xy];
4083 
4084  if (top_type) {
4085  nnz = h->non_zero_count[top_xy];
4086  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
4087  }
4088 
4089  if (left_type[LTOP]) {
4090  nnz = h->non_zero_count[left_xy[LTOP]];
4091  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
4092  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
4093  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
4094  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
4095  }
4096 
4097  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
4098  * from what the loop filter needs */
4099  if (!CABAC(h) && h->pps.transform_8x8_mode) {
4100  if (IS_8x8DCT(top_type)) {
4101  nnz_cache[4 + 8 * 0] =
4102  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
4103  nnz_cache[6 + 8 * 0] =
4104  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
4105  }
4106  if (IS_8x8DCT(left_type[LTOP])) {
4107  nnz_cache[3 + 8 * 1] =
4108  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
4109  }
4110  if (IS_8x8DCT(left_type[LBOT])) {
4111  nnz_cache[3 + 8 * 3] =
4112  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
4113  }
4114 
4115  if (IS_8x8DCT(mb_type)) {
4116  nnz_cache[scan8[0]] =
4117  nnz_cache[scan8[1]] =
4118  nnz_cache[scan8[2]] =
4119  nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;
4120 
4121  nnz_cache[scan8[0 + 4]] =
4122  nnz_cache[scan8[1 + 4]] =
4123  nnz_cache[scan8[2 + 4]] =
4124  nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;
4125 
4126  nnz_cache[scan8[0 + 8]] =
4127  nnz_cache[scan8[1 + 8]] =
4128  nnz_cache[scan8[2 + 8]] =
4129  nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;
4130 
4131  nnz_cache[scan8[0 + 12]] =
4132  nnz_cache[scan8[1 + 12]] =
4133  nnz_cache[scan8[2 + 12]] =
4134  nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
4135  }
4136  }
4137 
4138  return 0;
4139 }
4140 
4141 static void loop_filter(H264Context *h, int start_x, int end_x)
4142 {
4143  uint8_t *dest_y, *dest_cb, *dest_cr;
4144  int linesize, uvlinesize, mb_x, mb_y;
4145  const int end_mb_y = h->mb_y + FRAME_MBAFF(h);
4146  const int old_slice_type = h->slice_type;
4147  const int pixel_shift = h->pixel_shift;
4148  const int block_h = 16 >> h->chroma_y_shift;
4149 
4150  if (h->deblocking_filter) {
4151  for (mb_x = start_x; mb_x < end_x; mb_x++)
4152  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
4153  int mb_xy, mb_type;
4154  mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
4155  h->slice_num = h->slice_table[mb_xy];
4156  mb_type = h->cur_pic.mb_type[mb_xy];
4157  h->list_count = h->list_counts[mb_xy];
4158 
4159  if (FRAME_MBAFF(h))
4160  h->mb_mbaff =
4161  h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
4162 
4163  h->mb_x = mb_x;
4164  h->mb_y = mb_y;
4165  dest_y = h->cur_pic.f.data[0] +
4166  ((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
4167  dest_cb = h->cur_pic.f.data[1] +
4168  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4169  mb_y * h->uvlinesize * block_h;
4170  dest_cr = h->cur_pic.f.data[2] +
4171  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4172  mb_y * h->uvlinesize * block_h;
4173  // FIXME simplify above
4174 
4175  if (MB_FIELD(h)) {
4176  linesize = h->mb_linesize = h->linesize * 2;
4177  uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2;
4178  if (mb_y & 1) { // FIXME move out of this function?
4179  dest_y -= h->linesize * 15;
4180  dest_cb -= h->uvlinesize * (block_h - 1);
4181  dest_cr -= h->uvlinesize * (block_h - 1);
4182  }
4183  } else {
4184  linesize = h->mb_linesize = h->linesize;
4185  uvlinesize = h->mb_uvlinesize = h->uvlinesize;
4186  }
4187  backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
4188  uvlinesize, 0);
4189  if (fill_filter_caches(h, mb_type))
4190  continue;
4191  h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
4192  h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
4193 
4194  if (FRAME_MBAFF(h)) {
4195  ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
4196  linesize, uvlinesize);
4197  } else {
4198  ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
4199  dest_cr, linesize, uvlinesize);
4200  }
4201  }
4202  }
4203  h->slice_type = old_slice_type;
4204  h->mb_x = end_x;
4205  h->mb_y = end_mb_y - FRAME_MBAFF(h);
4206  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
4207  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
4208 }
4209 
4210 static void predict_field_decoding_flag(H264Context *h)
4211 {
4212  const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
4213  int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
4214  h->cur_pic.mb_type[mb_xy - 1] :
4215  (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
4216  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
4217  h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
4218 }
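 /* Called when entering a new MB-pair row in MBAFF mode: the field/frame
  * decoding flag is inferred from the left (or, failing that, the top)
  * neighbour of the same slice, since it is not coded for skipped pairs. */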
4219 
4220 /**
4221  * Draw edges and report progress for the last MB row.
4222  */
4223 static void decode_finish_row(H264Context *h)
4224 {
4225  int top = 16 * (h->mb_y >> FIELD_PICTURE(h));
4226  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
4227  int height = 16 << FRAME_MBAFF(h);
4228  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
4229 
4230  if (h->deblocking_filter) {
4231  if ((top + height) >= pic_height)
4232  height += deblock_border;
4233  top -= deblock_border;
4234  }
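 /* The loop filter may still modify a few pixel rows above the current MB
  * row, so while deblocking is enabled the reported progress is pulled
  * back by deblock_border rows. */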
4235 
4236  if (top >= pic_height || (top + height) < 0)
4237  return;
4238 
4239  height = FFMIN(height, pic_height - top);
4240  if (top < 0) {
4241  height = top + height;
4242  top = 0;
4243  }
4244 
4245  ff_h264_draw_horiz_band(h, top, height);
4246 
4247  if (h->droppable || h->er.error_occurred)
4248  return;
4249 
4250  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
4251  h->picture_structure == PICT_BOTTOM_FIELD);
4252 }
4253 
4254 static void er_add_slice(H264Context *h, int startx, int starty,
4255  int endx, int endy, int status)
4256 {
4257  if (CONFIG_ERROR_RESILIENCE) {
4258  ERContext *er = &h->er;
4259 
4260  er->ref_count = h->ref_count[0];
4261  ff_er_add_slice(er, startx, starty, endx, endy, status);
4262  }
4263 }
4264 
4265 static int decode_slice(struct AVCodecContext *avctx, void *arg)
4266 {
4267  H264Context *h = *(void **)arg;
4268  int lf_x_start = h->mb_x;
4269 
4270  h->mb_skip_run = -1;
4271 
4272  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * h->linesize * ((scan8[15] - scan8[0]) >> 3));
4273 
4274  h->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
4275  avctx->codec_id != AV_CODEC_ID_H264 ||
4276  (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
4277 
4278  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->er.error_status_table) {
4279  const int start_i = av_clip(h->resync_mb_x + h->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
4280  if (start_i) {
4281  int prev_status = h->er.error_status_table[h->er.mb_index2xy[start_i - 1]];
4282  prev_status &= ~ VP_START;
4283  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
4284  h->er.error_occurred = 1;
4285  }
4286  }
4287 
4288  if (h->pps.cabac) {
4289  /* realign */
4290  align_get_bits(&h->gb);
4291 
4292  /* init cabac */
4293  ff_init_cabac_decoder(&h->cabac,
4294  h->gb.buffer + get_bits_count(&h->gb) / 8,
4295  (get_bits_left(&h->gb) + 7) / 8);
4296 
4297  ff_h264_init_cabac_states(h);
4298 
4299  for (;;) {
4300  // START_TIMER
4301  int ret = ff_h264_decode_mb_cabac(h);
4302  int eos;
4303  // STOP_TIMER("decode_mb_cabac")
4304 
4305  if (ret >= 0)
4306  ff_h264_hl_decode_mb(h);
4307 
4308  // FIXME optimal? or let mb_decode decode 16x32 ?
4309  if (ret >= 0 && FRAME_MBAFF(h)) {
4310  h->mb_y++;
4311 
4312  ret = ff_h264_decode_mb_cabac(h);
4313 
4314  if (ret >= 0)
4315  ff_h264_hl_decode_mb(h);
4316  h->mb_y--;
4317  }
4318  eos = get_cabac_terminate(&h->cabac);
4319 
4320  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
4321  h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4322  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4323  h->mb_y, ER_MB_END);
4324  if (h->mb_x >= lf_x_start)
4325  loop_filter(h, lf_x_start, h->mb_x + 1);
4326  return 0;
4327  }
4328  if (h->cabac.bytestream > h->cabac.bytestream_end + 2 )
4329  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %td\n", h->cabac.bytestream_end - h->cabac.bytestream);
4330  if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 4) {
4331  av_log(h->avctx, AV_LOG_ERROR,
4332  "error while decoding MB %d %d, bytestream (%td)\n",
4333  h->mb_x, h->mb_y,
4334  h->cabac.bytestream_end - h->cabac.bytestream);
4335  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4336  h->mb_y, ER_MB_ERROR);
4337  return -1;
4338  }
4339 
4340  if (++h->mb_x >= h->mb_width) {
4341  loop_filter(h, lf_x_start, h->mb_x);
4342  h->mb_x = lf_x_start = 0;
4343  decode_finish_row(h);
4344  ++h->mb_y;
4345  if (FIELD_OR_MBAFF_PICTURE(h)) {
4346  ++h->mb_y;
4347  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4348  predict_field_decoding_flag(h);
4349  }
4350  }
4351 
4352  if (eos || h->mb_y >= h->mb_height) {
4353  tprintf(h->avctx, "slice end %d %d\n",
4354  get_bits_count(&h->gb), h->gb.size_in_bits);
4355  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4356  h->mb_y, ER_MB_END);
4357  if (h->mb_x > lf_x_start)
4358  loop_filter(h, lf_x_start, h->mb_x);
4359  return 0;
4360  }
4361  }
4362  } else {
4363  for (;;) {
4364  int ret = ff_h264_decode_mb_cavlc(h);
4365 
4366  if (ret >= 0)
4367  ff_h264_hl_decode_mb(h);
4368 
4369  // FIXME optimal? or let mb_decode decode 16x32 ?
4370  if (ret >= 0 && FRAME_MBAFF(h)) {
4371  h->mb_y++;
4372  ret = ff_h264_decode_mb_cavlc(h);
4373 
4374  if (ret >= 0)
4375  ff_h264_hl_decode_mb(h);
4376  h->mb_y--;
4377  }
4378 
4379  if (ret < 0) {
4380  av_log(h->avctx, AV_LOG_ERROR,
4381  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
4382  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4383  h->mb_y, ER_MB_ERROR);
4384  return -1;
4385  }
4386 
4387  if (++h->mb_x >= h->mb_width) {
4388  loop_filter(h, lf_x_start, h->mb_x);
4389  h->mb_x = lf_x_start = 0;
4390  decode_finish_row(h);
4391  ++h->mb_y;
4392  if (FIELD_OR_MBAFF_PICTURE(h)) {
4393  ++h->mb_y;
4394  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4395  predict_field_decoding_flag(h);
4396  }
4397  if (h->mb_y >= h->mb_height) {
4398  tprintf(h->avctx, "slice end %d %d\n",
4399  get_bits_count(&h->gb), h->gb.size_in_bits);
4400 
4401  if ( get_bits_left(&h->gb) == 0
4402  || get_bits_left(&h->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
4403  er_add_slice(h, h->resync_mb_x, h->resync_mb_y,
4404  h->mb_x - 1, h->mb_y,
4405  ER_MB_END);
4406 
4407  return 0;
4408  } else {
4409  er_add_slice(h, h->resync_mb_x, h->resync_mb_y,
4410  h->mb_x, h->mb_y,
4411  ER_MB_END);
4412 
4413  return -1;
4414  }
4415  }
4416  }
4417 
4418  if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) {
4419  tprintf(h->avctx, "slice end %d %d\n",
4420  get_bits_count(&h->gb), h->gb.size_in_bits);
4421  if (get_bits_left(&h->gb) == 0) {
4422  er_add_slice(h, h->resync_mb_x, h->resync_mb_y,
4423  h->mb_x - 1, h->mb_y,
4424  ER_MB_END);
4425  if (h->mb_x > lf_x_start)
4426  loop_filter(h, lf_x_start, h->mb_x);
4427 
4428  return 0;
4429  } else {
4430  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4431  h->mb_y, ER_MB_ERROR);
4432 
4433  return -1;
4434  }
4435  }
4436  }
4437  }
4438 }
4439 
4440 /**
4441  * Call decode_slice() for each context.
4442  *
4443  * @param h h264 master context
4444  * @param context_count number of contexts to execute
4445  */
4446 static int execute_decode_slices(H264Context *h, int context_count)
4447 {
4448  AVCodecContext *const avctx = h->avctx;
4449  H264Context *hx;
4450  int i;
4451 
4452  if (h->avctx->hwaccel ||
4453  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
4454  return 0;
4455  if (context_count == 1) {
4456  return decode_slice(avctx, &h);
4457  } else {
4458  av_assert0(context_count > 0);
4459  for (i = 1; i < context_count; i++) {
4460  hx = h->thread_context[i];
4461  if (CONFIG_ERROR_RESILIENCE) {
4462  hx->er.error_count = 0;
4463  }
4464  hx->x264_build = h->x264_build;
4465  }
4466 
4467  avctx->execute(avctx, decode_slice, h->thread_context,
4468  NULL, context_count, sizeof(void *));
4469 
4470  /* pull back stuff from slices to master context */
4471  hx = h->thread_context[context_count - 1];
4472  h->mb_x = hx->mb_x;
4473  h->mb_y = hx->mb_y;
4474  h->droppable = hx->droppable;
4475  h->picture_structure = hx->picture_structure;
4476  if (CONFIG_ERROR_RESILIENCE) {
4477  for (i = 1; i < context_count; i++)
4478  h->er.error_count += h->thread_context[i]->er.error_count;
4479  }
4480  }
4481 
4482  return 0;
4483 }
4484 
4485 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
4486  int parse_extradata)
4487 {
4488  AVCodecContext *const avctx = h->avctx;
4489  H264Context *hx; ///< thread context
4490  int buf_index;
4491  int context_count;
4492  int next_avc;
4493  int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
4494  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
4495  int nal_index;
4496  int idr_cleared=0;
4497  int first_slice = 0;
4498 
4499  h->nal_unit_type= 0;
4500 
4501  if(!h->slice_context_count)
4502  h->slice_context_count= 1;
4503  h->max_contexts = h->slice_context_count;
4504  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
4505  h->current_slice = 0;
4506  if (!h->first_field)
4507  h->cur_pic_ptr = NULL;
4508  ff_h264_reset_sei(h);
4509  }
4510 
4511  if (h->nal_length_size == 4) {
4512  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
4513  h->is_avc = 0;
4514  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
4515  h->is_avc = 1;
4516  }
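 /* Framing re-detection: a leading 32-bit value of 1 is an Annex B start
  * code (00 00 00 01), while a value that looks like a plausible NAL length
  * (> 1 and <= buf_size) indicates AVCC-style 4-byte length prefixes. */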
4517 
4518  for (; pass <= 1; pass++) {
4519  buf_index = 0;
4520  context_count = 0;
4521  next_avc = h->is_avc ? 0 : buf_size;
4522  nal_index = 0;
4523  for (;;) {
4524  int consumed;
4525  int dst_length;
4526  int bit_length;
4527  const uint8_t *ptr;
4528  int i, nalsize = 0;
4529  int err;
4530 
4531  if (buf_index >= next_avc) {
4532  if (buf_index >= buf_size - h->nal_length_size)
4533  break;
4534  nalsize = 0;
4535  for (i = 0; i < h->nal_length_size; i++)
4536  nalsize = (nalsize << 8) | buf[buf_index++];
4537  if (nalsize <= 0 || nalsize > buf_size - buf_index) {
4538  av_log(h->avctx, AV_LOG_ERROR,
4539  "AVC: nal size %d\n", nalsize);
4540  break;
4541  }
4542  next_avc = buf_index + nalsize;
4543  } else {
4544  // start code prefix search
4545  for (; buf_index + 3 < next_avc; buf_index++)
4546  // This should always succeed in the first iteration.
4547  if (buf[buf_index] == 0 &&
4548  buf[buf_index + 1] == 0 &&
4549  buf[buf_index + 2] == 1)
4550  break;
4551 
4552  if (buf_index + 3 >= buf_size) {
4553  buf_index = buf_size;
4554  break;
4555  }
4556 
4557  buf_index += 3;
4558  if (buf_index >= next_avc)
4559  continue;
4560  }
4561 
4562  hx = h->thread_context[context_count];
4563 
4564  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
4565  &consumed, next_avc - buf_index);
4566  if (ptr == NULL || dst_length < 0) {
4567  buf_index = -1;
4568  goto end;
4569  }
4570  i = buf_index + consumed;
4571  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
4572  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
4573  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
4574  h->workaround_bugs |= FF_BUG_TRUNCATED;
4575 
4576  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
4577  while(dst_length > 0 && ptr[dst_length - 1] == 0)
4578  dst_length--;
4579  bit_length = !dst_length ? 0
4580  : (8 * dst_length -
4581  decode_rbsp_trailing(h, ptr + dst_length - 1));
4582 
4583  if (h->avctx->debug & FF_DEBUG_STARTCODE)
4584  av_log(h->avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d pass %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass);
4585 
4586  if (h->is_avc && (nalsize != consumed) && nalsize)
4587  av_log(h->avctx, AV_LOG_DEBUG,
4588  "AVC: Consumed only %d bytes instead of %d\n",
4589  consumed, nalsize);
4590 
4591  buf_index += consumed;
4592  nal_index++;
4593 
4594  if (pass == 0) {
4595  /* packets can sometimes contain multiple PPS/SPS,
4596  * e.g. two PAFF field pictures in one packet, or a demuxer
4597  * which splits NALs strangely if so, when frame threading we
4598  * can't start the next thread until we've read all of them */
4599  switch (hx->nal_unit_type) {
4600  case NAL_SPS:
4601  case NAL_PPS:
4602  nals_needed = nal_index;
4603  break;
4604  case NAL_DPA:
4605  case NAL_IDR_SLICE:
4606  case NAL_SLICE:
4607  init_get_bits(&hx->gb, ptr, bit_length);
4608  if (!get_ue_golomb(&hx->gb) || !first_slice)
4609  nals_needed = nal_index;
4610  if (!first_slice)
4611  first_slice = hx->nal_unit_type;
4612  }
4613  continue;
4614  }
4615 
4616  if (!first_slice)
4617  switch (hx->nal_unit_type) {
4618  case NAL_DPA:
4619  case NAL_IDR_SLICE:
4620  case NAL_SLICE:
4621  first_slice = hx->nal_unit_type;
4622  }
4623 
4624  // FIXME do not discard SEI id
4625  if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
4626  continue;
4627 
4628 again:
4629  /* Ignore per frame NAL unit type during extradata
4630  * parsing. Decoding slices is not possible in codec init
4631  * with frame-mt */
4632  if (parse_extradata) {
4633  switch (hx->nal_unit_type) {
4634  case NAL_IDR_SLICE:
4635  case NAL_SLICE:
4636  case NAL_DPA:
4637  case NAL_DPB:
4638  case NAL_DPC:
4639  case NAL_AUXILIARY_SLICE:
4640  av_log(h->avctx, AV_LOG_WARNING, "Ignoring NAL %d in global header/extradata\n", hx->nal_unit_type);
4641  hx->nal_unit_type = NAL_FF_IGNORE;
4642  }
4643  }
4644 
4645  err = 0;
4646 
4647  switch (hx->nal_unit_type) {
4648  case NAL_IDR_SLICE:
4649  if (first_slice != NAL_IDR_SLICE) {
4650  av_log(h->avctx, AV_LOG_ERROR,
4651  "Invalid mix of idr and non-idr slices\n");
4652  buf_index = -1;
4653  goto end;
4654  }
4655  if(!idr_cleared)
4656  idr(h); // FIXME ensure we don't lose some frames if there is reordering
4657  idr_cleared = 1;
4658  case NAL_SLICE:
4659  init_get_bits(&hx->gb, ptr, bit_length);
4660  hx->intra_gb_ptr =
4661  hx->inter_gb_ptr = &hx->gb;
4662  hx->data_partitioning = 0;
4663 
4664  if ((err = decode_slice_header(hx, h)))
4665  break;
4666 
4667  if (h->sei_recovery_frame_cnt >= 0 && (h->frame_num != h->sei_recovery_frame_cnt || hx->slice_type_nos != AV_PICTURE_TYPE_I))
4668  h->valid_recovery_point = 1;
4669 
4670  if ( h->sei_recovery_frame_cnt >= 0
4671  && ( h->recovery_frame<0
4672  || ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt)) {
4673  h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) %
4674  (1 << h->sps.log2_max_frame_num);
4675 
4676  if (!h->valid_recovery_point)
4677  h->recovery_frame = h->frame_num;
4678  }
4679 
4680  h->cur_pic_ptr->f.key_frame |=
4681  (hx->nal_unit_type == NAL_IDR_SLICE);
4682 
4683  if (h->recovery_frame == h->frame_num) {
4684  h->cur_pic_ptr->sync |= 1;
4685  h->recovery_frame = -1;
4686  }
4687 
4688  h->sync |= !!h->cur_pic_ptr->f.key_frame;
4689  h->sync |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
4690  h->cur_pic_ptr->sync |= h->sync;
4691 
4692  if (h->current_slice == 1) {
4693  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
4694  decode_postinit(h, nal_index >= nals_needed);
4695 
4696  if (h->avctx->hwaccel &&
4697  h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0)
4698  return -1;
4699  if (CONFIG_H264_VDPAU_DECODER &&
4700  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
4701  ff_vdpau_h264_picture_start(h);
4702  }
4703 
4704  if (hx->redundant_pic_count == 0 &&
4705  (avctx->skip_frame < AVDISCARD_NONREF ||
4706  hx->nal_ref_idc) &&
4707  (avctx->skip_frame < AVDISCARD_BIDIR ||
4708  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
4709  (avctx->skip_frame < AVDISCARD_NONKEY ||
4710  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
4711  avctx->skip_frame < AVDISCARD_ALL) {
4712  if (avctx->hwaccel) {
4713  if (avctx->hwaccel->decode_slice(avctx,
4714  &buf[buf_index - consumed],
4715  consumed) < 0)
4716  return -1;
4717  } else if (CONFIG_H264_VDPAU_DECODER &&
4718  h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
4719  static const uint8_t start_code[] = {
4720  0x00, 0x00, 0x01 };
4721  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], start_code,
4722  sizeof(start_code));
4723  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], &buf[buf_index - consumed],
4724  consumed);
4725  } else
4726  context_count++;
4727  }
4728  break;
4729  case NAL_DPA:
4730  init_get_bits(&hx->gb, ptr, bit_length);
4731  hx->intra_gb_ptr =
4732  hx->inter_gb_ptr = NULL;
4733 
4734  if ((err = decode_slice_header(hx, h)) < 0)
4735  break;
4736 
4737  hx->data_partitioning = 1;
4738  break;
4739  case NAL_DPB:
4740  init_get_bits(&hx->intra_gb, ptr, bit_length);
4741  hx->intra_gb_ptr = &hx->intra_gb;
4742  break;
4743  case NAL_DPC:
4744  init_get_bits(&hx->inter_gb, ptr, bit_length);
4745  hx->inter_gb_ptr = &hx->inter_gb;
4746 
4747  av_log(h->avctx, AV_LOG_ERROR, "Partitioned H.264 support is incomplete\n");
4748  break;
4749 
4750  if (hx->redundant_pic_count == 0 &&
4751  hx->intra_gb_ptr &&
4752  hx->data_partitioning &&
4753  h->cur_pic_ptr && h->context_initialized &&
4754  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
4755  (avctx->skip_frame < AVDISCARD_BIDIR ||
4756  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
4757  (avctx->skip_frame < AVDISCARD_NONKEY ||
4758  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
4759  avctx->skip_frame < AVDISCARD_ALL)
4760  context_count++;
4761  break;
4762  case NAL_SEI:
4763  init_get_bits(&h->gb, ptr, bit_length);
4764  ff_h264_decode_sei(h);
4765  break;
4766  case NAL_SPS:
4767  init_get_bits(&h->gb, ptr, bit_length);
4768  if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)) {
4769  av_log(h->avctx, AV_LOG_DEBUG,
4770  "SPS decoding failure, trying again with the complete NAL\n");
4771  if (h->is_avc)
4772  av_assert0(next_avc - buf_index + consumed == nalsize);
4773  if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
4774  break;
4775  init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
4776  8*(next_avc - buf_index + consumed - 1));
4777  ff_h264_decode_seq_parameter_set(h);
4778  }
4779 
4780  break;
4781  case NAL_PPS:
4782  init_get_bits(&h->gb, ptr, bit_length);
4783  ff_h264_decode_picture_parameter_set(h, bit_length);
4784  break;
4785  case NAL_AUD:
4786  case NAL_END_SEQUENCE:
4787  case NAL_END_STREAM:
4788  case NAL_FILLER_DATA:
4789  case NAL_SPS_EXT:
4790  case NAL_AUXILIARY_SLICE:
4791  break;
4792  case NAL_FF_IGNORE:
4793  break;
4794  default:
4795  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
4796  hx->nal_unit_type, bit_length);
4797  }
4798 
4799  if (context_count == h->max_contexts) {
4800  execute_decode_slices(h, context_count);
4801  context_count = 0;
4802  }
4803 
4804  if (err < 0)
4805  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
4806  else if (err == 1) {
4807  /* Slice could not be decoded in parallel mode, copy down
4808  * NAL unit stuff to context 0 and restart. Note that
4809  * rbsp_buffer is not transferred, but since we no longer
4810  * run in parallel mode this should not be an issue. */
4811  h->nal_unit_type = hx->nal_unit_type;
4812  h->nal_ref_idc = hx->nal_ref_idc;
4813  hx = h;
4814  goto again;
4815  }
4816  }
4817  }
4818  if (context_count)
4819  execute_decode_slices(h, context_count);
4820 
4821 end:
4822  /* clean up */
4823  if (h->cur_pic_ptr && !h->droppable) {
4824  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
4825  h->picture_structure == PICT_BOTTOM_FIELD);
4826  }
4827 
4828  return buf_index;
4829 }
4830 
4831 /**
4832  * Return the number of bytes consumed for building the current frame.
4833  */
4834 static int get_consumed_bytes(int pos, int buf_size)
4835 {
4836  if (pos == 0)
4837  pos = 1; // avoid infinite loops (I doubt that is needed but ...)
4838  if (pos + 10 > buf_size)
4839  pos = buf_size; // oops ;)
4840 
4841  return pos;
4842 }
4843 
4844 static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
4845 {
4846  int i;
4847  int ret = av_frame_ref(dst, src);
4848  if (ret < 0)
4849  return ret;
4850 
4851  if (!h->sps.crop)
4852  return 0;
4853 
4854  for (i = 0; i < 3; i++) {
4855  int hshift = (i > 0) ? h->chroma_x_shift : 0;
4856  int vshift = (i > 0) ? h->chroma_y_shift : 0;
4857  int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
4858  (h->sps.crop_top >> vshift) * dst->linesize[i];
4859  dst->data[i] += off;
4860  }
4861  return 0;
4862 }
4863 
4864 static int decode_frame(AVCodecContext *avctx, void *data,
4865  int *got_frame, AVPacket *avpkt)
4866 {
4867  const uint8_t *buf = avpkt->data;
4868  int buf_size = avpkt->size;
4869  H264Context *h = avctx->priv_data;
4870  AVFrame *pict = data;
4871  int buf_index = 0;
4872  Picture *out;
4873  int i, out_idx;
4874  int ret;
4875 
4876  h->flags = avctx->flags;
4877 
4878  /* end of stream, output what is still in the buffers */
4879  if (buf_size == 0) {
4880  out:
4881 
4882  h->cur_pic_ptr = NULL;
4883  h->first_field = 0;
4884 
4885  // FIXME factorize this with the output code below
4886  out = h->delayed_pic[0];
4887  out_idx = 0;
4888  for (i = 1;
4889  h->delayed_pic[i] &&
4890  !h->delayed_pic[i]->f.key_frame &&
4891  !h->delayed_pic[i]->mmco_reset;
4892  i++)
4893  if (h->delayed_pic[i]->poc < out->poc) {
4894  out = h->delayed_pic[i];
4895  out_idx = i;
4896  }
4897 
4898  for (i = out_idx; h->delayed_pic[i]; i++)
4899  h->delayed_pic[i] = h->delayed_pic[i + 1];
4900 
4901  if (out) {
4902  out->reference &= ~DELAYED_PIC_REF;
4903  ret = output_frame(h, pict, &out->f);
4904  if (ret < 0)
4905  return ret;
4906  *got_frame = 1;
4907  }
4908 
4909  return buf_index;
4910  }
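 /* Some containers hand the avcC extradata to us as a regular packet;
  * detect its characteristic layout (configuration version 1, SPS NALs
  * starting with 0x67, PPS NALs with 0x68) and feed it to
  * ff_h264_decode_extradata() instead of the normal slice path. */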
4911  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
4912  int cnt= buf[5]&0x1f;
4913  const uint8_t *p= buf+6;
4914  while(cnt--){
4915  int nalsize= AV_RB16(p) + 2;
4916  if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
4917  goto not_extra;
4918  p += nalsize;
4919  }
4920  cnt = *(p++);
4921  if(!cnt)
4922  goto not_extra;
4923  while(cnt--){
4924  int nalsize= AV_RB16(p) + 2;
4925  if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
4926  goto not_extra;
4927  p += nalsize;
4928  }
4929 
4930  return ff_h264_decode_extradata(h, buf, buf_size);
4931  }
4932 not_extra:
4933 
4934  buf_index = decode_nal_units(h, buf, buf_size, 0);
4935  if (buf_index < 0)
4936  return -1;
4937 
4938  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
4939  av_assert0(buf_index <= buf_size);
4940  goto out;
4941  }
4942 
4943  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
4944  if (avctx->skip_frame >= AVDISCARD_NONREF ||
4945  buf_size >= 4 && !memcmp("Q264", buf, 4))
4946  return buf_size;
4947  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
4948  return -1;
4949  }
4950 
4951  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
4952  (h->mb_y >= h->mb_height && h->mb_height)) {
4953  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
4954  decode_postinit(h, 1);
4955 
4956  field_end(h, 0);
4957 
4958  /* Wait for second field. */
4959  *got_frame = 0;
4960  if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
4961  ret = output_frame(h, pict, &h->next_output_pic->f);
4962  if (ret < 0)
4963  return ret;
4964  *got_frame = 1;
4965  if (CONFIG_MPEGVIDEO) {
4966  ff_print_debug_info2(h->avctx, pict, h->er.mbskip_table, h->next_output_pic->mb_type, h->next_output_pic->qscale_table, h->next_output_pic->motion_val,
4967  &h->low_delay,
4968  h->mb_width, h->mb_height, h->mb_stride, 1);
4969  }
4970  }
4971  }
4972 
4973  assert(pict->data[0] || !*got_frame);
4974 
4975  return get_consumed_bytes(buf_index, buf_size);
4976 }
4977 
4978 av_cold void ff_h264_free_context(H264Context *h)
4979 {
4980  int i;
4981 
4982  free_tables(h, 1); // FIXME cleanup init stuff perhaps
4983 
4984  for (i = 0; i < MAX_SPS_COUNT; i++)
4985  av_freep(h->sps_buffers + i);
4986 
4987  for (i = 0; i < MAX_PPS_COUNT; i++)
4988  av_freep(h->pps_buffers + i);
4989 }
4990 
4991 static av_cold int h264_decode_end(AVCodecContext *avctx)
4992 {
4993  H264Context *h = avctx->priv_data;
4994 
4995  ff_h264_remove_all_refs(h);
4996  ff_h264_free_context(h);
4997 
4998  unref_picture(h, &h->cur_pic);
4999 
5000  return 0;
5001 }
5002 
5003 static const AVProfile profiles[] = {
5004  { FF_PROFILE_H264_BASELINE, "Baseline" },
5005  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
5006  { FF_PROFILE_H264_MAIN, "Main" },
5007  { FF_PROFILE_H264_EXTENDED, "Extended" },
5008  { FF_PROFILE_H264_HIGH, "High" },
5009  { FF_PROFILE_H264_HIGH_10, "High 10" },
5010  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
5011  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
5012  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
5013  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
5014  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
5015  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
5016  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
5017  { FF_PROFILE_UNKNOWN },
5018 };
5019 
5020 static const AVOption h264_options[] = {
5021  {"is_avc", "is avc", offsetof(H264Context, is_avc), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
5022  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
5023  {NULL}
5024 };
5025 
5026 static const AVClass h264_class = {
5027  .class_name = "H264 Decoder",
5028  .item_name = av_default_item_name,
5029  .option = h264_options,
5030  .version = LIBAVUTIL_VERSION_INT,
5031 };
5032 
5033 static const AVClass h264_vdpau_class = {
5034  .class_name = "H264 VDPAU Decoder",
5035  .item_name = av_default_item_name,
5036  .option = h264_options,
5037  .version = LIBAVUTIL_VERSION_INT,
5038 };
5039 
5040 AVCodec ff_h264_decoder = {
5041  .name = "h264",
5042  .type = AVMEDIA_TYPE_VIDEO,
5043  .id = AV_CODEC_ID_H264,
5044  .priv_data_size = sizeof(H264Context),
5045  .init = ff_h264_decode_init,
5046  .close = h264_decode_end,
5047  .decode = decode_frame,
5048  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
5049  CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
5050  CODEC_CAP_FRAME_THREADS,
5051  .flush = flush_dpb,
5052  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
5053  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
5054  .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
5055  .profiles = NULL_IF_CONFIG_SMALL(profiles),
5056  .priv_class = &h264_class,
5057 };
5058 
5059 #if CONFIG_H264_VDPAU_DECODER
5060 AVCodec ff_h264_vdpau_decoder = {
5061  .name = "h264_vdpau",
5062  .type = AVMEDIA_TYPE_VIDEO,
5063  .id = AV_CODEC_ID_H264,
5064  .priv_data_size = sizeof(H264Context),
5065  .init = ff_h264_decode_init,
5066  .close = h264_decode_end,
5067  .decode = decode_frame,
5068  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5069  .flush = flush_dpb,
5070  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
5071  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
5072  AV_PIX_FMT_NONE},
5073  .profiles = NULL_IF_CONFIG_SMALL(profiles),
5074  .priv_class = &h264_vdpau_class,
5075 };
5076 #endif
int chroma_format_idc
Definition: h264.h:154
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:190
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:526
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:663
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:51
GetBitContext inter_gb
Definition: h264.h:418
#define XCHG(a, b, xchg)
int video_signal_type_present_flag
Definition: h264.h:179
#define VERT_PRED8x8
Definition: h264pred.h:70
int last_slice_type
Definition: h264.h:576
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
Definition: h264_cabac.c:1875
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
Definition: h264.c:1299
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
#define FF_PROFILE_H264_HIGH_10
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread.c:959
const struct AVCodec * codec
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:328
#define PICT_TOP_FIELD
Definition: mpegvideo.h:662
discard all frames except keyframes
uint8_t * edge_emu_buffer
Definition: h264.h:648
int8_t * ref_index[2]
Definition: mpegvideo.h:114
#define CONFIG_SVQ3_DECODER
Definition: config.h:587
int workaround_bugs
Definition: h264.h:294
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2675
float v
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:66
unsigned int top_samples_available
Definition: h264.h:320
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:4485
unsigned int topleft_samples_available
Definition: h264.h:319
#define DC_128_PRED8x8
Definition: h264pred.h:76
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
Definition: h264.h:572
GetBitContext gb
Definition: h264.h:268
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
#define AV_NUM_DATA_POINTERS
Definition: frame.h:77
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:142
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2272
#define VERT_LEFT_PRED
Definition: h264pred.h:45
static int shift(int a, int b)
Definition: sonic.c:86
int low_delay
Definition: h264.h:290
int mb_num
Definition: h264.h:467
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
GetBitContext * intra_gb_ptr
Definition: h264.h:419
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
int mb_aff_frame
Definition: h264.h:379
FIXME Range Coding of cr are ref
Definition: snow.txt:367
void(* pred8x8l_add[2])(uint8_t *pix, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:102
AVOption.
Definition: opt.h:251
static void copy_parameter_set(void **to, void **from, int count, int size)
Definition: h264.c:1559
static const AVClass h264_class
Definition: h264.c:5026
int delta_poc[2]
Definition: h264.h:506
#define IS_SUB_4X4(a)
Definition: mpegvideo.h:152
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:287
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:175
int last_qscale_diff
Definition: h264.h:440
#define CHROMA444(h)
Definition: h264.h:92
#define LEFT_MBS
Definition: h264.h:68
mpeg2/4, h264 default
int coded_width
Bitstream width / height, may be different from width/height e.g.
int cbp
Definition: h264.h:435
3: top field, bottom field, in that order
Definition: h264.h:140
#define FF_PROFILE_H264_HIGH_444
av_default_item_name
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:514
const char * fmt
Definition: avisynth_c.h:669
int first_field
Definition: h264.h:383
uint8_t field_scan8x8_q0[64]
Definition: h264.h:456
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define ER_MB_END
AVFrame * f
Definition: thread.h:36
int weighted_bipred_idc
Definition: h264.h:221
int sync
did we had a keyframe or recovery point
Definition: h264.h:643
int chroma_qp_index_offset[2]
Definition: h264.h:224
const uint8_t * bytestream_end
Definition: cabac.h:48
int left_type[LEFT_MBS]
Definition: h264.h:311
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:343
if max(w)>1 w=0.9 *w/max(w)
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
H264ChromaContext h264chroma
Definition: h264.h:264
uint16_t * cbp_table
Definition: h264.h:434
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1455
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:631
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264.h:538
7: frame doubling
Definition: h264.h:144
void ff_er_frame_end(ERContext *s)
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:969
#define MAX_PPS_COUNT
Definition: h264.h:43
Sequence parameter set.
Definition: h264.h:151
enum AVColorRange color_range
MPEG vs JPEG YUV range.
static void init_dequant_tables(H264Context *h)
Definition: h264.c:1218
int coded_picture_number
Definition: h264.h:289
int mb_y
Definition: h264.h:461
int bitstream_restriction_flag
Definition: h264.h:190
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:219
#define FMO
Definition: h264.h:55
int num
numerator
Definition: rational.h:44
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:265
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:176
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
static void unref_picture(H264Context *h, Picture *pic)
Definition: h264.c:191
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:295
#define VP_START
< current MB is the first after a resync marker
AVBufferPool * mb_type_pool
Definition: h264.h:654
int outputed_poc
Definition: h264.h:532
int chroma_x_shift
Definition: h264.h:284
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:126
#define AV_EF_AGGRESSIVE
const uint8_t * buffer
Definition: get_bits.h:55
Picture parameter set.
Definition: h264.h:213
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:169
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
#define pass
Definition: fft.c:335
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264.c:212
int flags
Definition: h264.h:293
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int frame_mbs_only_flag
Definition: h264.h:167
int mb_height
Definition: h264.h:465
int16_t * dc_val_base
Definition: h264.h:649
#define CONFIG_SMALL
Definition: config.h:391
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:55
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:488
static av_always_inline void fill_filter_caches_inter(H264Context *h, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264.c:3912
int mmco_index
Definition: h264.h:539
AVBufferPool * ref_index_pool
Definition: h264.h:656
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end w...
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264.c:1596
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.h:448
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:282
DSPContext dsp
Definition: h264.h:269
mpegvideo header.
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:2897
#define FF_ARRAY_ELEMS(a)
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
Definition: h264.h:368
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:326
H264Context.
Definition: h264.h:260
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:1326
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
Definition: mpegvideo.h:162
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:135
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:69
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:508
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
uint32_t num_units_in_tick
Definition: h264.h:186
struct H264Context H264Context
H264Context.
#define FF_PROFILE_H264_MAIN
4: bottom field, top field, in that order
Definition: h264.h:141
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
Definition: h264.c:645
#define HOR_PRED8x8
Definition: h264pred.h:69
int stride
Definition: mace.c:144
int frame_start_found
Definition: parser.h:34
int picture_structure
Definition: h264.h:382
#define AV_WN32A(p, v)
Definition: intreadwrite.h:530
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:375
#define AV_COPY32(d, s)
Definition: intreadwrite.h:578
static const uint8_t rem6[QP_MAX_NUM+1]
Definition: h264.c:56
#define IS_INTRA_PCM(a)
Definition: mpegvideo.h:141
int profile_idc
Definition: h264.h:152
unsigned current_sps_id
id of the current SPS
Definition: h264.h:359
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
Definition: h264.c:809
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:828
output residual component w
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264.c:106
int mb_skip_run
Definition: h264.h:464
void ff_h264_init_cabac_states(H264Context *h)
Definition: h264_cabac.c:1260
#define FFALIGN(x, a)
Definition: common.h:63
#define tf
Definition: regdef.h:73
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:156
void ff_vdpau_h264_picture_complete(H264Context *h)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
Switching Intra.
Definition: avutil.h:220
uint8_t * chroma_pred_mode_table
Definition: h264.h:439
#define IS_DIR(a, part, list)
Definition: mpegvideo.h:155
static const uint8_t div6[QP_MAX_NUM+1]
Definition: h264.c:64
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:107
enum AVDiscard skip_frame
Skip decoding for selected frames.
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
Definition: h264_refs.c:205
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_RN32A(p)
Definition: intreadwrite.h:518
int ref_poc[2][2][32]
h264 POCs of the frames/fields used as reference (FIXME need per slice)
Definition: mpegvideo.h:166
struct AVHWAccel * hwaccel
Hardware accelerator in use.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:55
unsigned int crop_top
frame_cropping_rect_top_offset
Definition: h264.h:175
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:165
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2387
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:1574
int resync_mb_y
Definition: h264.h:463
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define IS_8x8DCT(a)
Definition: h264.h:99
uint8_t scaling_matrix4[6][16]
Definition: h264.h:229
const uint8_t * bytestream
Definition: cabac.h:47
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264.h:414
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:225
#define IS_INTER(a)
Definition: mpegvideo.h:139
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:369
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
uint8_t
#define av_cold
Definition: attributes.h:78
int ref_count[2][2]
number of entries in ref_poc (FIXME need per slice)
Definition: mpegvideo.h:167
int prev_frame_num_offset
for POC type 2
Definition: h264.h:511
int use_weight
Definition: h264.h:388
int mb_uvlinesize
Definition: h264.h:357
#define mb
int full_range
Definition: h264.h:180
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
unsigned int crop_left
frame_cropping_rect_left_offset
Definition: h264.h:173
#define IS_8X16(a)
Definition: mpegvideo.h:147
int offset_for_non_ref_pic
Definition: h264.h:160
float delta
mode
Definition: f_perms.c:27
#define PICT_FRAME
Definition: mpegvideo.h:664
AVOptions.
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:40
int gaps_in_frame_num_allowed_flag
Definition: h264.h:164
int data_partitioning
Definition: h264.h:288
int luma_weight[48][2][2]
Definition: h264.h:393
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264.h:204
#define FF_PROFILE_H264_EXTENDED
enum AVColorPrimaries color_primaries
Definition: h264.h:182
#define AV_RB32
static int find_unused_picture(H264Context *h)
Definition: h264.c:403
AVCodec ff_h264_decoder
Definition: h264.c:5040
end end
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:411
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
#define ER_MB_ERROR
int cabac
entropy_coding_mode_flag
Definition: h264.h:215
int mb_xy
Definition: h264.h:468
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:228
#define LUMA_DC_BLOCK_INDEX
Definition: h264.h:808
#define DIAG_DOWN_LEFT_PRED
Definition: h264pred.h:41
#define FF_PROFILE_UNKNOWN
#define emms_c()
static const uint8_t dequant8_coeff_init[6][6]
Definition: h264data.h:263
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:408
const char * from
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264.h:174
#define FF_PROFILE_H264_CONSTRAINED
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
unsigned int last_ref_count[2]
Definition: h264.h:577
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:294
void ff_h264_fill_mbaff_ref_list(H264Context *h)
Definition: h264_refs.c:327
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define FF_PROFILE_H264_HIGH_444_INTRA
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:98
int uvlinesize
Definition: h264.h:283
#define TOP_DC_PRED
Definition: h264pred.h:50
int height
Definition: h264.h:282
int mb_x
Definition: h264.h:461
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264.h:155
uint8_t * data
static int init_poc(H264Context *h)
Definition: h264.c:2690
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:193
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:630
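As a sketch of what "identify the exact end" means here: an RBSP ends with a single rbsp_stop_one_bit (1) followed by zero padding, so the payload length in bits can be recovered from the lowest set bit of the last non-zero byte. The helper below is hypothetical, not the decoder's implementation:

static int rbsp_payload_bits(const uint8_t *rbsp, int size)
{
    int stop = 0;
    while (size > 0 && rbsp[size - 1] == 0)
        size--;                          /* strip trailing zero bytes */
    if (!size)
        return 0;                        /* no stop bit found */
    while (!((rbsp[size - 1] >> stop) & 1))
        stop++;                          /* locate rbsp_stop_one_bit */
    return size * 8 - stop - 1;          /* bits before the stop bit */
}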
void(* h264_idct_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:80
int left_mb_xy[LEFT_MBS]
Definition: h264.h:306
int top_mb_xy
Definition: h264.h:304
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:81
#define ER_MV_END
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
Definition: h264.c:3894
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:227
int chroma_y_shift
Definition: h264.h:284
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:107
static const uint8_t dequant8_coeff_init_scan[16]
Definition: h264data.h:259
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:47
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread.c:1040
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:489
#define FF_PROFILE_H264_HIGH_422_INTRA
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:283
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
void(* h264_idct8_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:86
Picture * next_output_pic
Definition: h264.h:531
#define AV_COPY64(d, s)
Definition: intreadwrite.h:582
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int luma_log2_weight_denom
Definition: h264.h:390
#define IS_INTERLACED(a)
Definition: mpegvideo.h:142
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
Definition: h264qpel.h:29
Definition: h264.h:112
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:38
static int h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:2917
int chroma_weight[48][2][2][2]
Definition: h264.h:394
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264.c:2972
AVCodecContext * owner
Definition: thread.h:37
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:530
#define AVCOL_SPC_YCGCO
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:84
static void init_dequant4_coeff_table(H264Context *h)
Definition: h264.c:1192
static int pic_is_unused(H264Context *h, Picture *pic)
Definition: h264.c:394
int width
Definition: h264.h:282
H.264 / AVC / MPEG4 part10 codec.
ThreadFrame tf
Definition: mpegvideo.h:99
#define U(x)
int frame_num
Definition: h264.h:507
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264data.h:134
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:557
enum AVCodecID id
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:168
enum AVColorTransferCharacteristic color_trc
Definition: h264.h:183
H264PredContext hpc
Definition: h264.h:318
static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
Definition: h264.c:4844
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:473
int has_b_frames
Size of the frame reordering buffer in the decoder.
#define td
Definition: regdef.h:70
static enum AVPixelFormat h264_hwaccel_pixfmt_list_jpeg_420[]
Definition: h264.c:89
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code.
Definition: golomb.h:53
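For illustration, a self-contained sketch of the ue(v) Exp-Golomb scheme this helper implements: count leading zero bits, then read that many suffix bits. read_bit() is a hypothetical MSB-first bit reader shown only for the sketch, not part of GetBitContext:

#include <stdint.h>

static int read_bit(const uint8_t *buf, unsigned *pos)
{
    int bit = (buf[*pos >> 3] >> (7 - (*pos & 7))) & 1;
    (*pos)++;
    return bit;
}

static unsigned decode_ue(const uint8_t *buf, unsigned *pos)
{
    int lz = 0;
    unsigned suffix = 0;
    while (!read_bit(buf, pos))
        lz++;                            /* leading zeros give the length */
    for (int i = 0; i < lz; i++)
        suffix = (suffix << 1) | read_bit(buf, pos);
    return (1u << lz) - 1 + suffix;      /* "1"->0, "010"->1, "011"->2, ... */
}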
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264.c:3201
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:4834
MotionEstContext me
Definition: h264.h:266
int poc_type
pic_order_cnt_type
Definition: h264.h:157
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int context_initialized
Definition: h264.h:292
Multithreading support functions.
static const uint16_t mask[17]
Definition: lzw.c:37
#define AV_EF_EXPLODE
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:2469
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
ParseContext parse_context
Definition: h264.h:267
int nal_unit_type
Definition: h264.h:481
int use_weight_chroma
Definition: h264.h:389
int num_reorder_frames
Definition: h264.h:191
#define AV_RB16
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
discard all bidirectional frames
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:103
#define DC_128_PRED
Definition: h264pred.h:51
#define LEFT_DC_PRED
Definition: h264pred.h:49
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
Definition: h264.h:849
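The derivation behind this helper (spec table 8-15) can be sketched as follows: the luma QP plus the PPS chroma_qp_index_offset is clipped to [0,51] and mapped through a table that is the identity up to 29 and flattens above it. The decoder precomputes this mapping per PPS; the standalone version below is for illustration only:

#include <stdint.h>

static const uint8_t qp_c[52] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30,
    31, 32, 32, 33, 34, 34, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
    39, 39, 39, 39
};

static int chroma_qp_from_luma(int luma_qp, int chroma_qp_index_offset)
{
    int idx = luma_qp + chroma_qp_index_offset;
    if (idx < 0)  idx = 0;               /* clip to [0, 51] */
    if (idx > 51) idx = 51;
    return qp_c[idx];
}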
GetBitContext * inter_gb_ptr
Definition: h264.h:420
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1806
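A small usage sketch for this query; for planar YUV 4:2:0 both shifts come back as 1, i.e. chroma is halved in each direction:

#include "libavutil/pixdesc.h"

static void chroma_shift_example(void)
{
    int h_shift, v_shift;
    av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
    /* h_shift == 1, v_shift == 1 for 4:2:0 */
}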
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:529
#define IS_SUB_8X4(a)
Definition: mpegvideo.h:150
#define MB_FIELD(h)
Definition: h264.h:65
int active_thread_type
Which multithreading methods are in use by the codec.
int mb_field_decoding_flag
Definition: h264.h:380
int reference
Definition: mpegvideo.h:178
const char * r
Definition: vf_curves.c:94
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:2661
static int h264_slice_header_init(H264Context *, int)
Definition: h264.c:3089
int capabilities
Codec capabilities.
static const AVOption h264_options[]
Definition: h264.c:5020
PPS pps
current pps
Definition: h264.h:365
#define FF_BUG_TRUNCATED
const char * arg
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:441
int prev_interlaced_frame
Complement to sei_pic_struct: SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:591
int flags
CODEC_FLAG_*.
struct Picture * next_pic
int direct_spatial_mv_pred
Definition: h264.h:397
static int h264_frame_start(H264Context *h)
Definition: h264.c:1795
#define FF_BUG_AUTODETECT
autodetection
0: frame
Definition: h264.h:137
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
int weighted_pred
weighted_pred_flag
Definition: h264.h:220
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
const char * name
Name of the codec implementation.
#define T(x)
H264QpelContext h264qpel
Definition: h264.h:265
ERContext er
Definition: h264.h:270
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:281
static void predict_field_decoding_flag(H264Context *h)
Definition: h264.c:4210
#define CABAC(h)
Definition: h264.h:87
#define IS_INTRA(a)
Definition: mpegvideo.h:138
void ff_init_cabac_states(void)
Definition: cabac.c:140
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
Definition: h264_cavlc.c:699
static int square(int x)
Definition: roqvideoenc.c:111
int valid_recovery_point
Whether the SEI recovery points look valid.
Definition: h264.h:629
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
static const uint8_t field_scan[16+1]
Definition: h264data.h:62
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:410
static void er_add_slice(H264Context *h, int startx, int starty, int endx, int endy, int status)
Definition: h264.c:4254
#define FFMAX(a, b)
Definition: common.h:56
external API header
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
Definition: h264qpel.h:28
int delta_pic_order_always_zero_flag
Definition: h264.h:159
#define CODEC_FLAG_LOW_DELAY
Force low delay.
uint8_t * mbintra_table
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:257
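A usage sketch, assuming two already-allocated frames with identical format and geometry:

#include "libavutil/frame.h"
#include "libavutil/imgutils.h"

static void copy_planes_example(AVFrame *dst, const AVFrame *src,
                                int width, int height)
{
    /* copies every plane, respecting each plane's own linesize */
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)src->data, src->linesize,
                  AV_PIX_FMT_YUV420P, width, height);
}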
int new
flag to track whether the decoder context needs re-init due to a changed SPS
Definition: h264.h:207
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:161
#define IN_RANGE(a, b, size)
Definition: h264.c:1537
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:84
int size
int priv_data_size
Size of HW accelerator private data.
void(* h264_add_pixels8_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:106
uint8_t zigzag_scan8x8[64]
Definition: h264.h:447
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
static const uint8_t scan8[16 *3+3]
Definition: h264.h:812
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
the normal 2^n-1 "JPEG" YUV ranges
int crop
frame_cropping_flag
Definition: h264.h:170
uint8_t * error_status_table
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
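A small usage sketch; the return value reports whether the reduction was exact under the given magnitude limit:

#include <limits.h>
#include "libavutil/rational.h"

static void reduce_example(void)
{
    int num, den;
    int exact = av_reduce(&num, &den, 40, 16, INT_MAX);
    /* num == 5, den == 2, exact == 1 */
}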
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:89
uint8_t * direct_table
Definition: h264.h:443
AVBufferRef * hwaccel_priv_buf
Definition: mpegvideo.h:128
#define CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
Definition: h264.c:2166
uint8_t scaling_matrix8[6][64]
Definition: h264.h:230
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static void copy_picture_range(Picture **to, Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264.c:1544
#define FF_DEBUG_STARTCODE
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:489
void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:187
useful rectangle filling function
the normal 219*2^(n-8) "MPEG" YUV ranges
#define AV_LOG_VERBOSE
Definition: log.h:157
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:277
int refs
number of reference frames
#define MAX_THREADS
CABACContext cabac
CABAC.
Definition: h264.h:430
unsigned int left_samples_available
Definition: h264.h:322
#define IS_8X8(a)
Definition: mpegvideo.h:148
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:104
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:112
int ref_frame_count
num_ref_frames
Definition: h264.h:163
Picture * long_ref[32]
Definition: h264.h:528
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
#define CONFIG_MPEGVIDEO
Definition: config.h:416
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
struct Picture * last_pic
int frame_num_offset
for POC type 2
Definition: h264.h:510
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define av_assert1(cond)
assert() equivalent that does not lie in speed-critical code.
Definition: avassert.h:53
int x264_build
Definition: h264.h:459
uint32_t * mb2br_xy
Definition: h264.h:353
uint8_t * er_temp_buffer
int overread
the number of bytes which were irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:58
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:373
int last_index
Definition: parser.h:31
uint8_t field_scan8x8_cavlc[64]
Definition: h264.h:451
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:162
int colour_description_present_flag
Definition: h264.h:181
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
int poc
h264 frame POC
Definition: mpegvideo.h:160
AVRational sar
Definition: h264.h:178
int redundant_pic_count
Definition: h264.h:524
#define FIELD_PICTURE(h)
Definition: h264.h:67
ret
Definition: avfilter.c:821
static const AVClass h264_vdpau_class
Definition: h264.c:5033
int width
picture width / height.
static const uint8_t field_scan8x8[64+1]
Definition: h264data.h:115
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:152
int long_ref_count
number of actual long term references
Definition: h264.h:542
#define ER_DC_END
Picture.
Definition: mpegvideo.h:97
#define FF_PROFILE_H264_HIGH_10_INTRA
int cabac_init_idc
Definition: h264.h:545
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
Definition: h264.c:2555
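The weights this table holds follow from the POC distances of the two references (spec 8.4.2.3.1). A sketch of the per-pair derivation, with names local to the sketch and the long-term-reference special case (which also forces the 32/32 default) omitted:

static void implicit_weights(int cur_poc, int poc0, int poc1,
                             int *w0, int *w1)
{
    int td = av_clip(poc1 - poc0, -128, 127);
    if (!td) {                           /* equal POCs: plain average */
        *w0 = *w1 = 32;
        return;
    }
    int tb  = av_clip(cur_poc - poc0, -128, 127);
    int tx  = (16384 + (FFABS(td) >> 1)) / td;
    int dsf = av_clip((tb * tx + 32) >> 6, -1024, 1023);
    *w1 = dsf >> 2;                      /* weight of the list-1 ref */
    if (*w1 < -64 || *w1 > 128)
        *w0 = *w1 = 32;                  /* out of range: defaults */
    else
        *w0 = 64 - *w1;                  /* weights sum to 64 */
}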
void * hwaccel_picture_private
hardware accelerator private data
Definition: mpegvideo.h:132
static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth, int index)
Definition: h264.c:2254
int size_in_bits
Definition: get_bits.h:57
SPS sps
current sps
Definition: h264.h:360
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:496
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
Definition: h264.c:1076
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
DSPContext * dsp
#define MAX_SPS_COUNT
Definition: h264.h:42
Definition: h264.h:113
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:567
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
#define FFABS(a)
Definition: common.h:53
Context Adaptive Binary Arithmetic Coder inline functions.
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:222
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:161
int mmco_reset
Definition: h264.h:540
int direct_8x8_inference_flag
Definition: h264.h:169
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:316
#define CONFIG_GRAY
Definition: config.h:376
uint8_t * bipred_scratchpad
Definition: h264.h:639
float u
int poc_lsb
Definition: h264.h:503
int max_pic_num
max_frame_num or 2 * max_frame_num for field pics.
Definition: h264.h:522
void(* pred4x4_add[2])(uint8_t *pix, int16_t *block, ptrdiff_t stride)
Definition: h264pred.h:100
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
Definition: h264_refs.c:531
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264.c:4265
unsigned int topright_samples_available
Definition: h264.h:321
#define AV_WN16A(p, v)
Definition: intreadwrite.h:526
int curr_pic_num
frame_num for frames or 2 * frame_num + 1 for field pics.
Definition: h264.h:517
int slice_type
Definition: h264.h:374
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264.c:2770
static int av_unused get_cabac_terminate(CABACContext *c)
int top_type
Definition: h264.h:309
#define MB_MBAFF(h)
Definition: h264.h:64
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:278
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:297
static void loop_filter(H264Context *h, int start_x, int end_x)
Definition: h264.c:4141
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
LIBAVUTIL_VERSION_INT
Definition: eval.c:55
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
Definition: h264.h:367
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:63
#define PART_NOT_AVAILABLE
Definition: h264.h:339
unsigned int list_count
Definition: h264.h:409
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
#define IS_16X8(a)
Definition: mpegvideo.h:146
static void flush(AVCodecContext *avctx)
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
void(* h264_idct8_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:82
GetBitContext intra_gb
Definition: h264.h:417
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:498
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:85
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int pic_order_present
pic_order_present_flag
Definition: h264.h:216
Picture cur_pic
Definition: h264.h:274
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:495
static const int8_t mv[256][2]
uint8_t zigzag_scan_q0[16]
Definition: h264.h:452
struct H264Context * thread_context[MAX_THREADS]
Definition: h264.h:551
AVBufferRef * progress
Definition: thread.h:40
int chroma_log2_weight_denom
Definition: h264.h:391
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264.h:492
static void flush_change(H264Context *h)
Definition: h264.c:2632
short offset_for_ref_frame[256]
Definition: h264.h:189
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264.h:493
Definition: h264.h:110
VideoDSPContext vdsp
Definition: h264.h:262
int timing_info_present_flag
Definition: h264.h:185
NULL
Definition: eval.c:55
AVBufferRef * qscale_table_buf
Definition: mpegvideo.h:101
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
Definition: h264.c:4223
struct Picture * cur_pic
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:563
int mbaff
h264: 1 -> MBAFF frame, 0 -> not MBAFF
Definition: mpegvideo.h:168
void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table, int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1878
static int width
Definition: tests/utils.c:158
int coded_picture_number
picture number in bitstream order
Definition: frame.h:176
int mb_stride
Definition: h264.h:466
AVCodecContext * avctx
Definition: h264.h:261
uint8_t zigzag_scan8x8_q0[64]
Definition: h264.h:453
int sync
has been decoded after a keyframe
Definition: mpegvideo.h:170
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:285
AVS_Value src
Definition: avisynth_c.h:523
H264 / AVC / MPEG4 part10 codec data table
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
typedef void(RENAME(mix_any_func_type))
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
int slice_alpha_c0_offset
Definition: h264.h:474
1: top field
Definition: h264.h:138
enum AVCodecID codec_id
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a max of 31.
Definition: golomb.h:100
AVHWAccel.
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:463
static const uint8_t zigzag_scan[16+1]
Definition: h264data.h:55
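To show how such a scan table is applied: coefficient i in decode order lands at raster position scantable[i] inside the 4x4 block. The values below are the standard 4x4 zigzag order; descan_4x4() is an illustrative helper, not decoder code:

#include <stdint.h>

static const uint8_t zigzag4x4[16] = {
    0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};

static void descan_4x4(int16_t block[16], const int16_t coeffs[16])
{
    for (int i = 0; i < 16; i++)
        block[zigzag4x4[i]] = coeffs[i]; /* decode order -> raster order */
}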
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:512
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
Definition: h264.c:658
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
Definition: vdpau.c:160
int next_outputed_poc
Definition: h264.h:533
#define LTOP
Definition: h264.h:69
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:200
void avpriv_color_frame(AVFrame *frame, const int color[4])
int poc_msb
Definition: h264.h:504
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
int implicit_weight[48][48][2]
Definition: h264.h:395
int max_contexts
Max number of threads / contexts.
Definition: h264.h:564
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:624
main external API structure.
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264data.h:96
Definition: h264.h:108
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
uint8_t * visualization_buffer[3]
temporary buffer for MV visualization
Definition: h264.h:651
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:426
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:1891
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:1235
2: bottom field
Definition: h264.h:139
#define QP_MAX_NUM
Definition: h264.h:101
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
int resync_mb_x
Definition: h264.h:462
static void init_dequant8_coeff_table(H264Context *h)
Definition: h264.c:1165
void * buf
Definition: avisynth_c.h:594
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
AVBuffer * buffer
Definition: buffer.h:82
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:106
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:206
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:273
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:583
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
Picture * short_ref[32]
Definition: h264.h:527
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:279
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:92
#define FF_THREAD_FRAME
Decode more than one frame at once.
void ff_vdpau_h264_set_reference_frames(H264Context *h)
Definition: vdpau.c:92
double value
Definition: eval.c:82
int slice_flags
slice flags
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
Switching Predicted.
Definition: avutil.h:221
Describe the class of an AVClass context structure.
Definition: log.h:50
int slice_beta_offset
Definition: h264.h:475
static void idct_add(uint8_t *dest, int line_size, int16_t *block)
Definition: dsputil_sh4.c:74
#define CHROMA422(h)
Definition: h264.h:91
int index
Definition: gxfenc.c:89
uint32_t(*[6] dequant8_coeff)[64]
Definition: h264.h:370
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:4991
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264.h:279
enum AVColorSpace colorspace
YUV colorspace type.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: h264.h:106
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:203
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
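A minimal usage sketch for the bit reader: bit_size is given in bits, and get_bits_count()/get_bits_left() track the read position. The buffer contents here are illustrative only:

static void bitreader_example(void)
{
    static const uint8_t buf[8 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0xA5 };
    GetBitContext gb;
    init_get_bits(&gb, buf, 8 * 8);      /* 8 bytes = 64 bits */
    int flag = get_bits1(&gb);           /* top bit of 0xA5: 1 */
    int used = get_bits_count(&gb);      /* == 1 */
    int left = get_bits_left(&gb);       /* == 63 */
}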
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:276
void(* h264_luma_dc_dequant_idct)(int16_t *output, int16_t *input, int qmul)
Definition: h264dsp.h:101
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice)
Definition: h264_refs.c:745
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
Definition: h264.c:701
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
void(* h264_add_pixels4_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:107
void ff_vdpau_h264_picture_start(H264Context *h)
int16_t mb_padding[256 *2]
as mb is addressed by scantable[i] and scantable is uint8_t we can either check that i is not too lar...
Definition: h264.h:425
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:276
unsigned int sps_id
Definition: h264.h:214
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread.c:1066
#define AV_PIX_FMT_GBR24P
Definition: pixfmt.h:251
#define FF_PROFILE_H264_CAVLC_444
int allocate_progress
Whether to allocate progress for frame threading.
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:158
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:143
#define FF_PROFILE_H264_INTRA
static int init_dimensions(H264Context *h)
Definition: h264.c:3054
AVCodecContext * avctx
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:135
int16_t slice_row[MAX_SLICES]
to detect when MAX_SLICES is too low
Definition: h264.h:641
void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
Definition: h264.c:147
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:350
uint32_t time_scale
Definition: h264.h:187
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:159
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:286
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:296
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:228
static int pred_weight_table(H264Context *h)
Definition: h264.c:2488
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:280
int pic_struct_present_flag
Definition: h264.h:197
int av_frame_ref(AVFrame *dst, AVFrame *src)
Set up a new reference to the data described by a given frame.
Definition: frame.c:228
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
uint8_t zigzag_scan[16]
Definition: h264.h:446
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4978
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:284
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:136
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:229
#define FF_DEBUG_PICT_INFO
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
#define LBOT
Definition: h264.h:70
#define AV_ZERO128(d)
Definition: intreadwrite.h:614
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:410
int8_t * qscale_table
Definition: mpegvideo.h:102
#define MAX_MBPAIR_SIZE
Definition: h264.h:49
#define CONFIG_ERROR_RESILIENCE
Definition: config.h:371
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:2619
static int field_end(H264Context *h, int in_setup)
Definition: h264.c:2804
hardware decoding through VDA
Definition: pixfmt.h:160
discard all non-reference frames
int is_complex
Definition: h264.h:470
AVBufferPool * qscale_table_pool
Definition: h264.h:653
static enum AVPixelFormat h264_hwaccel_pixfmt_list_420[]
Definition: h264.c:72
int slice_context_count
Definition: h264.h:566
int mb_height
pic_height_in_map_units_minus1 + 1
Definition: h264.h:166
AVBufferPool * motion_val_pool
Definition: h264.h:655
Picture * DPB
Definition: h264.h:272
#define FF_PROFILE_H264_HIGH
uint8_t * rbsp_buffer[2]
Definition: h264.h:482
int qscale
Definition: h264.h:286
static const uint8_t dequant4_coeff_init[6][3]
Definition: h264data.h:250
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
#define tprintf(p,...)
Definition: get_bits.h:628
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
common internal api header.
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:115
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
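A usage sketch for the pool API used by the *_pool fields above: buffers of one fixed size are handed out, and unreferencing a buffer recycles it into the pool instead of freeing it:

static void pool_example(void)
{
    AVBufferPool *pool = av_buffer_pool_init(1024, av_buffer_allocz);
    if (!pool)
        return;
    AVBufferRef *buf = av_buffer_pool_get(pool);
    av_buffer_unref(&buf);               /* buffer goes back to the pool */
    av_buffer_pool_uninit(&pool);        /* actual free once all refs die */
}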
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
Definition: h264.c:1398
#define AV_COPY128(d, s)
Definition: intreadwrite.h:586
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:48
#define MAX_SLICES
Definition: dxva2_mpeg2.c:25
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
uint16_t * slice_table_base
Definition: h264.h:500
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:156
static double c[64]
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector prediction.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Bi-dir predicted.
Definition: avutil.h:218
AVProfile.
int index
Definition: parser.h:30
static int execute_decode_slices(H264Context *h, int context_count)
Call decode_slice() for each context.
Definition: h264.c:4446
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:82
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
int cur_chroma_format_idc
Definition: h264.h:638
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
int den
denominator
Definition: rational.h:45
int chroma_qp[2]
Definition: h264.h:277
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:598
static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth, int index, int value)
Definition: h264.c:2263
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:32
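The operation behind this function pointer (spec 8.4.2.3.2) can be sketched as a plain-C reference: scale each sample, round by half the denominator, shift and offset, then clip. Shown here for a 16-pixel-wide block; real implementations exist per block width:

static void weight_16_c(uint8_t *block, int stride, int height,
                        int log2_denom, int weight, int offset)
{
    int round = log2_denom ? 1 << (log2_denom - 1) : 0;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 16; x++)
            block[x] = av_clip_uint8(((block[x] * weight + round)
                                      >> log2_denom) + offset);
        block += stride;
    }
}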
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:203
DSP utils.
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display order
int intra16x16_pred_mode
Definition: h264.h:301
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:509
#define CONFIG_H264_VDPAU_DECODER
Definition: config.h:504
#define IS_SUB_4X8(a)
Definition: mpegvideo.h:151
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:58
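A simplified sketch of the replication this performs: while copying a block_w x block_h area into the scratch buffer, any source coordinate outside the w x h picture is clamped to the nearest edge sample (real implementations copy row-wise and fill the borders separately for speed):

static void edge_emu_sketch(uint8_t *buf, const uint8_t *src,
                            ptrdiff_t linesize, int block_w, int block_h,
                            int src_x, int src_y, int w, int h)
{
    for (int y = 0; y < block_h; y++) {
        int sy = av_clip(src_y + y, 0, h - 1);
        for (int x = 0; x < block_w; x++) {
            int sx = av_clip(src_x + x, 0, w - 1);
            buf[y * linesize + x] = src[sy * linesize + sx];
        }
    }
}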
int linesize
Definition: h264.h:283
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:705
uint8_t zigzag_scan8x8_cavlc_q0[64]
Definition: h264.h:454
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:125
static void reinit(J2kEncoderContext *s)
Definition: j2kenc.c:898
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264.c:2070
Picture * cur_pic_ptr
Definition: h264.h:273
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:275
#define FRAME_MBAFF(h)
Definition: h264.h:66
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:149
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:4864
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:54
#define IS_DIRECT(a)
Definition: mpegvideo.h:143
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:323
struct AVCodecInternal * internal
Private context used for internal data.
static int fill_filter_caches(H264Context *h, int mb_type)
Definition: h264.c:3996
#define FF_PROFILE_H264_BASELINE
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
int pic_id
h264 pic_num (short -> no wrap version of pic_num, pic_num & max_pic_num; long -> long_pic_num) ...
Definition: mpegvideo.h:163
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:139
#define AV_ZERO32(d)
Definition: intreadwrite.h:606
int mb_width
Definition: h264.h:465
enum AVPictureType pict_type
Definition: h264.h:574
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:556
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:418
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264.h:165
#define FF_PROFILE_H264_HIGH_422
#define IS_16X16(a)
Definition: mpegvideo.h:145
int flags2
CODEC_FLAG2_*.
#define AV_RN16A(p)
Definition: intreadwrite.h:514
uint32_t * mb2b_xy
Definition: h264.h:352
uint8_t field_scan8x8_cavlc_q0[64]
Definition: h264.h:457
#define HAVE_THREADS
Definition: config.h:274
int slice_type_fixed
Definition: h264.h:376
struct AVFrame f
Definition: mpegvideo.h:98
int delta_poc_bottom
Definition: h264.h:505
static void free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:1100
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
Definition: h264_refs.c:117
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264.c:1539
H264DSPContext h264dsp
Definition: h264.h:263
void ff_er_frame_start(ERContext *s)
void(* h264_idct_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:84
#define AV_LOG_INFO
Definition: log.h:156
uint8_t field_scan8x8[64]
Definition: h264.h:450
uint32_t * mb_type
Definition: mpegvideo.h:108
void INT64 INT64 count
Definition: avisynth_c.h:594
Definition: h264.h:107
#define copy_fields(to, from, start_field, end_field)
Definition: h264.c:1588
#define av_always_inline
Definition: attributes.h:41
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:632
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
uint8_t * temp
Definition: mpegvideo.h:193
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:922
int8_t * intra4x4_pred_mode
Definition: h264.h:317
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
#define ER_AC_END
static int init_table_pools(H264Context *h)
Definition: h264.c:311
const char int length
Definition: avisynth_c.h:668
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
Definition: h264.c:225
int mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264.h:356
static int clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
Definition: h264.c:2865
8: frame tripling
Definition: h264.h:145
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264.h:473
static int alloc_scratch_buffers(H264Context *h, int linesize)
Definition: h264.c:286
#define AV_RN64A(p)
Definition: intreadwrite.h:522
uint8_t field_scan_q0[16]
Definition: h264.h:455
#define LIST_NOT_USED
Definition: h264.h:338
static int alloc_picture(H264Context *h, Picture *pic)
Definition: h264.c:338
uint8_t(* non_zero_count)[48]
Definition: h264.h:331
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264.h:176
exp golomb vlc stuff
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:34
Definition: h264.h:111
uint8_t * mbskip_table
int slice_num
Definition: h264.h:372
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:617
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int droppable
Definition: h264.h:287
int level_idc
Definition: h264.h:153
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, ptrdiff_t stride)
Definition: h264pred.h:95
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:480
uint8_t field_scan[16]
Definition: h264.h:449
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
#define FFMAX3(a, b, c)
Definition: common.h:57
int b_stride
Definition: h264.h:354
Predicted.
Definition: avutil.h:217
unsigned int rbsp_buffer_size[2]
Definition: h264.h:483
#define tb
Definition: regdef.h:68
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Context Adaptive Binary Arithmetic Coder.
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:113
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:381
int short_ref_count
number of actual short term references
Definition: h264.h:543
static const AVProfile profiles[]
Definition: h264.c:5003
enum AVColorSpace colorspace
Definition: h264.h:184