vaapi_h264.c
/*
 * H.264 HW decode acceleration through VA API
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vaapi_internal.h"
#include "h264.h"

/**
 * @file
 * This file implements the glue code between FFmpeg's and VA API's
 * structures for H.264 decoding.
 */

/**
 * Initialize an empty VA API picture.
 *
 * VA API requires a fixed-size reference picture array.
 */
static void init_vaapi_pic(VAPictureH264 *va_pic)
{
    va_pic->picture_id          = VA_INVALID_ID;
    va_pic->flags               = VA_PICTURE_H264_INVALID;
    va_pic->TopFieldOrderCnt    = 0;
    va_pic->BottomFieldOrderCnt = 0;
}

/**
 * Translate an FFmpeg Picture into its VA API form.
 *
 * @param[out] va_pic        A pointer to VA API's own picture struct
 * @param[in]  pic           A pointer to the FFmpeg picture struct to convert
 * @param[in]  pic_structure The picture field type (as defined in mpegvideo.h),
 *                           supersedes pic's field type if nonzero.
 */
static void fill_vaapi_pic(VAPictureH264 *va_pic,
                           Picture       *pic,
                           int            pic_structure)
{
    if (pic_structure == 0)
        pic_structure = pic->reference;
    pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */

    va_pic->picture_id = ff_vaapi_get_surface_id(pic);
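    /* In VA API, frame_idx holds frame_num for short-term references and the
     * long-term frame index for long-term ones; FFmpeg keeps the latter in
     * pic_id for pictures marked as long-term. */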
    va_pic->frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;

    va_pic->flags = 0;
    if (pic_structure != PICT_FRAME)
        va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
    if (pic->reference)
        va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

    va_pic->TopFieldOrderCnt = 0;
    if (pic->field_poc[0] != INT_MAX)
        va_pic->TopFieldOrderCnt = pic->field_poc[0];

    va_pic->BottomFieldOrderCnt = 0;
    if (pic->field_poc[1] != INT_MAX)
        va_pic->BottomFieldOrderCnt = pic->field_poc[1];
}

/** Decoded Picture Buffer (DPB). */
typedef struct DPB {
    int            size;     ///< Current number of reference frames in the DPB
    int            max_size; ///< Max number of reference frames. This is FF_ARRAY_ELEMS(VAPictureParameterBufferH264.ReferenceFrames)
    VAPictureH264 *va_pics;  ///< Pointer to VAPictureParameterBufferH264.ReferenceFrames array
} DPB;

/**
 * Append picture to the decoded picture buffer, in a VA API form that
 * merges the second field picture attributes with the first, if
 * available. The decoded picture buffer's size must be large enough
 * to receive the new VA API picture object.
 */
static int dpb_add(DPB *dpb, Picture *pic)
{
    int i;

    if (dpb->size >= dpb->max_size)
        return -1;

    for (i = 0; i < dpb->size; i++) {
        VAPictureH264 * const va_pic = &dpb->va_pics[i];
        if (va_pic->picture_id == ff_vaapi_get_surface_id(pic)) {
            VAPictureH264 temp_va_pic;
            fill_vaapi_pic(&temp_va_pic, pic, 0);

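            /* Both fields of an interlaced frame decode into the same VA
             * surface, so a second field must not get its own DPB slot;
             * instead, fold its field flag and POC into the entry created
             * for the first field. */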
            if ((temp_va_pic.flags ^ va_pic->flags) & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) {
                va_pic->flags |= temp_va_pic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD);
                /* Merge second field */
                if (temp_va_pic.flags & VA_PICTURE_H264_TOP_FIELD) {
                    va_pic->TopFieldOrderCnt = temp_va_pic.TopFieldOrderCnt;
                } else {
                    va_pic->BottomFieldOrderCnt = temp_va_pic.BottomFieldOrderCnt;
                }
            }
            return 0;
        }
    }

    fill_vaapi_pic(&dpb->va_pics[dpb->size++], pic, 0);
    return 0;
}

/** Fill in VA API reference frames array. */
static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param,
                                      H264Context *h)
{
    DPB dpb;
    int i;

    dpb.size     = 0;
    dpb.max_size = FF_ARRAY_ELEMS(pic_param->ReferenceFrames);
    dpb.va_pics  = pic_param->ReferenceFrames;
    for (i = 0; i < dpb.max_size; i++)
        init_vaapi_pic(&dpb.va_pics[i]);

    for (i = 0; i < h->short_ref_count; i++) {
        Picture * const pic = h->short_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }

    for (i = 0; i < 16; i++) {
        Picture * const pic = h->long_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }
    return 0;
}

/**
 * Fill in VA API reference picture lists from the FFmpeg reference
 * picture list.
 *
 * @param[out] RefPicList VA API internal reference picture list
 * @param[in]  ref_list   A pointer to the FFmpeg reference list
 * @param[in]  ref_count  The number of reference pictures in ref_list
 */
static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
                                  Picture *ref_list,
                                  unsigned int ref_count)
{
    unsigned int i, n = 0;
    for (i = 0; i < ref_count; i++)
        if (ref_list[i].reference)
            fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0);

    for (; n < 32; n++)
        init_vaapi_pic(&RefPicList[n]);
}

/**
 * Fill in prediction weight table.
 *
 * VA API requires a plain prediction weight table as it does not infer
 * any value.
 *
 * @param[in]  h                  A pointer to the current H.264 context
 * @param[in]  list               The reference frame list index to use
 * @param[out] luma_weight_flag   VA API plain luma weight flag
 * @param[out] luma_weight        VA API plain luma weight table
 * @param[out] luma_offset        VA API plain luma offset table
 * @param[out] chroma_weight_flag VA API plain chroma weight flag
 * @param[out] chroma_weight      VA API plain chroma weight table
 * @param[out] chroma_offset      VA API plain chroma offset table
 */
static void fill_vaapi_plain_pred_weight_table(H264Context *h,
                                               int list,
                                               unsigned char *luma_weight_flag,
                                               short luma_weight[32],
                                               short luma_offset[32],
                                               unsigned char *chroma_weight_flag,
                                               short chroma_weight[32][2],
                                               short chroma_offset[32][2])
{
    unsigned int i, j;

    *luma_weight_flag   = h->luma_weight_flag[list];
    *chroma_weight_flag = h->chroma_weight_flag[list];

    for (i = 0; i < h->ref_count[list]; i++) {
        /* VA API also wants the inferred (default) values, not
           only what is available in the bitstream (7.4.3.2). */
        if (h->luma_weight_flag[list]) {
            luma_weight[i] = h->luma_weight[i][list][0];
            luma_offset[i] = h->luma_weight[i][list][1];
        } else {
            luma_weight[i] = 1 << h->luma_log2_weight_denom;
            luma_offset[i] = 0;
        }
        for (j = 0; j < 2; j++) {
            if (h->chroma_weight_flag[list]) {
                chroma_weight[i][j] = h->chroma_weight[i][list][j][0];
                chroma_offset[i][j] = h->chroma_weight[i][list][j][1];
            } else {
                chroma_weight[i][j] = 1 << h->chroma_log2_weight_denom;
                chroma_offset[i][j] = 0;
            }
        }
    }
}

/** Initialize and start decoding a frame with VA API. */
static int vaapi_h264_start_frame(AVCodecContext *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
{
    H264Context * const h = avctx->priv_data;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferH264 *pic_param;
    VAIQMatrixBufferH264 *iq_matrix;

    av_dlog(avctx, "vaapi_h264_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferH264);

    /* Fill in VAPictureParameterBufferH264. */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferH264));
    if (!pic_param)
        return -1;
    fill_vaapi_pic(&pic_param->CurrPic, h->cur_pic_ptr, h->picture_structure);
    if (fill_vaapi_ReferenceFrames(pic_param, h) < 0)
        return -1;
    pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1;
    pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1;
    pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
    pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8;
    pic_param->num_ref_frames = h->sps.ref_frame_count;
    pic_param->seq_fields.value = 0; /* reset all bits */
    pic_param->seq_fields.bits.chroma_format_idc = h->sps.chroma_format_idc;
    pic_param->seq_fields.bits.residual_colour_transform_flag = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
    pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
    pic_param->seq_fields.bits.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
    pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = h->sps.mb_aff;
    pic_param->seq_fields.bits.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
    pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = h->sps.level_idc >= 31; /* A.3.3.2 */
    pic_param->seq_fields.bits.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
    pic_param->seq_fields.bits.pic_order_cnt_type = h->sps.poc_type;
    pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
    pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
    pic_param->num_slice_groups_minus1 = h->pps.slice_group_count - 1;
    pic_param->slice_group_map_type = h->pps.mb_slice_group_map_type;
    pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in FFmpeg */
    pic_param->pic_init_qp_minus26 = h->pps.init_qp - 26;
    pic_param->pic_init_qs_minus26 = h->pps.init_qs - 26;
    pic_param->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
    pic_param->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
    pic_param->pic_fields.value = 0; /* reset all bits */
    pic_param->pic_fields.bits.entropy_coding_mode_flag = h->pps.cabac;
    pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred;
    pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc;
    pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
    pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME;
    pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
    pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present;
    pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
    pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
    pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0;
    pic_param->frame_num = h->frame_num;

    /* Fill in VAIQMatrixBufferH264. */
    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
    if (!iq_matrix)
        return -1;
    memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
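    /* VA API's ScalingList8x8 only has room for the two luma 8x8 lists;
     * FFmpeg keeps those at scaling_matrix8[0] (intra Y) and
     * scaling_matrix8[3] (inter Y). */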
    memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
    memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
    return 0;
}

/** End a hardware decoding based frame. */
static int vaapi_h264_end_frame(AVCodecContext *avctx)
{
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    H264Context * const h = avctx->priv_data;
    int ret;

    av_dlog(avctx, "vaapi_h264_end_frame()\n");
    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->cur_pic_ptr));
    if (ret < 0)
        goto finish;

    ff_h264_draw_horiz_band(h, 0, h->avctx->height);

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}

/** Decode the given H.264 slice with VA API. */
static int vaapi_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer,
                                   uint32_t size)
{
    H264Context * const h = avctx->priv_data;
    VASliceParameterBufferH264 *slice_param;

    av_dlog(avctx, "vaapi_h264_decode_slice(): buffer %p, size %d\n",
            buffer, size);

    /* Fill in VASliceParameterBufferH264. */
    slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->slice_data_bit_offset = get_bits_count(&h->gb) + 8; /* bit buffer started beyond nal_unit_type */
    slice_param->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x;
    slice_param->slice_type = ff_h264_get_slice_type(h);
    slice_param->direct_spatial_mv_pred_flag = h->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0;
    slice_param->num_ref_idx_l0_active_minus1 = h->list_count > 0 ? h->ref_count[0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1 = h->list_count > 1 ? h->ref_count[1] - 1 : 0;
    slice_param->cabac_init_idc = h->cabac_init_idc;
    slice_param->slice_qp_delta = h->qscale - h->pps.init_qp;
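    /* h->deblocking_filter stores disable_deblocking_filter_idc with the
     * values 0 and 1 swapped, and h->slice_alpha_c0_offset / h->slice_beta_offset
     * carry the bitstream offsets biased by +52, so the raw syntax elements
     * are recovered below. */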
    slice_param->disable_deblocking_filter_idc = h->deblocking_filter < 2 ? !h->deblocking_filter : h->deblocking_filter;
    slice_param->slice_alpha_c0_offset_div2 = h->slice_alpha_c0_offset / 2 - 26;
    slice_param->slice_beta_offset_div2 = h->slice_beta_offset / 2 - 26;
    slice_param->luma_log2_weight_denom = h->luma_log2_weight_denom;
    slice_param->chroma_log2_weight_denom = h->chroma_log2_weight_denom;

    fill_vaapi_RefPicList(slice_param->RefPicList0, h->ref_list[0], h->list_count > 0 ? h->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param->RefPicList1, h->ref_list[1], h->list_count > 1 ? h->ref_count[1] : 0);

    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param->luma_weight_l0_flag, slice_param->luma_weight_l0, slice_param->luma_offset_l0,
                                       &slice_param->chroma_weight_l0_flag, slice_param->chroma_weight_l0, slice_param->chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param->luma_weight_l1_flag, slice_param->luma_weight_l1, slice_param->luma_offset_l1,
                                       &slice_param->chroma_weight_l1_flag, slice_param->chroma_weight_l1, slice_param->chroma_offset_l1);
    return 0;
}

AVHWAccel ff_h264_vaapi_hwaccel = {
    .name           = "h264_vaapi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VAAPI_VLD,
    .start_frame    = vaapi_h264_start_frame,
    .end_frame      = vaapi_h264_end_frame,
    .decode_slice   = vaapi_h264_decode_slice,
};
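
/*
 * Minimal usage sketch (not part of FFmpeg itself): an application selects
 * this accelerator by returning AV_PIX_FMT_VAAPI_VLD from its get_format()
 * callback and pointing avctx->hwaccel_context at a struct vaapi_context it
 * has filled in. The identifiers va_display, va_config and va_context below
 * are placeholders for objects the application is assumed to have created
 * with libva beforehand.
 *
 *     static struct vaapi_context va_ctx;
 *
 *     static enum AVPixelFormat get_format(AVCodecContext *avctx,
 *                                          const enum AVPixelFormat *fmts)
 *     {
 *         va_ctx.display    = va_display;   // VADisplay from vaInitialize()
 *         va_ctx.config_id  = va_config;    // VAConfigID for H.264 decode
 *         va_ctx.context_id = va_context;   // VAContextID bound to the surfaces
 *         avctx->hwaccel_context = &va_ctx;
 *         return AV_PIX_FMT_VAAPI_VLD;
 *     }
 */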