h264_direct.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "internal.h"
29 #include "avcodec.h"
30 #include "mpegvideo.h"
31 #include "h264.h"
32 #include "rectangle.h"
33 #include "thread.h"
34 
35 //#undef NDEBUG
36 #include <assert.h>
37 
38 
39 static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){
40  int poc0 = h->ref_list[0][i].poc;
41  int td = av_clip(poc1 - poc0, -128, 127);
42  if(td == 0 || h->ref_list[0][i].long_ref){
43  return 256;
44  }else{
45  int tb = av_clip(poc - poc0, -128, 127);
46  int tx = (16384 + (FFABS(td) >> 1)) / td;
47  return av_clip((tb*tx + 32) >> 6, -1024, 1023);
48  }
49 }
50 
52  const int poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
53  const int poc1 = h->ref_list[1][0].poc;
54  int i, field;
55 
56  if (FRAME_MBAFF(h))
57  for (field = 0; field < 2; field++){
58  const int poc = h->cur_pic_ptr->field_poc[field];
59  const int poc1 = h->ref_list[1][0].field_poc[field];
60  for (i = 0; i < 2 * h->ref_count[0]; i++)
61  h->dist_scale_factor_field[field][i^field] =
62  get_scale_factor(h, poc, poc1, i+16);
63  }
64 
65  for (i = 0; i < h->ref_count[0]; i++){
66  h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
67  }
68 }
69 
/**
 * Build the map from a colocated (list-1) picture's reference indices to the
 * current slice's list-0 indices, by matching each colocated reference POC
 * (encoded as 4*frame_num + field parity bits) against the current list-0
 * entries.
 *
 * @param map      output: map[list][colocated ref idx] = current list0 idx;
 *                 MBAFF field entries are written at offset 16
 * @param list     which colocated reference list (0 or 1) to map
 * @param field    current field parity (used when mbafi)
 * @param colfield colocated picture's field parity selector into ref_poc/ref_count
 * @param mbafi    nonzero when mapping MBAFF field references (indices 16..)
 */
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){
    Picture * const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start= mbafi ? 16 : 0;                                   // MBAFF field refs start at 16
    int end  = mbafi ? 16+2*h->ref_count[0] : h->ref_count[0];
    int interl= mbafi || h->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for(rfield=0; rfield<2; rfield++){
        for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;                                        // frame: both parity bits set
            else if( interl && (poc&3) == 3) // FIXME: store all MBAFF references so this is not needed
                poc= (poc&~3) + rfield + 1;                      // pick the field matching rfield

            for(j=start; j<end; j++){
                /* Match on 4*frame_num + reference parity bits. */
                if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference & 3) == poc) {
                    int cur_ref= mbafi ? (j-16)^field : j;
                    if (ref1->mbaff)
                        map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
                    if(rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}
102 
104  Picture * const ref1 = &h->ref_list[1][0];
105  Picture * const cur = h->cur_pic_ptr;
106  int list, j, field;
107  int sidx= (h->picture_structure&1)^1;
108  int ref1sidx = (ref1->reference&1)^1;
109 
110  for(list=0; list<2; list++){
111  cur->ref_count[sidx][list] = h->ref_count[list];
112  for(j=0; j<h->ref_count[list]; j++)
113  cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference & 3);
114  }
115 
116  if(h->picture_structure == PICT_FRAME){
117  memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
118  memcpy(cur->ref_poc [1], cur->ref_poc [0], sizeof(cur->ref_poc [0]));
119  }
120 
121  cur->mbaff = FRAME_MBAFF(h);
122 
123  h->col_fieldoff= 0;
124  if(h->picture_structure == PICT_FRAME){
125  int cur_poc = h->cur_pic_ptr->poc;
126  int *col_poc = h->ref_list[1]->field_poc;
127  h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
128  ref1sidx=sidx= h->col_parity;
129  } else if (!(h->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
130  h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
131  }
132 
134  return;
135 
136  for(list=0; list<2; list++){
137  fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
138  if (FRAME_MBAFF(h))
139  for(field=0; field<2; field++)
140  fill_colmap(h, h->map_col_to_list0_field[field], list, field, field, 1);
141  }
142 }
143 
144 static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
145 {
146  int ref_field = ref->reference - 1;
147  int ref_field_picture = ref->field_picture;
148  int ref_height = 16*h->mb_height >> ref_field_picture;
149 
151  return;
152 
153  //FIXME it can be safe to access mb stuff
154  //even if pixels aren't deblocked yet
155 
157  FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
158  ref_field_picture && ref_field);
159 }
160 
161 static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
162  int b8_stride = 2;
163  int b4_stride = h->b_stride;
164  int mb_xy = h->mb_xy, mb_y = h->mb_y;
165  int mb_type_col[2];
166  const int16_t (*l1mv0)[2], (*l1mv1)[2];
167  const int8_t *l1ref0, *l1ref1;
168  const int is_b8x8 = IS_8X8(*mb_type);
169  unsigned int sub_mb_type= MB_TYPE_L0L1;
170  int i8, i4;
171  int ref[2];
172  int mv[2];
173  int list;
174 
175  assert(h->ref_list[1][0].reference & 3);
176 
177  await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
178 
179 #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
180 
181 
182  /* ref = min(neighbors) */
183  for(list=0; list<2; list++){
184  int left_ref = h->ref_cache[list][scan8[0] - 1];
185  int top_ref = h->ref_cache[list][scan8[0] - 8];
186  int refc = h->ref_cache[list][scan8[0] - 8 + 4];
187  const int16_t *C= h->mv_cache[list][ scan8[0] - 8 + 4];
188  if(refc == PART_NOT_AVAILABLE){
189  refc = h->ref_cache[list][scan8[0] - 8 - 1];
190  C = h-> mv_cache[list][scan8[0] - 8 - 1];
191  }
192  ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc);
193  if(ref[list] >= 0){
194  //this is just pred_motion() but with the cases removed that cannot happen for direct blocks
195  const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
196  const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
197 
198  int match_count= (left_ref==ref[list]) + (top_ref==ref[list]) + (refc==ref[list]);
199  if(match_count > 1){ //most common
200  mv[list]= pack16to32(mid_pred(A[0], B[0], C[0]),
201  mid_pred(A[1], B[1], C[1]) );
202  }else {
203  assert(match_count==1);
204  if(left_ref==ref[list]){
205  mv[list]= AV_RN32A(A);
206  }else if(top_ref==ref[list]){
207  mv[list]= AV_RN32A(B);
208  }else{
209  mv[list]= AV_RN32A(C);
210  }
211  }
212  av_assert2(ref[list] < (h->ref_count[list] << !!FRAME_MBAFF(h)));
213  }else{
214  int mask= ~(MB_TYPE_L0 << (2*list));
215  mv[list] = 0;
216  ref[list] = -1;
217  if(!is_b8x8)
218  *mb_type &= mask;
219  sub_mb_type &= mask;
220  }
221  }
222  if(ref[0] < 0 && ref[1] < 0){
223  ref[0] = ref[1] = 0;
224  if(!is_b8x8)
225  *mb_type |= MB_TYPE_L0L1;
226  sub_mb_type |= MB_TYPE_L0L1;
227  }
228 
229  if(!(is_b8x8|mv[0]|mv[1])){
230  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
231  fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
232  fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
233  fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
235  return;
236  }
237 
238  if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
239  if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
240  mb_y = (h->mb_y&~1) + h->col_parity;
241  mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
242  b8_stride = 0;
243  }else{
244  mb_y += h->col_fieldoff;
245  mb_xy += h->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
246  }
247  goto single_col;
248  }else{ // AFL/AFR/FR/FL -> AFR/FR
249  if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
250  mb_y = h->mb_y&~1;
251  mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
252  mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
253  mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
254  b8_stride = 2+4*h->mb_stride;
255  b4_stride *= 6;
256  if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
257  mb_type_col[0] &= ~MB_TYPE_INTERLACED;
258  mb_type_col[1] &= ~MB_TYPE_INTERLACED;
259  }
260 
261  sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
262  if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
263  && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
264  && !is_b8x8){
265  *mb_type |= MB_TYPE_16x8 |MB_TYPE_DIRECT2; /* B_16x8 */
266  }else{
267  *mb_type |= MB_TYPE_8x8;
268  }
269  }else{ // AFR/FR -> AFR/FR
270 single_col:
271  mb_type_col[0] =
272  mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
273 
274  sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
275  if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
276  *mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_16x16 */
277  }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
278  *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
279  }else{
281  /* FIXME save sub mb types from previous frames (or derive from MVs)
282  * so we know exactly what block size to use */
283  sub_mb_type += (MB_TYPE_8x8-MB_TYPE_16x16); /* B_SUB_4x4 */
284  }
285  *mb_type |= MB_TYPE_8x8;
286  }
287  }
288  }
289 
290  await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
291 
292  l1mv0 = (void*)&h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
293  l1mv1 = (void*)&h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
294  l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
295  l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
296  if(!b8_stride){
297  if(h->mb_y&1){
298  l1ref0 += 2;
299  l1ref1 += 2;
300  l1mv0 += 2*b4_stride;
301  l1mv1 += 2*b4_stride;
302  }
303  }
304 
305 
306  if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
307  int n=0;
308  for(i8=0; i8<4; i8++){
309  int x8 = i8&1;
310  int y8 = i8>>1;
311  int xy8 = x8+y8*b8_stride;
312  int xy4 = 3*x8+y8*b4_stride;
313  int a,b;
314 
315  if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
316  continue;
317  h->sub_mb_type[i8] = sub_mb_type;
318 
319  fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
320  fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
321  if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
322  && ( (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
323  || (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
324  a=b=0;
325  if(ref[0] > 0)
326  a= mv[0];
327  if(ref[1] > 0)
328  b= mv[1];
329  n++;
330  }else{
331  a= mv[0];
332  b= mv[1];
333  }
334  fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
335  fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
336  }
337  if(!is_b8x8 && !(n&3))
339  }else if(IS_16X16(*mb_type)){
340  int a,b;
341 
342  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
343  fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
344  if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
345  && ( (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
346  || (l1ref0[0] < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
347  && h->x264_build>33U))){
348  a=b=0;
349  if(ref[0] > 0)
350  a= mv[0];
351  if(ref[1] > 0)
352  b= mv[1];
353  }else{
354  a= mv[0];
355  b= mv[1];
356  }
357  fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
358  fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
359  }else{
360  int n=0;
361  for(i8=0; i8<4; i8++){
362  const int x8 = i8&1;
363  const int y8 = i8>>1;
364 
365  if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
366  continue;
367  h->sub_mb_type[i8] = sub_mb_type;
368 
369  fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, mv[0], 4);
370  fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, mv[1], 4);
371  fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
372  fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
373 
374  assert(b8_stride==2);
375  /* col_zero_flag */
376  if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref && ( l1ref0[i8] == 0
377  || (l1ref0[i8] < 0 && l1ref1[i8] == 0
378  && h->x264_build>33U))){
379  const int16_t (*l1mv)[2]= l1ref0[i8] == 0 ? l1mv0 : l1mv1;
380  if(IS_SUB_8X8(sub_mb_type)){
381  const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
382  if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
383  if(ref[0] == 0)
384  fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
385  if(ref[1] == 0)
386  fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
387  n+=4;
388  }
389  }else{
390  int m=0;
391  for(i4=0; i4<4; i4++){
392  const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
393  if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
394  if(ref[0] == 0)
395  AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
396  if(ref[1] == 0)
397  AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
398  m++;
399  }
400  }
401  if(!(m&3))
402  h->sub_mb_type[i8]+= MB_TYPE_16x16 - MB_TYPE_8x8;
403  n+=m;
404  }
405  }
406  }
407  if(!is_b8x8 && !(n&15))
409  }
410 }
411 
/**
 * Temporal direct motion vector prediction: map the colocated block's
 * list-0 reference into the current list 0, then scale its motion vector by
 * the POC-distance factor (mv_l0 = scale * mv_col, mv_l1 = mv_l0 - mv_col).
 * Handles all frame/field combinations between the current MB and the
 * colocated picture.
 */
static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = h->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));

    /* Locate the colocated MB, accounting for frame/field mismatches. */
    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
            mb_y = (h->mb_y&~1) + h->col_parity;
            mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
            b8_stride = 0;  // flags the frame-in-field remap below
        }else{
            mb_y += h->col_fieldoff;
            mb_xy += h->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{ // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
            mb_y = h->mb_y&~1;
            mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
            b8_stride = 2+4*h->mb_stride;   // span both colocated frame MBs
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */

            if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }else{ // AFR/FR -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                /* FIXME save sub mb types from previous frames (or derive from MVs)
                 * so we know exactly what block size to use */
                sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0 = (void*)&h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1 = (void*)&h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
    if(!b8_stride){
        /* frame MB reading a field colocated picture: select the right half */
        if(h->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0 += 2*b4_stride;
            l1mv1 += 2*b4_stride;
        }
    }

    {
        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
            map_col_to_list0[0] = h->map_col_to_list0_field[h->mb_y&1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[h->mb_y&1][1];
            dist_scale_factor   = h->dist_scale_factor_field[h->mb_y&1];
        }
        ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0

        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            /* Frame/field mismatch: one colocated 8x8 per field pair; the
             * vertical MV component is rescaled between frame/field units. */
            int y_shift = 2*!IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[y8])){
                    /* intra colocated block: zero refs and MVs */
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8*b8_stride];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    /* list 0 unused by colocated block: fall back to list 1 */
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
                    int my_col = (mv_col[1]<<y_shift)/2;
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * my_col + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
                }
            }
            return;
        }

        /* one-to-one mv scaling */

        if(IS_16X16(*mb_type)){
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if(IS_INTRA(mb_type_col[0])){
                ref=mv0=mv1=0;
            }else{
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref= ref0;
                mv0= pack16to32(mv_l0[0],mv_l0[1]);
                mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[0])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                }else
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
                                 pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
                    }
            }
        }
    }
}
622 
623 void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
624  if(h->direct_spatial_mv_pred){
625  pred_spatial_direct_motion(h, mb_type);
626  }else{
627  pred_temp_direct_motion(h, mb_type);
628  }
629 }
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:663
Definition: start.py:1
int8_t * ref_index[2]
Definition: mpegvideo.h:114
FIXME Range Coding of cr are ref
Definition: snow.txt:367
#define C
#define B
Definition: dsputil.c:2025
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:51
int mb_y
Definition: h264.h:461
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:169
int mb_height
Definition: h264.h:465
#define MB_TYPE_P0L0
mpegvideo header.
H264Context.
Definition: h264.h:260
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
int picture_structure
Definition: h264.h:382
#define AV_WN32A(p, v)
Definition: intreadwrite.h:530
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:375
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:828
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
void ff_h264_pred_direct_motion(H264Context *const h, int *mb_type)
Definition: h264_direct.c:623
#define AV_RN32A(p)
Definition: intreadwrite.h:518
int ref_poc[2][2][32]
h264 POCs of the frames/fields used as reference (FIXME need per slice)
Definition: mpegvideo.h:166
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:165
uint8_t
int ref_count[2][2]
number of entries in ref_poc (FIXME need per slice)
Definition: mpegvideo.h:167
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
#define PICT_FRAME
Definition: mpegvideo.h:664
window constants for m
#define b
Definition: input.c:42
end end
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:411
int mb_xy
Definition: h264.h:468
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:408
#define MB_TYPE_P1L1
int mb_x
Definition: h264.h:461
#define FFMIN3(a, b, c)
Definition: common.h:59
static void pred_temp_direct_motion(H264Context *const h, int *mb_type)
Definition: h264_direct.c:412
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:489
#define A(x)
#define IS_INTERLACED(a)
Definition: mpegvideo.h:142
#define MB_TYPE_P1L0
H.264 / AVC / MPEG4 part10 codec.
ThreadFrame tf
Definition: mpegvideo.h:99
#define U(x)
#define td
Definition: regdef.h:70
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:103
Multithreading support functions.
static const uint16_t mask[17]
Definition: lzw.c:37
int active_thread_type
Which multithreading methods are in use by the codec.
int reference
Definition: mpegvideo.h:178
int direct_spatial_mv_pred
Definition: h264.h:397
#define IS_INTRA(a)
Definition: mpegvideo.h:138
external API header
int map_col_to_list0[2][16+32]
Definition: h264.h:402
#define MB_TYPE_16x16_OR_INTRA
int col_parity
Definition: h264.h:398
static const uint8_t scan8[16 *3+3]
Definition: h264.h:812
useful rectangle filling function
#define IS_8X8(a)
Definition: mpegvideo.h:148
int x264_build
Definition: h264.h:459
#define MB_TYPE_P0L1
#define MB_TYPE_DIRECT2
#define FFMIN(a, b)
Definition: common.h:58
int poc
h264 frame POC
Definition: mpegvideo.h:160
#define MB_TYPE_INTERLACED
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture.
Definition: mpegvideo.h:97
SPS sps
current sps
Definition: h264.h:360
#define MB_TYPE_L0L1
int dist_scale_factor[32]
Definition: h264.h:400
#define FFABS(a)
Definition: common.h:53
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:161
int direct_8x8_inference_flag
Definition: h264.h:169
static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
Definition: h264_direct.c:39
#define PART_NOT_AVAILABLE
Definition: h264.h:339
static const int8_t mv[256][2]
static void pred_spatial_direct_motion(H264Context *const h, int *mb_type)
Definition: h264_direct.c:161
int mbaff
h264 1 -> MBAFF frame 0-> not MBAFF
Definition: mpegvideo.h:168
int mb_stride
Definition: h264.h:466
AVCodecContext * avctx
Definition: h264.h:261
#define MB_TYPE_8x16
#define FF_THREAD_FRAME
Decode more than one frame at once.
synthesis window for stochastic i
#define MB_TYPE_16x16
#define mid_pred
Definition: mathops.h:94
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:159
int dist_scale_factor_field[2][32]
Definition: h264.h:401
static void await_reference_mb_row(H264Context *const h, Picture *ref, int mb_y)
Definition: h264_direct.c:144
common internal api header.
#define MB_TYPE_8x8
Bi-dir predicted.
Definition: avutil.h:218
Same thing on a dB scale
#define MB_TYPE_16x8
int col_fieldoff
Definition: h264.h:399
Picture * cur_pic_ptr
Definition: h264.h:273
#define FRAME_MBAFF(h)
Definition: h264.h:66
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:149
#define IS_DIRECT(a)
Definition: mpegvideo.h:143
#define AV_ZERO32(d)
Definition: intreadwrite.h:606
#define IS_16X16(a)
Definition: mpegvideo.h:145
uint32_t * mb2b_xy
Definition: h264.h:352
#define HAVE_THREADS
Definition: config.h:274
uint32_t * mb_type
Definition: mpegvideo.h:108
int map_col_to_list0_field[2][2][16+32]
Definition: h264.h:403
#define MB_TYPE_L0
int b_stride
Definition: h264.h:354
#define tb
Definition: regdef.h:68
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi)
Definition: h264_direct.c:70