/* Deblocking-filter alpha (edge activity) thresholds, expanded to 52*3 entries
 * so that qp plus the pre-biased slice alpha_c0 offset can index the table
 * directly without clipping. */
static const uint8_t alpha_table[52*3] = {
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
      7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
     25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
     80, 90,101,113,127,144,162,182,203,226,
    255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
};
/* Deblocking-filter beta thresholds, expanded the same way as alpha_table. */
static const uint8_t beta_table[52*3] = {
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
      3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
      8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
     13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
     18, 18,
     18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
     18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
     18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
     18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* tc0 clipping values per boundary strength (column) and index_a (row);
 * -1 marks a line that is not filtered. */
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
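/*
 * Illustrative sketch, not part of the original file: how the per-edge helpers
 * below consult the three tables above.  "a" and "b" are assumed to arrive
 * pre-biased with the slice alpha_c0/beta offsets so the expanded 52*3-entry
 * tables can be indexed without clipping; beta is assumed to be indexed
 * symmetrically with qp + b.
 */
static av_always_inline int filter_edge_is_active_sketch(unsigned int qp, int a, int b)
{
    const unsigned int index_a = qp + a;   /* row into alpha_table and tc0_table */
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];

    /* alpha == 0 or beta == 0 means the edge is left untouched at this QP/offset */
    return alpha && beta;
}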
                unsigned int qp, int a, int b,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
                unsigned int qp, int a, int b,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
                const int16_t bS[7], int bsi,
                int qp, int a, int b,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
                int bsi, int qp, int a,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
        tc[0] = tc0_table[index_a][bS[0 * bsi]] + 1;
        tc[1] = tc0_table[index_a][bS[1 * bsi]] + 1;
        tc[2] = tc0_table[index_a][bS[2 * bsi]] + 1;
        tc[3] = tc0_table[index_a][bS[3 * bsi]] + 1;
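        /* MBAFF variant: bS[] has 7 entries here and is stepped through with
         * stride bsi, so the four clipping values come from bS[0], bS[bsi],
         * bS[2*bsi] and bS[3*bsi]; note the +1 bias applied on top of the
         * tc0_table entries. */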
                unsigned int qp, int a, int b,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
                unsigned int qp, int a, int b,

    const unsigned int index_a = qp + a;

    if (alpha == 0 || beta == 0)
        return;

    if (bS[0] < 4 || !intra) {
    qp0  = (qp  + qp0  + 1) >> 1;
    qp1  = (qp  + qp1  + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;
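    /* An edge on a macroblock boundary is filtered with the rounded average of
     * the two macroblocks' QPs: qp0/qp1 and qpc0/qpc1 above presumably hold the
     * left and top neighbours' luma and chroma QPs before this averaging
     * (fast path, h264_filter_mb_fast_internal()). */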
    static const int16_t bS4[4] = { 4, 4, 4, 4 };
    static const int16_t bS3[4] = { 3, 3, 3, 3 };

    filter_mb_edgev(&img_y[4 * 0 << pixel_shift], linesize, bS4, qp0, a, b, h, 1);
    filter_mb_edgev(&img_y[4 * 2 << pixel_shift], linesize, bS3, qp,  a, b, h, 0);
    filter_mb_edgeh(&img_y[4 * 0 * linesize],     linesize, bSH, qp1, a, b, h, 1);

    filter_mb_edgev(&img_y[4 * 1 << pixel_shift], linesize, bS3, qp,  a, b, h, 0);
    filter_mb_edgev(&img_y[4 * 2 << pixel_shift], linesize, bS3, qp,  a, b, h, 0);
    filter_mb_edgev(&img_y[4 * 3 << pixel_shift], linesize, bS3, qp,  a, b, h, 0);
    filter_mb_edgeh(&img_y[4 * 0 * linesize],     linesize, bSH, qp1, a, b, h, 1);

    filter_mb_edgev(&img_cb[4 * 0 << pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
    filter_mb_edgev(&img_cr[4 * 0 << pixel_shift], linesize, bS4, qpc0, a, b, h, 1);

    filter_mb_edgev(&img_cb[4 * 2 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cr[4 * 2 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgeh(&img_cb[4 * 0 * linesize],     linesize, bSH, qpc1, a, b, h, 1);
    filter_mb_edgeh(&img_cr[4 * 0 * linesize],     linesize, bSH, qpc1, a, b, h, 1);

    filter_mb_edgeh(&img_cb[4 * 2 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cr[4 * 2 * linesize],     linesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgev(&img_cb[4 * 1 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cr[4 * 1 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cb[4 * 2 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cr[4 * 2 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cb[4 * 3 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgev(&img_cr[4 * 3 << pixel_shift], linesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgeh(&img_cb[4 * 0 * linesize],     linesize, bSH, qpc1, a, b, h, 1);
    filter_mb_edgeh(&img_cr[4 * 0 * linesize],     linesize, bSH, qpc1, a, b, h, 1);

    filter_mb_edgeh(&img_cb[4 * 1 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cr[4 * 1 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cb[4 * 2 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cr[4 * 2 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cb[4 * 3 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgeh(&img_cr[4 * 3 * linesize],     linesize, bS3, qpc,  a, b, h, 0);
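    /* The block above applies the full-resolution filter_mb_edge{v,h} helpers
     * to img_cb/img_cr with the luma linesize, presumably the 4:4:4 chroma
     * path; the calls below use the chroma-specific filter_mb_edgec{v,h}
     * helpers with uvlinesize instead. */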
    filter_mb_edgecv(&img_cb[2 * 0 << pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
    filter_mb_edgecv(&img_cr[2 * 0 << pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);

    filter_mb_edgecv(&img_cb[2 * 2 << pixel_shift], uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgecv(&img_cr[2 * 2 << pixel_shift], uvlinesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgech(&img_cb[4 * 0 * uvlinesize],   uvlinesize, bSH, qpc1, a, b, h, 1);
    filter_mb_edgech(&img_cr[4 * 0 * uvlinesize],   uvlinesize, bSH, qpc1, a, b, h, 1);

    filter_mb_edgech(&img_cb[4 * 1 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cr[4 * 1 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cb[4 * 2 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cr[4 * 2 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cb[4 * 3 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cr[4 * 3 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgecv(&img_cb[2 * 0 << pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
    filter_mb_edgecv(&img_cr[2 * 0 << pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);

    filter_mb_edgecv(&img_cb[2 * 2 << pixel_shift], uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgecv(&img_cr[2 * 2 << pixel_shift], uvlinesize, bS3, qpc,  a, b, h, 0);

    filter_mb_edgech(&img_cb[2 * 0 * uvlinesize],   uvlinesize, bSH, qpc1, a, b, h, 1);
    filter_mb_edgech(&img_cr[2 * 0 * uvlinesize],   uvlinesize, bSH, qpc1, a, b, h, 1);

    filter_mb_edgech(&img_cb[2 * 2 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    filter_mb_edgech(&img_cr[2 * 2 * uvlinesize],   uvlinesize, bS3, qpc,  a, b, h, 0);
    if (IS_8x8DCT(mb_type) && (h->cbp & 7) == 7 && !chroma444) {

        AV_WN64A(bS[0][0], 0x0002000200020002ULL);
        AV_WN64A(bS[0][2], 0x0002000200020002ULL);
        AV_WN64A(bS[1][0], 0x0002000200020002ULL);
        AV_WN64A(bS[1][2], 0x0002000200020002ULL);

        int mask_edge1 = (3 * (((5 * mb_type) >> 5) & 1)) | (mb_type >> 4);
        int mask_edge0 = 3 * ((mask_edge1 >> 1) & ((5 * left_type) >> 5) & 1);
        int step       = 1 + (mb_type >> 24);
        edges = 4 - 3 * ((mb_type >> 3) & !(h->cbp & 15));

            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
#define FILTER(hv, dir, edge, intra)                                          \
    if (AV_RN64A(bS[dir][edge])) {                                            \
        filter_mb_edge##hv(&img_y[4 * edge * (dir ? linesize : 1 << pixel_shift)],  linesize,   bS[dir][edge], edge ? qp  : qp##dir,  a, b, h, intra); \
            filter_mb_edge##hv(&img_cb[4 * edge * (dir ? linesize : 1 << pixel_shift)], linesize,   bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra); \
            filter_mb_edge##hv(&img_cr[4 * edge * (dir ? linesize : 1 << pixel_shift)], linesize,   bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra); \
    } else if (!(edge & 1)) {                                                 \
        filter_mb_edgec##hv(&img_cb[2 * edge * (dir ? uvlinesize : 1 << pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra); \
        filter_mb_edgec##hv(&img_cr[2 * edge * (dir ? uvlinesize : 1 << pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra); \

static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit)
{
    v = h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx];
    if (!v && h->ref_cache[0][b_idx] != -1)
        v = h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
            FFABS(h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1]) >= mvy_limit;

        v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] |
            h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
            FFABS(h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1]) >= mvy_limit;

        if (h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] |
            h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx])

            h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
            FFABS(h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1]) >= mvy_limit |
            h->mv_cache[1][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
            FFABS(h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1]) >= mvy_limit;
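/*
 * Sketch, not part of the original file: the boundary-strength rule that
 * check_mv() above and the bS[] assignments in filter_mb_dir() below combine
 * to implement for one 4-sample edge segment.  The "+ 3 >= 7U" comparisons in
 * check_mv() are an unsigned trick for |mv_x difference| >= 4 (one full luma
 * sample in quarter-pel units); mvy_limit plays the same role vertically
 * (presumably 4 for frame edges and 2 for field edges).  For edges between a
 * field and a frame macroblock the strongest value is 3 rather than 4, which
 * is presumably what the bulk 0x0003.../0x0004... stores select between.
 */
static int boundary_strength_sketch(int mb_edge, int either_side_intra,
                                    int nnz_p, int nnz_q,
                                    int ref_p, int ref_q,
                                    const int16_t mv_p[2], const int16_t mv_q[2],
                                    int mvy_limit)
{
    if (either_side_intra)
        return mb_edge ? 4 : 3;   /* strongest filtering around intra blocks */
    if (nnz_p | nnz_q)
        return 2;                 /* coded residual on either side */
    if (ref_p != ref_q ||
        FFABS(mv_p[0] - mv_q[0]) >= 4 ||
        FFABS(mv_p[1] - mv_q[1]) >= mvy_limit)
        return 1;                 /* significant motion discontinuity */
    return 0;                     /* edge is skipped */
}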
static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y,
                                           uint8_t *img_y, uint8_t *img_cb,
                                           uint8_t *img_cr,
                                           unsigned int linesize,
                                           unsigned int uvlinesize,
                                           int mb_xy, int mb_type,
                                           int mvy_limit,
                                           int first_vertical_edge_done,
                                           int a, int b, int chroma, int dir)
{
    int chroma_qp_avg[2];

    const int mbm_xy = dir == 0 ? mb_xy - 1 : h->top_mb_xy;

    static const uint8_t mask_edge_tab[2][8] = { { 0, 3, 3, 3, 1, 1, 1, 1 },

    const int mask_edge = mask_edge_tab[dir][(mb_type >> 3) & 7];
    const int edges     = mask_edge == 3 && !(h->cbp & 15) ? 1 : 4;
    if (mbm_type && !first_vertical_edge_done) {

        if (FRAME_MBAFF(h) && (dir == 1) && ((mb_y & 1) == 0)

            unsigned int tmp_linesize = 2 * linesize;

            for (j = 0; j < 2; j++, mbn_xy += h->mb_stride) {

                    AV_WN64A(bS, 0x0003000300030003ULL);

                        bS[0] = 1 + ((h->cbp_table[mbn_xy] & 0x4000) || h->non_zero_count_cache[scan8[0] + 0]);
                        bS[1] = 1 + ((h->cbp_table[mbn_xy] & 0x4000) || h->non_zero_count_cache[scan8[0] + 1]);
                        bS[2] = 1 + ((h->cbp_table[mbn_xy] & 0x8000) || h->non_zero_count_cache[scan8[0] + 2]);
                        bS[3] = 1 + ((h->cbp_table[mbn_xy] & 0x8000) || h->non_zero_count_cache[scan8[0] + 3]);

                    for (i = 0; i < 4; i++) {
                        bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0] + i] | mbn_nnz[i]);

                tprintf(h->avctx,
                        "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d",
                        mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);

                filter_mb_edgeh(&img_y[j * linesize], tmp_linesize, bS, qp, a, b, h, 0);

                    filter_mb_edgeh (&img_cb[j * uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                    filter_mb_edgeh (&img_cr[j * uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);

                    filter_mb_edgech(&img_cb[j * uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                    filter_mb_edgech(&img_cr[j * uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
            AV_WN64A(bS, 0x0003000300030003ULL);

            AV_WN64A(bS, 0x0004000400040004ULL);

            AV_WN64A(bS, 0x0001000100010001ULL);

            int bn_idx = b_idx - (dir ? 8 : 1);

            bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit);
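            /* The AV_WN64A() stores above preload all four 16-bit strengths of
             * an edge at once: 0x0004... writes bS = {4,4,4,4}, 0x0003...
             * writes {3,3,3,3} and 0x0001... writes {1,1,1,1}; when no bulk
             * value applies, bS is derived per 4x4 block from the non-zero
             * coefficient flags and check_mv(), as in the loop that follows. */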
            for (i = 0; i < 4; i++) {
                int x = dir == 0 ? 0 : i;
                int y = dir == 0 ? i : 0;
                int b_idx  = 8 + 4 + x + 8 * y;
                int bn_idx = b_idx - (dir ? 8 : 1);

                if (h->non_zero_count_cache[b_idx] |
                    h->non_zero_count_cache[bn_idx]) {

                    bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
        if (bS[0] + bS[1] + bS[2] + bS[3]) {

            tprintf(h->avctx,
                    "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d",
                    mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);

                    filter_mb_edgev (&img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                    filter_mb_edgev (&img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);

                    filter_mb_edgecv(&img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                    filter_mb_edgecv(&img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);

                filter_mb_edgeh (&img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                filter_mb_edgeh (&img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);

                filter_mb_edgech(&img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                filter_mb_edgech(&img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
    for (edge = 1; edge < edges; edge++) {

        const int deblock_edge = !IS_8x8DCT(mb_type & (edge << 24));

        if (!deblock_edge && (!chroma422 || dir == 0))

            AV_WN64A(bS, 0x0003000300030003ULL);

        if (edge & mask_edge) {

        else if (mask_par0) {
            int b_idx  = 8 + 4 + edge * (dir ? 8 : 1);
            int bn_idx = b_idx - (dir ? 8 : 1);

            bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit);

            for (i = 0; i < 4; i++) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i : edge;
                int b_idx  = 8 + 4 + x + 8 * y;
                int bn_idx = b_idx - (dir ? 8 : 1);

                if (h->non_zero_count_cache[b_idx] |
                    h->non_zero_count_cache[bn_idx]) {

                    bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);

        if (bS[0] + bS[1] + bS[2] + bS[3] == 0)

        tprintf(h->avctx,
                "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d",
                mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);

        } else if ((edge & 1) == 0) {

            filter_mb_edgeh(&img_y[4 * edge * linesize], linesize, bS, qp, a, b, h, 0);

            filter_mb_edgeh(&img_y[4 * edge * linesize], linesize, bS, qp, a, b, h, 0);

        } else if ((edge & 1) == 0) {
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y,
                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                       unsigned int linesize, unsigned int uvlinesize)
{
    int first_vertical_edge_done = 0;

    int mb_qp, mbn0_qp, mbn1_qp;

        first_vertical_edge_done = 1;

            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);

                { 3 + 4 * 0, 3 + 4 * 0, 3 + 4 * 0, 3 + 4 * 0, 3 + 4 * 1, 3 + 4 * 1, 3 + 4 * 1, 3 + 4 * 1 },
                { 3 + 4 * 2, 3 + 4 * 2, 3 + 4 * 2, 3 + 4 * 2, 3 + 4 * 3, 3 + 4 * 3, 3 + 4 * 3, 3 + 4 * 3 },

                { 3 + 4 * 0, 3 + 4 * 1, 3 + 4 * 2, 3 + 4 * 3, 3 + 4 * 0, 3 + 4 * 1, 3 + 4 * 2, 3 + 4 * 3 },
                { 3 + 4 * 0, 3 + 4 * 1, 3 + 4 * 2, 3 + 4 * 3, 3 + 4 * 0, 3 + 4 * 1, 3 + 4 * 2, 3 + 4 * 3 },
            for (i = 0; i < 8; i++) {

                bS[i] = 1 + !!(h->non_zero_count_cache[12 + 8 * (i >> 1)] |

            qp[0] = (mb_qp + mbn0_qp + 1) >> 1;

            qp[1] = (mb_qp + mbn1_qp + 1) >> 1;

            tprintf(h->avctx,
                    "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d",
                    mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1],
                    linesize, uvlinesize);
        for (dir = 0; dir < 2; dir++)
            filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize,
                          uvlinesize, mb_xy, mb_type, mvy_limit,
                          dir ? 0 : first_vertical_edge_done, a, b, chroma, dir);

        filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize,
                      mb_xy, mb_type, mvy_limit, first_vertical_edge_done,
                      a, b, chroma, 0);
        filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize,
                      mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1);
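        /* dir == 0 filters the vertical edges (starting from the left
         * macroblock boundary, mbm_xy == mb_xy - 1), dir == 1 the horizontal
         * edges (starting from the top boundary, h->top_mb_xy);
         * first_vertical_edge_done presumably records that the left edge was
         * already handled by the MBAFF-specific code above. */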