/* put_dc(): replace the current MB with a flat DC-only version. */
static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int *linesize = s->cur_pic->f.linesize;
    int dc, dcu, dcv, y, i;

    for (i = 0; i < 4; i++) {
        /* DC of the i-th 8x8 luma block, stored on a 0..2040 (8x pixel) scale */
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        /* ... clamp dc to 0..2040 ... */
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
        }
    }
    /* ... fetch and clamp the chroma DCs dcu and dcv the same way ... */
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * linesize[1]] = dcu / 8;
            dest_cr[x + y * linesize[2]] = dcv / 8;
        }
    }
}
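/*
 * Illustrative aside, not part of error_resilience.c: put_dc() writes dc / 8
 * because the DC values in s->dc_val are kept on a 0..2040 scale, i.e. eight
 * times the 0..255 pixel range (see the "(dc + 4) >> 3" accumulation in
 * ff_er_frame_end() further down).  A minimal standalone sketch of the same
 * fill, with a hypothetical name and no ERContext:
 */
#include <stdint.h>

static void fill_block_with_dc(uint8_t *dst, int linesize, int stored_dc)
{
    int x, y;

    /* clamp to the representable range, then scale back to pixel range */
    if (stored_dc < 0)
        stored_dc = 0;
    else if (stored_dc > 2040)
        stored_dc = 2040;

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            dst[x + y * linesize] = stored_dc / 8;
}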
/* filter181(): 3-tap (-1, 8, -1)/6 filter over the DC plane,
 * horizontal pass followed by a vertical pass. */
static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;

            dc = -prev_dc +
                 data[x     + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;   /* divide by 6 with rounding */
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;

            dc = -prev_dc +
                 data[x +  y      * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}
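/*
 * Illustrative aside, not part of error_resilience.c: the two passes above
 * apply the 3-tap kernel (-1, 8, -1) and divide the result by 6 (the sum of
 * the weights) in fixed point, since 10923 / 65536 ~= 1/6 and the +32768
 * rounds to nearest.  A standalone sketch of one such pass on a single row;
 * the name smooth_row_181 is hypothetical, and the values are assumed to be
 * on the small 0..2040 DC scale so the 32-bit intermediate cannot overflow:
 */
#include <stdint.h>

static void smooth_row_181(int16_t *row, int width)
{
    int x;
    int prev = row[0];

    for (x = 1; x < width - 1; x++) {
        int v = -prev + row[x] * 8 - row[x + 1];
        v = (v * 10923 + 32768) >> 16;   /* v / 6, rounded */
        prev   = row[x];                 /* keep the unfiltered value for the next tap */
        row[x] = v;
    }
}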
/* guess_dc(): guess the DC of blocks which do not have an undamaged DC.
 * w and h are in 8-pixel blocks. */
static void guess_dc(ERContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;
    int16_t  (*col )[4] = av_malloc(stride * h * sizeof( int16_t) * 4);
    uint32_t (*dist)[4] = av_malloc(stride * h * sizeof(uint32_t) * 4);

    /* For every block remember, in each of the four directions, the nearest
     * undamaged DC (col[]) and how far away it is (dist[], 9999 if none). */
    for (b_y = 0; b_y < h; b_y++) {
        int color    = 1024;
        int distance = -1;
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            /* ... if s->error_status_table[mb_index_j] says the DC is intact:
             * color = dc[b_x + b_y*stride], distance = b_x ... */
            col [b_x + b_y * stride][1] = color;
            dist[b_x + b_y * stride][1] = distance >= 0 ? b_x - distance : 9999;
        }
        color    = 1024;
        distance = -1;
        for (b_x = w - 1; b_x >= 0; b_x--) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            /* ... same test, scanning right to left ... */
            col [b_x + b_y * stride][0] = color;
            dist[b_x + b_y * stride][0] = distance >= 0 ? distance - b_x : 9999;
        }
    }
    for (b_x = 0; b_x < w; b_x++) {
        int color    = 1024;
        int distance = -1;
        for (b_y = 0; b_y < h; b_y++) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            /* ... same test, scanning top to bottom ... */
            col [b_x + b_y * stride][3] = color;
            dist[b_x + b_y * stride][3] = distance >= 0 ? b_y - distance : 9999;
        }
        color    = 1024;
        distance = -1;
        for (b_y = h - 1; b_y >= 0; b_y--) {
            int mb_index_j = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            /* ... same test, scanning bottom to top ... */
            col [b_x + b_y * stride][2] = color;
            dist[b_x + b_y * stride][2] = distance >= 0 ? distance - b_y : 9999;
        }
    }

    /* Damaged intra blocks get a distance-weighted mix of the four candidates. */
    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            /* ... skip inter blocks and blocks whose DC is not damaged ... */

            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y * stride][j], 1);
                guess         += weight * (int64_t)col[b_x + b_y * stride][j];
                weight_sum    += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }

    av_freep(&col);
    av_freep(&dist);
}
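/*
 * Illustrative aside, not part of error_resilience.c: the last loop of
 * guess_dc() blends the four directional candidates with weights that are
 * inversely proportional to their distance; the large constant only keeps
 * the integer division precise.  A standalone sketch with hypothetical
 * names, col[j] being the nearest undamaged DC in direction j and dist[j]
 * its distance (9999 when none was found):
 */
#include <stdint.h>

static int16_t blend_dc_candidates(const int16_t col[4], const uint32_t dist[4])
{
    int64_t guess = 0, weight_sum = 0;
    int j;

    for (j = 0; j < 4; j++) {
        int64_t weight = (256LL * 256 * 256 * 16) / (dist[j] ? dist[j] : 1);
        guess      += weight * col[j];
        weight_sum += weight;
    }
    return (int16_t)((guess + weight_sum / 2) / weight_sum);   /* rounded */
}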
/* h_block_filter(): simple horizontal deblocking filter used for error
 * resilience; w and h are in 8-pixel blocks. */
static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    /* ... cm points at the usual 0..255 clipping table ... */

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            /* ... look up damage and intra status of the left and right block ... */
            int offset        = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged
            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue; // edge between two similar inter blocks

            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset +  8 + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset +  9 + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
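/*
 * Illustrative aside, not part of error_resilience.c: both block filters
 * measure the step across the block edge (b) against the gradients just
 * inside each side (a, c) and fade a correction into the damaged side(s)
 * with a 7/16, 5/16, 3/16, 1/16 ramp.  A standalone 1-D sketch of one such
 * edge; soften_edge and clip_u8 are hypothetical names, clip_u8 standing in
 * for the cm[] clipping table, and p points at the first pixel right of the
 * edge:
 */
#include <stdint.h>
#include <stdlib.h>

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static void soften_edge(uint8_t *p, int left_damaged, int right_damaged)
{
    int a = p[-1] - p[-2];
    int b = p[ 0] - p[-1];
    int c = p[ 1] - p[ 0];
    int d = abs(b) - ((abs(a) + abs(c) + 1) >> 1);

    if (d < 0)
        d = 0;
    if (b < 0)
        d = -d;
    if (d == 0)
        return;
    if (!(left_damaged && right_damaged))
        d = d * 16 / 9;                   /* stronger push when only one side is damaged */

    if (left_damaged) {
        p[-1] = clip_u8(p[-1] + ((d * 7) >> 4));
        p[-2] = clip_u8(p[-2] + ((d * 5) >> 4));
        p[-3] = clip_u8(p[-3] + ((d * 3) >> 4));
        p[-4] = clip_u8(p[-4] + ((d * 1) >> 4));
    }
    if (right_damaged) {
        p[0] = clip_u8(p[0] - ((d * 7) >> 4));
        p[1] = clip_u8(p[1] - ((d * 5) >> 4));
        p[2] = clip_u8(p[2] - ((d * 3) >> 4));
        p[3] = clip_u8(p[3] - ((d * 1) >> 4));
    }
}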
/* v_block_filter(): simple vertical deblocking filter used for error
 * resilience; same scheme as h_block_filter(), transposed. */
static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    /* ... cm points at the usual 0..255 clipping table ... */

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            /* ... look up damage and intra status of the top and bottom block ... */
            int offset         = b_x * 8 + b_y * stride * 8;
            int16_t *top_mv    = s->cur_pic->motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged
            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue; // edge between two similar inter blocks

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1

/* guess_mv(): guess the motion vectors of damaged inter MBs from their
 * already-fixed neighbours. */
static void guess_mv(ERContext *s)
{
    uint8_t *fixed      = s->er_temp_buffer;
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);
    /* Mark every MB whose MV is known good as MV_FROZEN; the others start
     * from the co-located MV of the previous picture. */
    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        /* ... decide fixed[mb_xy] from the MB type and error status ... */
        if (fixed[mb_xy] == MV_FROZEN)
            num_avail++;
        else if (s->last_pic->f.data[0] && s->last_pic->motion_val[0]) {
            const int mb_y      = mb_xy / s->mb_stride;
            const int mb_x      = mb_xy % s->mb_stride;
            const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
            /* ... copy the co-located MV and reference from s->last_pic ... */
        }
    }

    /* Too little reliable data (or MV guessing disabled): conceal every
     * damaged MB with a zero MV instead. */
    if ((!(s->avctx->error_concealment & FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                /* ... skip intra and undamaged MBs, then decode with mv = (0,0) ... */
            }
        }
        return;
    }

    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy        = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;                   /* checkerboard order */
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    /* only work on MBs with at least one frozen neighbour ... */
                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (!j)
                        continue;

                    /* ... and, after the first passes, one that changed recently */
                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    /* collect MV candidates from the fixed neighbours */
                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][1];
                        /* ... ref[pred_count] from the left MB ... */
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][1];
                        /* ... */
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
                        /* ... */
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
                        /* ... */
                        pred_count++;
                    }

                    /* add the mean and a pseudo-median of those candidates */
                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* add the zero MV and the MV the MB currently has */
                    pred_count++;
                    if (!fixed[mb_xy] && 0) {
                        /* ... (disabled branch) take prev_x/prev_y/prev_ref from the last picture ... */
                    } else {
                        prev_x = s->cur_pic->motion_val[0][mot_index][0];
                        prev_y = s->cur_pic->motion_val[0][mot_index][1];
                        /* ... prev_ref = current reference index ... */
                    }
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

                    /* try every candidate and keep the one whose reconstruction
                     * matches the fixed neighbours best along the MB borders */
                    for (j = 0; j < pred_count; j++) {
                        int *linesize = s->cur_pic->f.linesize;
                        int score     = 0;
                        uint8_t *src  = s->cur_pic->f.data[0] +
                                        mb_x * 16 + mb_y * 16 * linesize[0];

                        s->mv[0][0][0] = mv_predictor[j][0];
                        s->mv[0][0][1] = mv_predictor[j][1];
                        /* ... store the MV in motion_val and call s->decode_mb() ... */

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] - 1] -
                                               src[k * linesize[0]]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] + 15] -
                                               src[k * linesize[0] + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - linesize[0]] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + linesize[0] * 15] -
                                               src[k + linesize[0] * 16]);
                        }

                        if (score <= best_score) { /* <= favours the current MV */
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;

                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];
                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            /* ... store the winning MV for every sub-block ... */
                        }
                    /* ... decode the MB once more with the winning MV ... */

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }
        }

        if (none_left)
            return;

        /* everything touched in this round becomes frozen for the next one */
        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
    }
}
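/*
 * Illustrative aside, not part of error_resilience.c: for three candidate
 * vectors, "sum - max - min" per component is exactly the component-wise
 * median, which is how guess_mv() builds its median predictor (halving the
 * sum when there are four candidates).  A standalone sketch with a
 * hypothetical name:
 */
static void median_predictor3(const int mv[3][2], int out[2])
{
    int c;

    for (c = 0; c < 2; c++) {
        int sum = mv[0][c] + mv[1][c] + mv[2][c];
        int mx  = mv[0][c], mn = mv[0][c];
        int j;

        for (j = 1; j < 3; j++) {
            if (mv[j][c] > mx) mx = mv[j][c];
            if (mv[j][c] < mn) mn = mv[j][c];
        }
        out[c] = sum - mx - mn;
    }
}
/* guess_mv() then scores every candidate by the pixel discontinuity along
 * the borders shared with already-fixed neighbours and keeps the best one. */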
/* is_intra_more_likely(): decide whether damaged MBs should be concealed
 * spatially (intra) or temporally (from the last picture). */
static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    /* ... no usable previous picture -> prefer intra ... */

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        /* ... count MBs whose DC and MV are both intact ... */
    }

    if (undamaged_count < 5)
        return 0;               /* almost everything damaged -> temporal */

    /* check at most ~50 undamaged MBs, evenly spread over the picture */
    skip_amount     = FFMAX(undamaged_count / 50, 1);
    is_intra_likely = 0;

    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            /* ... skip damaged MBs ... */
            j++;
            if ((j % skip_amount) != 0)
                continue;       /* skip a few to speed things up */

            if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I) {
                int *linesize        = s->cur_pic->f.linesize;
                uint8_t *mb_ptr      = s->cur_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                uint8_t *last_mb_ptr = s->last_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                /* temporal mismatch vs. spatial activity in the last picture */
                is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
                                                  linesize[0], 16);
                is_intra_likely -= s->dsp->sad[0](NULL, last_mb_ptr,
                                                  last_mb_ptr + linesize[0] * 16,
                                                  linesize[0], 16);
            } else {
                /* ... on non-I pictures, vote by the MB's own intra/inter type ... */
            }
        }
    }

    return is_intra_likely > 0;
}
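/*
 * Illustrative aside, not part of error_resilience.c: on I pictures the
 * heuristic above compares, per sampled MB, the difference between the
 * previous picture and the current reconstruction (temporal mismatch)
 * against the difference between that MB and the MB one row below it in the
 * previous picture (a rough measure of local activity); when the temporal
 * mismatch dominates, intra concealment is preferred.  A standalone sketch;
 * sad16 and intra_vote are hypothetical names, sad16 standing in for
 * s->dsp->sad[0]():
 */
#include <stdint.h>
#include <stdlib.h>

static int sad16(const uint8_t *a, const uint8_t *b, int stride)
{
    int sum = 0, x, y;

    for (y = 0; y < 16; y++)
        for (x = 0; x < 16; x++)
            sum += abs(a[x + y * stride] - b[x + y * stride]);
    return sum;
}

/* > 0 means this MB votes for intra concealment */
static int intra_vote(const uint8_t *cur_mb, const uint8_t *last_mb, int stride)
{
    return sad16(last_mb, cur_mb, stride) -
           sad16(last_mb, last_mb + 16 * stride, stride);
}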
/* ff_er_add_slice(): add a slice and update the per-MB error status. */
void ff_er_add_slice(ERContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    /* ... build 'mask' from the reported error/end flags ... */

    if (mask == -1)
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    /* ... record the slice end, mark the first MB with VP_START and update
     * s->error_count ... */
}
/* ff_er_frame_end(): perform the actual error concealment once the whole
 * frame has been decoded. */
void ff_er_frame_end(ERContext *s)
{
    int *linesize = NULL;
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold         = 50;
    int is_intra_likely;

    /* ... return early if concealment is disabled, nothing was damaged,
     * a hwaccel owns the frame, or the picture is field coded ... */
    linesize = s->cur_pic->f.linesize;

    if (!s->cur_pic->motion_val[0] || !s->cur_pic->ref_index[0]) {
        /* the decoder did not keep MVs: allocate temporary buffers */
        for (i = 0; i < 2; i++) {
            /* ... av_buffer_allocz() s->ref_index_buf[i] and
             * s->motion_val_buf[i], hook them into s->cur_pic ... */
        }
        if (i < 2) {
            for (i = 0; i < 2; i++) {
                /* ... av_buffer_unref() whatever was allocated ... */
            }
            return;
        }
    }

    /* optional debug dump of the raw error_status_table */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            /* ... av_log() one status byte per MB ... */
        }
    }

    /* handle overlapping slices: walking backwards, every MB whose slice end
     * was never reached is marked as damaged for that error type
     * (error bit = 1 << type, end bit = 8 << type) */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];

            error = s->error_status_table[mb_xy];
            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;
            /* ... if (!end_ok) set the error bit; reset end_ok at VP_START ... */
        }
    }

    /* ... one more backward pass over the table ... */
    for (i = s->mb_num - 1; i >= 0; i--) {
        /* ... */
    }

    /* backward error propagation: MBs shortly before a detected error are
     * marked as damaged too (per-partition thresholds when partitioned) */
    for (error_type = 1; error_type <= 3; error_type++) {
        distance = 9999999;
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];

            error = s->error_status_table[mb_xy];
            /* ... advance 'distance' per MB, reset it at VP_START ... */
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }
        }
    }

    /* forward error propagation: within a slice, once something is damaged
     * everything after it is treated as damaged too */
    for (i = 0; i < s->mb_num; i++) {
        /* ... */
    }

    /* without data partitioning a single damaged bit damages the whole MB */
    for (i = 0; i < s->mb_num; i++) {
        /* ... */
    }

    /* count the damage */
    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        /* ... dc_error++ / ac_error++ / mv_error++ per flagged MB,
         * then av_log() a summary ... */
    }

    is_intra_likely = is_intra_more_likely(s);

    /* completely damaged MBs get the most likely type */
    for (i = 0; i < s->mb_num; i++) {
        /* ... skip MBs whose type survived ... */
        if (is_intra_likely)
            ; /* ... mark the MB as intra ... */
        else
            ; /* ... mark it as a plain 16x16 inter MB ... */
    }

    /* change inter to intra blocks if no reference frames are available */
    for (i = 0; i < s->mb_num; i++) {
        /* ... */
    }

    /* handle inter blocks with damaged AC: re-render them from their MVs */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int mv_dir, mv_type, j;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            /* ... skip intra MBs and MBs whose AC is intact;
             * choose mv_dir and mv_type (MV_TYPE_16X16 or MV_TYPE_8X8) ... */

            /* for 8x8-partitioned MBs copy all four stored sub-block MVs */
            int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
            for (j = 0; j < 4; j++) {
                s->mv[0][j][0] = s->cur_pic->motion_val[0][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                s->mv[0][j][1] = s->cur_pic->motion_val[0][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
            }

            s->decode_mb(s->opaque, 0, mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
        }
    }

    /* inter blocks with a damaged MV: B pictures are concealed with
     * co-located, time-scaled MVs in the loop below, everything else goes
     * through guess_mv() */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            /* ... */
        }
    }

    /* compute the per-block DCs of the current reconstruction so that
     * guess_dc() can interpolate the damaged ones */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            /* ... skip MBs whose reconstruction cannot be trusted ... */

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * linesize[0]];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * linesize[1]];
                    dcv += dest_cr[x + y * linesize[2]];
                }
            }
            /* ... store (dcu + 4) >> 3 and (dcv + 4) >> 3 in the chroma dc_val ... */
        }
    }

    /* ... guess_dc() fills the damaged DCs, filter181() smooths them ... */

    /* render the DC-only version of damaged intra MBs */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            /* ... skip everything that is not a damaged intra MB ... */

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }

    /* ... h_block_filter() / v_block_filter() smooth the block edges ... */

    /* finally, reset the per-MB error state */
    for (i = 0; i < s->mb_num; i++) {
        /* ... clear s->error_status_table and related tables ... */
    }
}
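/*
 * Illustrative aside, not part of error_resilience.c: the "handle overlapping
 * slices" pass above walks the per-MB status table backwards and flags every
 * MB that is not followed, within its slice, by an end marker of the given
 * error type.  A standalone sketch under the bit layout the listing uses
 * (error bit = 1 << type, end bit = 8 << type, VP_START = 1); the name
 * propagate_missing_end is hypothetical and the partitioned-frame special
 * case is left out:
 */
#include <stdint.h>

static void propagate_missing_end(uint8_t *status, int mb_num, int type)
{
    int i, end_ok = 0;

    for (i = mb_num - 1; i >= 0; i--) {
        int error = status[i];

        if (error & (1 << type))
            end_ok = 1;              /* already flagged as damaged */
        if (error & (8 << type))
            end_ok = 1;              /* an end marker was reached */
        if (!end_ok)
            status[i] |= 1 << type;  /* no end below: treat as damaged */
        if (error & 1 /* VP_START */)
            end_ok = 0;              /* the next slice (scanning backwards) starts fresh */
    }
}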