annotate ffmpeg/libavcodec/mpeg4video.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * MPEG4 decoder / encoder common code.
yading@10 3 * Copyright (c) 2000,2001 Fabrice Bellard
yading@10 4 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
yading@10 5 *
yading@10 6 * This file is part of FFmpeg.
yading@10 7 *
yading@10 8 * FFmpeg is free software; you can redistribute it and/or
yading@10 9 * modify it under the terms of the GNU Lesser General Public
yading@10 10 * License as published by the Free Software Foundation; either
yading@10 11 * version 2.1 of the License, or (at your option) any later version.
yading@10 12 *
yading@10 13 * FFmpeg is distributed in the hope that it will be useful,
yading@10 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 16 * Lesser General Public License for more details.
yading@10 17 *
yading@10 18 * You should have received a copy of the GNU Lesser General Public
yading@10 19 * License along with FFmpeg; if not, write to the Free Software
yading@10 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 21 */
yading@10 22
yading@10 23 #include "mpegvideo.h"
yading@10 24 #include "mpeg4video.h"
yading@10 25 #include "mpeg4data.h"
yading@10 26
/* Static backing store for the three MPEG-4 run-length VLC tables
 * (presumably intra / inter / intra-RVLC — confirm where the tables are
 * initialized); each table has two halves sized by the maximum
 * run/level ranges. */
uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3];
yading@10 28
yading@10 29 int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
yading@10 30 switch(s->pict_type){
yading@10 31 case AV_PICTURE_TYPE_I:
yading@10 32 return 16;
yading@10 33 case AV_PICTURE_TYPE_P:
yading@10 34 case AV_PICTURE_TYPE_S:
yading@10 35 return s->f_code+15;
yading@10 36 case AV_PICTURE_TYPE_B:
yading@10 37 return FFMAX3(s->f_code, s->b_code, 2) + 15;
yading@10 38 default:
yading@10 39 return -1;
yading@10 40 }
yading@10 41 }
yading@10 42
yading@10 43 void ff_mpeg4_clean_buffers(MpegEncContext *s)
yading@10 44 {
yading@10 45 int c_wrap, c_xy, l_wrap, l_xy;
yading@10 46
yading@10 47 l_wrap= s->b8_stride;
yading@10 48 l_xy= (2*s->mb_y-1)*l_wrap + s->mb_x*2 - 1;
yading@10 49 c_wrap= s->mb_stride;
yading@10 50 c_xy= (s->mb_y-1)*c_wrap + s->mb_x - 1;
yading@10 51
yading@10 52 #if 0
yading@10 53 /* clean DC */
yading@10 54 memsetw(s->dc_val[0] + l_xy, 1024, l_wrap*2+1);
yading@10 55 memsetw(s->dc_val[1] + c_xy, 1024, c_wrap+1);
yading@10 56 memsetw(s->dc_val[2] + c_xy, 1024, c_wrap+1);
yading@10 57 #endif
yading@10 58
yading@10 59 /* clean AC */
yading@10 60 memset(s->ac_val[0] + l_xy, 0, (l_wrap*2+1)*16*sizeof(int16_t));
yading@10 61 memset(s->ac_val[1] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
yading@10 62 memset(s->ac_val[2] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t));
yading@10 63
yading@10 64 /* clean MV */
yading@10 65 // we can't clear the MVs as they might be needed by a b frame
yading@10 66 // memset(s->motion_val + l_xy, 0, (l_wrap*2+1)*2*sizeof(int16_t));
yading@10 67 // memset(s->motion_val, 0, 2*sizeof(int16_t)*(2 + s->mb_width*2)*(2 + s->mb_height*2));
yading@10 68 s->last_mv[0][0][0]=
yading@10 69 s->last_mv[0][0][1]=
yading@10 70 s->last_mv[1][0][0]=
yading@10 71 s->last_mv[1][0][1]= 0;
yading@10 72 }
yading@10 73
yading@10 74 #define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
yading@10 75 #define tab_bias (tab_size/2)
yading@10 76
yading@10 77 //used by mpeg4 and rv10 decoder
yading@10 78 void ff_mpeg4_init_direct_mv(MpegEncContext *s){
yading@10 79 int i;
yading@10 80 for(i=0; i<tab_size; i++){
yading@10 81 s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
yading@10 82 s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
yading@10 83 }
yading@10 84 }
yading@10 85
yading@10 86 static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
yading@10 87 int xy= s->block_index[i];
yading@10 88 uint16_t time_pp= s->pp_time;
yading@10 89 uint16_t time_pb= s->pb_time;
yading@10 90 int p_mx, p_my;
yading@10 91
yading@10 92 p_mx = s->next_picture.motion_val[0][xy][0];
yading@10 93 if((unsigned)(p_mx + tab_bias) < tab_size){
yading@10 94 s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
yading@10 95 s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
yading@10 96 : s->direct_scale_mv[1][p_mx + tab_bias];
yading@10 97 }else{
yading@10 98 s->mv[0][i][0] = p_mx*time_pb/time_pp + mx;
yading@10 99 s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
yading@10 100 : p_mx*(time_pb - time_pp)/time_pp;
yading@10 101 }
yading@10 102 p_my = s->next_picture.motion_val[0][xy][1];
yading@10 103 if((unsigned)(p_my + tab_bias) < tab_size){
yading@10 104 s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
yading@10 105 s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
yading@10 106 : s->direct_scale_mv[1][p_my + tab_bias];
yading@10 107 }else{
yading@10 108 s->mv[0][i][1] = p_my*time_pb/time_pp + my;
yading@10 109 s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
yading@10 110 : p_my*(time_pb - time_pp)/time_pp;
yading@10 111 }
yading@10 112 }
yading@10 113
yading@10 114 #undef tab_size
yading@10 115 #undef tab_bias
yading@10 116
yading@10 117 /**
yading@10 118 *
yading@10 119 * @return the mb_type
yading@10 120 */
yading@10 121 int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
yading@10 122 const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
yading@10 123 const int colocated_mb_type = s->next_picture.mb_type[mb_index];
yading@10 124 uint16_t time_pp;
yading@10 125 uint16_t time_pb;
yading@10 126 int i;
yading@10 127
yading@10 128 //FIXME avoid divides
yading@10 129 // try special case with shifts for 1 and 3 B-frames?
yading@10 130
yading@10 131 if(IS_8X8(colocated_mb_type)){
yading@10 132 s->mv_type = MV_TYPE_8X8;
yading@10 133 for(i=0; i<4; i++){
yading@10 134 ff_mpeg4_set_one_direct_mv(s, mx, my, i);
yading@10 135 }
yading@10 136 return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
yading@10 137 } else if(IS_INTERLACED(colocated_mb_type)){
yading@10 138 s->mv_type = MV_TYPE_FIELD;
yading@10 139 for(i=0; i<2; i++){
yading@10 140 int field_select = s->next_picture.ref_index[0][4 * mb_index + 2 * i];
yading@10 141 s->field_select[0][i]= field_select;
yading@10 142 s->field_select[1][i]= i;
yading@10 143 if(s->top_field_first){
yading@10 144 time_pp= s->pp_field_time - field_select + i;
yading@10 145 time_pb= s->pb_field_time - field_select + i;
yading@10 146 }else{
yading@10 147 time_pp= s->pp_field_time + field_select - i;
yading@10 148 time_pb= s->pb_field_time + field_select - i;
yading@10 149 }
yading@10 150 s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx;
yading@10 151 s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my;
yading@10 152 s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0]
yading@10 153 : s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp;
yading@10 154 s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1]
yading@10 155 : s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp;
yading@10 156 }
yading@10 157 return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
yading@10 158 }else{
yading@10 159 ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
yading@10 160 s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0];
yading@10 161 s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1];
yading@10 162 s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0];
yading@10 163 s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1];
yading@10 164 if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
yading@10 165 s->mv_type= MV_TYPE_16X16;
yading@10 166 else
yading@10 167 s->mv_type= MV_TYPE_8X8;
yading@10 168 return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line
yading@10 169 }
yading@10 170 }