/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "dsputil.h"
#include "h261.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include <limits.h>

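/**
 * Single warp point global motion compensation (MPEG-4 GMC with one
 * effective warp point): applies s->sprite_offset to the luma and both
 * chroma planes, using the gmc1 DSP routine when a sub-halfpel shift
 * remains and plain (no-rounding) halfpel copies otherwise.
 */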
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(   (unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0)
       || (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)){
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
        ptr= s->edge_emu_buffer;
    }

    if((motion_x|motion_y)&7){
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->hdsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(   (unsigned)src_x >= FFMAX((s->h_edge_pos>>1) - 9, 0)
       || (unsigned)src_y >= FFMAX((s->v_edge_pos>>1) - 9, 0)){
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
        emu=1;
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    return;
}

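/**
 * General (affine) global motion compensation: warps the luma block as
 * two 8-pixel-wide columns and each chroma plane with the gmc DSP
 * routine, driven by s->sprite_offset and s->sprite_delta.
 */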
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
               ox,
               oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a+1, (1<<(2*a+1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
               ox + s->sprite_delta[0][0]*8,
               oy + s->sprite_delta[1][0]*8,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a+1, (1<<(2*a+1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
               ox,
               oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a+1, (1<<(2*a+1)) - s->no_rounding,
               s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
               ox,
               oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a+1, (1<<(2*a+1)) - s->no_rounding,
               s->h_edge_pos>>1, s->v_edge_pos>>1);
}

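/**
 * Half-pel motion compensation of one 8x8 luma block.
 * Clips the source position to the picture, falls back to the emulated
 * edge buffer when the block reaches outside the padded edge area, and
 * returns 1 if edge emulation was used, 0 otherwise.
 */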
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu=0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); //FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&1) - 8, 0)
           || (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y&1) - 8, 0)){
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, 9, 9,
                                     src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

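/**
 * Apply one MPEG-style motion vector to a whole macroblock (luma and
 * both chroma planes); shared worker for the frame and field paths.
 * The chroma vector and subpel position are derived from the luma vector
 * according to the output format and chroma subsampling; is_mpeg12 lets
 * the compiler discard the non-MPEG-1/2 special cases.
 */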
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                          int field_based, int bottom_field, int field_select,
                          uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                          int motion_x, int motion_y, int h, int is_mpeg12, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
    if(s->quarter_sample)
    {
        motion_x>>=1;
        motion_y>>=1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16 + (motion_x >> 1);
    src_y =( mb_y<<(4-field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8 + (mx >> 1);
            uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8 + (mx >> 1);
            uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&1) - 16, 0)
       || (unsigned)src_y > FFMAX( v_edge_pos - (motion_y&1) - h , 0)){
        if(is_mpeg12 || s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
           s->codec_id == AV_CODEC_ID_MPEG1VIDEO){
            av_log(s->avctx,AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x, src_y);
            return;
        }
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                                 17, 17+field_based,
                                 src_x, src_y<<field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf ,
                                     ptr_cb, s->uvlinesize,
                                     9, 9+field_based,
                                     uvsrc_x, uvsrc_y<<field_based,
                                     s->h_edge_pos>>1, s->v_edge_pos>>1);
            s->vdsp.emulated_edge_mc(uvbuf+16,
                                     ptr_cr, s->uvlinesize,
                                     9, 9+field_based,
                                     uvsrc_x, uvsrc_y<<field_based,
                                     s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf+16;
        }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
       s->out_format == FMT_H261){
        ff_h261_loop_filter(s);
    }
}
/* apply one mpeg motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

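/* apply one field motion vector to the three components (field_based variant of mpeg_motion) */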
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

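/**
 * Blend the five half-pel predictions (mid, top, left, right, bottom) of
 * one 8x8 block using the fixed OBMC weight table below; the weights of
 * each pixel sum to 8 and the result is rounded (+4, >>3).
 */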
//FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    av_assert2(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            ptr[i]= ptr[MID];
        }else{
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src,
                        src_x, src_y,
                        pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

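/**
 * Quarter-pel motion compensation of a macroblock (or one of its fields).
 * Luma uses the qpel functions; the chroma vector is reduced to half-pel
 * precision (with optional workarounds for known encoder bugs) and
 * handled with the half-pel pix_op functions.
 */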
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&3) - 16, 0)
       || (unsigned)src_y > FFMAX( v_edge_pos - (motion_y&3) - h , 0)){
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                                 17, 17+field_based, src_x, src_y<<field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y<<field_based,
                                     s->h_edge_pos>>1, s->v_edge_pos>>1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y<<field_based,
                                     s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * h263 chroma 4mv motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > FFMAX((s->h_edge_pos>>1) - (dxy &1) - 8, 0)
           || (unsigned)src_y > FFMAX((s->v_edge_pos>>1) - (dxy>>1) - 8, 0)){
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                     9, 9, src_x, src_y,
                                     s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
    const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
    int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
    s->vdsp.prefetch(pix[0]+off, s->linesize, 4);
    off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y, uint8_t *dest_cb,
                                                 uint8_t *dest_cr, int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16], int is_mpeg12)
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if(!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B){
        LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
        Picture *cur_frame = &s->current_picture;
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        av_assert2(!s->mb_skipped);

        AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy    ]);
        AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

        AV_COPY32(mv_cache[2][1], cur_frame->motion_val[0][mot_xy + mot_stride    ]);
        AV_COPY32(mv_cache[2][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

        AV_COPY32(mv_cache[3][1], cur_frame->motion_val[0][mot_xy + mot_stride    ]);
        AV_COPY32(mv_cache[3][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

        if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
            AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
            AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
        }else{
            AV_COPY32(mv_cache[0][1], cur_frame->motion_val[0][mot_xy - mot_stride    ]);
            AV_COPY32(mv_cache[0][2], cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
        }

        if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
            AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
            AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
        }else{
            AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
            AV_COPY32(mv_cache[2][0], cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
        }

        if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
            AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
            AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
        }else{
            AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
            AV_COPY32(mv_cache[2][3], cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
            //FIXME cleanup
            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                        pix_op[1],
                        mv);

            mx += mv[0][0];
            my += mv[0][1];
        }
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);

        return;
    }

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        if(s->mcsel){
            if(s->real_sprite_warping_points==1){
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }else{
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        }else if(!is_mpeg12 && s->quarter_sample){
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else
        {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12) {
            mx = 0;
            my = 0;
            if(s->quarter_sample){
                for(i=0;i<4;i++) {
                    motion_x = s->mv[dir][i][0];
                    motion_y = s->mv[dir][i][1];

                    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
                    src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
                    src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                    /* WARNING: do not forget half pels */
                    src_x = av_clip(src_x, -16, s->width);
                    if (src_x == s->width)
                        dxy &= ~3;
                    src_y = av_clip(src_y, -16, s->height);
                    if (src_y == s->height)
                        dxy &= ~12;

                    ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                    if(s->flags&CODEC_FLAG_EMU_EDGE){
                        if(   (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&3) - 8, 0)
                           || (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y&3) - 8, 0)){
                            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                                     s->linesize, 9, 9,
                                                     src_x, src_y,
                                                     s->h_edge_pos, s->v_edge_pos);
                            ptr= s->edge_emu_buffer;
                        }
                    }
                    dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                    qpix_op[1][dxy](dest, ptr, s->linesize);

                    mx += s->mv[dir][i][0]/2;
                    my += s->mv[dir][i][1]/2;
                }
            }else{
                for(i=0;i<4;i++) {
                    hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                                ref_picture[0],
                                mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                                pix_op[1],
                                s->mv[dir][i][0], s->mv[dir][i][1]);

                    mx += s->mv[dir][i][0];
                    my += s->mv[dir][i][1];
                }
            }

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
                chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
        }
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if(!is_mpeg12 && s->quarter_sample){
                for(i=0; i<2; i++){
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
                }
            }else{
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if(   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
               || !ref_picture[0]){
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if((s->picture_structure == s->field_select[dir][i] + 1
               || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]){
                ref2picture= ref_picture;
            }else{
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8, mb_y>>1);

            dest_y += 16*s->linesize;
            dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j^i, ref_picture, pix_op,
                                      s->mv[dir][2*i + j][0],
                                      s->mv[dir][2*i + j][1], 8, mb_y);
                }
                pix_op = s->hdsp.avg_pixels_tab;
            }
        }else{
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f.data;
            }
            for(i=0; i<2; i++){
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],16, mb_y>>1);

                // after put we make avg of the same block
                pix_op=s->hdsp.avg_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default: av_assert2(0);
    }
}

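/**
 * Wrapper around MPV_motion_internal(); unless CONFIG_SMALL is set it is
 * instantiated separately for MPEG-1/2 (FMT_MPEG1) and for the other
 * formats so the is_mpeg12 branches can be resolved at compile time.
 */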
void ff_MPV_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1)
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}