vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 decoder
27  */
28 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "h264chroma.h"
35 #include "vc1.h"
36 #include "vc1data.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
39 #include "unary.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 #include "libavutil/avassert.h"
43 
44 #undef NDEBUG
45 #include <assert.h>
46 
47 #define MB_INTRA_VLC_BITS 9
48 #define DC_VLC_BITS 9
49 
50 
51 // offset tables for interlaced picture MVDATA decoding
// table1 holds power-of-two offsets (2^(k-1)), table2 holds (2^k - 1) offsets;
// both are indexed by the decoded offset-table index (9 entries each).
52 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
53 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
54 
55 /***********************************************************************/
56 /**
57  * @name VC-1 Bitplane decoding
58  * @see 8.7, p56
59  * @{
60  */
61 
62 /**
63  * Imode types
64  * @{
65  */
66 enum Imode {
// NOTE(review): the enumerator list (doxygen listing lines 67-73 -- the
// bitplane Imode coding-mode constants this enum is documented to hold)
// is missing from this extraction; restore it from the original vc1dec.c
// before attempting to compile.
74 };
75 /** @} */ //imode defines
76 
// NOTE(review): the function signature (doxygen listing line 77) and the
// first body statement (line 80) are missing from this extraction. Visible
// behavior: for an interlaced field picture (field_mode), when
// second_field == tff, the three plane destination pointers are advanced by
// one line so writes land on the other field's rows -- confirm the exact
// condition and the function name against the original vc1dec.c.
78 {
79  MpegEncContext *s = &v->s;
81  if (v->field_mode && !(v->second_field ^ v->tff)) {
82  s->dest[0] += s->current_picture_ptr->f.linesize[0];
83  s->dest[1] += s->current_picture_ptr->f.linesize[1];
84  s->dest[2] += s->current_picture_ptr->f.linesize[2];
85  }
86 }
87 
88 
89 /** @} */ //Bitplane group
90 
// NOTE(review): the function signature (doxygen listing line 91), all of the
// pixel-output call lines (112, 115, 118, 121, 124, 127, 137, 140, 143, 146,
// 149, 152) and the trailing block-index rotation (lines 164-167, which use
// the inc_blk_idx() macro defined below) are missing from this extraction.
// Visible behavior: emits reconstructed pixels for the top-left (mid-row) or
// top (end-of-row) macroblock, one MB row and one MB column behind the
// decoding loop (see the comment in the body), with fieldtx doubling the
// luma stride for field-transformed MBs of interlaced frames.
92 {
93  MpegEncContext *s = &v->s;
94  int topleft_mb_pos, top_mb_pos;
95  int stride_y, fieldtx = 0;
96  int v_dist;
97 
98  /* The put pixels loop is always one MB row behind the decoding loop,
99  * because we can only put pixels when overlap filtering is done, and
100  * for filtering of the bottom edge of a MB, we need the next MB row
101  * present as well.
102  * Within the row, the put pixels loop is also one MB col behind the
103  * decoding loop. The reason for this is again, because for filtering
104  * of the right MB edge, we need the next MB present. */
105  if (!s->first_slice_line) {
106  if (s->mb_x) {
107  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
108  if (v->fcm == ILACE_FRAME)
109  fieldtx = v->fieldtx_plane[topleft_mb_pos];
110  stride_y = s->linesize << fieldtx;
111  v_dist = (16 - fieldtx) >> (fieldtx == 0);
113  s->dest[0] - 16 * s->linesize - 16,
114  stride_y);
116  s->dest[0] - 16 * s->linesize - 8,
117  stride_y);
119  s->dest[0] - v_dist * s->linesize - 16,
120  stride_y);
122  s->dest[0] - v_dist * s->linesize - 8,
123  stride_y);
125  s->dest[1] - 8 * s->uvlinesize - 8,
126  s->uvlinesize);
128  s->dest[2] - 8 * s->uvlinesize - 8,
129  s->uvlinesize);
130  }
131  if (s->mb_x == s->mb_width - 1) {
132  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
133  if (v->fcm == ILACE_FRAME)
134  fieldtx = v->fieldtx_plane[top_mb_pos];
135  stride_y = s->linesize << fieldtx;
136  v_dist = fieldtx ? 15 : 8;
138  s->dest[0] - 16 * s->linesize,
139  stride_y);
141  s->dest[0] - 16 * s->linesize + 8,
142  stride_y);
144  s->dest[0] - v_dist * s->linesize,
145  stride_y);
147  s->dest[0] - v_dist * s->linesize + 8,
148  stride_y);
150  s->dest[1] - 8 * s->uvlinesize,
151  s->uvlinesize);
153  s->dest[2] - 8 * s->uvlinesize,
154  s->uvlinesize);
155  }
156  }
157 
158 #define inc_blk_idx(idx) do { \
159  idx++; \
160  if (idx >= v->n_allocated_blks) \
161  idx = 0; \
162  } while (0)
163 
168 }
169 
170 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
171 {
172  MpegEncContext *s = &v->s;
173  int j;
174  if (!s->first_slice_line) {
175  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
176  if (s->mb_x)
177  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
179  for (j = 0; j < 2; j++) {
180  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
181  if (s->mb_x)
182  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
183  }
184  }
185  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
186 
187  if (s->mb_y == s->end_mb_y - 1) {
188  if (s->mb_x) {
189  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
190  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
191  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
192  }
193  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
194  }
195 }
196 
// NOTE(review): the function signature (doxygen listing line 197) is missing
// from this extraction. Visible behavior: delayed in-loop deblocking that
// runs two MB rows/columns behind the decoding loop (one behind the overlap
// filter, see the comment in the body), with special handling for the last
// MB column and the last MB row of the slice.
198 {
199  MpegEncContext *s = &v->s;
200  int j;
201 
202  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
203  * means it runs two rows/cols behind the decoding loop. */
204  if (!s->first_slice_line) {
205  if (s->mb_x) {
206  if (s->mb_y >= s->start_mb_y + 2) {
207  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
208 
209  if (s->mb_x >= 2)
210  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
211  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
212  for (j = 0; j < 2; j++) {
213  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
214  if (s->mb_x >= 2) {
215  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
216  }
217  }
218  }
219  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
220  }
221 
222  if (s->mb_x == s->mb_width - 1) {
223  if (s->mb_y >= s->start_mb_y + 2) {
224  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
225 
226  if (s->mb_x)
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
229  for (j = 0; j < 2; j++) {
230  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
231  if (s->mb_x >= 2) {
232  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
233  }
234  }
235  }
236  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
237  }
238 
239  if (s->mb_y == s->end_mb_y) {
240  if (s->mb_x) {
241  if (s->mb_x >= 2)
242  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
243  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
244  if (s->mb_x >= 2) {
245  for (j = 0; j < 2; j++) {
246  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
247  }
248  }
249  }
250 
251  if (s->mb_x == s->mb_width - 1) {
252  if (s->mb_x)
253  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
254  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
255  if (s->mb_x) {
256  for (j = 0; j < 2; j++) {
257  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
258  }
259  }
260  }
261  }
262  }
263 }
264 
// NOTE(review): the function signature (doxygen listing line 265) and every
// overlap-filter call line (285, 287, 290, 292, 296, 298, 304, 306, 309,
// 311, 315, 317, 324, 326, 329, 331, 335, 337) are missing from this
// extraction; only the call arguments remain. Visible behavior: conditional
// overlap smoothing gated by v->condover / v->pq >= 9 / the per-MB
// over_flags_plane bits, applied H-before-V as described in the comment in
// the body, trailing the decoding loop by one MB row and column.
266 {
267  MpegEncContext *s = &v->s;
268  int mb_pos;
269 
270  if (v->condover == CONDOVER_NONE)
271  return;
272 
273  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
274 
275  /* Within a MB, the horizontal overlap always runs before the vertical.
276  * To accomplish that, we run the H on left and internal borders of the
277  * currently decoded MB. Then, we wait for the next overlap iteration
278  * to do H overlap on the right edge of this MB, before moving over and
279  * running the V overlap. Therefore, the V overlap makes us trail by one
280  * MB col and the H overlap filter makes us trail by one MB row. This
281  * is reflected in the time at which we run the put_pixels loop. */
282  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
283  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
284  v->over_flags_plane[mb_pos - 1])) {
286  v->block[v->cur_blk_idx][0]);
288  v->block[v->cur_blk_idx][2]);
289  if (!(s->flags & CODEC_FLAG_GRAY)) {
291  v->block[v->cur_blk_idx][4]);
293  v->block[v->cur_blk_idx][5]);
294  }
295  }
297  v->block[v->cur_blk_idx][1]);
299  v->block[v->cur_blk_idx][3]);
300 
301  if (s->mb_x == s->mb_width - 1) {
302  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
303  v->over_flags_plane[mb_pos - s->mb_stride])) {
305  v->block[v->cur_blk_idx][0]);
307  v->block[v->cur_blk_idx][1]);
308  if (!(s->flags & CODEC_FLAG_GRAY)) {
310  v->block[v->cur_blk_idx][4]);
312  v->block[v->cur_blk_idx][5]);
313  }
314  }
316  v->block[v->cur_blk_idx][2]);
318  v->block[v->cur_blk_idx][3]);
319  }
320  }
321  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
322  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
323  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
325  v->block[v->left_blk_idx][0]);
327  v->block[v->left_blk_idx][1]);
328  if (!(s->flags & CODEC_FLAG_GRAY)) {
330  v->block[v->left_blk_idx][4]);
332  v->block[v->left_blk_idx][5]);
333  }
334  }
336  v->block[v->left_blk_idx][2]);
338  v->block[v->left_blk_idx][3]);
339  }
340 }
341 
342 /** Do motion compensation over 1 macroblock
343  * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
344  */
// NOTE(review): doxygen listing lines 451 (the opening of the emulate-edge
// condition whose continuation appears below as "|| s->h_edge_pos < 22 ...")
// and 458 (the luma s->vdsp.emulated_edge_mc(...) call whose argument lines
// 459-461 remain) are missing from this extraction; restore them from the
// original vc1dec.c before compiling.
345 static void vc1_mc_1mv(VC1Context *v, int dir)
346 {
347  MpegEncContext *s = &v->s;
348  H264ChromaContext *h264chroma = &v->h264chroma;
349  uint8_t *srcY, *srcU, *srcV;
350  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
351  int off, off_uv;
352  int v_edge_pos = s->v_edge_pos >> v->field_mode;
353  int i;
354 
355  if ((!v->field_mode ||
356  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
357  !v->s.last_picture.f.data[0])
358  return;
359 
360  mx = s->mv[dir][0][0];
361  my = s->mv[dir][0][1];
362 
363  // store motion vectors for further use in B frames
364  if (s->pict_type == AV_PICTURE_TYPE_P) {
365  for (i = 0; i < 4; i++) {
366  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
367  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
368  }
369  }
370 
371  uvmx = (mx + ((mx & 3) == 3)) >> 1;
372  uvmy = (my + ((my & 3) == 3)) >> 1;
373  v->luma_mv[s->mb_x][0] = uvmx;
374  v->luma_mv[s->mb_x][1] = uvmy;
375 
376  if (v->field_mode &&
377  v->cur_field_type != v->ref_field_type[dir]) {
378  my = my - 2 + 4 * v->cur_field_type;
379  uvmy = uvmy - 2 + 4 * v->cur_field_type;
380  }
381 
382  // fastuvmc shall be ignored for interlaced frame picture
383  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
384  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
385  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
386  }
387  if (v->field_mode) { // interlaced field picture
388  if (!dir) {
389  if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
390  srcY = s->current_picture.f.data[0];
391  srcU = s->current_picture.f.data[1];
392  srcV = s->current_picture.f.data[2];
393  } else {
394  srcY = s->last_picture.f.data[0];
395  srcU = s->last_picture.f.data[1];
396  srcV = s->last_picture.f.data[2];
397  }
398  } else {
399  srcY = s->next_picture.f.data[0];
400  srcU = s->next_picture.f.data[1];
401  srcV = s->next_picture.f.data[2];
402  }
403  } else {
404  if (!dir) {
405  srcY = s->last_picture.f.data[0];
406  srcU = s->last_picture.f.data[1];
407  srcV = s->last_picture.f.data[2];
408  } else {
409  srcY = s->next_picture.f.data[0];
410  srcU = s->next_picture.f.data[1];
411  srcV = s->next_picture.f.data[2];
412  }
413  }
414 
415  if(!srcY)
416  return;
417 
418  src_x = s->mb_x * 16 + (mx >> 2);
419  src_y = s->mb_y * 16 + (my >> 2);
420  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
421  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
422 
423  if (v->profile != PROFILE_ADVANCED) {
424  src_x = av_clip( src_x, -16, s->mb_width * 16);
425  src_y = av_clip( src_y, -16, s->mb_height * 16);
426  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
427  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
428  } else {
429  src_x = av_clip( src_x, -17, s->avctx->coded_width);
430  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
431  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
432  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
433  }
434 
435  srcY += src_y * s->linesize + src_x;
436  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
437  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
438 
439  if (v->field_mode && v->ref_field_type[dir]) {
440  srcY += s->current_picture_ptr->f.linesize[0];
441  srcU += s->current_picture_ptr->f.linesize[1];
442  srcV += s->current_picture_ptr->f.linesize[2];
443  }
444 
445  /* for grayscale we should not try to read from unknown area */
446  if (s->flags & CODEC_FLAG_GRAY) {
447  srcU = s->edge_emu_buffer + 18 * s->linesize;
448  srcV = s->edge_emu_buffer + 18 * s->linesize;
449  }
450 
452  || s->h_edge_pos < 22 || v_edge_pos < 22
453  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
454  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
455  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
456 
457  srcY -= s->mspel * (1 + s->linesize);
459  17 + s->mspel * 2, 17 + s->mspel * 2,
460  src_x - s->mspel, src_y - s->mspel,
461  s->h_edge_pos, v_edge_pos);
462  srcY = s->edge_emu_buffer;
463  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
464  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
465  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
466  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
467  srcU = uvbuf;
468  srcV = uvbuf + 16;
469  /* if we deal with range reduction we need to scale source blocks */
470  if (v->rangeredfrm) {
471  int i, j;
472  uint8_t *src, *src2;
473 
474  src = srcY;
475  for (j = 0; j < 17 + s->mspel * 2; j++) {
476  for (i = 0; i < 17 + s->mspel * 2; i++)
477  src[i] = ((src[i] - 128) >> 1) + 128;
478  src += s->linesize;
479  }
480  src = srcU;
481  src2 = srcV;
482  for (j = 0; j < 9; j++) {
483  for (i = 0; i < 9; i++) {
484  src[i] = ((src[i] - 128) >> 1) + 128;
485  src2[i] = ((src2[i] - 128) >> 1) + 128;
486  }
487  src += s->uvlinesize;
488  src2 += s->uvlinesize;
489  }
490  }
491  /* if we deal with intensity compensation we need to scale source blocks */
492  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
493  int i, j;
494  uint8_t *src, *src2;
495 
496  src = srcY;
497  for (j = 0; j < 17 + s->mspel * 2; j++) {
498  for (i = 0; i < 17 + s->mspel * 2; i++)
499  src[i] = v->luty[src[i]];
500  src += s->linesize;
501  }
502  src = srcU;
503  src2 = srcV;
504  for (j = 0; j < 9; j++) {
505  for (i = 0; i < 9; i++) {
506  src[i] = v->lutuv[src[i]];
507  src2[i] = v->lutuv[src2[i]];
508  }
509  src += s->uvlinesize;
510  src2 += s->uvlinesize;
511  }
512  }
513  srcY += s->mspel * (1 + s->linesize);
514  }
515 
516  off = 0;
517  off_uv = 0;
518  if (s->mspel) {
519  dxy = ((my & 3) << 2) | (mx & 3);
520  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
521  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
522  srcY += s->linesize * 8;
523  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
524  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
525  } else { // hpel mc - always used for luma
526  dxy = (my & 2) | ((mx & 2) >> 1);
527  if (!v->rnd)
528  s->hdsp.put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
529  else
530  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
531  }
532 
533  if (s->flags & CODEC_FLAG_GRAY) return;
534  /* Chroma MC always uses qpel bilinear */
535  uvmx = (uvmx & 3) << 1;
536  uvmy = (uvmy & 3) << 1;
537  if (!v->rnd) {
538  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
539  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
540  } else {
541  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
542  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
543  }
544 }
545 
/**
 * Median of four integers: the truncating average of the two middle values.
 * Sorts each input pair, then averages the smaller of the two maxima with
 * the larger of the two minima; division truncates toward zero, matching
 * the original pairwise formulation.
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo1, hi1, lo2, hi2, mid_lo, mid_hi;

    if (a < b) { lo1 = a; hi1 = b; } else { lo1 = b; hi1 = a; }
    if (c < d) { lo2 = c; hi2 = d; } else { lo2 = d; hi2 = c; }
    mid_hi = hi1 < hi2 ? hi1 : hi2; /* smaller of the pairwise maxima */
    mid_lo = lo1 > lo2 ? lo1 : lo2; /* larger of the pairwise minima */
    return (mid_hi + mid_lo) / 2;
}
556 
557 /** Do motion compensation for 4-MV macroblock - luminance block
558  */
// NOTE(review): doxygen listing lines 686 (the opening of the emulate-edge
// condition whose continuation appears below as "|| s->h_edge_pos < 13 ...")
// and 692 (the s->vdsp.emulated_edge_mc(...) call whose argument lines
// 693-695 remain) are missing from this extraction; restore them from the
// original vc1dec.c before compiling.
559 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
560 {
561  MpegEncContext *s = &v->s;
562  uint8_t *srcY;
563  int dxy, mx, my, src_x, src_y;
564  int off;
565  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
566  int v_edge_pos = s->v_edge_pos >> v->field_mode;
567 
568  if ((!v->field_mode ||
569  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
570  !v->s.last_picture.f.data[0])
571  return;
572 
573  mx = s->mv[dir][n][0];
574  my = s->mv[dir][n][1];
575 
576  if (!dir) {
577  if (v->field_mode) {
578  if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
579  srcY = s->current_picture.f.data[0];
580  else
581  srcY = s->last_picture.f.data[0];
582  } else
583  srcY = s->last_picture.f.data[0];
584  } else
585  srcY = s->next_picture.f.data[0];
586 
587  if(!srcY)
588  return;
589 
590  if (v->field_mode) {
591  if (v->cur_field_type != v->ref_field_type[dir])
592  my = my - 2 + 4 * v->cur_field_type;
593  }
594 
595  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
596  int same_count = 0, opp_count = 0, k;
597  int chosen_mv[2][4][2], f;
598  int tx, ty;
599  for (k = 0; k < 4; k++) {
600  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
601  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
602  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
603  opp_count += f;
604  same_count += 1 - f;
605  }
606  f = opp_count > same_count;
607  switch (f ? opp_count : same_count) {
608  case 4:
609  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
610  chosen_mv[f][2][0], chosen_mv[f][3][0]);
611  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
612  chosen_mv[f][2][1], chosen_mv[f][3][1]);
613  break;
614  case 3:
615  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
616  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
617  break;
618  case 2:
619  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
620  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
621  break;
622  default:
623  av_assert2(0);
624  }
625  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
626  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
627  for (k = 0; k < 4; k++)
628  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
629  }
630 
631  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
632  int qx, qy;
633  int width = s->avctx->coded_width;
634  int height = s->avctx->coded_height >> 1;
635  if (s->pict_type == AV_PICTURE_TYPE_P) {
636  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
637  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
638  }
639  qx = (s->mb_x * 16) + (mx >> 2);
640  qy = (s->mb_y * 8) + (my >> 3);
641 
642  if (qx < -17)
643  mx -= 4 * (qx + 17);
644  else if (qx > width)
645  mx -= 4 * (qx - width);
646  if (qy < -18)
647  my -= 8 * (qy + 18);
648  else if (qy > height + 1)
649  my -= 8 * (qy - height - 1);
650  }
651 
652  if ((v->fcm == ILACE_FRAME) && fieldmv)
653  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
654  else
655  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
656 
657  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
658  if (!fieldmv)
659  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
660  else
661  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
662 
663  if (v->profile != PROFILE_ADVANCED) {
664  src_x = av_clip(src_x, -16, s->mb_width * 16);
665  src_y = av_clip(src_y, -16, s->mb_height * 16);
666  } else {
667  src_x = av_clip(src_x, -17, s->avctx->coded_width);
668  if (v->fcm == ILACE_FRAME) {
669  if (src_y & 1)
670  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
671  else
672  src_y = av_clip(src_y, -18, s->avctx->coded_height);
673  } else {
674  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
675  }
676  }
677 
678  srcY += src_y * s->linesize + src_x;
679  if (v->field_mode && v->ref_field_type[dir])
680  srcY += s->current_picture_ptr->f.linesize[0];
681 
682  if (fieldmv && !(src_y & 1))
683  v_edge_pos--;
684  if (fieldmv && (src_y & 1) && src_y < 4)
685  src_y--;
687  || s->h_edge_pos < 13 || v_edge_pos < 23
688  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
689  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
690  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
691  /* check emulate edge stride and offset */
693  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
694  src_x - s->mspel, src_y - (s->mspel << fieldmv),
695  s->h_edge_pos, v_edge_pos);
696  srcY = s->edge_emu_buffer;
697  /* if we deal with range reduction we need to scale source blocks */
698  if (v->rangeredfrm) {
699  int i, j;
700  uint8_t *src;
701 
702  src = srcY;
703  for (j = 0; j < 9 + s->mspel * 2; j++) {
704  for (i = 0; i < 9 + s->mspel * 2; i++)
705  src[i] = ((src[i] - 128) >> 1) + 128;
706  src += s->linesize << fieldmv;
707  }
708  }
709  /* if we deal with intensity compensation we need to scale source blocks */
710  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
711  int i, j;
712  uint8_t *src;
713 
714  src = srcY;
715  for (j = 0; j < 9 + s->mspel * 2; j++) {
716  for (i = 0; i < 9 + s->mspel * 2; i++)
717  src[i] = v->luty[src[i]];
718  src += s->linesize << fieldmv;
719  }
720  }
721  srcY += s->mspel * (1 + (s->linesize << fieldmv));
722  }
723 
724  if (s->mspel) {
725  dxy = ((my & 3) << 2) | (mx & 3);
726  if (avg)
727  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
728  else
729  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
730  } else { // hpel mc - always used for luma
731  dxy = (my & 2) | ((mx & 2) >> 1);
732  if (!v->rnd)
733  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
734  else
735  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
736  }
737 }
738 
739 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
740 {
741  int idx, i;
742  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
743 
744  idx = ((a[3] != flag) << 3)
745  | ((a[2] != flag) << 2)
746  | ((a[1] != flag) << 1)
747  | (a[0] != flag);
748  if (!idx) {
749  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
750  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
751  return 4;
752  } else if (count[idx] == 1) {
753  switch (idx) {
754  case 0x1:
755  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
756  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
757  return 3;
758  case 0x2:
759  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
760  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
761  return 3;
762  case 0x4:
763  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
764  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
765  return 3;
766  case 0x8:
767  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
768  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
769  return 3;
770  }
771  } else if (count[idx] == 2) {
772  int t1 = 0, t2 = 0;
773  for (i = 0; i < 3; i++)
774  if (!a[i]) {
775  t1 = i;
776  break;
777  }
778  for (i = t1 + 1; i < 4; i++)
779  if (!a[i]) {
780  t2 = i;
781  break;
782  }
783  *tx = (mvx[t1] + mvx[t2]) / 2;
784  *ty = (mvy[t1] + mvy[t2]) / 2;
785  return 2;
786  } else {
787  return 0;
788  }
789  return -1;
790 }
791 
792 /** Do motion compensation for 4-MV macroblock - both chroma blocks
793  */
// NOTE(review): doxygen listing lines 898 (the opening of the emulate-edge
// condition whose continuation appears below as "|| s->h_edge_pos < 18 ...")
// and 902 (the first s->vdsp.emulated_edge_mc(...) call for srcU, whose
// argument lines 903-904 remain) are missing from this extraction; restore
// them from the original vc1dec.c before compiling.
794 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
795 {
796  MpegEncContext *s = &v->s;
797  H264ChromaContext *h264chroma = &v->h264chroma;
798  uint8_t *srcU, *srcV;
799  int uvmx, uvmy, uvsrc_x, uvsrc_y;
800  int k, tx = 0, ty = 0;
801  int mvx[4], mvy[4], intra[4], mv_f[4];
802  int valid_count;
803  int chroma_ref_type = v->cur_field_type, off = 0;
804  int v_edge_pos = s->v_edge_pos >> v->field_mode;
805 
806  if (!v->field_mode && !v->s.last_picture.f.data[0])
807  return;
808  if (s->flags & CODEC_FLAG_GRAY)
809  return;
810 
811  for (k = 0; k < 4; k++) {
812  mvx[k] = s->mv[dir][k][0];
813  mvy[k] = s->mv[dir][k][1];
814  intra[k] = v->mb_type[0][s->block_index[k]];
815  if (v->field_mode)
816  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
817  }
818 
819  /* calculate chroma MV vector from four luma MVs */
820  if (!v->field_mode || (v->field_mode && !v->numref)) {
821  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
822  chroma_ref_type = v->reffield;
823  if (!valid_count) {
824  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
825  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
826  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
827  return; //no need to do MC for intra blocks
828  }
829  } else {
830  int dominant = 0;
831  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
832  dominant = 1;
833  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
834  if (dominant)
835  chroma_ref_type = !v->cur_field_type;
836  }
837  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
838  return;
839  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
840  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
841  uvmx = (tx + ((tx & 3) == 3)) >> 1;
842  uvmy = (ty + ((ty & 3) == 3)) >> 1;
843 
844  v->luma_mv[s->mb_x][0] = uvmx;
845  v->luma_mv[s->mb_x][1] = uvmy;
846 
847  if (v->fastuvmc) {
848  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
849  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
850  }
851  // Field conversion bias
852  if (v->cur_field_type != chroma_ref_type)
853  uvmy += 2 - 4 * chroma_ref_type;
854 
855  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
856  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
857 
858  if (v->profile != PROFILE_ADVANCED) {
859  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
860  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
861  } else {
862  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
863  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
864  }
865 
866  if (!dir) {
867  if (v->field_mode) {
868  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
869  srcU = s->current_picture.f.data[1];
870  srcV = s->current_picture.f.data[2];
871  } else {
872  srcU = s->last_picture.f.data[1];
873  srcV = s->last_picture.f.data[2];
874  }
875  } else {
876  srcU = s->last_picture.f.data[1];
877  srcV = s->last_picture.f.data[2];
878  }
879  } else {
880  srcU = s->next_picture.f.data[1];
881  srcV = s->next_picture.f.data[2];
882  }
883 
884  if(!srcU)
885  return;
886 
887  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
888  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
889 
890  if (v->field_mode) {
891  if (chroma_ref_type) {
892  srcU += s->current_picture_ptr->f.linesize[1];
893  srcV += s->current_picture_ptr->f.linesize[2];
894  }
895  off = 0;
896  }
897 
899  || s->h_edge_pos < 18 || v_edge_pos < 18
900  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
901  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
903  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
904  s->h_edge_pos >> 1, v_edge_pos >> 1);
905  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
906  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
907  s->h_edge_pos >> 1, v_edge_pos >> 1);
908  srcU = s->edge_emu_buffer;
909  srcV = s->edge_emu_buffer + 16;
910 
911  /* if we deal with range reduction we need to scale source blocks */
912  if (v->rangeredfrm) {
913  int i, j;
914  uint8_t *src, *src2;
915 
916  src = srcU;
917  src2 = srcV;
918  for (j = 0; j < 9; j++) {
919  for (i = 0; i < 9; i++) {
920  src[i] = ((src[i] - 128) >> 1) + 128;
921  src2[i] = ((src2[i] - 128) >> 1) + 128;
922  }
923  src += s->uvlinesize;
924  src2 += s->uvlinesize;
925  }
926  }
927  /* if we deal with intensity compensation we need to scale source blocks */
928  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
929  int i, j;
930  uint8_t *src, *src2;
931 
932  src = srcU;
933  src2 = srcV;
934  for (j = 0; j < 9; j++) {
935  for (i = 0; i < 9; i++) {
936  src[i] = v->lutuv[src[i]];
937  src2[i] = v->lutuv[src2[i]];
938  }
939  src += s->uvlinesize;
940  src2 += s->uvlinesize;
941  }
942  }
943  }
944 
945  /* Chroma MC always uses qpel bilinear */
946  uvmx = (uvmx & 3) << 1;
947  uvmy = (uvmy & 3) << 1;
948  if (!v->rnd) {
949  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
950  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
951  } else {
952  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
953  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
954  }
955 }
956 
957 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
958  */
// NOTE(review): the function signature (doxygen listing line 959 -- per the
// doc comment above, this is the 4-MV field chroma MC for both U and V) and
// the first s->vdsp.emulated_edge_mc(...) call for srcU (line 1008, whose
// argument lines 1009-1010 remain) are missing from this extraction;
// restore them from the original vc1dec.c before compiling.
960 {
961  MpegEncContext *s = &v->s;
962  H264ChromaContext *h264chroma = &v->h264chroma;
963  uint8_t *srcU, *srcV;
964  int uvsrc_x, uvsrc_y;
965  int uvmx_field[4], uvmy_field[4];
966  int i, off, tx, ty;
967  int fieldmv = v->blk_mv_type[s->block_index[0]];
968  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
969  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
970  int v_edge_pos = s->v_edge_pos >> 1;
971 
972  if (!v->s.last_picture.f.data[0])
973  return;
974  if (s->flags & CODEC_FLAG_GRAY)
975  return;
976 
977  for (i = 0; i < 4; i++) {
978  tx = s->mv[0][i][0];
979  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
980  ty = s->mv[0][i][1];
981  if (fieldmv)
982  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
983  else
984  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
985  }
986 
987  for (i = 0; i < 4; i++) {
988  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
989  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
990  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
991  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
992  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
993  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
994  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
995  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
996  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
997  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
998 
999  if (fieldmv && !(uvsrc_y & 1))
1000  v_edge_pos = (s->v_edge_pos >> 1) - 1;
1001 
1002  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1003  uvsrc_y--;
1004  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1005  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1006  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1007  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1009  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1010  s->h_edge_pos >> 1, v_edge_pos);
1011  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1012  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1013  s->h_edge_pos >> 1, v_edge_pos);
1014  srcU = s->edge_emu_buffer;
1015  srcV = s->edge_emu_buffer + 16;
1016 
1017  /* if we deal with intensity compensation we need to scale source blocks */
1018  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1019  int i, j;
1020  uint8_t *src, *src2;
1021 
1022  src = srcU;
1023  src2 = srcV;
1024  for (j = 0; j < 5; j++) {
1025  for (i = 0; i < 5; i++) {
1026  src[i] = v->lutuv[src[i]];
1027  src2[i] = v->lutuv[src2[i]];
1028  }
1029  src += s->uvlinesize << 1;
1030  src2 += s->uvlinesize << 1;
1031  }
1032  }
1033  }
1034  if (!v->rnd) {
1035  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1036  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1037  } else {
1038  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1039  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1040  }
1041  }
1042 }
1043 
1044 /***********************************************************************/
1045 /**
1046  * @name VC-1 Block-level functions
 * @see 7.1.4, p91 and 8.1.1.7, p104
1048  * @{
1049  */
1050 
1051 /**
1052  * @def GET_MQUANT
1053  * @brief Get macroblock-level quantizer scale
1054  */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                /* one bit selects PQUANT vs ALTPQUANT */      \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                /* 3-bit delta; 7 escapes to a raw 5-bit quantizer */ \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        /* edges bitmask: 1=left, 2=top, 4=right, 8=bottom */  \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        /* MBs on a selected picture edge use ALTPQUANT */     \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        /* quantizer must stay in [1, 31] */                   \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
1089 
1090 /**
1091  * @def GET_MVDATA(_dmv_x, _dmv_y)
1092  * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p120
1094  * @param _dmv_x Horizontal differential for decoded MV
1095  * @param _dmv_y Vertical differential for decoded MV
1096  */
#define GET_MVDATA(_dmv_x, _dmv_y)                                      \
    /* VLC index: 0 = zero MV, 35 = escape-coded MV, 36 = intra MB,     \
     * >36 additionally signals that the MB has residual coefficients */ \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                      \
    if (index > 36) {                                                   \
        mb_has_coeffs = 1;                                              \
        index -= 37;                                                    \
    } else                                                              \
        mb_has_coeffs = 0;                                              \
    s->mb_intra = 0;                                                    \
    if (!index) {                                                       \
        _dmv_x = _dmv_y = 0;                                            \
    } else if (index == 35) {                                           \
        /* escape: raw differentials, width depends on MV range */      \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
    } else if (index == 36) {                                           \
        _dmv_x = 0;                                                     \
        _dmv_y = 0;                                                     \
        s->mb_intra = 1;                                                \
    } else {                                                            \
        /* split index into horizontal/vertical size-class pair */      \
        index1 = index % 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else val = 0;                                                   \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else val = 0;                                                   \
        /* LSB of val is the sign; fold it into the magnitude */        \
        sign = 0 - (val&1);                                             \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;     \
                                                                        \
        index1 = index / 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else val = 0;                                                   \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else val = 0;                                                   \
        sign = 0 - (val & 1);                                           \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
    }
1134 
1136  int *dmv_y, int *pred_flag)
1137 {
1138  int index, index1;
1139  int extend_x = 0, extend_y = 0;
1140  GetBitContext *gb = &v->s.gb;
1141  int bits, esc;
1142  int val, sign;
1143  const int* offs_tab;
1144 
1145  if (v->numref) {
1146  bits = VC1_2REF_MVDATA_VLC_BITS;
1147  esc = 125;
1148  } else {
1149  bits = VC1_1REF_MVDATA_VLC_BITS;
1150  esc = 71;
1151  }
1152  switch (v->dmvrange) {
1153  case 1:
1154  extend_x = 1;
1155  break;
1156  case 2:
1157  extend_y = 1;
1158  break;
1159  case 3:
1160  extend_x = extend_y = 1;
1161  break;
1162  }
1163  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1164  if (index == esc) {
1165  *dmv_x = get_bits(gb, v->k_x);
1166  *dmv_y = get_bits(gb, v->k_y);
1167  if (v->numref) {
1168  if (pred_flag) {
1169  *pred_flag = *dmv_y & 1;
1170  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1171  } else {
1172  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1173  }
1174  }
1175  }
1176  else {
1177  av_assert0(index < esc);
1178  if (extend_x)
1179  offs_tab = offset_table2;
1180  else
1181  offs_tab = offset_table1;
1182  index1 = (index + 1) % 9;
1183  if (index1 != 0) {
1184  val = get_bits(gb, index1 + extend_x);
1185  sign = 0 -(val & 1);
1186  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1187  } else
1188  *dmv_x = 0;
1189  if (extend_y)
1190  offs_tab = offset_table2;
1191  else
1192  offs_tab = offset_table1;
1193  index1 = (index + 1) / 9;
1194  if (index1 > v->numref) {
1195  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1196  sign = 0 - (val & 1);
1197  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1198  } else
1199  *dmv_y = 0;
1200  if (v->numref && pred_flag)
1201  *pred_flag = index1 & 1;
1202  }
1203 }
1204 
1205 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1206 {
1207  int scaledvalue, refdist;
1208  int scalesame1, scalesame2;
1209  int scalezone1_x, zone1offset_x;
1210  int table_index = dir ^ v->second_field;
1211 
1212  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1213  refdist = v->refdist;
1214  else
1215  refdist = dir ? v->brfd : v->frfd;
1216  if (refdist > 3)
1217  refdist = 3;
1218  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1219  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1220  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1221  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1222 
1223  if (FFABS(n) > 255)
1224  scaledvalue = n;
1225  else {
1226  if (FFABS(n) < scalezone1_x)
1227  scaledvalue = (n * scalesame1) >> 8;
1228  else {
1229  if (n < 0)
1230  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1231  else
1232  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1233  }
1234  }
1235  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1236 }
1237 
1238 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1239 {
1240  int scaledvalue, refdist;
1241  int scalesame1, scalesame2;
1242  int scalezone1_y, zone1offset_y;
1243  int table_index = dir ^ v->second_field;
1244 
1245  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1246  refdist = v->refdist;
1247  else
1248  refdist = dir ? v->brfd : v->frfd;
1249  if (refdist > 3)
1250  refdist = 3;
1251  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1252  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1253  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1254  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1255 
1256  if (FFABS(n) > 63)
1257  scaledvalue = n;
1258  else {
1259  if (FFABS(n) < scalezone1_y)
1260  scaledvalue = (n * scalesame1) >> 8;
1261  else {
1262  if (n < 0)
1263  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1264  else
1265  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1266  }
1267  }
1268 
1269  if (v->cur_field_type && !v->ref_field_type[dir])
1270  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1271  else
1272  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1273 }
1274 
1275 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1276 {
1277  int scalezone1_x, zone1offset_x;
1278  int scaleopp1, scaleopp2, brfd;
1279  int scaledvalue;
1280 
1281  brfd = FFMIN(v->brfd, 3);
1282  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1283  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1284  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1285  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1286 
1287  if (FFABS(n) > 255)
1288  scaledvalue = n;
1289  else {
1290  if (FFABS(n) < scalezone1_x)
1291  scaledvalue = (n * scaleopp1) >> 8;
1292  else {
1293  if (n < 0)
1294  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1295  else
1296  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1297  }
1298  }
1299  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1300 }
1301 
1302 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1303 {
1304  int scalezone1_y, zone1offset_y;
1305  int scaleopp1, scaleopp2, brfd;
1306  int scaledvalue;
1307 
1308  brfd = FFMIN(v->brfd, 3);
1309  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1310  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1311  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1312  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1313 
1314  if (FFABS(n) > 63)
1315  scaledvalue = n;
1316  else {
1317  if (FFABS(n) < scalezone1_y)
1318  scaledvalue = (n * scaleopp1) >> 8;
1319  else {
1320  if (n < 0)
1321  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1322  else
1323  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1324  }
1325  }
1326  if (v->cur_field_type && !v->ref_field_type[dir]) {
1327  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1328  } else {
1329  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1330  }
1331 }
1332 
1333 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1334  int dim, int dir)
1335 {
1336  int brfd, scalesame;
1337  int hpel = 1 - v->s.quarter_sample;
1338 
1339  n >>= hpel;
1340  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1341  if (dim)
1342  n = scaleforsame_y(v, i, n, dir) << hpel;
1343  else
1344  n = scaleforsame_x(v, n, dir) << hpel;
1345  return n;
1346  }
1347  brfd = FFMIN(v->brfd, 3);
1348  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1349 
1350  n = (n * scalesame >> 8) << hpel;
1351  return n;
1352 }
1353 
1354 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1355  int dim, int dir)
1356 {
1357  int refdist, scaleopp;
1358  int hpel = 1 - v->s.quarter_sample;
1359 
1360  n >>= hpel;
1361  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1362  if (dim)
1363  n = scaleforopp_y(v, n, dir) << hpel;
1364  else
1365  n = scaleforopp_x(v, n) << hpel;
1366  return n;
1367  }
1368  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1369  refdist = FFMIN(v->refdist, 3);
1370  else
1371  refdist = dir ? v->brfd : v->frfd;
1372  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1373 
1374  n = (n * scaleopp >> 8) << hpel;
1375  return n;
1376 }
1377 
1378 /** Predict and set motion vector
1379  */
1380 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1381  int mv1, int r_x, int r_y, uint8_t* is_intra,
1382  int pred_flag, int dir)
1383 {
1384  MpegEncContext *s = &v->s;
1385  int xy, wrap, off = 0;
1386  int16_t *A, *B, *C;
1387  int px, py;
1388  int sum;
1389  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1390  int opposite, a_f, b_f, c_f;
1391  int16_t field_predA[2];
1392  int16_t field_predB[2];
1393  int16_t field_predC[2];
1394  int a_valid, b_valid, c_valid;
1395  int hybridmv_thresh, y_bias = 0;
1396 
1397  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1399  mixedmv_pic = 1;
1400  else
1401  mixedmv_pic = 0;
1402  /* scale MV difference to be quad-pel */
1403  dmv_x <<= 1 - s->quarter_sample;
1404  dmv_y <<= 1 - s->quarter_sample;
1405 
1406  wrap = s->b8_stride;
1407  xy = s->block_index[n];
1408 
1409  if (s->mb_intra) {
1410  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1411  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1412  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1413  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1414  if (mv1) { /* duplicate motion data for 1-MV block */
1415  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1416  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1417  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1418  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1419  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1420  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1421  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1422  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1423  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1424  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1425  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1426  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1427  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1428  }
1429  return;
1430  }
1431 
1432  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1433  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1434  if (mv1) {
1435  if (v->field_mode && mixedmv_pic)
1436  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1437  else
1438  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1439  } else {
1440  //in 4-MV mode different blocks have different B predictor position
1441  switch (n) {
1442  case 0:
1443  off = (s->mb_x > 0) ? -1 : 1;
1444  break;
1445  case 1:
1446  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1447  break;
1448  case 2:
1449  off = 1;
1450  break;
1451  case 3:
1452  off = -1;
1453  }
1454  }
1455  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1456 
1457  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1458  b_valid = a_valid && (s->mb_width > 1);
1459  c_valid = s->mb_x || (n == 1 || n == 3);
1460  if (v->field_mode) {
1461  a_valid = a_valid && !is_intra[xy - wrap];
1462  b_valid = b_valid && !is_intra[xy - wrap + off];
1463  c_valid = c_valid && !is_intra[xy - 1];
1464  }
1465 
1466  if (a_valid) {
1467  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1468  num_oppfield += a_f;
1469  num_samefield += 1 - a_f;
1470  field_predA[0] = A[0];
1471  field_predA[1] = A[1];
1472  } else {
1473  field_predA[0] = field_predA[1] = 0;
1474  a_f = 0;
1475  }
1476  if (b_valid) {
1477  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1478  num_oppfield += b_f;
1479  num_samefield += 1 - b_f;
1480  field_predB[0] = B[0];
1481  field_predB[1] = B[1];
1482  } else {
1483  field_predB[0] = field_predB[1] = 0;
1484  b_f = 0;
1485  }
1486  if (c_valid) {
1487  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1488  num_oppfield += c_f;
1489  num_samefield += 1 - c_f;
1490  field_predC[0] = C[0];
1491  field_predC[1] = C[1];
1492  } else {
1493  field_predC[0] = field_predC[1] = 0;
1494  c_f = 0;
1495  }
1496 
1497  if (v->field_mode) {
1498  if (!v->numref)
1499  // REFFIELD determines if the last field or the second-last field is
1500  // to be used as reference
1501  opposite = 1 - v->reffield;
1502  else {
1503  if (num_samefield <= num_oppfield)
1504  opposite = 1 - pred_flag;
1505  else
1506  opposite = pred_flag;
1507  }
1508  } else
1509  opposite = 0;
1510  if (opposite) {
1511  if (a_valid && !a_f) {
1512  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1513  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1514  }
1515  if (b_valid && !b_f) {
1516  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1517  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1518  }
1519  if (c_valid && !c_f) {
1520  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1521  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1522  }
1523  v->mv_f[dir][xy + v->blocks_off] = 1;
1524  v->ref_field_type[dir] = !v->cur_field_type;
1525  } else {
1526  if (a_valid && a_f) {
1527  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1528  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1529  }
1530  if (b_valid && b_f) {
1531  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1532  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1533  }
1534  if (c_valid && c_f) {
1535  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1536  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1537  }
1538  v->mv_f[dir][xy + v->blocks_off] = 0;
1539  v->ref_field_type[dir] = v->cur_field_type;
1540  }
1541 
1542  if (a_valid) {
1543  px = field_predA[0];
1544  py = field_predA[1];
1545  } else if (c_valid) {
1546  px = field_predC[0];
1547  py = field_predC[1];
1548  } else if (b_valid) {
1549  px = field_predB[0];
1550  py = field_predB[1];
1551  } else {
1552  px = 0;
1553  py = 0;
1554  }
1555 
1556  if (num_samefield + num_oppfield > 1) {
1557  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1558  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1559  }
1560 
1561  /* Pullback MV as specified in 8.3.5.3.4 */
1562  if (!v->field_mode) {
1563  int qx, qy, X, Y;
1564  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1565  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1566  X = (s->mb_width << 6) - 4;
1567  Y = (s->mb_height << 6) - 4;
1568  if (mv1) {
1569  if (qx + px < -60) px = -60 - qx;
1570  if (qy + py < -60) py = -60 - qy;
1571  } else {
1572  if (qx + px < -28) px = -28 - qx;
1573  if (qy + py < -28) py = -28 - qy;
1574  }
1575  if (qx + px > X) px = X - qx;
1576  if (qy + py > Y) py = Y - qy;
1577  }
1578 
1579  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1580  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1581  hybridmv_thresh = 32;
1582  if (a_valid && c_valid) {
1583  if (is_intra[xy - wrap])
1584  sum = FFABS(px) + FFABS(py);
1585  else
1586  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1587  if (sum > hybridmv_thresh) {
1588  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1589  px = field_predA[0];
1590  py = field_predA[1];
1591  } else {
1592  px = field_predC[0];
1593  py = field_predC[1];
1594  }
1595  } else {
1596  if (is_intra[xy - 1])
1597  sum = FFABS(px) + FFABS(py);
1598  else
1599  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1600  if (sum > hybridmv_thresh) {
1601  if (get_bits1(&s->gb)) {
1602  px = field_predA[0];
1603  py = field_predA[1];
1604  } else {
1605  px = field_predC[0];
1606  py = field_predC[1];
1607  }
1608  }
1609  }
1610  }
1611  }
1612 
1613  if (v->field_mode && v->numref)
1614  r_y >>= 1;
1615  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1616  y_bias = 1;
1617  /* store MV using signed modulus of MV range defined in 4.11 */
1618  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1619  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1620  if (mv1) { /* duplicate motion data for 1-MV block */
1621  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1622  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1623  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1624  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1625  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1626  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1627  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1628  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1629  }
1630 }
1631 
1632 /** Predict and set motion vector for interlaced frame picture MBs
1633  */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    /* Predict and set the MV for block n of an interlaced-frame MB.
     * mvn: 1 = 1-MV MB, 2 = 2-field-MV MB, otherwise 4-MV.
     * Predictors: A = left neighbour, B = above, C = above-right
     * (above-left at the right picture edge); frame- and field-MV
     * candidates are averaged when the two MB types differ. */
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: zero out MVs for both directions */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset towards the other field line of the left neighbour */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left-column blocks of a MB: invalid if the left MB is intra */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                /* above MB has field MV, current has frame MV: average both fields */
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    /* above-right MB has field MV, current has frame MV: average */
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* at the right picture edge take C from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from this MB's top blocks */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.motion_val[dir][pos_b][0];
        B[1]    = s->current_picture.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.motion_val[dir][pos_c][0];
        C[1]    = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median / single-predictor selection */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if      (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else if (c_valid) { px = C[0]; py = C[1]; }
                else av_assert2(0);
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: classify each predictor by field parity
         * (bit 2 of the y component distinguishes same/opposite field) */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            /* take the first valid predictor from the majority field set */
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1    ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1    ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1858 
1859 /** Motion compensation for direct or interpolated blocks in B-frames
1860  */
1862 {
1863  MpegEncContext *s = &v->s;
1864  H264ChromaContext *h264chroma = &v->h264chroma;
1865  uint8_t *srcY, *srcU, *srcV;
1866  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1867  int off, off_uv;
1868  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1869 
1870  if (!v->field_mode && !v->s.next_picture.f.data[0])
1871  return;
1872 
1873  mx = s->mv[1][0][0];
1874  my = s->mv[1][0][1];
1875  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1876  uvmy = (my + ((my & 3) == 3)) >> 1;
1877  if (v->field_mode) {
1878  if (v->cur_field_type != v->ref_field_type[1])
1879  my = my - 2 + 4 * v->cur_field_type;
1880  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1881  }
1882  if (v->fastuvmc) {
1883  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1884  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1885  }
1886  srcY = s->next_picture.f.data[0];
1887  srcU = s->next_picture.f.data[1];
1888  srcV = s->next_picture.f.data[2];
1889 
1890  src_x = s->mb_x * 16 + (mx >> 2);
1891  src_y = s->mb_y * 16 + (my >> 2);
1892  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1893  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1894 
1895  if (v->profile != PROFILE_ADVANCED) {
1896  src_x = av_clip( src_x, -16, s->mb_width * 16);
1897  src_y = av_clip( src_y, -16, s->mb_height * 16);
1898  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1899  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1900  } else {
1901  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1902  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1903  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1904  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1905  }
1906 
1907  srcY += src_y * s->linesize + src_x;
1908  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1909  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1910 
1911  if (v->field_mode && v->ref_field_type[1]) {
1912  srcY += s->current_picture_ptr->f.linesize[0];
1913  srcU += s->current_picture_ptr->f.linesize[1];
1914  srcV += s->current_picture_ptr->f.linesize[2];
1915  }
1916 
1917  /* for grayscale we should not try to read from unknown area */
1918  if (s->flags & CODEC_FLAG_GRAY) {
1919  srcU = s->edge_emu_buffer + 18 * s->linesize;
1920  srcV = s->edge_emu_buffer + 18 * s->linesize;
1921  }
1922 
1923  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1924  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1925  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1926  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1927 
1928  srcY -= s->mspel * (1 + s->linesize);
1930  17 + s->mspel * 2, 17 + s->mspel * 2,
1931  src_x - s->mspel, src_y - s->mspel,
1932  s->h_edge_pos, v_edge_pos);
1933  srcY = s->edge_emu_buffer;
1934  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1935  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1936  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1937  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1938  srcU = uvbuf;
1939  srcV = uvbuf + 16;
1940  /* if we deal with range reduction we need to scale source blocks */
1941  if (v->rangeredfrm) {
1942  int i, j;
1943  uint8_t *src, *src2;
1944 
1945  src = srcY;
1946  for (j = 0; j < 17 + s->mspel * 2; j++) {
1947  for (i = 0; i < 17 + s->mspel * 2; i++)
1948  src[i] = ((src[i] - 128) >> 1) + 128;
1949  src += s->linesize;
1950  }
1951  src = srcU;
1952  src2 = srcV;
1953  for (j = 0; j < 9; j++) {
1954  for (i = 0; i < 9; i++) {
1955  src[i] = ((src[i] - 128) >> 1) + 128;
1956  src2[i] = ((src2[i] - 128) >> 1) + 128;
1957  }
1958  src += s->uvlinesize;
1959  src2 += s->uvlinesize;
1960  }
1961  }
1962  srcY += s->mspel * (1 + s->linesize);
1963  }
1964 
1965  off = 0;
1966  off_uv = 0;
1967 
1968  if (s->mspel) {
1969  dxy = ((my & 3) << 2) | (mx & 3);
1970  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1971  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1972  srcY += s->linesize * 8;
1973  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1974  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1975  } else { // hpel mc
1976  dxy = (my & 2) | ((mx & 2) >> 1);
1977 
1978  if (!v->rnd)
1979  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1980  else
1981  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
1982  }
1983 
1984  if (s->flags & CODEC_FLAG_GRAY) return;
1985  /* Chroma MC always uses qpel blilinear */
1986  uvmx = (uvmx & 3) << 1;
1987  uvmy = (uvmy & 3) << 1;
1988  if (!v->rnd) {
1989  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1990  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1991  } else {
1992  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1993  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1994  }
1995 }
1996 
1997 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1998 {
1999  int n = bfrac;
2000 
2001 #if B_FRACTION_DEN==256
2002  if (inv)
2003  n -= 256;
2004  if (!qs)
2005  return 2 * ((value * n + 255) >> 9);
2006  return (value * n + 128) >> 8;
2007 #else
2008  if (inv)
2009  n -= B_FRACTION_DEN;
2010  if (!qs)
2011  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2012  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2013 #endif
2014 }
2015 
2016 /** Reconstruct motion vector for B-frame and do motion compensation
2017  */
2018 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2019  int direct, int mode)
2020 {
2021  if (v->use_ic) {
2022  v->mv_mode2 = v->mv_mode;
2024  }
2025  if (direct) {
2026  vc1_mc_1mv(v, 0);
2027  vc1_interp_mc(v);
2028  if (v->use_ic)
2029  v->mv_mode = v->mv_mode2;
2030  return;
2031  }
2032  if (mode == BMV_TYPE_INTERPOLATED) {
2033  vc1_mc_1mv(v, 0);
2034  vc1_interp_mc(v);
2035  if (v->use_ic)
2036  v->mv_mode = v->mv_mode2;
2037  return;
2038  }
2039 
2040  if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2041  v->mv_mode = v->mv_mode2;
2042  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2043  if (v->use_ic)
2044  v->mv_mode = v->mv_mode2;
2045 }
2046 
/** Predict forward/backward MVs for a B-frame macroblock and store the
 *  reconstructed vectors in s->mv and current_picture.motion_val.
 *
 * @param v      VC-1 context
 * @param dmv_x  forward/backward MV differentials, x (rescaled in place)
 * @param dmv_y  forward/backward MV differentials, y (rescaled in place)
 * @param direct nonzero for direct mode (scaled co-located MVs, no predictors)
 * @param mvtype BMV_TYPE_* prediction mode
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;          /* neighbour predictors: A above, B above +/- off, C left */
    int px, py;                  /* predicted MV components */
    int sum;
    int r_x, r_y;                /* MV range for the signed-modulus wrap (4.11) */
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    /* intra MB: zero both direction MVs and bail out */
    if (s->mb_intra) {
        s->current_picture.motion_val[0][xy + v->blocks_off][0] =
        s->current_picture.motion_val[0][xy + v->blocks_off][1] =
        s->current_picture.motion_val[1][xy + v->blocks_off][0] =
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        return;
    }
    if (!v->field_mode) {
        /* temporal scaling of the co-located MV from the next picture */
        s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

        /* Pullback predicted motion vectors as specified in 8.4.5.4 */
        s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
        s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
        s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
        s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    }
    if (direct) {
        /* direct mode uses the scaled MVs as-is, no spatial prediction */
        s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        /* B is above-left for the last MB column, above-right otherwise */
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X  = (s->mb_width  << 5) - 4;
                Y  = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X  = (s->mb_width  << 6) - 4;
                Y  = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via "0 &&" — the bitstream reads inside
         * would otherwise consume bits; do not "fix" without checking spec. */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* same prediction procedure for the backward direction */
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X  = (s->mb_width  << 5) - 4;
                Y  = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X  = (s->mb_width  << 6) - 4;
                Y  = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: disabled via "0 &&", same as the forward branch above */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
2259 
/** Predict B-frame MVs for interlaced-field pictures.
 *
 * @param v         VC-1 context
 * @param n         block index within the MB
 * @param dmv_x     forward/backward MV differentials, x
 * @param dmv_y     forward/backward MV differentials, y
 * @param mv1       nonzero for 1-MV mode
 * @param pred_flag forward/backward predictor-selection flags
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        /* co-located MB in the next picture is inter: temporally scale its MV */
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the 4 blocks: which field did they reference? */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        /* propagate the direct MV and field flag to all 4 blocks */
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* predict both directions */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* also refresh the opposite-direction predictor */
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
2316 
2317 /** Get predicted DC value for I-frames only
2318  * prediction dir: left=0, top=1
2319  * @param s MpegEncContext
2320  * @param overlap flag indicating that overlap filtering is used
2321  * @param pq integer part of picture quantizer
2322  * @param[in] n block index in the current MB
2323  * @param dc_val_ptr Pointer to DC predictor
2324  * @param dir_ptr Prediction direction for use in AC prediction
2325  */
2326 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2327  int16_t **dc_val_ptr, int *dir_ptr)
2328 {
2329  int a, b, c, wrap, pred, scale;
2330  int16_t *dc_val;
2331  static const uint16_t dcpred[32] = {
2332  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2333  114, 102, 93, 85, 79, 73, 68, 64,
2334  60, 57, 54, 51, 49, 47, 45, 43,
2335  41, 39, 38, 37, 35, 34, 33
2336  };
2337 
2338  /* find prediction - wmv3_dc_scale always used here in fact */
2339  if (n < 4) scale = s->y_dc_scale;
2340  else scale = s->c_dc_scale;
2341 
2342  wrap = s->block_wrap[n];
2343  dc_val = s->dc_val[0] + s->block_index[n];
2344 
2345  /* B A
2346  * C X
2347  */
2348  c = dc_val[ - 1];
2349  b = dc_val[ - 1 - wrap];
2350  a = dc_val[ - wrap];
2351 
2352  if (pq < 9 || !overlap) {
2353  /* Set outer values */
2354  if (s->first_slice_line && (n != 2 && n != 3))
2355  b = a = dcpred[scale];
2356  if (s->mb_x == 0 && (n != 1 && n != 3))
2357  b = c = dcpred[scale];
2358  } else {
2359  /* Set outer values */
2360  if (s->first_slice_line && (n != 2 && n != 3))
2361  b = a = 0;
2362  if (s->mb_x == 0 && (n != 1 && n != 3))
2363  b = c = 0;
2364  }
2365 
2366  if (abs(a - b) <= abs(b - c)) {
2367  pred = c;
2368  *dir_ptr = 1; // left
2369  } else {
2370  pred = a;
2371  *dir_ptr = 0; // top
2372  }
2373 
2374  /* update predictor */
2375  *dc_val_ptr = &dc_val[0];
2376  return pred;
2377 }
2378 
2379 
/** Get predicted DC value
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param overlap flag indicating that overlap filtering is used
 *                (NOTE(review): not read in this function)
 * @param pq integer part of picture quantizer
 *                (NOTE(review): not read in this function)
 * @param[in] n block index in the current MB
 * @param a_avail flag indicating top block availability
 * @param c_avail flag indicating left block availability
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;
    int dqscale_index;

    wrap = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed: neighbours quantized with a different
     * quantizer are rescaled to this MB's quantizer via the dqscale table
     * (fixed-point multiply, 18-bit shift with rounding) */
    q1 = s->current_picture.qscale_table[mb_pos];
    dqscale_index = s->y_dc_scale_table[q1] - 1;
    if (dqscale_index < 0)
        return 0;
    if (c_avail && (n != 1 && n != 3)) {
        /* left neighbour lies in the previous MB */
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if (q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && (n != 2 && n != 3)) {
        /* top neighbour lies in the MB above */
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if (q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && c_avail && (n != 3)) {
        /* top-left neighbour: pick the MB it actually belongs to */
        int off = mb_pos;
        if (n != 1)
            off--;
        if (n != 2)
            off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if (q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }

    /* choose direction by gradient; fall back to whichever neighbour exists */
    if (a_avail && c_avail) {
        if (abs(a - b) <= abs(b - c)) {
            pred     = c;
            *dir_ptr = 1; // left
        } else {
            pred     = a;
            *dir_ptr = 0; // top
        }
    } else if (a_avail) {
        pred     = a;
        *dir_ptr = 0; // top
    } else if (c_avail) {
        pred     = c;
        *dir_ptr = 1; // left
    } else {
        pred     = 0;
        *dir_ptr = 1; // left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
2459 
2460 /** @} */ // Block group
2461 
2462 /**
2463  * @name VC1 Macroblock-level functions in Simple/Main Profiles
2464  * @see 7.1.4, p91 and 8.1.1.7, p104
2465  * @{
2466  */
2467 
2468 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2469  uint8_t **coded_block_ptr)
2470 {
2471  int xy, wrap, pred, a, b, c;
2472 
2473  xy = s->block_index[n];
2474  wrap = s->b8_stride;
2475 
2476  /* B C
2477  * A X
2478  */
2479  a = s->coded_block[xy - 1 ];
2480  b = s->coded_block[xy - 1 - wrap];
2481  c = s->coded_block[xy - wrap];
2482 
2483  if (b == c) {
2484  pred = a;
2485  } else {
2486  pred = c;
2487  }
2488 
2489  /* store value */
2490  *coded_block_ptr = &s->coded_block[xy];
2491 
2492  return pred;
2493 }
2494 
/**
 * Decode one AC coefficient
 * @param v The VC1 context
 * @param last Last coefficient
 * @param skip How much zero coefficients to skip
 * @param value Decoded AC coefficient value
 * @param codingset set of VLC to decode data
 * @see 8.1.3.4
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular (run, level, last) entry; sign follows in the bitstream.
         * The get_bits_left() check forces "last" on bitstream overread. */
        run   = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        /* escape coding: mode 0/1 add deltas to a second VLC entry,
         * mode 2 reads run/level as fixed-length fields */
        escape = decode210(gb);
        if (escape != 2) {
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run   = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst   = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            int sign;
            lst = get_bits1(gb);
            /* field widths are signalled once per picture (tables 59/60)
             * and cached in esc3_level_length / esc3_run_length */
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run   = get_bits(gb, v->s.esc3_run_length);
            sign  = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last  = lst;
    *skip  = run;
    *value = level;
}
2562 
2563 /** Decode intra block in intra frames - should be faster than decode_intra_block
2564  * @param v VC1Context
2565  * @param block block to decode
2566  * @param[in] n subblock index
2567  * @param coded are AC coeffs present or not
2568  * @param codingset set of VLC to decode data
2569  */
2570 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2571  int coded, int codingset)
2572 {
2573  GetBitContext *gb = &v->s.gb;
2574  MpegEncContext *s = &v->s;
2575  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2576  int i;
2577  int16_t *dc_val;
2578  int16_t *ac_val, *ac_val2;
2579  int dcdiff;
2580 
2581  /* Get DC differential */
2582  if (n < 4) {
2584  } else {
2586  }
2587  if (dcdiff < 0) {
2588  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2589  return -1;
2590  }
2591  if (dcdiff) {
2592  if (dcdiff == 119 /* ESC index value */) {
2593  /* TODO: Optimize */
2594  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2595  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2596  else dcdiff = get_bits(gb, 8);
2597  } else {
2598  if (v->pq == 1)
2599  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2600  else if (v->pq == 2)
2601  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2602  }
2603  if (get_bits1(gb))
2604  dcdiff = -dcdiff;
2605  }
2606 
2607  /* Prediction */
2608  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2609  *dc_val = dcdiff;
2610 
2611  /* Store the quantized DC coeff, used for prediction */
2612  if (n < 4) {
2613  block[0] = dcdiff * s->y_dc_scale;
2614  } else {
2615  block[0] = dcdiff * s->c_dc_scale;
2616  }
2617  /* Skip ? */
2618  if (!coded) {
2619  goto not_coded;
2620  }
2621 
2622  // AC Decoding
2623  i = 1;
2624 
2625  {
2626  int last = 0, skip, value;
2627  const uint8_t *zz_table;
2628  int scale;
2629  int k;
2630 
2631  scale = v->pq * 2 + v->halfpq;
2632 
2633  if (v->s.ac_pred) {
2634  if (!dc_pred_dir)
2635  zz_table = v->zz_8x8[2];
2636  else
2637  zz_table = v->zz_8x8[3];
2638  } else
2639  zz_table = v->zz_8x8[1];
2640 
2641  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2642  ac_val2 = ac_val;
2643  if (dc_pred_dir) // left
2644  ac_val -= 16;
2645  else // top
2646  ac_val -= 16 * s->block_wrap[n];
2647 
2648  while (!last) {
2649  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2650  i += skip;
2651  if (i > 63)
2652  break;
2653  block[zz_table[i++]] = value;
2654  }
2655 
2656  /* apply AC prediction if needed */
2657  if (s->ac_pred) {
2658  if (dc_pred_dir) { // left
2659  for (k = 1; k < 8; k++)
2660  block[k << v->left_blk_sh] += ac_val[k];
2661  } else { // top
2662  for (k = 1; k < 8; k++)
2663  block[k << v->top_blk_sh] += ac_val[k + 8];
2664  }
2665  }
2666  /* save AC coeffs for further prediction */
2667  for (k = 1; k < 8; k++) {
2668  ac_val2[k] = block[k << v->left_blk_sh];
2669  ac_val2[k + 8] = block[k << v->top_blk_sh];
2670  }
2671 
2672  /* scale AC coeffs */
2673  for (k = 1; k < 64; k++)
2674  if (block[k]) {
2675  block[k] *= scale;
2676  if (!v->pquantizer)
2677  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2678  }
2679 
2680  if (s->ac_pred) i = 63;
2681  }
2682 
2683 not_coded:
2684  if (!coded) {
2685  int k, scale;
2686  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2687  ac_val2 = ac_val;
2688 
2689  i = 0;
2690  scale = v->pq * 2 + v->halfpq;
2691  memset(ac_val2, 0, 16 * 2);
2692  if (dc_pred_dir) { // left
2693  ac_val -= 16;
2694  if (s->ac_pred)
2695  memcpy(ac_val2, ac_val, 8 * 2);
2696  } else { // top
2697  ac_val -= 16 * s->block_wrap[n];
2698  if (s->ac_pred)
2699  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2700  }
2701 
2702  /* apply AC prediction if needed */
2703  if (s->ac_pred) {
2704  if (dc_pred_dir) { //left
2705  for (k = 1; k < 8; k++) {
2706  block[k << v->left_blk_sh] = ac_val[k] * scale;
2707  if (!v->pquantizer && block[k << v->left_blk_sh])
2708  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2709  }
2710  } else { // top
2711  for (k = 1; k < 8; k++) {
2712  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2713  if (!v->pquantizer && block[k << v->top_blk_sh])
2714  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2715  }
2716  }
2717  i = 63;
2718  }
2719  }
2720  s->block_last_index[n] = i;
2721 
2722  return 0;
2723 }
2724 
2725 /** Decode intra block in intra frames - should be faster than decode_intra_block
2726  * @param v VC1Context
2727  * @param block block to decode
2728  * @param[in] n subblock number
2729  * @param coded are AC coeffs present or not
2730  * @param codingset set of VLC to decode data
2731  * @param mquant quantizer value for this macroblock
2732  */
2733 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2734  int coded, int codingset, int mquant)
2735 {
2736  GetBitContext *gb = &v->s.gb;
2737  MpegEncContext *s = &v->s;
2738  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2739  int i;
2740  int16_t *dc_val = NULL;
2741  int16_t *ac_val, *ac_val2;
2742  int dcdiff;
2743  int a_avail = v->a_avail, c_avail = v->c_avail;
2744  int use_pred = s->ac_pred;
2745  int scale;
2746  int q1, q2 = 0;
2747  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2748 
2749  /* Get DC differential */
2750  if (n < 4) {
2752  } else {
2754  }
2755  if (dcdiff < 0) {
2756  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2757  return -1;
2758  }
2759  if (dcdiff) {
2760  if (dcdiff == 119 /* ESC index value */) {
2761  /* TODO: Optimize */
2762  if (mquant == 1) dcdiff = get_bits(gb, 10);
2763  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2764  else dcdiff = get_bits(gb, 8);
2765  } else {
2766  if (mquant == 1)
2767  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2768  else if (mquant == 2)
2769  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2770  }
2771  if (get_bits1(gb))
2772  dcdiff = -dcdiff;
2773  }
2774 
2775  /* Prediction */
2776  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2777  *dc_val = dcdiff;
2778 
2779  /* Store the quantized DC coeff, used for prediction */
2780  if (n < 4) {
2781  block[0] = dcdiff * s->y_dc_scale;
2782  } else {
2783  block[0] = dcdiff * s->c_dc_scale;
2784  }
2785 
2786  //AC Decoding
2787  i = 1;
2788 
2789  /* check if AC is needed at all */
2790  if (!a_avail && !c_avail)
2791  use_pred = 0;
2792  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2793  ac_val2 = ac_val;
2794 
2795  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2796 
2797  if (dc_pred_dir) // left
2798  ac_val -= 16;
2799  else // top
2800  ac_val -= 16 * s->block_wrap[n];
2801 
2802  q1 = s->current_picture.qscale_table[mb_pos];
2803  if ( dc_pred_dir && c_avail && mb_pos)
2804  q2 = s->current_picture.qscale_table[mb_pos - 1];
2805  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2806  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2807  if ( dc_pred_dir && n == 1)
2808  q2 = q1;
2809  if (!dc_pred_dir && n == 2)
2810  q2 = q1;
2811  if (n == 3)
2812  q2 = q1;
2813 
2814  if (coded) {
2815  int last = 0, skip, value;
2816  const uint8_t *zz_table;
2817  int k;
2818 
2819  if (v->s.ac_pred) {
2820  if (!use_pred && v->fcm == ILACE_FRAME) {
2821  zz_table = v->zzi_8x8;
2822  } else {
2823  if (!dc_pred_dir) // top
2824  zz_table = v->zz_8x8[2];
2825  else // left
2826  zz_table = v->zz_8x8[3];
2827  }
2828  } else {
2829  if (v->fcm != ILACE_FRAME)
2830  zz_table = v->zz_8x8[1];
2831  else
2832  zz_table = v->zzi_8x8;
2833  }
2834 
2835  while (!last) {
2836  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2837  i += skip;
2838  if (i > 63)
2839  break;
2840  block[zz_table[i++]] = value;
2841  }
2842 
2843  /* apply AC prediction if needed */
2844  if (use_pred) {
2845  /* scale predictors if needed*/
2846  if (q2 && q1 != q2) {
2847  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2848  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2849 
2850  if (q1 < 1)
2851  return AVERROR_INVALIDDATA;
2852  if (dc_pred_dir) { // left
2853  for (k = 1; k < 8; k++)
2854  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2855  } else { // top
2856  for (k = 1; k < 8; k++)
2857  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2858  }
2859  } else {
2860  if (dc_pred_dir) { //left
2861  for (k = 1; k < 8; k++)
2862  block[k << v->left_blk_sh] += ac_val[k];
2863  } else { //top
2864  for (k = 1; k < 8; k++)
2865  block[k << v->top_blk_sh] += ac_val[k + 8];
2866  }
2867  }
2868  }
2869  /* save AC coeffs for further prediction */
2870  for (k = 1; k < 8; k++) {
2871  ac_val2[k ] = block[k << v->left_blk_sh];
2872  ac_val2[k + 8] = block[k << v->top_blk_sh];
2873  }
2874 
2875  /* scale AC coeffs */
2876  for (k = 1; k < 64; k++)
2877  if (block[k]) {
2878  block[k] *= scale;
2879  if (!v->pquantizer)
2880  block[k] += (block[k] < 0) ? -mquant : mquant;
2881  }
2882 
2883  if (use_pred) i = 63;
2884  } else { // no AC coeffs
2885  int k;
2886 
2887  memset(ac_val2, 0, 16 * 2);
2888  if (dc_pred_dir) { // left
2889  if (use_pred) {
2890  memcpy(ac_val2, ac_val, 8 * 2);
2891  if (q2 && q1 != q2) {
2892  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2893  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2894  if (q1 < 1)
2895  return AVERROR_INVALIDDATA;
2896  for (k = 1; k < 8; k++)
2897  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2898  }
2899  }
2900  } else { // top
2901  if (use_pred) {
2902  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2903  if (q2 && q1 != q2) {
2904  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2905  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2906  if (q1 < 1)
2907  return AVERROR_INVALIDDATA;
2908  for (k = 1; k < 8; k++)
2909  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2910  }
2911  }
2912  }
2913 
2914  /* apply AC prediction if needed */
2915  if (use_pred) {
2916  if (dc_pred_dir) { // left
2917  for (k = 1; k < 8; k++) {
2918  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2919  if (!v->pquantizer && block[k << v->left_blk_sh])
2920  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2921  }
2922  } else { // top
2923  for (k = 1; k < 8; k++) {
2924  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2925  if (!v->pquantizer && block[k << v->top_blk_sh])
2926  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2927  }
2928  }
2929  i = 63;
2930  }
2931  }
2932  s->block_last_index[n] = i;
2933 
2934  return 0;
2935 }
2936 
2937 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2938  * @param v VC1Context
2939  * @param block block to decode
2940  * @param[in] n subblock index
2941  * @param coded are AC coeffs present or not
2942  * @param mquant block quantizer
2943  * @param codingset set of VLC to decode data
2944  */
 2945 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
 2946  int coded, int mquant, int codingset)
 2947 {
 /* Decodes one intra 8x8 block inside an inter frame: DC differential,
  * DC prediction, AC coefficient run-level decode, optional AC prediction
  * (with cross-MB quantizer rescaling), and final coefficient scaling.
  * Returns 0 on success, -1 / AVERROR_INVALIDDATA on bitstream errors. */
 2948  GetBitContext *gb = &v->s.gb;
 2949  MpegEncContext *s = &v->s;
 2950  int dc_pred_dir = 0; /* Direction of the DC prediction used */
 2951  int i;
 2952  int16_t *dc_val = NULL;
 2953  int16_t *ac_val, *ac_val2;
 2954  int dcdiff;
 2955  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 2956  int a_avail = v->a_avail, c_avail = v->c_avail;
 2957  int use_pred = s->ac_pred;
 2958  int scale;
 2959  int q1, q2 = 0;
 2960 
 2961  s->dsp.clear_block(block);
 2962 
 2963  /* XXX: Guard against dumb values of mquant */
 2964  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
 2965 
 2966  /* Set DC scale - y and c use the same */
 2967  s->y_dc_scale = s->y_dc_scale_table[mquant];
 2968  s->c_dc_scale = s->c_dc_scale_table[mquant];
 2969 
 2970  /* Get DC differential */
 /* NOTE(review): the extraction dropped the VLC reads that fill dcdiff here
  * (luma table for n < 4, chroma table otherwise) — confirm against the
  * original source before editing this branch. */
 2971  if (n < 4) {
 2973  } else {
 2975  }
 2976  if (dcdiff < 0) {
 2977  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
 2978  return -1;
 2979  }
 2980  if (dcdiff) {
 2981  if (dcdiff == 119 /* ESC index value */) {
 2982  /* TODO: Optimize */
 2983  if (mquant == 1) dcdiff = get_bits(gb, 10);
 2984  else if (mquant == 2) dcdiff = get_bits(gb, 9);
 2985  else dcdiff = get_bits(gb, 8);
 2986  } else {
 2987  if (mquant == 1)
 2988  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
 2989  else if (mquant == 2)
 2990  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
 2991  }
 2992  if (get_bits1(gb))
 2993  dcdiff = -dcdiff;
 2994  }
 2995 
 2996  /* Prediction */
 2997  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
 2998  *dc_val = dcdiff;
 2999 
 3000  /* Store the quantized DC coeff, used for prediction */
 3001 
 3002  if (n < 4) {
 3003  block[0] = dcdiff * s->y_dc_scale;
 3004  } else {
 3005  block[0] = dcdiff * s->c_dc_scale;
 3006  }
 3007 
 3008  //AC Decoding
 3009  i = 1;
 3010 
 3011  /* check if AC is needed at all and adjust direction if needed */
 3012  if (!a_avail) dc_pred_dir = 1;
 3013  if (!c_avail) dc_pred_dir = 0;
 3014  if (!a_avail && !c_avail) use_pred = 0;
 3015  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
 3016  ac_val2 = ac_val;
 3017 
 3018  scale = mquant * 2 + v->halfpq;
 3019 
 /* point ac_val at the predictor block's saved AC row/column */
 3020  if (dc_pred_dir) //left
 3021  ac_val -= 16;
 3022  else //top
 3023  ac_val -= 16 * s->block_wrap[n];
 3024 
 /* q1 = current MB quantizer, q2 = predictor MB quantizer (if any);
  * within the same MB (chroma/right/bottom subblocks) they are equal */
 3025  q1 = s->current_picture.qscale_table[mb_pos];
 3026  if (dc_pred_dir && c_avail && mb_pos)
 3027  q2 = s->current_picture.qscale_table[mb_pos - 1];
 3028  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
 3029  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
 3030  if ( dc_pred_dir && n == 1)
 3031  q2 = q1;
 3032  if (!dc_pred_dir && n == 2)
 3033  q2 = q1;
 3034  if (n == 3) q2 = q1;
 3035 
 3036  if (coded) {
 3037  int last = 0, skip, value;
 3038  int k;
 3039 
 /* run-level decode into zigzag positions; scan table depends on
  * frame coding mode and AC prediction direction */
 3040  while (!last) {
 3041  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
 3042  i += skip;
 3043  if (i > 63)
 3044  break;
 3045  if (v->fcm == PROGRESSIVE)
 3046  block[v->zz_8x8[0][i++]] = value;
 3047  else {
 3048  if (use_pred && (v->fcm == ILACE_FRAME)) {
 3049  if (!dc_pred_dir) // top
 3050  block[v->zz_8x8[2][i++]] = value;
 3051  else // left
 3052  block[v->zz_8x8[3][i++]] = value;
 3053  } else {
 3054  block[v->zzi_8x8[i++]] = value;
 3055  }
 3056  }
 3057  }
 3058 
 3059  /* apply AC prediction if needed */
 3060  if (use_pred) {
 3061  /* scale predictors if needed*/
 3062  if (q2 && q1 != q2) {
 /* rescale neighbour's AC coefficients from its quantizer (q2)
  * to ours (q1) via the dqscale reciprocal table */
 3063  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3064  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3065 
 3066  if (q1 < 1)
 3067  return AVERROR_INVALIDDATA;
 3068  if (dc_pred_dir) { // left
 3069  for (k = 1; k < 8; k++)
 3070  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3071  } else { //top
 3072  for (k = 1; k < 8; k++)
 3073  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3074  }
 3075  } else {
 3076  if (dc_pred_dir) { // left
 3077  for (k = 1; k < 8; k++)
 3078  block[k << v->left_blk_sh] += ac_val[k];
 3079  } else { // top
 3080  for (k = 1; k < 8; k++)
 3081  block[k << v->top_blk_sh] += ac_val[k + 8];
 3082  }
 3083  }
 3084  }
 3085  /* save AC coeffs for further prediction */
 3086  for (k = 1; k < 8; k++) {
 3087  ac_val2[k ] = block[k << v->left_blk_sh];
 3088  ac_val2[k + 8] = block[k << v->top_blk_sh];
 3089  }
 3090 
 3091  /* scale AC coeffs */
 3092  for (k = 1; k < 64; k++)
 3093  if (block[k]) {
 3094  block[k] *= scale;
 3095  if (!v->pquantizer)
 3096  block[k] += (block[k] < 0) ? -mquant : mquant;
 3097  }
 3098 
 3099  if (use_pred) i = 63;
 3100  } else { // no AC coeffs
 3101  int k;
 3102 
 3103  memset(ac_val2, 0, 16 * 2);
 3104  if (dc_pred_dir) { // left
 3105  if (use_pred) {
 3106  memcpy(ac_val2, ac_val, 8 * 2);
 3107  if (q2 && q1 != q2) {
 3108  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3109  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3110  if (q1 < 1)
 3111  return AVERROR_INVALIDDATA;
 3112  for (k = 1; k < 8; k++)
 3113  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3114  }
 3115  }
 3116  } else { // top
 3117  if (use_pred) {
 3118  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
 3119  if (q2 && q1 != q2) {
 3120  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3121  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3122  if (q1 < 1)
 3123  return AVERROR_INVALIDDATA;
 3124  for (k = 1; k < 8; k++)
 3125  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3126  }
 3127  }
 3128  }
 3129 
 3130  /* apply AC prediction if needed */
 3131  if (use_pred) {
 3132  if (dc_pred_dir) { // left
 3133  for (k = 1; k < 8; k++) {
 3134  block[k << v->left_blk_sh] = ac_val2[k] * scale;
 3135  if (!v->pquantizer && block[k << v->left_blk_sh])
 3136  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
 3137  }
 3138  } else { // top
 3139  for (k = 1; k < 8; k++) {
 3140  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
 3141  if (!v->pquantizer && block[k << v->top_blk_sh])
 3142  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
 3143  }
 3144  }
 3145  i = 63;
 3146  }
 3147  }
 3148  s->block_last_index[n] = i;
 3149 
 3150  return 0;
 3151 }
3152 
 3153 /** Decode one residual block of a P macroblock: reads the block transform
 3154  * type (8x8/8x4/4x8/4x4) and sub-block pattern, run-level decodes the
 3155  * coefficients, inverse-transforms and adds them to dst unless skip_block
 3156  * is set, and returns the coded sub-block pattern (pat); the chosen
 3157  * transform type is OR-ed into *ttmb_out when non-NULL.
 3158  */
 3155 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
 3156  int mquant, int ttmb, int first_block,
 3157  uint8_t *dst, int linesize, int skip_block,
 3158  int *ttmb_out)
 3159 {
 3160  MpegEncContext *s = &v->s;
 3161  GetBitContext *gb = &s->gb;
 3162  int i, j;
 3163  int subblkpat = 0;
 3164  int scale, off, idx, last, skip, value;
 3165  int ttblk = ttmb & 7;
 3166  int pat = 0;
 3167 
 3168  s->dsp.clear_block(block);
 3169 
 /* NOTE(review): the extraction dropped the body of this branch (original
  * line 3171), which reads the per-block transform type from the bitstream
  * when the MB-level ttmb is unset — confirm against the original source. */
 3170  if (ttmb == -1) {
 3172  }
 3173  if (ttblk == TT_4X4) {
 3174  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
 3175  }
 3176  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
 3177  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
 3178  || (!v->res_rtm_flag && !first_block))) {
 3179  subblkpat = decode012(gb);
 3180  if (subblkpat)
 3181  subblkpat ^= 3; // swap decoded pattern bits
 3182  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
 3183  ttblk = TT_8X4;
 3184  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
 3185  ttblk = TT_4X8;
 3186  }
 3187  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
 3188 
 3189  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
 3190  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
 3191  subblkpat = 2 - (ttblk == TT_8X4_TOP);
 3192  ttblk = TT_8X4;
 3193  }
 3194  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
 3195  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
 3196  ttblk = TT_4X8;
 3197  }
 3198  switch (ttblk) {
 3199  case TT_8X8:
 3200  pat = 0xF;
 3201  i = 0;
 3202  last = 0;
 3203  while (!last) {
 3204  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3205  i += skip;
 3206  if (i > 63)
 3207  break;
 /* progressive vs. interlaced zigzag scan */
 3208  if (!v->fcm)
 3209  idx = v->zz_8x8[0][i++];
 3210  else
 3211  idx = v->zzi_8x8[i++];
 3212  block[idx] = value * scale;
 3213  if (!v->pquantizer)
 3214  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3215  }
 3216  if (!skip_block) {
 /* DC-only blocks take the cheap DC-only inverse transform */
 3217  if (i == 1)
 3218  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
 3219  else {
 3220  v->vc1dsp.vc1_inv_trans_8x8(block);
 3221  s->dsp.add_pixels_clamped(block, dst, linesize);
 3222  }
 3223  }
 3224  break;
 3225  case TT_4X4:
 3226  pat = ~subblkpat & 0xF;
 3227  for (j = 0; j < 4; j++) {
 3228  last = subblkpat & (1 << (3 - j));
 3229  i = 0;
 3230  off = (j & 1) * 4 + (j & 2) * 16;
 3231  while (!last) {
 3232  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3233  i += skip;
 3234  if (i > 15)
 3235  break;
 /* NOTE(review): extraction dropped the progressive-scan index line
  * (original line 3237) in this branch — confirm against the source. */
 3236  if (!v->fcm)
 3238  else
 3239  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
 3240  block[idx + off] = value * scale;
 3241  if (!v->pquantizer)
 3242  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
 3243  }
 3244  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
 3245  if (i == 1)
 3246  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
 3247  else
 3248  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
 3249  }
 3250  }
 3251  break;
 3252  case TT_8X4:
 3253  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
 3254  for (j = 0; j < 2; j++) {
 3255  last = subblkpat & (1 << (1 - j));
 3256  i = 0;
 3257  off = j * 32;
 3258  while (!last) {
 3259  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3260  i += skip;
 3261  if (i > 31)
 3262  break;
 3263  if (!v->fcm)
 3264  idx = v->zz_8x4[i++] + off;
 3265  else
 3266  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
 3267  block[idx] = value * scale;
 3268  if (!v->pquantizer)
 3269  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3270  }
 3271  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
 3272  if (i == 1)
 3273  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
 3274  else
 3275  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
 3276  }
 3277  }
 3278  break;
 3279  case TT_4X8:
 3280  pat = ~(subblkpat * 5) & 0xF;
 3281  for (j = 0; j < 2; j++) {
 3282  last = subblkpat & (1 << (1 - j));
 3283  i = 0;
 3284  off = j * 4;
 3285  while (!last) {
 3286  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3287  i += skip;
 3288  if (i > 31)
 3289  break;
 3290  if (!v->fcm)
 3291  idx = v->zz_4x8[i++] + off;
 3292  else
 3293  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
 3294  block[idx] = value * scale;
 3295  if (!v->pquantizer)
 3296  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3297  }
 3298  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
 3299  if (i == 1)
 3300  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
 3301  else
 3302  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
 3303  }
 3304  }
 3305  break;
 3306  }
 3307  if (ttmb_out)
 3308  *ttmb_out |= ttblk << (n * 4);
 3309  return pat;
 3310 }
3311 
3312 /** @} */ // Macroblock group
3313 
 /* MV differential decoding LUTs (see GET_MVDATA): bit widths and
  * cumulative offsets for each escape-size index. */
 3314 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
 3315 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3316 
 3318 {
 /* Vertical (horizontal-edge) in-loop deblocking for one block of the MB
  * one row above the current position. Filters the bottom edge against
  * the block below when MVs differ or either side is intra, then filters
  * internal sub-block edges left by 4x4/8x4 transforms.
  * NOTE(review): the function signature (original line 3317) was dropped
  * by the extraction. */
 3319  MpegEncContext *s = &v->s;
 3320  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
 3321  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
 3322  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
 3323  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
 3324  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
 3325  uint8_t *dst;
 3326 
 3327  if (block_num > 3) {
 3328  dst = s->dest[block_num - 3];
 3329  } else {
 3330  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
 3331  }
 3332  if (s->mb_y != s->end_mb_y || block_num < 2) {
 3333  int16_t (*mv)[2];
 3334  int mv_stride;
 3335 
 /* locate the cbp/intra flags and MV of the block directly below */
 3336  if (block_num > 3) {
 3337  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
 3338  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
 3339  mv = &v->luma_mv[s->mb_x - s->mb_stride];
 3340  mv_stride = s->mb_stride;
 3341  } else {
 3342  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
 3343  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
 3344  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
 3345  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
 3346  mv_stride = s->b8_stride;
 3347  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
 3348  }
 3349 
 3350  if (bottom_is_intra & 1 || block_is_intra & 1 ||
 3351  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
 3352  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3353  } else {
 /* otherwise filter only the coded 4-pixel halves of the edge */
 3354  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
 3355  if (idx == 3) {
 3356  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3357  } else if (idx) {
 3358  if (idx == 1)
 3359  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
 3360  else
 3361  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
 3362  }
 3363  }
 3364  }
 3365 
 /* internal horizontal edge (block middle) for 4x4/8x4 transform types */
 3366  dst -= 4 * linesize;
 3367  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
 3368  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
 3369  idx = (block_cbp | (block_cbp >> 2)) & 3;
 3370  if (idx == 3) {
 3371  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3372  } else if (idx) {
 3373  if (idx == 1)
 3374  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
 3375  else
 3376  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
 3377  }
 3378  }
 3379 }
3380 
 3382 {
 /* Horizontal (vertical-edge) in-loop deblocking for one block of the MB
  * one row and one column behind the current position; mirrors the
  * vertical-filter routine above but along the right edge. 
  * NOTE(review): the function signature (original line 3381) was dropped
  * by the extraction. */
 3383  MpegEncContext *s = &v->s;
 3384  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
 3385  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
 3386  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
 3387  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
 3388  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
 3389  uint8_t *dst;
 3390 
 3391  if (block_num > 3) {
 3392  dst = s->dest[block_num - 3] - 8 * linesize;
 3393  } else {
 3394  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
 3395  }
 3396 
 3397  if (s->mb_x != s->mb_width || !(block_num & 5)) {
 3398  int16_t (*mv)[2];
 3399 
 /* locate the cbp/intra flags and MV of the block to the right */
 3400  if (block_num > 3) {
 3401  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
 3402  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
 3403  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
 3404  } else {
 3405  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
 3406  : (mb_cbp >> ((block_num + 1) * 4));
 3407  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
 3408  : (mb_is_intra >> ((block_num + 1) * 4));
 3409  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
 3410  }
 3411  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
 3412  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3413  } else {
 3414  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
 3415  if (idx == 5) {
 3416  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3417  } else if (idx) {
 3418  if (idx == 1)
 3419  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
 3420  else
 3421  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
 3422  }
 3423  }
 3424  }
 3425 
 /* internal vertical edge (block middle) for 4x4/4x8 transform types */
 3426  dst -= 4;
 3427  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
 3428  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
 3429  idx = (block_cbp | (block_cbp >> 1)) & 5;
 3430  if (idx == 5) {
 3431  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3432  } else if (idx) {
 3433  if (idx == 1)
 3434  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
 3435  else
 3436  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
 3437  }
 3438  }
 3439 }
3440 
 3442 {
 /* Per-MB loop-filter driver: runs the vertical filter on all six blocks,
  * then (one MB behind, since V must precede H) the horizontal filter,
  * catching up at the end of each row.
  * NOTE(review): the extraction dropped the signature (original line 3441)
  * and the filter-call lines inside each loop (3447, 3454, 3458, 3460) —
  * confirm against the original source before editing. */
 3443  MpegEncContext *s = &v->s;
 3444  int i;
 3445 
 3446  for (i = 0; i < 6; i++) {
 3448  }
 3449 
 3450  /* V always precedes H, therefore we run H one MB before V;
 3451  * at the end of a row, we catch up to complete the row */
 3452  if (s->mb_x) {
 3453  for (i = 0; i < 6; i++) {
 3455  }
 3456  if (s->mb_x == s->mb_width - 1) {
 3457  s->mb_x++;
 3459  for (i = 0; i < 6; i++) {
 3461  }
 3462  }
 3463  }
 3464 }
3465 
3466 /** Decode one P-frame MB
3467  */
 3469 {
 /* Decode one progressive P-frame macroblock: reads skip/4MV flags, then
  * handles the four cases (1MV coded, 1MV skipped, 4MV coded, 4MV skipped),
  * performing MV prediction, motion compensation, intra/inter block decode
  * and overlap smoothing, and records per-MB cbp/tt/intra state for the
  * loop filter. Returns 0.
  * NOTE(review): the signature (original line 3468) and lines 3506 / 3639
  * were dropped by the extraction — confirm against the original source. */
 3470  MpegEncContext *s = &v->s;
 3471  GetBitContext *gb = &s->gb;
 3472  int i, j;
 3473  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 3474  int cbp; /* cbp decoding stuff */
 3475  int mqdiff, mquant; /* MB quantization */
 3476  int ttmb = v->ttfrm; /* MB Transform type */
 3477 
 3478  int mb_has_coeffs = 1; /* last_flag */
 3479  int dmv_x, dmv_y; /* Differential MV components */
 3480  int index, index1; /* LUT indexes */
 3481  int val, sign; /* temp values */
 3482  int first_block = 1;
 3483  int dst_idx, off;
 3484  int skipped, fourmv;
 3485  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
 3486 
 3487  mquant = v->pq; /* lossy initialization */
 3488 
 /* MB-type and skip flags: read raw from the bitstream or take the
  * bitplane-decoded value */
 3489  if (v->mv_type_is_raw)
 3490  fourmv = get_bits1(gb);
 3491  else
 3492  fourmv = v->mv_type_mb_plane[mb_pos];
 3493  if (v->skip_is_raw)
 3494  skipped = get_bits1(gb);
 3495  else
 3496  skipped = v->s.mbskip_table[mb_pos];
 3497 
 3498  if (!fourmv) { /* 1MV mode */
 3499  if (!skipped) {
 3500  GET_MVDATA(dmv_x, dmv_y);
 3501 
 3502  if (s->mb_intra) {
 3503  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
 3504  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 3505  }
 3507  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3508 
 3509  /* FIXME Set DC val for inter block ? */
 3510  if (s->mb_intra && !mb_has_coeffs) {
 3511  GET_MQUANT();
 3512  s->ac_pred = get_bits1(gb);
 3513  cbp = 0;
 3514  } else if (mb_has_coeffs) {
 3515  if (s->mb_intra)
 3516  s->ac_pred = get_bits1(gb);
 3517  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 3518  GET_MQUANT();
 3519  } else {
 3520  mquant = v->pq;
 3521  cbp = 0;
 3522  }
 3523  s->current_picture.qscale_table[mb_pos] = mquant;
 3524 
 3525  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
 3526  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
 3527  VC1_TTMB_VLC_BITS, 2);
 3528  if (!s->mb_intra) vc1_mc_1mv(v, 0);
 3529  dst_idx = 0;
 3530  for (i = 0; i < 6; i++) {
 3531  s->dc_val[0][s->block_index[i]] = 0;
 3532  dst_idx += i >> 2;
 3533  val = ((cbp >> (5 - i)) & 1);
 3534  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3535  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 3536  if (s->mb_intra) {
 3537  /* check if prediction blocks A and C are available */
 3538  v->a_avail = v->c_avail = 0;
 3539  if (i == 2 || i == 3 || !s->first_slice_line)
 3540  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3541  if (i == 1 || i == 3 || s->mb_x)
 3542  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3543 
 3544  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 3545  (i & 4) ? v->codingset2 : v->codingset);
 3546  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 3547  continue;
 3548  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 3549  if (v->rangeredfrm)
 3550  for (j = 0; j < 64; j++)
 3551  s->block[i][j] <<= 1;
 3552  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 /* overlap smoothing across block edges at high quantizers */
 3553  if (v->pq >= 9 && v->overlap) {
 3554  if (v->c_avail)
 3555  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3556  if (v->a_avail)
 3557  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3558  }
 3559  block_cbp |= 0xF << (i << 2);
 3560  block_intra |= 1 << i;
 3561  } else if (val) {
 3562  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
 3563  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
 3564  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
 3565  block_cbp |= pat << (i << 2);
 3566  if (!v->ttmbf && ttmb < 8)
 3567  ttmb = -1;
 3568  first_block = 0;
 3569  }
 3570  }
 3571  } else { // skipped
 3572  s->mb_intra = 0;
 3573  for (i = 0; i < 6; i++) {
 3574  v->mb_type[0][s->block_index[i]] = 0;
 3575  s->dc_val[0][s->block_index[i]] = 0;
 3576  }
 3577  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
 3578  s->current_picture.qscale_table[mb_pos] = 0;
 3579  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3580  vc1_mc_1mv(v, 0);
 3581  }
 3582  } else { // 4MV mode
 3583  if (!skipped /* unskipped MB */) {
 3584  int intra_count = 0, coded_inter = 0;
 3585  int is_intra[6], is_coded[6];
 3586  /* Get CBPCY */
 3587  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 3588  for (i = 0; i < 6; i++) {
 3589  val = ((cbp >> (5 - i)) & 1);
 3590  s->dc_val[0][s->block_index[i]] = 0;
 3591  s->mb_intra = 0;
 3592  if (i < 4) {
 3593  dmv_x = dmv_y = 0;
 3594  s->mb_intra = 0;
 3595  mb_has_coeffs = 0;
 3596  if (val) {
 3597  GET_MVDATA(dmv_x, dmv_y);
 3598  }
 3599  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3600  if (!s->mb_intra)
 3601  vc1_mc_4mv_luma(v, i, 0, 0);
 3602  intra_count += s->mb_intra;
 3603  is_intra[i] = s->mb_intra;
 3604  is_coded[i] = mb_has_coeffs;
 3605  }
 /* chroma blocks are intra when 3+ luma blocks are intra */
 3606  if (i & 4) {
 3607  is_intra[i] = (intra_count >= 3);
 3608  is_coded[i] = val;
 3609  }
 3610  if (i == 4)
 3611  vc1_mc_4mv_chroma(v, 0);
 3612  v->mb_type[0][s->block_index[i]] = is_intra[i];
 /* bitwise & is fine here: both operands are 0/1 flags */
 3613  if (!coded_inter)
 3614  coded_inter = !is_intra[i] & is_coded[i];
 3615  }
 3616  // if there are no coded blocks then don't do anything more
 3617  dst_idx = 0;
 3618  if (!intra_count && !coded_inter)
 3619  goto end;
 3620  GET_MQUANT();
 3621  s->current_picture.qscale_table[mb_pos] = mquant;
 3622  /* test if block is intra and has pred */
 3623  {
 3624  int intrapred = 0;
 3625  for (i = 0; i < 6; i++)
 3626  if (is_intra[i]) {
 3627  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
 3628  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
 3629  intrapred = 1;
 3630  break;
 3631  }
 3632  }
 3633  if (intrapred)
 3634  s->ac_pred = get_bits1(gb);
 3635  else
 3636  s->ac_pred = 0;
 3637  }
 /* NOTE(review): the extraction dropped the ttmb VLC read on original
  * line 3639 for the coded_inter case — confirm against the source. */
 3638  if (!v->ttmbf && coded_inter)
 3640  for (i = 0; i < 6; i++) {
 3641  dst_idx += i >> 2;
 3642  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3643  s->mb_intra = is_intra[i];
 3644  if (is_intra[i]) {
 3645  /* check if prediction blocks A and C are available */
 3646  v->a_avail = v->c_avail = 0;
 3647  if (i == 2 || i == 3 || !s->first_slice_line)
 3648  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3649  if (i == 1 || i == 3 || s->mb_x)
 3650  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3651 
 3652  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
 3653  (i & 4) ? v->codingset2 : v->codingset);
 3654  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 3655  continue;
 3656  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 3657  if (v->rangeredfrm)
 3658  for (j = 0; j < 64; j++)
 3659  s->block[i][j] <<= 1;
 3660  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
 3661  (i & 4) ? s->uvlinesize : s->linesize);
 3662  if (v->pq >= 9 && v->overlap) {
 3663  if (v->c_avail)
 3664  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3665  if (v->a_avail)
 3666  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3667  }
 3668  block_cbp |= 0xF << (i << 2);
 3669  block_intra |= 1 << i;
 3670  } else if (is_coded[i]) {
 3671  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 3672  first_block, s->dest[dst_idx] + off,
 3673  (i & 4) ? s->uvlinesize : s->linesize,
 3674  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
 3675  &block_tt);
 3676  block_cbp |= pat << (i << 2);
 3677  if (!v->ttmbf && ttmb < 8)
 3678  ttmb = -1;
 3679  first_block = 0;
 3680  }
 3681  }
 3682  } else { // skipped MB
 3683  s->mb_intra = 0;
 3684  s->current_picture.qscale_table[mb_pos] = 0;
 3685  for (i = 0; i < 6; i++) {
 3686  v->mb_type[0][s->block_index[i]] = 0;
 3687  s->dc_val[0][s->block_index[i]] = 0;
 3688  }
 3689  for (i = 0; i < 4; i++) {
 3690  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3691  vc1_mc_4mv_luma(v, i, 0, 0);
 3692  }
 3693  vc1_mc_4mv_chroma(v, 0);
 /* NOTE(review): qscale_table[mb_pos] was already zeroed above —
  * this second store is redundant */
 3694  s->current_picture.qscale_table[mb_pos] = 0;
 3695  }
 3696  }
 3697 end:
 /* record per-MB state consumed later by the loop filter */
 3698  v->cbp[s->mb_x] = block_cbp;
 3699  v->ttblk[s->mb_x] = block_tt;
 3700  v->is_intra[s->mb_x] = block_intra;
 3701 
 3702  return 0;
 3703 }
3704 
3705 /* Decode one macroblock in an interlaced frame p picture */
3706 
3708 {
3709  MpegEncContext *s = &v->s;
3710  GetBitContext *gb = &s->gb;
3711  int i;
3712  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3713  int cbp = 0; /* cbp decoding stuff */
3714  int mqdiff, mquant; /* MB quantization */
3715  int ttmb = v->ttfrm; /* MB Transform type */
3716 
3717  int mb_has_coeffs = 1; /* last_flag */
3718  int dmv_x, dmv_y; /* Differential MV components */
3719  int val; /* temp value */
3720  int first_block = 1;
3721  int dst_idx, off;
3722  int skipped, fourmv = 0, twomv = 0;
3723  int block_cbp = 0, pat, block_tt = 0;
3724  int idx_mbmode = 0, mvbp;
3725  int stride_y, fieldtx;
3726 
3727  mquant = v->pq; /* Lossy initialization */
3728 
3729  if (v->skip_is_raw)
3730  skipped = get_bits1(gb);
3731  else
3732  skipped = v->s.mbskip_table[mb_pos];
3733  if (!skipped) {
3734  if (v->fourmvswitch)
3735  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3736  else
3737  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3738  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3739  /* store the motion vector type in a flag (useful later) */
3740  case MV_PMODE_INTFR_4MV:
3741  fourmv = 1;
3742  v->blk_mv_type[s->block_index[0]] = 0;
3743  v->blk_mv_type[s->block_index[1]] = 0;
3744  v->blk_mv_type[s->block_index[2]] = 0;
3745  v->blk_mv_type[s->block_index[3]] = 0;
3746  break;
3748  fourmv = 1;
3749  v->blk_mv_type[s->block_index[0]] = 1;
3750  v->blk_mv_type[s->block_index[1]] = 1;
3751  v->blk_mv_type[s->block_index[2]] = 1;
3752  v->blk_mv_type[s->block_index[3]] = 1;
3753  break;
3755  twomv = 1;
3756  v->blk_mv_type[s->block_index[0]] = 1;
3757  v->blk_mv_type[s->block_index[1]] = 1;
3758  v->blk_mv_type[s->block_index[2]] = 1;
3759  v->blk_mv_type[s->block_index[3]] = 1;
3760  break;
3761  case MV_PMODE_INTFR_1MV:
3762  v->blk_mv_type[s->block_index[0]] = 0;
3763  v->blk_mv_type[s->block_index[1]] = 0;
3764  v->blk_mv_type[s->block_index[2]] = 0;
3765  v->blk_mv_type[s->block_index[3]] = 0;
3766  break;
3767  }
3768  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3769  for (i = 0; i < 4; i++) {
3770  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3771  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3772  }
3773  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3774  s->mb_intra = v->is_intra[s->mb_x] = 1;
3775  for (i = 0; i < 6; i++)
3776  v->mb_type[0][s->block_index[i]] = 1;
3777  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3778  mb_has_coeffs = get_bits1(gb);
3779  if (mb_has_coeffs)
3780  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3781  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3782  GET_MQUANT();
3783  s->current_picture.qscale_table[mb_pos] = mquant;
3784  /* Set DC scale - y and c use the same (not sure if necessary here) */
3785  s->y_dc_scale = s->y_dc_scale_table[mquant];
3786  s->c_dc_scale = s->c_dc_scale_table[mquant];
3787  dst_idx = 0;
3788  for (i = 0; i < 6; i++) {
3789  s->dc_val[0][s->block_index[i]] = 0;
3790  dst_idx += i >> 2;
3791  val = ((cbp >> (5 - i)) & 1);
3792  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3793  v->a_avail = v->c_avail = 0;
3794  if (i == 2 || i == 3 || !s->first_slice_line)
3795  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3796  if (i == 1 || i == 3 || s->mb_x)
3797  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3798 
3799  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3800  (i & 4) ? v->codingset2 : v->codingset);
3801  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3802  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3803  if (i < 4) {
3804  stride_y = s->linesize << fieldtx;
3805  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3806  } else {
3807  stride_y = s->uvlinesize;
3808  off = 0;
3809  }
3810  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3811  //TODO: loop filter
3812  }
3813 
3814  } else { // inter MB
3815  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3816  if (mb_has_coeffs)
3817  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3818  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3820  } else {
3821  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3822  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3824  }
3825  }
3826  s->mb_intra = v->is_intra[s->mb_x] = 0;
3827  for (i = 0; i < 6; i++)
3828  v->mb_type[0][s->block_index[i]] = 0;
3829  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3830  /* for all motion vector read MVDATA and motion compensate each block */
3831  dst_idx = 0;
3832  if (fourmv) {
3833  mvbp = v->fourmvbp;
3834  for (i = 0; i < 6; i++) {
3835  if (i < 4) {
3836  dmv_x = dmv_y = 0;
3837  val = ((mvbp >> (3 - i)) & 1);
3838  if (val) {
3839  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3840  }
3841  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3842  vc1_mc_4mv_luma(v, i, 0, 0);
3843  } else if (i == 4) {
3844  vc1_mc_4mv_chroma4(v);
3845  }
3846  }
3847  } else if (twomv) {
3848  mvbp = v->twomvbp;
3849  dmv_x = dmv_y = 0;
3850  if (mvbp & 2) {
3851  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3852  }
3853  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3854  vc1_mc_4mv_luma(v, 0, 0, 0);
3855  vc1_mc_4mv_luma(v, 1, 0, 0);
3856  dmv_x = dmv_y = 0;
3857  if (mvbp & 1) {
3858  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3859  }
3860  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3861  vc1_mc_4mv_luma(v, 2, 0, 0);
3862  vc1_mc_4mv_luma(v, 3, 0, 0);
3863  vc1_mc_4mv_chroma4(v);
3864  } else {
3865  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3866  dmv_x = dmv_y = 0;
3867  if (mvbp) {
3868  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3869  }
3870  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3871  vc1_mc_1mv(v, 0);
3872  }
3873  if (cbp)
3874  GET_MQUANT(); // p. 227
3875  s->current_picture.qscale_table[mb_pos] = mquant;
3876  if (!v->ttmbf && cbp)
3878  for (i = 0; i < 6; i++) {
3879  s->dc_val[0][s->block_index[i]] = 0;
3880  dst_idx += i >> 2;
3881  val = ((cbp >> (5 - i)) & 1);
3882  if (!fieldtx)
3883  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3884  else
3885  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3886  if (val) {
3887  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3888  first_block, s->dest[dst_idx] + off,
3889  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3890  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3891  block_cbp |= pat << (i << 2);
3892  if (!v->ttmbf && ttmb < 8)
3893  ttmb = -1;
3894  first_block = 0;
3895  }
3896  }
3897  }
3898  } else { // skipped
3899  s->mb_intra = v->is_intra[s->mb_x] = 0;
3900  for (i = 0; i < 6; i++) {
3901  v->mb_type[0][s->block_index[i]] = 0;
3902  s->dc_val[0][s->block_index[i]] = 0;
3903  }
3904  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3905  s->current_picture.qscale_table[mb_pos] = 0;
3906  v->blk_mv_type[s->block_index[0]] = 0;
3907  v->blk_mv_type[s->block_index[1]] = 0;
3908  v->blk_mv_type[s->block_index[2]] = 0;
3909  v->blk_mv_type[s->block_index[3]] = 0;
3910  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3911  vc1_mc_1mv(v, 0);
3912  }
3913  if (s->mb_x == s->mb_width - 1)
3914  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3915  return 0;
3916 }
3917 
/* NOTE(review): this is a doxygen source listing; the embedded numbers are the
 * original file's line numbers, and lines that were hyperlinks are missing.
 * The function signature (original line 3918) is absent; judging by the
 * VC1_IF_MBMODE VLC and the interlaced MV helpers used below, this is
 * presumably the P-frame interlaced-FIELD macroblock decoder
 * (vc1_decode_p_mb_intfi) -- TODO confirm against the upstream file.
 * Decodes one MB: either fully intra (idx_mbmode <= 1) or inter with
 * 1-MV / 4-MV motion compensation, then residual blocks. */
 3919 {
 3920  MpegEncContext *s = &v->s;
 3921  GetBitContext *gb = &s->gb;
 3922  int i;
 3923  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 3924  int cbp = 0; /* cbp decoding stuff */
 3925  int mqdiff, mquant; /* MB quantization */
 3926  int ttmb = v->ttfrm; /* MB Transform type */
 3927 
 3928  int mb_has_coeffs = 1; /* last_flag */
 3929  int dmv_x, dmv_y; /* Differential MV components */
 3930  int val; /* temp values */
 3931  int first_block = 1;
 3932  int dst_idx, off;
 3933  int pred_flag = 0;
 3934  int block_cbp = 0, pat, block_tt = 0;
 3935  int idx_mbmode = 0;
 3936 
 3937  mquant = v->pq; /* Lossy initialization */
 3938 
/* MB mode VLC selects intra vs. 1-MV vs. 4-MV coding for this field MB. */
 3939  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
 3940  if (idx_mbmode <= 1) { // intra MB
 3941  s->mb_intra = v->is_intra[s->mb_x] = 1;
 3942  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
 3943  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
 3944  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
 3945  GET_MQUANT();
 3946  s->current_picture.qscale_table[mb_pos] = mquant;
 3947  /* Set DC scale - y and c use the same (not sure if necessary here) */
 3948  s->y_dc_scale = s->y_dc_scale_table[mquant];
 3949  s->c_dc_scale = s->c_dc_scale_table[mquant];
 3950  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
 3951  mb_has_coeffs = idx_mbmode & 1;
 3952  if (mb_has_coeffs)
 3953  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
 3954  dst_idx = 0;
 3955  for (i = 0; i < 6; i++) {
 3956  s->dc_val[0][s->block_index[i]] = 0;
 3957  v->mb_type[0][s->block_index[i]] = 1;
 3958  dst_idx += i >> 2;
 3959  val = ((cbp >> (5 - i)) & 1);
/* Availability of the above (A) and left (C) neighbours for DC/AC prediction. */
 3960  v->a_avail = v->c_avail = 0;
 3961  if (i == 2 || i == 3 || !s->first_slice_line)
 3962  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3963  if (i == 1 || i == 3 || s->mb_x)
 3964  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3965 
 3966  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 3967  (i & 4) ? v->codingset2 : v->codingset);
 3968  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 3969  continue;
 3970  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 3971  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3972  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
 3973  // TODO: loop filter
 3974  }
 3975  } else {
 3976  s->mb_intra = v->is_intra[s->mb_x] = 0;
 3977  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
 3978  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
 3979  if (idx_mbmode <= 5) { // 1-MV
 3980  dmv_x = dmv_y = pred_flag = 0;
 3981  if (idx_mbmode & 1) {
 3982  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
 3983  }
 3984  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
 3985  vc1_mc_1mv(v, 0);
 3986  mb_has_coeffs = !(idx_mbmode & 2);
 3987  } else { // 4-MV
/* NOTE(review): original line 3988 is missing from this listing -- presumably
 * the read of v->fourmvbp (4-MV block pattern VLC) used below; confirm upstream. */
 3989  for (i = 0; i < 6; i++) {
 3990  if (i < 4) {
 3991  dmv_x = dmv_y = pred_flag = 0;
 3992  val = ((v->fourmvbp >> (3 - i)) & 1);
 3993  if (val) {
 3994  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
 3995  }
 3996  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
 3997  vc1_mc_4mv_luma(v, i, 0, 0);
 3998  } else if (i == 4)
 3999  vc1_mc_4mv_chroma(v, 0);
 4000  }
 4001  mb_has_coeffs = idx_mbmode & 1;
 4002  }
 4003  if (mb_has_coeffs)
 4004  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4005  if (cbp) {
 4006  GET_MQUANT();
 4007  }
 4008  s->current_picture.qscale_table[mb_pos] = mquant;
 4009  if (!v->ttmbf && cbp) {
/* NOTE(review): original line 4010 is missing -- presumably the per-MB
 * transform-type VLC read into ttmb; confirm upstream. */
 4011  }
 4012  dst_idx = 0;
 4013  for (i = 0; i < 6; i++) {
 4014  s->dc_val[0][s->block_index[i]] = 0;
 4015  dst_idx += i >> 2;
 4016  val = ((cbp >> (5 - i)) & 1);
 4017  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
 4018  if (val) {
 4019  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 4020  first_block, s->dest[dst_idx] + off,
 4021  (i & 4) ? s->uvlinesize : s->linesize,
 4022  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
 4023  &block_tt);
 4024  block_cbp |= pat << (i << 2);
 4025  if (!v->ttmbf && ttmb < 8) ttmb = -1;
 4026  first_block = 0;
 4027  }
 4028  }
 4029  }
/* At the end of each MB row, propagate the intra flags for the next row's prediction. */
 4030  if (s->mb_x == s->mb_width - 1)
 4031  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
 4032  return 0;
 4033 }
4034 
4035 /** Decode one B-frame MB (in Main profile)
4036  */
/* Decode one B-frame MB in a progressive (Main profile) picture, per the doc
 * comment preceding this listing. NOTE(review): the signature (original line
 * 4037) and lines 4111/4148 are missing from this doxygen listing -- the
 * latter two presumably read the per-MB transform type into ttmb; confirm
 * upstream. Handles direct/skipped MBs, forward/backward/interpolated
 * prediction, and intra/inter residual blocks. */
 4038 {
 4039  MpegEncContext *s = &v->s;
 4040  GetBitContext *gb = &s->gb;
 4041  int i, j;
 4042  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 4043  int cbp = 0; /* cbp decoding stuff */
 4044  int mqdiff, mquant; /* MB quantization */
 4045  int ttmb = v->ttfrm; /* MB Transform type */
 4046  int mb_has_coeffs = 0; /* last_flag */
 4047  int index, index1; /* LUT indexes */
 4048  int val, sign; /* temp values */
 4049  int first_block = 1;
 4050  int dst_idx, off;
 4051  int skipped, direct;
 4052  int dmv_x[2], dmv_y[2];
 4053  int bmvtype = BMV_TYPE_BACKWARD;
 4054 
 4055  mquant = v->pq; /* lossy initialization */
 4056  s->mb_intra = 0;
 4057 
/* DIRECT and SKIP flags come either raw from the bitstream or from the
 * bitplane decoded in the picture header. */
 4058  if (v->dmb_is_raw)
 4059  direct = get_bits1(gb);
 4060  else
 4061  direct = v->direct_mb_plane[mb_pos];
 4062  if (v->skip_is_raw)
 4063  skipped = get_bits1(gb);
 4064  else
 4065  skipped = v->s.mbskip_table[mb_pos];
 4066 
 4067  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
 4068  for (i = 0; i < 6; i++) {
 4069  v->mb_type[0][s->block_index[i]] = 0;
 4070  s->dc_val[0][s->block_index[i]] = 0;
 4071  }
 4072  s->current_picture.qscale_table[mb_pos] = 0;
 4073 
 4074  if (!direct) {
 4075  if (!skipped) {
 4076  GET_MVDATA(dmv_x[0], dmv_y[0]);
 4077  dmv_x[1] = dmv_x[0];
 4078  dmv_y[1] = dmv_y[0];
 4079  }
 4080  if (skipped || !s->mb_intra) {
/* Map the 0/1/2 code onto forward/backward/interpolated depending on which
 * reference is temporally closer (bfraction vs. half). */
 4081  bmvtype = decode012(gb);
 4082  switch (bmvtype) {
 4083  case 0:
 4084  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
 4085  break;
 4086  case 1:
 4087  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
 4088  break;
 4089  case 2:
 4090  bmvtype = BMV_TYPE_INTERPOLATED;
 4091  dmv_x[0] = dmv_y[0] = 0;
 4092  }
 4093  }
 4094  }
 4095  for (i = 0; i < 6; i++)
 4096  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 4097 
 4098  if (skipped) {
 4099  if (direct)
 4100  bmvtype = BMV_TYPE_INTERPOLATED;
 4101  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4102  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
 4103  return;
 4104  }
 4105  if (direct) {
 4106  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4107  GET_MQUANT();
 4108  s->mb_intra = 0;
 4109  s->current_picture.qscale_table[mb_pos] = mquant;
 4110  if (!v->ttmbf)
/* NOTE(review): original line 4111 missing -- presumably the body of this if
 * (transform-type VLC read into ttmb); confirm upstream. */
 4112  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
 4113  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4114  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
 4115  } else {
 4116  if (!mb_has_coeffs && !s->mb_intra) {
 4117  /* no coded blocks - effectively skipped */
 4118  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4119  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
 4120  return;
 4121  }
 4122  if (s->mb_intra && !mb_has_coeffs) {
 4123  GET_MQUANT();
 4124  s->current_picture.qscale_table[mb_pos] = mquant;
 4125  s->ac_pred = get_bits1(gb);
 4126  cbp = 0;
 4127  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4128  } else {
 4129  if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* Interpolated mode carries a second MV delta (the backward one). */
 4130  GET_MVDATA(dmv_x[0], dmv_y[0]);
 4131  if (!mb_has_coeffs) {
 4132  /* interpolated skipped block */
 4133  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4134  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
 4135  return;
 4136  }
 4137  }
 4138  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
 4139  if (!s->mb_intra) {
 4140  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
 4141  }
 4142  if (s->mb_intra)
 4143  s->ac_pred = get_bits1(gb);
 4144  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4145  GET_MQUANT();
 4146  s->current_picture.qscale_table[mb_pos] = mquant;
 4147  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
/* NOTE(review): original line 4148 missing -- presumably the transform-type
 * VLC read into ttmb; confirm upstream. */
 4149  }
 4150  }
/* Residual decoding: intra blocks via DC/AC prediction, inter blocks via the
 * P-block path with per-block transform types. */
 4151  dst_idx = 0;
 4152  for (i = 0; i < 6; i++) {
 4153  s->dc_val[0][s->block_index[i]] = 0;
 4154  dst_idx += i >> 2;
 4155  val = ((cbp >> (5 - i)) & 1);
 4156  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 4157  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 4158  if (s->mb_intra) {
 4159  /* check if prediction blocks A and C are available */
 4160  v->a_avail = v->c_avail = 0;
 4161  if (i == 2 || i == 3 || !s->first_slice_line)
 4162  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 4163  if (i == 1 || i == 3 || s->mb_x)
 4164  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 4165 
 4166  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 4167  (i & 4) ? v->codingset2 : v->codingset);
 4168  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 4169  continue;
 4170  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range-reduced frames double the sample range before output. */
 4171  if (v->rangeredfrm)
 4172  for (j = 0; j < 64; j++)
 4173  s->block[i][j] <<= 1;
 4174  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 4175  } else if (val) {
 4176  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 4177  first_block, s->dest[dst_idx] + off,
 4178  (i & 4) ? s->uvlinesize : s->linesize,
 4179  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
 4180  if (!v->ttmbf && ttmb < 8)
 4181  ttmb = -1;
 4182  first_block = 0;
 4183  }
 4184  }
 4185 }
4186 
4187 /** Decode one B-frame MB (in interlaced field B picture)
4188  */
/* Decode one B-frame MB in an interlaced FIELD B picture, per the doc comment
 * preceding this listing. NOTE(review): the signature (original line 4189)
 * and lines 4294/4319 are missing from this doxygen listing (presumably the
 * v->fourmvbp read and the transform-type read); confirm upstream. */
 4190 {
 4191  MpegEncContext *s = &v->s;
 4192  GetBitContext *gb = &s->gb;
 4193  int i, j;
 4194  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 4195  int cbp = 0; /* cbp decoding stuff */
 4196  int mqdiff, mquant; /* MB quantization */
 4197  int ttmb = v->ttfrm; /* MB Transform type */
 4198  int mb_has_coeffs = 0; /* last_flag */
 4199  int val; /* temp value */
 4200  int first_block = 1;
 4201  int dst_idx, off;
 4202  int fwd;
 4203  int dmv_x[2], dmv_y[2], pred_flag[2];
 4204  int bmvtype = BMV_TYPE_BACKWARD;
 4205  int idx_mbmode, interpmvp;
 4206 
 4207  mquant = v->pq; /* Lossy initialization */
 4208  s->mb_intra = 0;
 4209 
 4210  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
 4211  if (idx_mbmode <= 1) { // intra MB
 4212  s->mb_intra = v->is_intra[s->mb_x] = 1;
 4213  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
 4214  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 4215  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
 4216  GET_MQUANT();
 4217  s->current_picture.qscale_table[mb_pos] = mquant;
 4218  /* Set DC scale - y and c use the same (not sure if necessary here) */
 4219  s->y_dc_scale = s->y_dc_scale_table[mquant];
 4220  s->c_dc_scale = s->c_dc_scale_table[mquant];
 4221  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
 4222  mb_has_coeffs = idx_mbmode & 1;
 4223  if (mb_has_coeffs)
 4224  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
 4225  dst_idx = 0;
 4226  for (i = 0; i < 6; i++) {
 4227  s->dc_val[0][s->block_index[i]] = 0;
 4228  dst_idx += i >> 2;
 4229  val = ((cbp >> (5 - i)) & 1);
 4230  v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Availability of the above (A) and left (C) neighbours for DC/AC prediction. */
 4231  v->a_avail = v->c_avail = 0;
 4232  if (i == 2 || i == 3 || !s->first_slice_line)
 4233  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 4234  if (i == 1 || i == 3 || s->mb_x)
 4235  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 4236 
 4237  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 4238  (i & 4) ? v->codingset2 : v->codingset);
 4239  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 4240  continue;
 4241  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 4242  if (v->rangeredfrm)
 4243  for (j = 0; j < 64; j++)
 4244  s->block[i][j] <<= 1;
 4245  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 4246  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
 4247  // TODO: yet to perform loop filter
 4248  }
 4249  } else {
 4250  s->mb_intra = v->is_intra[s->mb_x] = 0;
 4251  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
 4252  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
 4253  if (v->fmb_is_raw)
 4254  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
 4255  else
 4256  fwd = v->forward_mb_plane[mb_pos];
 4257  if (idx_mbmode <= 5) { // 1-MV
 4258  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
 4259  pred_flag[0] = pred_flag[1] = 0;
 4260  if (fwd)
 4261  bmvtype = BMV_TYPE_FORWARD;
 4262  else {
 4263  bmvtype = decode012(gb);
 4264  switch (bmvtype) {
 4265  case 0:
 4266  bmvtype = BMV_TYPE_BACKWARD;
 4267  break;
 4268  case 1:
 4269  bmvtype = BMV_TYPE_DIRECT;
 4270  break;
 4271  case 2:
 4272  bmvtype = BMV_TYPE_INTERPOLATED;
/* interpmvp is only assigned on this path; its use below is guarded by
 * bmvtype == BMV_TYPE_INTERPOLATED, so it is never read uninitialized. */
 4273  interpmvp = get_bits1(gb);
 4274  }
 4275  }
 4276  v->bmvtype = bmvtype;
 4277  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
 4278  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
 4279  }
 4280  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
 4281  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
 4282  }
 4283  if (bmvtype == BMV_TYPE_DIRECT) {
 4284  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): the second line clears pred_flag[0] again rather than
 * pred_flag[1]; the parallel pattern suggests pred_flag[1] was intended --
 * verify against the spec / upstream before changing. */
 4285  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
 4286  }
 4287  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
 4288  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
 4289  mb_has_coeffs = !(idx_mbmode & 2);
 4290  } else { // 4-MV
 4291  if (fwd)
 4292  bmvtype = BMV_TYPE_FORWARD;
 4293  v->bmvtype = bmvtype;
/* NOTE(review): original line 4294 is missing here -- presumably the read of
 * v->fourmvbp (4-MV block pattern VLC) used below; confirm upstream. */
 4295  for (i = 0; i < 6; i++) {
 4296  if (i < 4) {
 4297  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
 4298  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
 4299  val = ((v->fourmvbp >> (3 - i)) & 1);
 4300  if (val) {
 4301  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
 4302  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
 4303  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
 4304  }
 4305  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
 4306  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
 4307  } else if (i == 4)
 4308  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
 4309  }
 4310  mb_has_coeffs = idx_mbmode & 1;
 4311  }
 4312  if (mb_has_coeffs)
 4313  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4314  if (cbp) {
 4315  GET_MQUANT();
 4316  }
 4317  s->current_picture.qscale_table[mb_pos] = mquant;
 4318  if (!v->ttmbf && cbp) {
/* NOTE(review): original line 4319 missing -- presumably the per-MB
 * transform-type VLC read into ttmb; confirm upstream. */
 4320  }
 4321  dst_idx = 0;
 4322  for (i = 0; i < 6; i++) {
 4323  s->dc_val[0][s->block_index[i]] = 0;
 4324  dst_idx += i >> 2;
 4325  val = ((cbp >> (5 - i)) & 1);
 4326  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
 4327  if (val) {
 4328  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 4329  first_block, s->dest[dst_idx] + off,
 4330  (i & 4) ? s->uvlinesize : s->linesize,
 4331  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
 4332  if (!v->ttmbf && ttmb < 8)
 4333  ttmb = -1;
 4334  first_block = 0;
 4335  }
 4336  }
 4337  }
 4338 }
4339 
4340 /** Decode one B-frame MB (in interlaced frame B picture)
4341  */
/* Decode one B-frame MB in an interlaced FRAME B picture, per the doc comment
 * preceding this listing. NOTE(review): the signature (original line 4342)
 * and lines 4494/4497/4611 are missing from this doxygen listing (presumably
 * the fourmvbp/twomvbp and transform-type VLC reads); confirm upstream.
 * Handles direct, skipped, 1-MV, 2-MV-field and interpolated modes. */
 4343 {
 4344  MpegEncContext *s = &v->s;
 4345  GetBitContext *gb = &s->gb;
 4346  int i, j;
 4347  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 4348  int cbp = 0; /* cbp decoding stuff */
 4349  int mqdiff, mquant; /* MB quantization */
 4350  int ttmb = v->ttfrm; /* MB Transform type */
 4351  int mvsw = 0; /* motion vector switch */
 4352  int mb_has_coeffs = 1; /* last_flag */
 4353  int dmv_x, dmv_y; /* Differential MV components */
 4354  int val; /* temp value */
 4355  int first_block = 1;
 4356  int dst_idx, off;
 4357  int skipped, direct, twomv = 0;
 4358  int block_cbp = 0, pat, block_tt = 0;
 4359  int idx_mbmode = 0, mvbp;
 4360  int stride_y, fieldtx;
 4361  int bmvtype = BMV_TYPE_BACKWARD;
 4362  int dir, dir2;
 4363 
 4364  mquant = v->pq; /* Lossy initialization */
 4365  s->mb_intra = 0;
 4366  if (v->skip_is_raw)
 4367  skipped = get_bits1(gb);
 4368  else
 4369  skipped = v->s.mbskip_table[mb_pos];
 4370 
 4371  if (!skipped) {
 4372  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
 4373  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD)
 4374  {
 4375  twomv = 1;
 4376  v->blk_mv_type[s->block_index[0]] = 1;
 4377  v->blk_mv_type[s->block_index[1]] = 1;
 4378  v->blk_mv_type[s->block_index[2]] = 1;
 4379  v->blk_mv_type[s->block_index[3]] = 1;
 4380  } else {
 4381  v->blk_mv_type[s->block_index[0]] = 0;
 4382  v->blk_mv_type[s->block_index[1]] = 0;
 4383  v->blk_mv_type[s->block_index[2]] = 0;
 4384  v->blk_mv_type[s->block_index[3]] = 0;
 4385  }
 4386  }
 4387 
 4388  if (v->dmb_is_raw)
 4389  direct = get_bits1(gb);
 4390  else
 4391  direct = v->direct_mb_plane[mb_pos];
 4392 
/* Direct mode: derive both field MVs by scaling the co-located MVs of the
 * next picture by bfraction. */
 4393  if (direct) {
 4394  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
 4395  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
 4396  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
 4397  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
 4398 
 4399  if (twomv) {
 4400  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
 4401  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
 4402  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
 4403  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
 4404 
/* Odd blocks copy their MV from the even block of the same field pair. */
 4405  for (i = 1; i < 4; i+=2) {
 4406  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
 4407  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
 4408  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
 4409  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
 4410  }
 4411  } else {
 4412  for (i = 1; i < 4; i++) {
 4413  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
 4414  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
 4415  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
 4416  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
 4417  }
 4418  }
 4419  }
 4420 
 4421  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
 4422  for (i = 0; i < 4; i++) {
 4423  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
 4424  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
 4425  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
 4426  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
 4427  }
 4428  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
 4429  s->mb_intra = v->is_intra[s->mb_x] = 1;
 4430  for (i = 0; i < 6; i++)
 4431  v->mb_type[0][s->block_index[i]] = 1;
/* fieldtx: whether the residual of the luma blocks is coded field-wise
 * (affects the output stride and offsets below). */
 4432  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
 4433  mb_has_coeffs = get_bits1(gb);
 4434  if (mb_has_coeffs)
 4435  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4436  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
 4437  GET_MQUANT();
 4438  s->current_picture.qscale_table[mb_pos] = mquant;
 4439  /* Set DC scale - y and c use the same (not sure if necessary here) */
 4440  s->y_dc_scale = s->y_dc_scale_table[mquant];
 4441  s->c_dc_scale = s->c_dc_scale_table[mquant];
 4442  dst_idx = 0;
 4443  for (i = 0; i < 6; i++) {
 4444  s->dc_val[0][s->block_index[i]] = 0;
 4445  dst_idx += i >> 2;
 4446  val = ((cbp >> (5 - i)) & 1);
 4447  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 4448  v->a_avail = v->c_avail = 0;
 4449  if (i == 2 || i == 3 || !s->first_slice_line)
 4450  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 4451  if (i == 1 || i == 3 || s->mb_x)
 4452  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 4453 
 4454  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 4455  (i & 4) ? v->codingset2 : v->codingset);
 4456  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
 4457  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 4458  if (i < 4) {
 4459  stride_y = s->linesize << fieldtx;
 4460  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
 4461  } else {
 4462  stride_y = s->uvlinesize;
 4463  off = 0;
 4464  }
 4465  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
 4466  }
 4467  } else {
 4468  s->mb_intra = v->is_intra[s->mb_x] = 0;
 4469  if (!direct) {
 4470  if (skipped || !s->mb_intra) {
 4471  bmvtype = decode012(gb);
 4472  switch (bmvtype) {
 4473  case 0:
 4474  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
 4475  break;
 4476  case 1:
 4477  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
 4478  break;
 4479  case 2:
 4480  bmvtype = BMV_TYPE_INTERPOLATED;
 4481  }
 4482  }
 4483 
 4484  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
 4485  mvsw = get_bits1(gb);
 4486  }
 4487 
 4488  if (!skipped) { // inter MB
 4489  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
 4490  if (mb_has_coeffs)
 4491  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 4492  if (!direct) {
/* NOTE(review): `&` / `|` below behave like `&&` / `||` here because `==`
 * binds tighter and twomv is 0/1, but they look like an extraction artifact
 * of `&&` / `||` -- verify against the upstream file. The bodies of both
 * branches (original lines 4494 and 4497, presumably the fourmvbp/twomvbp
 * VLC reads) are also missing from this listing. */
 4493  if (bmvtype == BMV_TYPE_INTERPOLATED & twomv) {
 4495  }
 4496  else if (bmvtype == BMV_TYPE_INTERPOLATED | twomv) {
 4498  }
 4499  }
 4500 
 4501  for (i = 0; i < 6; i++)
 4502  v->mb_type[0][s->block_index[i]] = 0;
 4503  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
 4504  /* for all motion vector read MVDATA and motion compensate each block */
 4505  dst_idx = 0;
 4506  if (direct) {
 4507  if (twomv) {
 4508  for (i = 0; i < 4; i++) {
 4509  vc1_mc_4mv_luma(v, i, 0, 0);
 4510  vc1_mc_4mv_luma(v, i, 1, 1);
 4511  }
 4512  vc1_mc_4mv_chroma4(v);
 4513  } else {
 4514  vc1_mc_1mv(v, 0);
 4515  vc1_interp_mc(v);
 4516  }
 4517  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
 4518  mvbp = v->fourmvbp;
 4519  for (i = 0; i < 4; i++) {
/* dir selects forward (0) / backward (1) per field-MV pair. */
 4520  dir = i==1 || i==3;
 4521  dmv_x = dmv_y = 0;
 4522  val = ((mvbp >> (3 - i)) & 1);
 4523  if (val) {
 4524  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4525  }
 4526  j = i > 1 ? 2 : 0;
 4527  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
 4528  vc1_mc_4mv_luma(v, j, dir, dir);
 4529  vc1_mc_4mv_luma(v, j+1, dir, dir);
 4530  }
 4531 
 4532  vc1_mc_4mv_chroma4(v);
 4533  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
 4534  mvbp = v->twomvbp;
 4535  dmv_x = dmv_y = 0;
 4536  if (mvbp & 2) {
 4537  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4538  }
 4539 
 4540  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
 4541  vc1_mc_1mv(v, 0);
 4542 
 4543  dmv_x = dmv_y = 0;
 4544  if (mvbp & 1) {
 4545  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4546  }
 4547 
 4548  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
 4549  vc1_interp_mc(v);
 4550  } else if (twomv) {
 4551  dir = bmvtype == BMV_TYPE_BACKWARD;
 4552  dir2 = dir;
 4553  if (mvsw)
 4554  dir2 = !dir;
 4555  mvbp = v->twomvbp;
 4556  dmv_x = dmv_y = 0;
 4557  if (mvbp & 2) {
 4558  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4559  }
 4560  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
 4561 
 4562  dmv_x = dmv_y = 0;
 4563  if (mvbp & 1) {
 4564  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4565  }
 4566  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
 4567 
 4568  if (mvsw) {
/* MV switch: mirror each field's MVs into the opposite field's slots. */
 4569  for (i = 0; i<2; i++) {
 4570  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
 4571  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
 4572  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
 4573  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
 4574  }
 4575  } else {
 4576  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
 4577  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
 4578  }
 4579 
 4580  vc1_mc_4mv_luma(v, 0, dir, 0);
 4581  vc1_mc_4mv_luma(v, 1, dir, 0);
 4582  vc1_mc_4mv_luma(v, 2, dir2, 0);
 4583  vc1_mc_4mv_luma(v, 3, dir2, 0);
 4584  vc1_mc_4mv_chroma4(v);
 4585  } else {
 4586  dir = bmvtype == BMV_TYPE_BACKWARD;
 4587 
 4588  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
 4589  dmv_x = dmv_y = 0;
 4590  if (mvbp) {
 4591  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 4592  }
 4593 
 4594  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
 4595  v->blk_mv_type[s->block_index[0]] = 1;
 4596  v->blk_mv_type[s->block_index[1]] = 1;
 4597  v->blk_mv_type[s->block_index[2]] = 1;
 4598  v->blk_mv_type[s->block_index[3]] = 1;
 4599  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
 4600  for (i = 0; i<2; i++) {
 4601  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
 4602  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
 4603  }
 4604  vc1_mc_1mv(v, dir);
 4605  }
 4606 
 4607  if (cbp)
 4608  GET_MQUANT(); // p. 227
 4609  s->current_picture.qscale_table[mb_pos] = mquant;
 4610  if (!v->ttmbf && cbp)
/* NOTE(review): original line 4611 missing -- presumably the per-MB
 * transform-type VLC read into ttmb; confirm upstream. */
 4612  for (i = 0; i < 6; i++) {
 4613  s->dc_val[0][s->block_index[i]] = 0;
 4614  dst_idx += i >> 2;
 4615  val = ((cbp >> (5 - i)) & 1);
 4616  if (!fieldtx)
 4617  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 4618  else
 4619  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
 4620  if (val) {
 4621  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 4622  first_block, s->dest[dst_idx] + off,
 4623  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
 4624  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
 4625  block_cbp |= pat << (i << 2);
 4626  if (!v->ttmbf && ttmb < 8)
 4627  ttmb = -1;
 4628  first_block = 0;
 4629  }
 4630  }
 4631 
 4632  } else { // skipped
 4633  dir = 0;
 4634  for (i = 0; i < 6; i++) {
 4635  v->mb_type[0][s->block_index[i]] = 0;
 4636  s->dc_val[0][s->block_index[i]] = 0;
 4637  }
 4638  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
 4639  s->current_picture.qscale_table[mb_pos] = 0;
 4640  v->blk_mv_type[s->block_index[0]] = 0;
 4641  v->blk_mv_type[s->block_index[1]] = 0;
 4642  v->blk_mv_type[s->block_index[2]] = 0;
 4643  v->blk_mv_type[s->block_index[3]] = 0;
 4644 
 4645  if (!direct) {
 4646  if (bmvtype == BMV_TYPE_INTERPOLATED) {
 4647  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
 4648  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
 4649  } else {
 4650  dir = bmvtype == BMV_TYPE_BACKWARD;
 4651  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
 4652  if (mvsw) {
/* NOTE(review): the inner `if (mvsw)` is redundant -- this branch is only
 * reached when mvsw is set, so dir2 is always !dir here. The local dir2
 * also shadows the outer dir2 declared at the top of the function. */
 4653  int dir2 = dir;
 4654  if (mvsw)
 4655  dir2 = !dir;
 4656  for (i = 0; i<2; i++) {
 4657  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
 4658  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
 4659  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
 4660  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
 4661  }
 4662  } else {
 4663  v->blk_mv_type[s->block_index[0]] = 1;
 4664  v->blk_mv_type[s->block_index[1]] = 1;
 4665  v->blk_mv_type[s->block_index[2]] = 1;
 4666  v->blk_mv_type[s->block_index[3]] = 1;
 4667  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
 4668  for (i = 0; i<2; i++) {
 4669  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
 4670  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
 4671  }
 4672  }
 4673  }
 4674  }
 4675 
 4676  vc1_mc_1mv(v, dir);
 4677  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
 4678  vc1_interp_mc(v);
 4679  }
 4680  }
 4681  }
 4682  if (s->mb_x == s->mb_width - 1)
 4683  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
 4684  v->cbp[s->mb_x] = block_cbp;
 4685  v->ttblk[s->mb_x] = block_tt;
 4686  return 0;
 4687 }
4688 
/** Decode blocks of I-frame
 * Walks every macroblock of a simple/main-profile I-frame, decodes the six
 * 8x8 blocks per MB (4 luma + 2 chroma), applies the inverse 8x8 transform,
 * optional range reduction and overlap smoothing, and emits finished rows
 * through ff_mpeg_draw_horiz_band().
 * NOTE(review): this listing is an HTML export; the function signature line
 * and a few statements (codingset table assignments, ff_update_block_index(),
 * the CBP VLC read) were lost in extraction and are marked below.
 */
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    // do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            uint8_t *dst[6];
            /* NOTE(review): ff_update_block_index() call lost in extraction */
            /* destination pointers: four 8x8 luma sub-blocks, then U and V */
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            /* NOTE(review): the CBP VLC read (cbp = ...) lost in extraction */
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma coded-block flags are differentially coded
                       against the predicted neighbour value */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);

                /* skip chroma reconstruction in grayscale-only mode */
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    /* overlap path stores signed residual; range reduction
                       doubles the samples when signalled */
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                } else {
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                }
            }

            /* overlap smoothing across 8x8 block edges: horizontal first,
               then vertical; chroma only when not decoding grayscale */
            if (v->pq >= 9 && v->overlap) {
                if (s->mb_x) {
                    v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if (!s->first_slice_line) {
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);

            /* bail out (and mark the slice damaged) if we have read past
               the declared picture size */
            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);

        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);

    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
}
4828 
/** Decode blocks of I-frame for advanced profile
 * Same walk as vc1_decode_i_blocks() but with advanced-profile features:
 * per-MB quantizer (GET_MQUANT), raw/bitplane-coded ACPRED and FIELDTX,
 * conditional overlap flags, and delayed block output via v->block[].
 * NOTE(review): HTML-export listing; the signature line and several
 * statements (codingset assignments, ff_update_block_index(), the CBP VLC
 * read, the smoothing/output calls after the block loop, and the bottom-row
 * output) were lost in extraction and are marked below.
 */
{
    int k;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;      // per-MB quantizer, updated by GET_MQUANT()
    int mqdiff;              // scratch used inside GET_MQUANT()
    GetBitContext *gb = &s->gb;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    }

    // do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    s->mb_y = s->start_mb_y;
    if (s->start_mb_y) {
        /* slice start: clear the coded-block predictors of the row above
           so prediction does not leak across the slice boundary */
        s->mb_x = 0;
        init_block_index(v);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    }
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            int16_t (*block)[64] = v->block[v->cur_blk_idx];
            /* NOTE(review): ff_update_block_index() call lost in extraction */
            s->dsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;

            // do actual MB decoding and displaying
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            /* NOTE(review): the CBP VLC read (cbp = ...) lost in extraction */
            if ( v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);

            GET_MQUANT();

            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma coded-block flags are differentially coded */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                /* neighbour availability for DC/AC prediction: top row of
                   sub-blocks needs the line above, left column needs mb_x */
                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);

                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);

                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);
            }

            /* NOTE(review): smoothing/output statements (overlap filter,
               put_signed_blocks_clamped, delayed loop filter) lost in
               extraction here */

            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
        s->first_slice_line = 0;
    }

    /* raw bottom MB row: flush the delayed block output for the last row */
    s->mb_x = 0;
    init_block_index(v);

    for (;s->mb_x < s->mb_width; s->mb_x++) {
        /* NOTE(review): block-index update and block output statements
           lost in extraction here */
        if (v->s.loop_filter)
        /* NOTE(review): the loop-filter call controlled by this `if`
           was lost in extraction */
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
4964 
/* Decode all macroblocks of a P-frame: dispatches per-MB decoding by frame
 * coding mode (field/frame interlaced or progressive), applies the in-loop
 * filter on progressive content, and maintains the per-row CBP/ttblk/
 * is_intra/luma_mv history lines used by the deblocking filter.
 * NOTE(review): HTML-export listing; the signature line, the codingset
 * assignments and the per-MB dispatch/filter calls were lost in extraction
 * and are marked below. Both switches reading c_ac_table_index match the
 * upstream source (codingset vs codingset2).
 */
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    }

    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            /* NOTE(review): ff_update_block_index() call lost in extraction */

            if (v->fcm == ILACE_FIELD)
                /* NOTE(review): interlaced-field P-MB call lost in extraction */
            else if (v->fcm == ILACE_FRAME)
                /* NOTE(review): interlaced-frame P-MB call lost in extraction */
            else vc1_decode_p_mb(v);
            if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
                /* NOTE(review): loop-filter call lost in extraction */
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
        }
        /* shift the per-row context lines down: current row becomes the
           "previous row" inputs for the next iteration's deblocking */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (apply_loop_filter && v->fcm == PROGRESSIVE) {
        /* filter the final row, which the in-loop pass above skipped */
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            /* NOTE(review): block-index update and loop-filter call lost
               in extraction here */
        }
    }
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
5039 
/* Decode all macroblocks of a B-frame, dispatching by frame coding mode
 * (field/frame interlaced or progressive), with the optional in-loop filter
 * applied per MB.
 * NOTE(review): HTML-export listing; the signature line, the codingset
 * assignments and the interlaced dispatch calls were lost in extraction.
 */
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset assignment lost in extraction */
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 1:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    case 2:
        /* NOTE(review): v->codingset2 assignment lost in extraction */
        break;
    }

    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            /* NOTE(review): ff_update_block_index() call lost in extraction */

            if (v->fcm == ILACE_FIELD)
                /* NOTE(review): interlaced-field B-MB call lost in extraction */
            else if (v->fcm == ILACE_FRAME)
                /* NOTE(review): interlaced-frame B-MB call lost in extraction */
            else
                vc1_decode_b_mb(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
5102 
/* Handle a skipped P-frame: copy the previous picture row by row into the
 * current destination and emit each 16-pixel band as it is completed.
 * NOTE(review): HTML-export listing; the signature line, the per-row
 * block-index update and the trailing statement after the loop were lost
 * in extraction.
 */
{
    MpegEncContext *s = &v->s;

    ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        /* NOTE(review): ff_update_block_index() call lost in extraction */
        if (s->last_picture.f.data[0]) {
            /* one MB row of luma (16 lines) and chroma (8 lines each) */
            memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
            memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
            memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        }
        ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    /* NOTE(review): one trailing statement lost in extraction here */
}
5123 
/* Top-level block-decoding dispatch for one picture: hands X8-coded frames
 * to the IntraX8 decoder, otherwise resets the delayed-block ring indices
 * and routes to the I/P/B (and BI) block decoders by picture type and
 * profile.
 * NOTE(review): HTML-export listing; the signature line and the actual
 * dispatch calls inside the switch were lost in extraction.
 */
{

    v->s.esc3_level_length = 0;
    if (v->x8_type) {
        /* frame is coded with the WMV2-style IntraX8 scheme */
        ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
    } else {
        /* reset the 4-slot delayed-block ring used by the advanced-profile
           smoothing/output pipeline */
        v->cur_blk_idx = 0;
        v->left_blk_idx = -1;
        v->topleft_blk_idx = 1;
        v->top_blk_idx = 2;
        switch (v->s.pict_type) {
        case AV_PICTURE_TYPE_I:
            if (v->profile == PROFILE_ADVANCED)
                /* NOTE(review): advanced-profile I call lost in extraction */
            else
                /* NOTE(review): simple/main I call lost in extraction */
            break;
        case AV_PICTURE_TYPE_P:
            if (v->p_frame_skipped)
                /* NOTE(review): skipped-frame call lost in extraction */
            else
                /* NOTE(review): P-blocks call lost in extraction */
            break;
        case AV_PICTURE_TYPE_B:
            if (v->bi_type) {
                /* BI frames decode with the I machinery */
                if (v->profile == PROFILE_ADVANCED)
                    /* NOTE(review): advanced-profile I call lost in extraction */
                else
                    /* NOTE(review): simple/main I call lost in extraction */
            } else
                /* NOTE(review): B-blocks call lost in extraction */
            break;
        }
    }
}
5160 
5161 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5162 
/** Parsed sprite (WMV3IMAGE/VC1IMAGE) picture header data. */
typedef struct {
    /**
     * Transform coefficients for both sprites in 16.16 fixed point format,
     * in the order they appear in the bitstream:
     * x scale
     * rotation 1 (unused)
     * x offset
     * rotation 2 (unused)
     * y scale
     * y offset
     * alpha
     */
    int coefs[2][7];

    int effect_type, effect_flag;       ///< composition effect id and trailing flag bit
    int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
    int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
} SpriteData;
5181 
5182 static inline int get_fp_val(GetBitContext* gb)
5183 {
5184  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5185 }
5186 
5187 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5188 {
5189  c[1] = c[3] = 0;
5190 
5191  switch (get_bits(gb, 2)) {
5192  case 0:
5193  c[0] = 1 << 16;
5194  c[2] = get_fp_val(gb);
5195  c[4] = 1 << 16;
5196  break;
5197  case 1:
5198  c[0] = c[4] = get_fp_val(gb);
5199  c[2] = get_fp_val(gb);
5200  break;
5201  case 2:
5202  c[0] = get_fp_val(gb);
5203  c[2] = get_fp_val(gb);
5204  c[4] = get_fp_val(gb);
5205  break;
5206  case 3:
5207  c[0] = get_fp_val(gb);
5208  c[1] = get_fp_val(gb);
5209  c[2] = get_fp_val(gb);
5210  c[3] = get_fp_val(gb);
5211  c[4] = get_fp_val(gb);
5212  break;
5213  }
5214  c[5] = get_fp_val(gb);
5215  if (get_bits1(gb))
5216  c[6] = get_fp_val(gb);
5217  else
5218  c[6] = 1 << 16;
5219 }
5220 
/**
 * Parse the sprite header for a WMV3IMAGE/VC1IMAGE picture: one or two
 * transform coefficient sets, an optional composition effect with up to
 * two parameter lists, and a trailing effect flag. Emits debug logging of
 * all parsed values and warns on buffer over-/under-consumption.
 * Read order is bitstream-critical; do not reorder calls.
 */
static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
{
    AVCodecContext *avctx = v->s.avctx;
    int sprite, i;

    /* one transform per sprite (two when two_sprites is set) */
    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
        for (i = 0; i < 7; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->coefs[sprite][i] / (1<<16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");
    }

    skip_bits(gb, 2);
    /* intentional assignment inside the condition: 0 means "no effect" */
    if (sd->effect_type = get_bits_long(gb, 30)) {
        /* parameter counts of 7/14 encode one/two transform sets */
        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
        case 7:
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            break;
        case 14:
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
            break;
        default:
            for (i = 0; i < sd->effect_pcount1; i++)
                sd->effect_params1[i] = get_fp_val(gb);
        }
        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
            // effect 13 is simple alpha blending and matches the opacity above
            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
            for (i = 0; i < sd->effect_pcount1; i++)
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params1[i] / (1 << 16),
                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");
        }

        sd->effect_pcount2 = get_bits(gb, 16);
        /* bound check: effect_params2[] holds at most 10 entries */
        if (sd->effect_pcount2 > 10) {
            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
            return;
        } else if (sd->effect_pcount2) {
            i = -1;
            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
            while (++i < sd->effect_pcount2) {
                sd->effect_params2[i] = get_fp_val(gb);
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params2[i] / (1 << 16),
                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
            }
            av_log(avctx, AV_LOG_DEBUG, "\n");
        }
    }
    /* intentional assignment inside the condition */
    if (sd->effect_flag = get_bits1(gb))
        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");

    /* WMV3IMAGE is allowed to read up to 64 bits past the end */
    if (get_bits_count(gb) >= gb->size_in_bits +
        (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
    if (get_bits_count(gb) < gb->size_in_bits - 8)
        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
}
5287 
/**
 * Render the sprite output frame from one or two source sprites.
 * For each output row, each sprite contributes either a direct pointer into
 * its plane (pure integer translation) or a horizontally rescaled row built
 * by vc1dsp.sprite_h() and kept in a 2-entry per-sprite row cache; rows are
 * then blended vertically (and across sprites, weighted by alpha) by the
 * sprite_v_* DSP functions. Sprite 0 comes from current_picture, sprite 1
 * from last_picture.
 */
static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
{
    int i, plane, row, sprite;
    /* per-sprite cache of which source lines sr_rows currently hold */
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t* src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    int ysub[2];
    MpegEncContext *s = &v->s;

    for (i = 0; i < 2; i++) {
        /* note: `-` binds tighter than `<<`, so the bound is
           (sprite_width - 1) << 16, i.e. the last valid 16.16 column */
        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
        xadv[i] = sd->coefs[i][0];
        if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);

        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
    }
    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);

    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width>>!!plane;

        for (row = 0; row < v->output_height>>!!plane; row++) {
            uint8_t *dst = v->sprite_output_frame.data[plane] +
                           v->sprite_output_frame.linesize[plane] * row;

            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f.data[plane];
                int iline = s->current_picture.f.linesize[plane];
                int ycoord = yoff[sprite] + yadv[sprite] * row;
                int yline = ycoord >> 16;          /* integer source line */
                int next_line;
                ysub[sprite] = ycoord & 0xFFFF;    /* vertical sub-pel phase */
                if (sprite) {
                    /* second sprite comes from the previous picture */
                    iplane = s->last_picture.f.data[plane];
                    iline = s->last_picture.f.linesize[plane];
                }
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    /* integer translation: point straight into the plane */
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                    if (ysub[sprite])
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                } else {
                    /* horizontal rescale needed: (re)fill the row cache */
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            /* reuse the other cached row by swapping slots */
                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                        } else {
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
                            sr_cache[sprite][0] = yline;
                        }
                    }
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    }
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
                }
            }

            /* vertical interpolation / cross-sprite blending */
            if (!v->two_sprites) {
                if (ysub[0]) {
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                } else {
                    memcpy(dst, src_h[0][0], width);
                }
            } else {
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1<<16)-1-alpha, width);
                } else {
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
                }
            }
        }

        if (!plane) {
            /* chroma planes are subsampled 2x in both directions */
            for (i = 0; i < 2; i++) {
                xoff[i] >>= 1;
                yoff[i] >>= 1;
            }
        }

    }
}
5383 
5384 
/**
 * Decode one sprite picture: parse the sprite header, validate that the
 * required source sprite(s) exist, acquire an output buffer and render.
 * Returns 0 on success, a negative error code otherwise.
 */
static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
{
    int ret;
    MpegEncContext *s = &v->s;
    AVCodecContext *avctx = s->avctx;
    SpriteData sd;

    vc1_parse_sprites(v, gb, &sd);

    if (!s->current_picture.f.data[0]) {
        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
        return -1;
    }

    /* two-sprite blending needs a valid previous picture; degrade
       gracefully to single-sprite rendering if it is missing */
    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
        v->two_sprites = 0;
    }

    /* NOTE(review): one statement was lost in extraction here (presumably
       releasing the previous sprite_output_frame — TODO confirm) */
    if ((ret = ff_get_buffer(avctx, &v->sprite_output_frame, 0)) < 0)
        return ret;

    vc1_draw_sprites(v, &sd);

    return 0;
}
5412 
5413 static void vc1_sprite_flush(AVCodecContext *avctx)
5414 {
5415  VC1Context *v = avctx->priv_data;
5416  MpegEncContext *s = &v->s;
5417  AVFrame *f = &s->current_picture.f;
5418  int plane, i;
5419 
5420  /* Windows Media Image codecs have a convergence interval of two keyframes.
5421  Since we can't enforce it, clear to black the missing sprite. This is
5422  wrong but it looks better than doing nothing. */
5423 
5424  if (f->data[0])
5425  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5426  for (i = 0; i < v->sprite_height>>!!plane; i++)
5427  memset(f->data[plane] + i * f->linesize[plane],
5428  plane ? 128 : 0, f->linesize[plane]);
5429 }
5430 
5431 #endif
5432 
/* Allocate all per-picture side tables used by the VC-1 decoder: MB
 * bitplanes, the delayed-block ring, CBP/ttblk/intra/luma-MV history lines,
 * block-type and block-MV-type maps, field-MV reference maps, and the
 * sprite row buffers. Returns 0 on success, -1 on allocation failure.
 * NOTE(review): HTML-export listing; the signature line, several bitplane
 * allocations (they are still checked at the bottom) and the sprite-codec
 * condition around the sr_rows loop were lost in extraction.
 */
{
    MpegEncContext *s = &v->s;
    int i;

    /* Allocate mb bitplanes */
    /* NOTE(review): allocations of the other MB bitplanes
       (mv_type_mb_plane, direct_mb_plane, over_flags_plane, ...) were
       lost in extraction — they are NULL-checked below */
    v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);

    /* delayed-block ring: one MB row plus two spare slots */
    v->n_allocated_blks = s->mb_width + 2;
    v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
    /* each of these holds two MB rows: "previous" (base) and "current" */
    v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
    v->cbp = v->cbp_base + s->mb_stride;
    v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
    v->ttblk = v->ttblk_base + s->mb_stride;
    v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
    v->is_intra = v->is_intra_base + s->mb_stride;
    v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
    v->luma_mv = v->luma_mv_base + s->mb_stride;

    /* allocate block type info in that way so it could be used with s->block_index[] */
    v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
    v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
    v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);

    /* allocate memory to store block level MV info */
    v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
    v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
    v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
    v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
    v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);

    /* Init coded blocks info */
    if (v->profile == PROFILE_ADVANCED) {
//        if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
//        if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
    }

    ff_intrax8_common_init(&v->x8,s);

    /* NOTE(review): the condition opening this scope (a check that this is
       one of the sprite/image codecs) was lost in extraction; the brace
       after the loop closes it */
    for (i = 0; i < 4; i++)
        if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
    }

    if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
        !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
        !v->mb_type_base)
        return -1;

    return 0;
}
5498 
/* Build transposed zigzag scan tables for the advanced-profile/fasttx path:
 * transpose(x) swaps the row and column of a 0..63 scan position, matching
 * the transposed inverse-transform used there. Also selects the matching
 * left/top block shift amounts.
 * NOTE(review): HTML-export listing; the signature line and one further
 * table assignment inside the loop were lost in extraction.
 */
{
    int i;
    for (i = 0; i < 64; i++) {
#define transpose(x) ((x >> 3) | ((x & 7) << 3))
        v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
        v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
        v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
        v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
        /* NOTE(review): one additional scan-table assignment lost in
           extraction here */
    }
    v->left_blk_sh = 0;
    v->top_blk_sh = 3;
}
5513 
/** Initialize a VC1/WMV3 decoder
 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 * @todo TODO: Decypher remaining bits in extra_data
 * Parses the sequence header (and, for VC-1, the entry point) from
 * extradata, configures pixel format/hwaccel, selects scan tables and
 * handles the WMV3IMAGE/VC1IMAGE sprite dimension setup.
 * NOTE(review): HTML-export listing; the signature line and a few calls
 * (alloc-tables check, chroma DSP init, buf2 allocation, scantable init)
 * were lost in extraction and are marked below.
 */
{
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    GetBitContext gb;

    /* save the container output size for WMImage */
    v->output_width = avctx->width;
    v->output_height = avctx->height;

    if (!avctx->extradata_size || !avctx->extradata)
        return -1;
    if (!(avctx->flags & CODEC_FLAG_GRAY))
        avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    else
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
    v->s.avctx = avctx;
    avctx->flags |= CODEC_FLAG_EMU_EDGE;
    v->s.flags |= CODEC_FLAG_EMU_EDGE;

    if (ff_vc1_init_common(v) < 0)
        return -1;
    // ensure static VLC tables are initialized
    if (ff_msmpeg4_decode_init(avctx) < 0)
        return -1;
    /* NOTE(review): the condition whose failure triggers this return
       (the alloc-tables init check) was lost in extraction */
        return -1;
    // Hack to ensure the above functions will be called
    // again once we know all necessary settings.
    // That this is necessary might indicate a bug.
    ff_vc1_decode_end(avctx);

    /* NOTE(review): one DSP init call lost in extraction here */
    ff_vc1dsp_init(&v->vc1dsp);

    if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
        int count = 0;

        // looks like WMV3 has a sequence header stored in the extradata
        // advanced sequence header may be before the first frame
        // the last byte of the extradata is a version number, 1 for the
        // samples we can decode

        init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);

        if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
            return -1;

        count = avctx->extradata_size*8 - get_bits_count(&gb);
        if (count > 0) {
            av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
                   count, get_bits(&gb, count));
        } else if (count < 0) {
            av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
        }
    } else { // VC1/WVC1/WVP2
        const uint8_t *start = avctx->extradata;
        uint8_t *end = avctx->extradata + avctx->extradata_size;
        const uint8_t *next;
        int size, buf2_size;
        uint8_t *buf2 = NULL;
        int seq_initialized = 0, ep_initialized = 0;

        if (avctx->extradata_size < 16) {
            av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
            return -1;
        }

        /* NOTE(review): the allocation of buf2 was lost in extraction */
        start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
        next = start;
        /* scan extradata for sequence header and entry-point NALs,
           unescaping each payload before parsing */
        for (; next < end; start = next) {
            next = find_next_marker(start + 4, end);
            size = next - start - 4;
            if (size <= 0)
                continue;
            buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
            init_get_bits(&gb, buf2, buf2_size * 8);
            switch (AV_RB32(start)) {
            case VC1_CODE_SEQHDR:
                if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
                    av_free(buf2);
                    return -1;
                }
                seq_initialized = 1;
                break;
            case VC1_CODE_ENTRYPOINT:
                if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
                    av_free(buf2);
                    return -1;
                }
                ep_initialized = 1;
                break;
            }
        }
        av_free(buf2);
        if (!seq_initialized || !ep_initialized) {
            av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
            return -1;
        }
        v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
    }

    avctx->profile = v->profile;
    if (v->profile == PROFILE_ADVANCED)
        avctx->level = v->level;

    avctx->has_b_frames = !!avctx->max_b_frames;

    s->mb_width = (avctx->coded_width + 15) >> 4;
    s->mb_height = (avctx->coded_height + 15) >> 4;

    if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
        /* NOTE(review): call to the transposed-scantable init lost in
           extraction */
    } else {
        memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
        v->left_blk_sh = 3;
        v->top_blk_sh = 0;
    }

    if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        /* decode at the coded (sprite) size, present at the container size */
        v->sprite_width = avctx->coded_width;
        v->sprite_height = avctx->coded_height;

        avctx->coded_width = avctx->width = v->output_width;
        avctx->coded_height = avctx->height = v->output_height;

        // prevent 16.16 overflows
        if (v->sprite_width > 1 << 14 ||
            v->sprite_height > 1 << 14 ||
            v->output_width > 1 << 14 ||
            v->output_height > 1 << 14) return -1;

        if ((v->sprite_width&1) || (v->sprite_height&1)) {
            avpriv_request_sample(avctx, "odd sprites support");
            return AVERROR_PATCHWELCOME;
        }
    }
    return 0;
}
5659 
/** Close a VC1/WMV3 decoder
 * @warning Initial try at using MpegEncContext stuff
 * Frees every table allocated by the init/alloc-tables path; all frees use
 * av_freep() so repeated calls are safe.
 * NOTE(review): HTML-export listing; the signature line and several free
 * statements (the remaining bitplanes, blk_mv_type_base, intrax8 teardown)
 * were lost in extraction and are marked below.
 */
{
    VC1Context *v = avctx->priv_data;
    int i;

    /* NOTE(review): one statement lost in extraction here */

    for (i = 0; i < 4; i++)
        av_freep(&v->sr_rows[i >> 1][i & 1]);
    av_freep(&v->hrd_rate);
    av_freep(&v->hrd_buffer);
    ff_MPV_common_end(&v->s);
    /* NOTE(review): frees of the other MB bitplanes lost in extraction */
    av_freep(&v->fieldtx_plane);
    av_freep(&v->acpred_plane);
    /* NOTE(review): one free statement lost in extraction here */
    av_freep(&v->mb_type_base);
    /* NOTE(review): one free statement lost in extraction here */
    av_freep(&v->mv_f_base);
    av_freep(&v->mv_f_last_base);
    av_freep(&v->mv_f_next_base);
    av_freep(&v->block);
    av_freep(&v->cbp_base);
    av_freep(&v->ttblk_base);
    av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
    av_freep(&v->luma_mv_base);
    /* NOTE(review): one teardown call lost in extraction here */
    return 0;
}
5694 
5695 
5696 /** Decode a VC1/WMV3 frame
5697  * @todo TODO: Handle VC-1 IDUs (Transport level?)
5698  */
5699 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5700  int *got_frame, AVPacket *avpkt)
5701 {
/* Decode one VC-1/WMV3 access unit from avpkt into *pict (data).
 * Handles: end-of-sequence flush, advanced-profile start-code parsing and
 * unescaping, field/slice splitting, sprite (WMV Image) paths, hardware
 * acceleration dispatch, and the software slice-decoding loop.
 * NOTE(review): this listing was extracted from rendered documentation and
 * several source lines are missing (the embedded doc line numbers jump);
 * reviewer notes below mark each gap. Verify against the original vc1dec.c
 * before trusting this text. */
5702  const uint8_t *buf = avpkt->data;
5703  int buf_size = avpkt->size, n_slices = 0, i, ret;
5704  VC1Context *v = avctx->priv_data;
5705  MpegEncContext *s = &v->s;
5706  AVFrame *pict = data;
5707  uint8_t *buf2 = NULL;
5708  const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5709  int mb_height, n_slices1=-1;
/* Per-slice bookkeeping: unescaped slice payload, its bit reader, and the
 * macroblock row the slice starts at. Grown with av_realloc below. */
5710  struct {
5711  uint8_t *buf;
5712  GetBitContext gb;
5713  int mby_start;
5714  } *slices = NULL, *tmp;
5715 
5716  v->second_field = 0;
5717 
5718  if(s->flags & CODEC_FLAG_LOW_DELAY)
5719  s->low_delay = 1;
5720 
5721  /* no supplementary picture */
5722  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5723  /* special case for last picture */
5724  if (s->low_delay == 0 && s->next_picture_ptr) {
5725  if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5726  return ret;
5727  s->next_picture_ptr = NULL;
5728 
5729  *got_frame = 1;
5730  }
5731 
5732  return buf_size;
5733  }
5734 
/* NOTE(review): doc line 5735 is missing -- the enclosing condition (and
 * opening brace) guarding this VDPAU pix_fmt selection was dropped. */
5736  if (v->profile < PROFILE_ADVANCED)
5737  avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5738  else
5739  avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5740  }
5741 
5742  //for advanced profile we may need to parse and unescape data
5743  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5744  int buf_size2 = 0;
5745  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5746 
5747  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5748  const uint8_t *start, *end, *next;
5749  int size;
5750 
/* Walk the start-code-delimited units inside the packet. */
5751  next = buf;
5752  for (start = buf, end = buf + buf_size; next < end; start = next) {
5753  next = find_next_marker(start + 4, end);
5754  size = next - start - 4;
5755  if (size <= 0) continue;
5756  switch (AV_RB32(start)) {
5757  case VC1_CODE_FRAME:
5758  if (avctx->hwaccel ||
/* NOTE(review): doc line 5759 (the rest of this condition) is missing. */
5760  buf_start = start;
5761  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5762  break;
5763  case VC1_CODE_FIELD: {
5764  int buf_size3;
5765  if (avctx->hwaccel ||
/* NOTE(review): doc line 5766 (the rest of this condition) is missing. */
5767  buf_start_second_field = start;
5768  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5769  if (!tmp)
5770  goto err;
5771  slices = tmp;
5772  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5773  if (!slices[n_slices].buf)
5774  goto err;
5775  buf_size3 = vc1_unescape_buffer(start + 4, size,
5776  slices[n_slices].buf);
5777  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5778  buf_size3 << 3);
5779  /* assuming that the field marker is at the exact middle,
5780  hope it's correct */
5781  slices[n_slices].mby_start = s->mb_height >> 1;
5782  n_slices1 = n_slices - 1; // index of the last slice of the first field
5783  n_slices++;
5784  break;
5785  }
5786  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5787  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5788  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5789  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5790  break;
5791  case VC1_CODE_SLICE: {
5792  int buf_size3;
5793  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5794  if (!tmp)
5795  goto err;
5796  slices = tmp;
5797  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5798  if (!slices[n_slices].buf)
5799  goto err;
5800  buf_size3 = vc1_unescape_buffer(start + 4, size,
5801  slices[n_slices].buf);
5802  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5803  buf_size3 << 3);
/* Slice headers carry the starting MB row in 9 bits (SMPTE 421M). */
5804  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5805  n_slices++;
5806  break;
5807  }
5808  }
5809  }
5810  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5811  const uint8_t *divider;
5812  int buf_size3;
5813 
5814  divider = find_next_marker(buf, buf + buf_size);
5815  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5816  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5817  goto err;
5818  } else { // found field marker, unescape second field
5819  if (avctx->hwaccel ||
/* NOTE(review): doc line 5820 (the rest of this condition) is missing. */
5821  buf_start_second_field = divider;
5822  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5823  if (!tmp)
5824  goto err;
5825  slices = tmp;
5826  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5827  if (!slices[n_slices].buf)
5828  goto err;
5829  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5830  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5831  buf_size3 << 3);
5832  slices[n_slices].mby_start = s->mb_height >> 1;
5833  n_slices1 = n_slices - 1;
5834  n_slices++;
5835  }
5836  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5837  } else {
5838  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5839  }
5840  init_get_bits(&s->gb, buf2, buf_size2*8);
5841  } else
5842  init_get_bits(&s->gb, buf, buf_size*8);
5843 
5844  if (v->res_sprite) {
5845  v->new_sprite = !get_bits1(&s->gb);
5846  v->two_sprites = get_bits1(&s->gb);
5847  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5848  we're using the sprite compositor. These are intentionally kept separate
5849  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5850  the vc1 one for WVP2 */
5851  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5852  if (v->new_sprite) {
5853  // switch AVCodecContext parameters to those of the sprites
5854  avctx->width = avctx->coded_width = v->sprite_width;
5855  avctx->height = avctx->coded_height = v->sprite_height;
5856  } else {
5857  goto image;
5858  }
5859  }
5860  }
5861 
/* Tear down and re-init when the coded dimensions change mid-stream. */
5862  if (s->context_initialized &&
5863  (s->width != avctx->coded_width ||
5864  s->height != avctx->coded_height)) {
5865  ff_vc1_decode_end(avctx);
5866  }
5867 
5868  if (!s->context_initialized) {
/* NOTE(review): doc line 5869 is missing -- presumably the context init call
 * whose failure triggers this goto err; confirm against the real source. */
5870  goto err;
5871 
5872  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5873 
5874  if (v->profile == PROFILE_ADVANCED) {
5875  if(avctx->coded_width<=1 || avctx->coded_height<=1)
5876  goto err;
5877  s->h_edge_pos = avctx->coded_width;
5878  s->v_edge_pos = avctx->coded_height;
5879  }
5880  }
5881 
5882  /* We need to set current_picture_ptr before reading the header,
5883  * otherwise we cannot store anything in there. */
5884  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5885  int i = ff_find_unused_picture(s, 0);
5886  if (i < 0)
5887  goto err;
5888  s->current_picture_ptr = &s->picture[i];
5889  }
5890 
5891  // do parse frame header
5892  v->pic_header_flag = 0;
5893  v->first_pic_header_flag = 1;
5894  if (v->profile < PROFILE_ADVANCED) {
5895  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5896  goto err;
5897  }
5898  } else {
5899  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5900  goto err;
5901  }
5902  }
5903  v->first_pic_header_flag = 0;
5904 
5905  if (avctx->debug & FF_DEBUG_PICT_INFO)
5906  av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5907 
/* Sprite (image) streams must consist solely of I-frames. */
5908  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5909  && s->pict_type != AV_PICTURE_TYPE_I) {
5910  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5911  goto err;
5912  }
5913 
5914  if ((s->mb_height >> v->field_mode) == 0) {
5915  av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5916  goto err;
5917  }
5918 
5919  // process pulldown flags
/* NOTE(review): doc line 5920 is missing from this listing. */
5921  // Pulldown flags are only valid when 'broadcast' has been set.
5922  // So ticks_per_frame will be 2
5923  if (v->rff) {
5924  // repeat field
/* NOTE(review): doc line 5925 (the repeat-field assignment) is missing. */
5926  } else if (v->rptfrm) {
5927  // repeat frames
5928  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5929  }
5930 
5931  // for skipping the frame
/* NOTE(review): doc lines 5932-5933 are missing from this listing. */
5934 
5935  /* skip B-frames if we don't have reference frames */
5936  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5937  goto err;
5938  }
5939  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5940  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5941  avctx->skip_frame >= AVDISCARD_ALL) {
5942  goto end;
5943  }
5944 
5945  if (s->next_p_frame_damaged) {
5946  if (s->pict_type == AV_PICTURE_TYPE_B)
5947  goto end;
5948  else
5949  s->next_p_frame_damaged = 0;
5950  }
5951 
5952  if (ff_MPV_frame_start(s, avctx) < 0) {
5953  goto err;
5954  }
5955 
5958 
5961 
/* NOTE(review): doc lines 5956-5957, 5959-5960 and 5962-5963 are missing --
 * the condition that guards this VDPAU call was dropped from the listing. */
5964  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5965  else if (avctx->hwaccel) {
5966  if (v->field_mode && buf_start_second_field) {
5967  // decode first field
/* NOTE(review): doc line 5968 is missing from this listing. */
5969  if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
5970  goto err;
5971  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
5972  goto err;
5973  if (avctx->hwaccel->end_frame(avctx) < 0)
5974  goto err;
5975 
5976  // decode second field
5977  s->gb = slices[n_slices1 + 1].gb;
/* NOTE(review): doc line 5978 is missing from this listing. */
5979  v->second_field = 1;
5980  v->pic_header_flag = 0;
5981  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5982  av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
5983  goto err;
5984  }
/* NOTE(review): doc line 5985 is missing from this listing. */
5986 
5987  if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
5988  goto err;
5989  if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
5990  goto err;
5991  if (avctx->hwaccel->end_frame(avctx) < 0)
5992  goto err;
5993  } else {
/* NOTE(review): doc line 5994 is missing from this listing. */
5995  if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5996  goto err;
5997  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5998  goto err;
5999  if (avctx->hwaccel->end_frame(avctx) < 0)
6000  goto err;
6001  }
6002  } else {
/* Software decode path: iterate over slices/fields with one bit reader per
 * slice, swapping field MV buffers for interlaced field pictures. */
6003  int header_ret = 0;
6004 
6005 
/* NOTE(review): doc line 6006 is missing from this listing. */
6007 
6008  v->bits = buf_size * 8;
6009  v->end_mb_x = s->mb_width;
6010  if (v->field_mode) {
6011  uint8_t *tmp[2];
/* Double the linesizes so each field is addressed as a half-height frame. */
6012  s->current_picture.f.linesize[0] <<= 1;
6013  s->current_picture.f.linesize[1] <<= 1;
6014  s->current_picture.f.linesize[2] <<= 1;
6015  s->linesize <<= 1;
6016  s->uvlinesize <<= 1;
/* Rotate the per-field MV planes: last <- next <- current <- old last. */
6017  tmp[0] = v->mv_f_last[0];
6018  tmp[1] = v->mv_f_last[1];
6019  v->mv_f_last[0] = v->mv_f_next[0];
6020  v->mv_f_last[1] = v->mv_f_next[1];
6021  v->mv_f_next[0] = v->mv_f[0];
6022  v->mv_f_next[1] = v->mv_f[1];
6023  v->mv_f[0] = tmp[0];
6024  v->mv_f[1] = tmp[1];
6025  }
6026  mb_height = s->mb_height >> v->field_mode;
6027  for (i = 0; i <= n_slices; i++) {
6028  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6029  if (v->field_mode <= 0) {
6030  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6031  "picture boundary (%d >= %d)\n", i,
6032  slices[i - 1].mby_start, mb_height);
6033  continue;
6034  }
6035  v->second_field = 1;
6036  v->blocks_off = s->b8_stride * (s->mb_height&~1);
6037  v->mb_off = s->mb_stride * s->mb_height >> 1;
6038  } else {
6039  v->second_field = 0;
6040  v->blocks_off = 0;
6041  v->mb_off = 0;
6042  }
6043  if (i) {
6044  v->pic_header_flag = 0;
6045  if (v->field_mode && i == n_slices1 + 2) {
6046  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6047  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6048  continue;
6049  }
6050  } else if (get_bits1(&s->gb)) {
6051  v->pic_header_flag = 1;
6052  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6053  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6054  continue;
6055  }
6056  }
6057  }
6058  if (header_ret < 0)
6059  continue;
6060  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6061  if (!v->field_mode || v->second_field)
6062  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6063  else {
6064  if (i >= n_slices) {
6065  av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6066  continue;
6067  }
6068  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6069  }
6070  if (s->end_mb_y <= s->start_mb_y) {
6071  av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6072  continue;
6073  }
6074  if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6075  av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6076  continue;
6077  }
/* NOTE(review): doc line 6078 is missing -- likely the per-slice
 * block-decoding call; verify against the real source. */
6079  if (i != n_slices)
6080  s->gb = slices[i].gb;
6081  }
6082  if (v->field_mode) {
6083  v->second_field = 0;
6084  if (s->pict_type == AV_PICTURE_TYPE_B) {
6085  memcpy(v->mv_f_base, v->mv_f_next_base,
6086  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
6087  }
/* Restore frame-sized linesizes after field decoding. */
6088  s->current_picture.f.linesize[0] >>= 1;
6089  s->current_picture.f.linesize[1] >>= 1;
6090  s->current_picture.f.linesize[2] >>= 1;
6091  s->linesize >>= 1;
6092  s->uvlinesize >>= 1;
6093  }
6094  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6095  get_bits_count(&s->gb), s->gb.size_in_bits);
6096 // if (get_bits_count(&s->gb) > buf_size * 8)
6097 // return -1;
/* NOTE(review): doc line 6098 is missing -- the condition guarding this
 * goto err was dropped from the listing. */
6099  goto err;
6100  if(!v->field_mode)
6101  ff_er_frame_end(&s->er);
6102  }
6103 
6104  ff_MPV_frame_end(s);
6105 
6106  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6107 image:
/* Sprite output path: present the composited sprite frame at output size. */
6108  avctx->width = avctx->coded_width = v->output_width;
6109  avctx->height = avctx->coded_height = v->output_height;
6110  if (avctx->skip_frame >= AVDISCARD_NONREF)
6111  goto end;
6112 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6113  if (vc1_decode_sprites(v, &s->gb))
6114  goto err;
6115 #endif
6116  if ((ret = av_frame_ref(pict, &v->sprite_output_frame)) < 0)
6117  goto err;
6118  *got_frame = 1;
6119  } else {
6120  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6121  if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6122  goto err;
/* NOTE(review): doc line 6123 is missing from this listing. */
6124  } else if (s->last_picture_ptr != NULL) {
6125  if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6126  goto err;
/* NOTE(review): doc line 6127 is missing from this listing. */
6128  }
6129  if (s->last_picture_ptr || s->low_delay) {
6130  *got_frame = 1;
6131  }
6132  }
6133 
/* Shared cleanup: both exits free the unescape buffer and slice storage. */
6134 end:
6135  av_free(buf2);
6136  for (i = 0; i < n_slices; i++)
6137  av_free(slices[i].buf);
6138  av_free(slices);
6139  return buf_size;
6140 
6141 err:
6142  av_free(buf2);
6143  for (i = 0; i < n_slices; i++)
6144  av_free(slices[i].buf);
6145  av_free(slices);
6146  return -1;
6147 }
6148 
6149 
6150 static const AVProfile profiles[] = {
6151  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6152  { FF_PROFILE_VC1_MAIN, "Main" },
6153  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6154  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6155  { FF_PROFILE_UNKNOWN },
6156 };
6157 
6159 #if CONFIG_DXVA2
6161 #endif
6162 #if CONFIG_VAAPI
6164 #endif
6165 #if CONFIG_VDPAU
6167 #endif
6170 };
6171 
6173  .name = "vc1",
6174  .type = AVMEDIA_TYPE_VIDEO,
6175  .id = AV_CODEC_ID_VC1,
6176  .priv_data_size = sizeof(VC1Context),
6177  .init = vc1_decode_init,
6180  .flush = ff_mpeg_flush,
6181  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6182  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6183  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6184  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6185 };
6186 
6187 #if CONFIG_WMV3_DECODER
/* WMV3 (Windows Media Video 9) decoder registration.
 * NOTE(review): this listing drops doc lines 6194-6195 (unseen struct
 * fields) -- verify against the real vc1dec.c. */
6188 AVCodec ff_wmv3_decoder = {
6189  .name = "wmv3",
6190  .type = AVMEDIA_TYPE_VIDEO,
6191  .id = AV_CODEC_ID_WMV3,
6192  .priv_data_size = sizeof(VC1Context),
6193  .init = vc1_decode_init,
6196  .flush = ff_mpeg_flush,
6197  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6198  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6199  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6200  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6201 };
6202 #endif
6203 
6204 #if CONFIG_WMV3_VDPAU_DECODER
/* WMV3 decoder variant that outputs VDPAU hardware surfaces.
 * NOTE(review): this listing drops doc lines 6211-6213 (unseen struct
 * fields) -- verify against the real vc1dec.c. */
6205 AVCodec ff_wmv3_vdpau_decoder = {
6206  .name = "wmv3_vdpau",
6207  .type = AVMEDIA_TYPE_VIDEO,
6208  .id = AV_CODEC_ID_WMV3,
6209  .priv_data_size = sizeof(VC1Context),
6210  .init = vc1_decode_init,
6214  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6215  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6216  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6217 };
6218 #endif
6219 
6220 #if CONFIG_VC1_VDPAU_DECODER
/* VC-1 decoder variant that outputs VDPAU hardware surfaces.
 * NOTE(review): this listing drops doc lines 6227-6229 (unseen struct
 * fields) -- verify against the real vc1dec.c. */
6221 AVCodec ff_vc1_vdpau_decoder = {
6222  .name = "vc1_vdpau",
6223  .type = AVMEDIA_TYPE_VIDEO,
6224  .id = AV_CODEC_ID_VC1,
6225  .priv_data_size = sizeof(VC1Context),
6226  .init = vc1_decode_init,
6230  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6231  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6232  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6233 };
6234 #endif
6235 
6236 #if CONFIG_WMV3IMAGE_DECODER
/* WMVP (Windows Media Video 9 Image) decoder: uses the sprite compositor.
 * NOTE(review): this listing drops doc lines 6243-6244 (unseen struct
 * fields) -- verify against the real vc1dec.c. */
6237 AVCodec ff_wmv3image_decoder = {
6238  .name = "wmv3image",
6239  .type = AVMEDIA_TYPE_VIDEO,
6240  .id = AV_CODEC_ID_WMV3IMAGE,
6241  .priv_data_size = sizeof(VC1Context),
6242  .init = vc1_decode_init,
6245  .capabilities = CODEC_CAP_DR1,
6246  .flush = vc1_sprite_flush,
6247  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6248  .pix_fmts = ff_pixfmt_list_420
6249 };
6250 #endif
6251 
6252 #if CONFIG_VC1IMAGE_DECODER
/* WVP2 (Windows Media Video 9 Image v2) decoder: uses the sprite compositor.
 * NOTE(review): this listing drops doc lines 6259-6260 (unseen struct
 * fields) -- verify against the real vc1dec.c. */
6253 AVCodec ff_vc1image_decoder = {
6254  .name = "vc1image",
6255  .type = AVMEDIA_TYPE_VIDEO,
6256  .id = AV_CODEC_ID_VC1IMAGE,
6257  .priv_data_size = sizeof(VC1Context),
6258  .init = vc1_decode_init,
6261  .capabilities = CODEC_CAP_DR1,
6262  .flush = vc1_sprite_flush,
6263  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6264  .pix_fmts = ff_pixfmt_list_420
6265 };
6266 #endif
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
Definition: vc1dec.c:265
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:663
in the bitstream is reported as 00b
Definition: vc1.h:173
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
Definition: vc1data.c:34
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixel format
Definition: start.py:1
int use_ic
use intensity compensation in B-frames
Definition: vc1.h:300
#define VC1_TTBLK_VLC_BITS
Definition: vc1data.c:126
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
const struct AVCodec * codec
int topleft_blk_idx
Definition: vc1.h:390
#define MB_TYPE_SKIP
#define PICT_TOP_FIELD
Definition: mpegvideo.h:662
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2570
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3005
#define VC1_IF_MBMODE_VLC_BITS
Definition: vc1data.c:145
float v
int p_frame_skipped
Definition: vc1.h:385
Imode
Imode types.
Definition: vc1.c:53
const char * s
Definition: avisynth_c.h:668
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
Definition: vc1dec.c:3155
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
Definition: vc1dsp.h:50
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
uint8_t luty[256]
Definition: vc1.h:299
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:41
The VC1 Context.
Definition: vc1.h:182
int esc3_level_length
Definition: mpegvideo.h:641
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
VLC ff_vc1_ttblk_vlc[3]
Definition: vc1data.c:127
#define VC1_ICBPCY_VLC_BITS
Definition: vc1data.c:120
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
Definition: vc1dec.c:3468
int k_x
Number of bits for MVs (depends on MV range)
Definition: vc1.h:241
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:363
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:316
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:72
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:351
uint8_t * mv_f_base
Definition: vc1.h:354
#define C
int coded_width
Bitstream width / height, may be different from width/height e.g.
int mv_type_is_raw
mv type mb plane is not coded
Definition: vc1.h:295
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define B
Definition: dsputil.c:2025
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
Definition: vc1dec.c:1333
uint8_t dmvrange
Frame decoding info for interlaced picture.
Definition: vc1.h:338
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define ER_MB_END
#define AC_VLC_BITS
Definition: intrax8.c:37
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1302
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
Definition: vc1acdata.h:34
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:317
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:55
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be contiguous
Definition: mpegvideo.h:357
static const int vc1_last_decode_table[AC_MODES]
Definition: vc1acdata.h:30
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
int tt_index
Index for Transform Type tables (to decode TTMB)
Definition: vc1.h:291
static void vc1_decode_p_blocks(VC1Context *v)
Definition: vc1dec.c:4965
static void vc1_put_signed_blocks_clamped(VC1Context *v)
Definition: vc1dec.c:91
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:281
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
Definition: vc1dec.c:1097
#define VC1_2REF_MVDATA_VLC_BITS
Definition: vc1data.c:140
void ff_er_frame_end(ERContext *s)
uint8_t * mv_f_last_base
Definition: vc1.h:355
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:265
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:305
void(* vc1_v_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:47
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:126
Sinusoidal phase f
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
Definition: vc1dsp.h:70
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4dec.c:286
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:56
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int frfd
Definition: vc1.h:372
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
Definition: vc1.h:245
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
Definition: vc1dec.c:2945
static void vc1_decode_b_blocks(VC1Context *v)
Definition: vc1dec.c:5040
#define wrap(func)
Definition: w64xmmtest.h:70
mpegvideo header.
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:141
int top_blk_idx
Definition: vc1.h:390
IntraX8Context x8
Definition: vc1.h:184
VLC * imv_vlc
Definition: vc1.h:345
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mb_type_base
Definition: vc1.h:270
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:354
int sprite_height
Definition: vc1.h:381
uint8_t run
Definition: svq3.c:136
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:228
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:398
struct VC1Context VC1Context
The VC1 Context.
void(* clear_block)(int16_t *block)
Definition: dsputil.h:145
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:44
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:51
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1dec.c:5124
vc1op_pixels_func put_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:61
int block_wrap[6]
Definition: mpegvideo.h:466
uint8_t rff
Definition: vc1.h:314
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1dec.c:2260
const uint8_t ff_vc1_adv_interlaced_4x8_zz[32]
Definition: vc1data.c:1065
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
Definition: vc1dec.c:2468
set threshold d
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
enum AVDiscard skip_frame
Skip decoding for selected frames.
int bits
Definition: vc1.h:188
int range_x
Definition: vc1.h:243
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:122
static void vc1_apply_p_loop_filter(VC1Context *v)
Definition: vc1dec.c:3441
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:1121
struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2996
int esc3_run_length
Definition: mpegvideo.h:642
int refdist
distance of the current picture from reference
Definition: vc1.h:360
uint8_t * acpred_plane
AC prediction flags bitplane.
Definition: vc1.h:324
VC-1 tables.
int bi_type
Definition: vc1.h:386
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define MB_TYPE_INTRA
Definition: mpegvideo.h:134
static const AVProfile profiles[]
Definition: vc1dec.c:6150
uint8_t bits
Definition: crc.c:216
uint8_t
static int vc1_decode_b_mb_intfr(VC1Context *v)
Decode one B-frame MB (in interlaced frame B picture)
Definition: vc1dec.c:4342
#define av_cold
Definition: attributes.h:78
int first_pic_header_flag
Definition: vc1.h:373
uint16_t * hrd_rate
Definition: vc1.h:329
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
mode
Definition: f_perms.c:27
#define PICT_FRAME
Definition: mpegvideo.h:664
#define Y
Definition: vf_boxblur.c:76
#define DC_VLC_BITS
Definition: vc1dec.c:48
int left_blk_idx
Definition: vc1.h:390
static void vc1_mc_4mv_chroma4(VC1Context *v)
Do motion compensation for 4-MV field chroma macroblock (both U and V)
Definition: vc1dec.c:959
#define AV_RB32
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:210
int y_ac_table_index
Luma index from AC2FRM element.
Definition: vc1.h:261
#define FF_PROFILE_VC1_ADVANCED
#define b
Definition: input.c:42
end end
int second_field
Definition: vc1.h:359
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
#define ER_MB_ERROR
int n_allocated_blks
Definition: vc1.h:390
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:228
int c_ac_table_index
AC coding set indexes.
Definition: vc1.h:260
const int ff_vc1_ac_sizes[AC_MODES]
Definition: vc1data.c:1133
#define FF_PROFILE_UNKNOWN
void(* vc1_inv_trans_8x8)(int16_t *b)
Definition: vc1dsp.h:39
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int ttfrm
Transform type info present at frame level.
Definition: vc1.h:263
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:343
uint8_t lutuv[256]
lookup tables used for intensity compensation
Definition: vc1.h:299
int codingset2
index of current table set from 11.8 to use for chroma block decoding
Definition: vc1.h:267
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:278
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
int16_t((* luma_mv)[2]
Definition: vc1.h:393
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:224
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
Definition: vc1dec.c:197
MSMPEG4 data tables.
uint8_t * data
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:193
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1205
uint8_t * forward_mb_plane
bitplane for "forward" MBs
Definition: vc1.h:294
WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
Definition: pixfmt.h:109
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:277
enum AVPixelFormat ff_pixfmt_list_420[]
Definition: mpegvideo.c:127
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
Definition: vc1.c:293
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
Definition: vc1dec.c:2018
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1717
int fieldtx_is_raw
Definition: vc1.h:351
uint8_t * over_flags_plane
Overflags bitplane.
Definition: vc1.h:326
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
Definition: vc1dec.c:4037
uint8_t fourmvbp
Definition: vc1.h:349
#define A(x)
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:38
int range_y
MV range.
Definition: vc1.h:243
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:861
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: dsputil.h:189
uint8_t ttmbf
Transform type flag.
Definition: vc1.h:264
Definition: vc1.h:143
int k_y
Number of bits for MVs (depends on MV range)
Definition: vc1.h:242
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:54
#define transpose(x)
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:557
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:350
enum AVCodecID id
uint8_t twomvbp
Definition: vc1.h:348
int dmb_is_raw
direct mb plane is raw
Definition: vc1.h:296
const uint8_t ff_vc1_simple_progressive_4x4_zz[16]
Definition: vc1data.c:1022
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
Definition: vc1dec.c:2326
int16_t(* block)[6][64]
Definition: vc1.h:389
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:86
#define VC1_CBPCY_P_VLC_BITS
Definition: vc1data.c:118
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:42
int has_b_frames
Size of the frame reordering buffer in the decoder.
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:183
int overlap
overlapped transforms in use
Definition: vc1.h:232
in the bitstream is reported as 11b
Definition: vc1.h:175
#define FF_PROFILE_VC1_COMPLEX
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: dsputil.h:190
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
Definition: vc1dec.c:345
static const struct endianess table[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
ERContext er
Definition: mpegvideo.h:742
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
Definition: vc1dec.c:5518
Spectrum Plot time data
#define GET_MQUANT()
Get macroblock-level quantizer scale.
Definition: vc1dec.c:1055
AVFrame sprite_output_frame
Definition: vc1.h:380
int capabilities
Codec capabilities.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:364
#define t1
Definition: regdef.h:29
uint8_t * mv_f_next_base
Definition: vc1.h:356
int flags
CODEC_FLAG_*.
VLC * mbmode_vlc
Definition: vc1.h:344
void(* clear_blocks)(int16_t *blocks)
Definition: dsputil.h:146
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
const char * name
Name of the codec implementation.
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:53
#define IS_MARKER(state, i, buf, buf_size)
Definition: dca_parser.c:37
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:579
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:592
GetBitContext gb
Definition: mpegvideo.h:649
#define FFMAX(a, b)
Definition: common.h:56
external API header
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
Definition: vc1dec.c:1238
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:43
#define FF_PROFILE_VC1_MAIN
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra, int dir)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1dec.c:1634
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:247
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:200
#define CODEC_FLAG_LOW_DELAY
Force low delay.
int size
int a_avail
Definition: vc1.h:269
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:353
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
Definition: vc1dec.c:2504
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:3066
const uint8_t ff_vc1_adv_interlaced_8x4_zz[32]
Definition: vc1data.c:1058
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:830
#define B_FRACTION_DEN
Definition: vc1data.h:99
const uint8_t ff_vc1_adv_interlaced_8x8_zz[64]
Definition: vc1data.c:1047
VLC ff_vc1_ttmb_vlc[3]
Definition: vc1data.c:115
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
Definition: vc1dec.c:1354
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:367
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
Definition: msmpeg4data.c:1831
VLC * twomvbp_vlc
Definition: vc1.h:346
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:248
AVCodec ff_vc1_decoder
Definition: vc1dec.c:6172
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
Definition: vc1dec.c:1275
void ff_mpeg_er_frame_start(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:5663
int x8_type
Definition: vc1.h:387
#define FFMIN(a, b)
Definition: common.h:58
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:5499
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:531
uint8_t * blk_mv_type_base
Definition: vc1.h:353
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
Definition: intrax8.c:691
int field_mode
1 for interlaced field pictures
Definition: vc1.h:357
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
Definition: intrax8.c:709
ret
Definition: avfilter.c:821
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2733
int width
picture width / height.
#define VC1_SUBBLKPAT_VLC_BITS
Definition: vc1data.c:128
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:359
void(* vc1_h_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:48
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:239
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:132
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:347
int fourmvswitch
Definition: vc1.h:339
int mb_off
Definition: vc1.h:369
Sampled sinusoid X
static void vc1_decode_skip_blocks(VC1Context *v)
Definition: vc1dec.c:5103
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3317
int size_in_bits
Definition: get_bits.h:57
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:131
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:5433
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:579
static const int offset_table[6]
Definition: vc1dec.c:3315
static int median4(int a, int b, int c, int d)
Definition: vc1dec.c:546
#define FFABS(a)
Definition: common.h:53
static int vc1_decode_p_mb_intfr(VC1Context *v)
Definition: vc1dec.c:3707
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:524
vc1op_pixels_func avg_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:62
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:291
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
Definition: vc1dsp.h:49
MotionEstContext me
Definition: mpegvideo.h:437
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
Definition: vc1dec.c:1135
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3381
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:28
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:1097
FIXME Range Coding of cr are level
Definition: snow.txt:367
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1493
uint32_t * cbp
Definition: vc1.h:391
int left_blk_sh
Definition: vc1.h:246
int16_t(* luma_mv_base)[2]
Definition: vc1.h:393
uint8_t * fieldtx_plane
Definition: vc1.h:350
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:465
static void flush(AVCodecContext *avctx)
int * ttblk_base
Definition: vc1.h:265
VLC * cbpcy_vlc
CBPCY VLC table.
Definition: vc1.h:290
static int decode210(GetBitContext *gb)
Definition: get_bits.h:549
static const float pred[4]
Definition: siprdata.h:259
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
Definition: vc1.h:382
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:45
static const int8_t mv[256][2]
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:66
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:133
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
Definition: vc1dec.c:170
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
Definition: vc1dec.c:1861
for k
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:637
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1289
static const int offset_table1[9]
Definition: vc1dec.c:52
NULL
Definition: eval.c:55
static int width
Definition: tests/utils.c:158
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:192
AVS_Value src
Definition: avisynth_c.h:523
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Definition: vc1.h:246
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:71
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
enum AVCodecID codec_id
int c_avail
Definition: vc1.h:269
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
static const uint8_t vc1_delta_run_table[AC_MODES][57]
Definition: vc1acdata.h:295
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
Do motion compensation for 4-MV macroblock - luminance block.
Definition: vc1dec.c:559
int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1544
uint32_t * cbp_base
Definition: vc1.h:391
main external API structure.
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
uint8_t * is_intra
Definition: vc1.h:392
static int vc1_decode_p_mb_intfi(VC1Context *v)
Definition: vc1dec.c:3918
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:355
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:245
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
Definition: vc1dec.c:4189
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
static void init_block_index(VC1Context *v)
Definition: vc1dec.c:77
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:594
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:2164
Picture * picture
main picture buffer
Definition: mpegvideo.h:285
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
Definition: vc1data.c:53
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:273
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:40
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
int sprite_width
Definition: vc1.h:381
int fmb_is_raw
forward mb plane is raw
Definition: vc1.h:297
double value
Definition: eval.c:82
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
uint8_t * is_intra_base
Definition: vc1.h:392
Definition: vc1.h:139
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:265
#define MB_INTRA_VLC_BITS
Definition: vc1dec.c:47
int index
Definition: gxfenc.c:89
synthesis window for stochastic i
uint8_t * mv_f_last[2]
Definition: vc1.h:355
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
Definition: vc1dsp.h:74
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:203
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
Definition: vc1dec.c:739
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
int context_initialized
Definition: mpegvideo.h:272
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:124
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value prediction dir: left=0, top=1.
Definition: vc1dec.c:2391
#define MB_TYPE_16x16
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
#define mid_pred
Definition: mathops.h:94
int dim
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:391
int skip_is_raw
skip mb plane is not coded
Definition: vc1.h:298
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
#define CONFIG_VC1_VDPAU_DECODER
Definition: config.h:611
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
Definition: intrax8.c:726
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:306
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:368
uint8_t * direct_mb_plane
bitplane for "direct" MBs
Definition: vc1.h:293
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
Definition: vc1acdata.h:339
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:135
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:377
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
Definition: vc1dec.c:5699
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
Definition: vc1.h:292
int numref
number of past field pictures used as reference
Definition: vc1.h:361
const int32_t ff_vc1_dqscale[63]
Definition: vc1data.c:1085
int blocks_off
Definition: vc1.h:369
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:65
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by an given frame.
Definition: frame.c:228
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
#define FF_PROFILE_VC1_SIMPLE
uint8_t tff
Definition: vc1.h:314
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:352
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:229
#define FF_DEBUG_PICT_INFO
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:431
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:279
MpegEncContext s
Definition: vc1.h:183
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
in the bitstream is reported as 10b
Definition: vc1.h:174
MpegEncContext.
Definition: mpegvideo.h:241
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:346
int8_t * qscale_table
Definition: mpegvideo.h:102
struct AVCodecContext * avctx
Definition: mpegvideo.h:243
VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
Definition: pixfmt.h:110
int cur_blk_idx
Definition: vc1.h:390
uint8_t pq
Definition: vc1.h:244
static const int offset_table2[9]
Definition: vc1dec.c:53
discard all non reference
#define CODEC_FLAG_EMU_EDGE
Don&#39;t draw edges.
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
Definition: vc1dec.c:4691
int pqindex
raw pqindex used in coding set selection
Definition: vc1.h:268
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
Definition: vc1acdata.h:246
#define VC1_1REF_MVDATA_VLC_BITS
Definition: vc1data.c:138
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
Y , 8bpp.
Definition: pixfmt.h:76
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:278
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
Definition: vc1dec.c:1997
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
Definition: vc1dec.c:6158
#define VC1_TTMB_VLC_BITS
Definition: vc1data.c:114
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
uint8_t * dest[3]
Definition: mpegvideo.h:467
static const int size_table[6]
Definition: vc1dec.c:3314
int output_width
Definition: vc1.h:381
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:311
static double c[64]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:325
uint8_t dquantfrm
pquant parameters
Definition: vc1.h:251
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:345
Bi-dir predicted.
Definition: avutil.h:218
AVProfile.
const uint8_t ff_vc1_adv_interlaced_4x4_zz[16]
Definition: vc1data.c:1076
int res_fasttx
reserved, always 1
Definition: vc1.h:196
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Same thing on a dB scale
int pic_header_flag
Definition: vc1.h:374
int * ttblk
Transform type at the block level.
Definition: vc1.h:265
VLC ff_vc1_ac_coeff_table[8]
Definition: vc1data.c:143
uint8_t condover
Definition: vc1.h:328
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Definition: vc1.c:524
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
Definition: vc1data.c:130
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:58
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
Definition: vc1.h:289
int picture_structure
Definition: mpegvideo.h:660
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:27
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1dec.c:1380
int rnd
rounding control
Definition: vc1.h:301
VideoDSPContext vdsp
Definition: mpegvideo.h:394
Definition: vc1.h:142
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:275
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1244
int acpred_is_raw
Definition: vc1.h:325
uint8_t zzi_8x8[64]
Definition: vc1.h:352
#define avg(d, s)
Definition: dsputil_align.c:52
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:700
op_pixels_func avg_no_rnd_pixels_tab[4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:92
uint8_t rptfrm
Definition: vc1.h:314
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
static int decode012(GetBitContext *gb)
Definition: get_bits.h:539
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
int bmvtype
Definition: vc1.h:371
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:331
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
Definition: vc1dec.c:794
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:283
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
Definition: vc1dec.c:4831
H264ChromaContext h264chroma
Definition: vc1.h:185
int overflg_is_raw
Definition: vc1.h:327
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
Definition: vc1.h:422
struct AVFrame f
Definition: mpegvideo.h:98
Definition: vc1.h:136
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:46
int level
Advanced Profile.
Definition: vc1.h:206
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:52
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:260
#define AV_LOG_INFO
Definition: log.h:156
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:372
uint32_t * mb_type
Definition: mpegvideo.h:108
VLC ff_msmp4_mb_i_vlc
Definition: msmpeg4data.c:36
void INT64 INT64 count
Definition: avisynth_c.h:594
void INT64 start
Definition: avisynth_c.h:594
#define av_always_inline
Definition: attributes.h:41
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
uint8_t mv_mode2
Secondary MV coding mode (B frames)
Definition: vc1.h:240
int new_sprite
Frame decoding info for sprite modes.
Definition: vc1.h:378
uint8_t * mv_f_next[2]
Definition: vc1.h:356
#define FFSWAP(type, a, b)
Definition: common.h:61
int two_sprites
Definition: vc1.h:379
int codingset
index of current table set from 11.8 to use for luma block decoding
Definition: vc1.h:266
uint8_t * mb_type[3]
Definition: vc1.h:270
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
Definition: vc1dsp.h:69
uint16_t * hrd_buffer
Definition: vc1.h:329
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1453
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, int buf_size)
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:284
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
Definition: vc1data.c:132
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
Definition: vc1dsp.c:804
VLC * fourmvbp_vlc
Definition: vc1.h:347
int dc_table_index
Definition: mpegvideo.h:634
VLC ff_msmp4_dc_luma_vlc[2]
Definition: msmpeg4data.c:37
VLC ff_vc1_subblkpat_vlc[3]
Definition: vc1data.c:129
#define inc_blk_idx(idx)
uint8_t halfpq
Uniform quant over image and qp+.5.
Definition: vc1.h:279
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1dec.c:2047
static const uint8_t vc1_delta_level_table[AC_MODES][31]
Definition: vc1acdata.h:203
#define t2
Definition: regdef.h:30
VC1DSPContext vc1dsp
Definition: vc1.h:186
Predicted.
Definition: avutil.h:217
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
Definition: vc1.h:408
int output_height
Definition: vc1.h:381
VLC ff_msmp4_dc_chroma_vlc[2]
Definition: msmpeg4data.c:38
HpelDSPContext hdsp
Definition: mpegvideo.h:393