rv34.c
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/internal.h"
29 
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "golomb.h"
34 #include "internal.h"
35 #include "mathops.h"
36 #include "rectangle.h"
37 #include "thread.h"
38 
39 #include "rv34vlc.h"
40 #include "rv34data.h"
41 #include "rv34.h"
42 
43 //#define DEBUG
44 
45 static inline void ZERO8x2(void* dst, int stride)
46 {
47  fill_rectangle(dst, 1, 2, stride, 0, 4);
48  fill_rectangle(((uint8_t*)(dst))+4, 1, 2, stride, 0, 4);
49 }
50 
51 /** translation of RV30/40 macroblock types to lavc ones */
52 static const int rv34_mb_type_to_lavc[12] = {
65 };
66 
67 
68 static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
69 
70 static int rv34_decode_mv(RV34DecContext *r, int block_type);
71 
72 /**
73  * @name RV30/40 VLC generating functions
74  * @{
75  */
76 
77 static const int table_offs[] = {
78  0, 1818, 3622, 4144, 4698, 5234, 5804, 5868, 5900, 5932,
79  5996, 6252, 6316, 6348, 6380, 7674, 8944, 10274, 11668, 12250,
80  14060, 15846, 16372, 16962, 17512, 18148, 18180, 18212, 18244, 18308,
81  18564, 18628, 18660, 18692, 20036, 21314, 22648, 23968, 24614, 26384,
82  28190, 28736, 29366, 29938, 30608, 30640, 30672, 30704, 30768, 31024,
83  31088, 31120, 31184, 32570, 33898, 35236, 36644, 37286, 39020, 40802,
84  41368, 42052, 42692, 43348, 43380, 43412, 43444, 43476, 43604, 43668,
85  43700, 43732, 45100, 46430, 47778, 49160, 49802, 51550, 53340, 53972,
86  54648, 55348, 55994, 56122, 56154, 56186, 56218, 56346, 56410, 56442,
87  56474, 57878, 59290, 60636, 62036, 62682, 64460, 64524, 64588, 64716,
88  64844, 66076, 67466, 67978, 68542, 69064, 69648, 70296, 72010, 72074,
89  72138, 72202, 72330, 73572, 74936, 75454, 76030, 76566, 77176, 77822,
90  79582, 79646, 79678, 79742, 79870, 81180, 82536, 83064, 83672, 84242,
91  84934, 85576, 87384, 87448, 87480, 87544, 87672, 88982, 90340, 90902,
92  91598, 92182, 92846, 93488, 95246, 95278, 95310, 95374, 95502, 96878,
93  98266, 98848, 99542, 100234, 100884, 101524, 103320, 103352, 103384, 103416,
94  103480, 104874, 106222, 106910, 107584, 108258, 108902, 109544, 111366, 111398,
95  111430, 111462, 111494, 112878, 114320, 114988, 115660, 116310, 116950, 117592
96 };
97 
98 static VLC_TYPE table_data[117592][2];
99 
100 /**
101  * Generate VLC from codeword lengths.
102  * @param bits codeword lengths (zeroes are accepted)
103  * @param size length of input data
104  * @param vlc output VLC
105  * @param insyms symbols for input codes (NULL for default ones)
106  * @param num VLC table number (for static initialization)
107  */
108 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms,
109  const int num)
110 {
111  int i;
112  int counts[17] = {0}, codes[17];
113  uint16_t cw[MAX_VLC_SIZE], syms[MAX_VLC_SIZE];
114  uint8_t bits2[MAX_VLC_SIZE];
115  int maxbits = 0, realsize = 0;
116 
117  for(i = 0; i < size; i++){
118  if(bits[i]){
119  bits2[realsize] = bits[i];
120  syms[realsize] = insyms ? insyms[i] : i;
121  realsize++;
122  maxbits = FFMAX(maxbits, bits[i]);
123  counts[bits[i]]++;
124  }
125  }
126 
127  codes[0] = 0;
128  for(i = 0; i < 16; i++)
129  codes[i+1] = (codes[i] + counts[i]) << 1;
130  for(i = 0; i < realsize; i++)
131  cw[i] = codes[bits2[i]]++;
132 
133  vlc->table = &table_data[table_offs[num]];
134  vlc->table_allocated = table_offs[num + 1] - table_offs[num];
135  ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize,
136  bits2, 1, 1,
137  cw, 2, 2,
138  syms, 2, 2, INIT_VLC_USE_NEW_STATIC);
139 }
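/* Illustrative example (not part of the decoder): for codeword lengths
 * bits = {1, 2, 3, 3} the loops above assign canonical prefix codes:
 *   counts[1..3] = {1, 1, 2};  codes[1] = 0, codes[2] = 2, codes[3] = 6
 *   cw[]         = 0b0, 0b10, 0b110, 0b111
 * ff_init_vlc_sparse() then builds the lookup table from these
 * (length, code, symbol) triples inside the table_data[] slice that
 * table_offs[num] reserves for this VLC. */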
140 
141 /**
142  * Initialize all tables.
143  */
144 static av_cold void rv34_init_tables(void)
145 {
146  int i, j, k;
147 
148  for(i = 0; i < NUM_INTRA_TABLES; i++){
149  for(j = 0; j < 2; j++){
150  rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL, 19*i + 0 + j);
151  rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL, 19*i + 2 + j);
152  rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL, 19*i + 4 + j);
153  for(k = 0; k < 4; k++){
154  rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code, 19*i + 6 + j*4 + k);
155  }
156  }
157  for(j = 0; j < 4; j++){
158  rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL, 19*i + 14 + j);
159  }
160  rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL, 19*i + 18);
161  }
162 
163  for(i = 0; i < NUM_INTER_TABLES; i++){
164  rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL, i*12 + 95);
165  for(j = 0; j < 4; j++){
166  rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code, i*12 + 96 + j);
167  }
168  for(j = 0; j < 2; j++){
169  rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL, i*12 + 100 + j);
170  rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL, i*12 + 102 + j);
171  rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL, i*12 + 104 + j);
172  }
173  rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL, i*12 + 106);
174  }
175 }
176 
177 /** @} */ // vlc group
178 
179 /**
180  * @name RV30/40 4x4 block decoding functions
181  * @{
182  */
183 
184 /**
185  * Decode coded block pattern.
186  */
187 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
188 {
189  int pattern, code, cbp=0;
190  int ones;
191  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
192  static const int shifts[4] = { 0, 2, 8, 10 };
193  const int *curshift = shifts;
194  int i, t, mask;
195 
196  code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
197  pattern = code & 0xF;
198  code >>= 4;
199 
200  ones = rv34_count_ones[pattern];
201 
202  for(mask = 8; mask; mask >>= 1, curshift++){
203  if(pattern & mask)
204  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
205  }
206 
207  for(i = 0; i < 4; i++){
208  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
209  if(t == 1)
210  cbp |= cbp_masks[get_bits1(gb)] << i;
211  if(t == 2)
212  cbp |= cbp_masks[2] << i;
213  }
214  return cbp;
215 }
216 
217 /**
218  * Get one coefficient value from the bitstream and store it.
219  */
220 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC* vlc, int q)
221 {
222  if(coef){
223  if(coef == esc){
224  coef = get_vlc2(gb, vlc->table, 9, 2);
225  if(coef > 23){
226  coef -= 23;
227  coef = 22 + ((1 << coef) | get_bits(gb, coef));
228  }
229  coef += esc;
230  }
231  if(get_bits1(gb))
232  coef = -coef;
233  *dst = (coef*q + 8) >> 4;
234  }
235 }
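/* Worked example (illustration only): with esc == 2 and q == 60, a decoded
 * level of coef == 1 is stored as (1*60 + 8) >> 4 == 4, negated first if the
 * following sign bit is set. A value equal to esc pulls a further level from
 * vlc->coefficient instead, and levels above 23 are extended by reading
 * (coef - 23) extra raw bits before the escape offset is added back. */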
236 
237 /**
238  * Decode 2x2 subblock of coefficients.
239  */
240 static inline void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
241 {
242  int flags = modulo_three_table[code];
243 
244  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
245  if(is_block2){
246  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
247  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
248  }else{
249  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
250  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
251  }
252  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
253 }
254 
255 /**
256  * Decode a single coefficient.
257  */
258 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
259 {
260  int coeff = modulo_three_table[code] >> 6;
261  decode_coeff(dst, coeff, 3, gb, vlc, q);
262 }
263 
264 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc,
265  int q_dc, int q_ac1, int q_ac2)
266 {
267  int flags = modulo_three_table[code];
268 
269  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
270  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
271  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
272  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
273 }
274 
275 /**
276  * Decode coefficients for 4x4 block.
277  *
278  * This is done by filling 2x2 subblocks with decoded coefficients
279  * in this order (the same for subblocks and subblock coefficients):
280  *  o--o
281  *    /
282  *   /
283  *  o--o
284  */
285 
286 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
287 {
288  int code, pattern, has_ac = 1;
289 
290  code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);
291 
292  pattern = code & 0x7;
293 
294  code >>= 3;
295 
296  if (modulo_three_table[code] & 0x3F) {
297  decode_subblock3(dst, code, gb, &rvlc->coefficient, q_dc, q_ac1, q_ac2);
298  } else {
299  decode_subblock1(dst, code, gb, &rvlc->coefficient, q_dc);
300  if (!pattern)
301  return 0;
302  has_ac = 0;
303  }
304 
305  if(pattern & 4){
306  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
307  decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
308  }
309  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
310  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
311  decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
312  }
313  if(pattern & 1){
314  code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
315  decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
316  }
317  return has_ac | pattern;
318 }
319 
320 /**
321  * @name RV30/40 bitstream parsing
322  * @{
323  */
324 
325 /**
326  * Decode starting slice position.
327  * @todo Maybe replace with ff_h263_decode_mba() ?
328  */
329 int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
330 {
331  int i;
332  for(i = 0; i < 5; i++)
333  if(rv34_mb_max_sizes[i] >= mb_size - 1)
334  break;
335  return rv34_mb_bits_sizes[i];
336 }
337 
338 /**
339  * Select VLC set for decoding from current quantizer, modifier and frame type.
340  */
341 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
342 {
343  if(mod == 2 && quant < 19) quant += 10;
344  else if(mod && quant < 26) quant += 5;
345  return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]]
346  : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]];
347 }
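/* Example (illustrative): with mod == 2 and quant == 14 the lookup behaves as
 * if the quantizer were 24, i.e. the set comes from
 * rv34_quant_to_vlc_set[type][24] rather than [14]; av_clip() keeps the
 * adjusted value inside the 0..30 range covered by the table. */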
348 
349 /**
350  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
351  */
352 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
353 {
354  MpegEncContext *s = &r->s;
355  GetBitContext *gb = &s->gb;
356  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
357  int t;
358 
359  r->is16 = get_bits1(gb);
360  if(r->is16){
361  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
362  r->block_type = RV34_MB_TYPE_INTRA16x16;
363  t = get_bits(gb, 2);
364  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
365  r->luma_vlc = 2;
366  }else{
367  if(!r->rv30){
368  if(!get_bits1(gb))
369  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
370  }
371  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
372  r->block_type = RV34_MB_TYPE_INTRA;
373  if(r->decode_intra_types(r, gb, intra_types) < 0)
374  return -1;
375  r->luma_vlc = 1;
376  }
377 
378  r->chroma_vlc = 0;
379  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
380 
381  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
382 }
383 
384 /**
385  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
386  */
387 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
388 {
389  MpegEncContext *s = &r->s;
390  GetBitContext *gb = &s->gb;
391  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
392  int i, t;
393 
394  r->block_type = r->decode_mb_info(r);
395  if(r->block_type == -1)
396  return -1;
397  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
398  r->mb_type[mb_pos] = r->block_type;
399  if(r->block_type == RV34_MB_SKIP){
400  if(s->pict_type == AV_PICTURE_TYPE_P)
401  r->mb_type[mb_pos] = RV34_MB_P_16x16;
402  if(s->pict_type == AV_PICTURE_TYPE_B)
403  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
404  }
405  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
406  rv34_decode_mv(r, r->block_type);
407  if(r->block_type == RV34_MB_SKIP){
408  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
409  return 0;
410  }
411  r->chroma_vlc = 1;
412  r->luma_vlc = 0;
413 
414  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
415  if(r->is16){
416  t = get_bits(gb, 2);
417  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
418  r->luma_vlc = 2;
419  }else{
420  if(r->decode_intra_types(r, gb, intra_types) < 0)
421  return -1;
422  r->luma_vlc = 1;
423  }
424  r->chroma_vlc = 0;
425  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
426  }else{
427  for(i = 0; i < 16; i++)
428  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
429  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
430  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
431  r->is16 = 1;
432  r->chroma_vlc = 1;
433  r->luma_vlc = 2;
434  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
435  }
436  }
437 
438  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
439 }
440 
441 /** @} */ //bitstream functions
442 
443 /**
444  * @name motion vector related code (prediction, reconstruction, motion compensation)
445  * @{
446  */
447 
448 /** macroblock partition width in 8x8 blocks */
449 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
450 
451 /** macroblock partition height in 8x8 blocks */
452 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
453 
454 /** availability index for subblocks */
455 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
456 
457 /**
458  * motion vector prediction
459  *
460  * Motion prediction is performed for the block using median prediction of
461  * the motion vectors from the left, top and top-right blocks; in corner cases
462  * some other vectors may be used instead.
463  */
464 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
465 {
466  MpegEncContext *s = &r->s;
467  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
468  int A[2] = {0}, B[2], C[2];
469  int i, j;
470  int mx, my;
471  int* avail = r->avail_cache + avail_indexes[subblock_no];
472  int c_off = part_sizes_w[block_type];
473 
474  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
475  if(subblock_no == 3)
476  c_off = -1;
477 
478  if(avail[-1]){
479  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
480  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
481  }
482  if(avail[-4]){
483  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
484  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
485  }else{
486  B[0] = A[0];
487  B[1] = A[1];
488  }
489  if(!avail[c_off-4]){
490  if(avail[-4] && (avail[-1] || r->rv30)){
491  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
492  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
493  }else{
494  C[0] = A[0];
495  C[1] = A[1];
496  }
497  }else{
498  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
499  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
500  }
501  mx = mid_pred(A[0], B[0], C[0]);
502  my = mid_pred(A[1], B[1], C[1]);
503  mx += r->dmv[dmv_no][0];
504  my += r->dmv[dmv_no][1];
505  for(j = 0; j < part_sizes_h[block_type]; j++){
506  for(i = 0; i < part_sizes_w[block_type]; i++){
507  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
508  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
509  }
510  }
511 }
512 
513 #define GET_PTS_DIFF(a, b) ((a - b + 8192) & 0x1FFF)
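/* The macro treats timestamps as 13-bit values, so differences wrap modulo 8192.
 * Example (illustration only): GET_PTS_DIFF(10, 8185) ==
 * (10 - 8185 + 8192) & 0x1FFF == 17, i.e. the wrap from 8185 to 10 still
 * yields a small positive distance. */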
514 
515 /**
516  * Calculate motion vector component that should be added for direct blocks.
517  */
518 static int calc_add_mv(RV34DecContext *r, int dir, int val)
519 {
520  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
521 
522  return (val * mul + 0x2000) >> 14;
523 }
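/* Worked example (not from the bitstream): if the B-frame lies exactly halfway
 * between its references, the frame-header code below sets mv_weight1 to
 * (dist0 << 14) / refdist == 8192, so a co-located component of val == 6 maps
 * to (6 * 8192 + 0x2000) >> 14 == 3, roughly half the vector for dir == 0. */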
524 
525 /**
526  * Predict motion vector for B-frame macroblock.
527  */
528 static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
529  int A_avail, int B_avail, int C_avail,
530  int *mx, int *my)
531 {
532  if(A_avail + B_avail + C_avail != 3){
533  *mx = A[0] + B[0] + C[0];
534  *my = A[1] + B[1] + C[1];
535  if(A_avail + B_avail + C_avail == 2){
536  *mx /= 2;
537  *my /= 2;
538  }
539  }else{
540  *mx = mid_pred(A[0], B[0], C[0]);
541  *my = mid_pred(A[1], B[1], C[1]);
542  }
543 }
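/* Example (illustration only): with only A and B available (the caller zeroes
 * the missing neighbour), A == {4, 0} and B == {8, 0} give
 * *mx == (4 + 8 + 0) / 2 == 6; when all three neighbours are present the
 * per-component median is used instead. */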
544 
545 /**
546  * motion vector prediction for B-frames
547  */
548 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
549 {
550  MpegEncContext *s = &r->s;
551  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
552  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
553  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
554  int has_A = 0, has_B = 0, has_C = 0;
555  int mx, my;
556  int i, j;
557  Picture *cur_pic = s->current_picture_ptr;
558  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
559  int type = cur_pic->mb_type[mb_pos];
560 
561  if((r->avail_cache[6-1] & type) & mask){
562  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
563  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
564  has_A = 1;
565  }
566  if((r->avail_cache[6-4] & type) & mask){
567  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
568  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
569  has_B = 1;
570  }
571  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
572  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
573  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
574  has_C = 1;
575  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
576  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
577  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
578  has_C = 1;
579  }
580 
581  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
582 
583  mx += r->dmv[dir][0];
584  my += r->dmv[dir][1];
585 
586  for(j = 0; j < 2; j++){
587  for(i = 0; i < 2; i++){
588  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
589  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
590  }
591  }
592  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
593  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
594  }
595 }
596 
597 /**
598  * motion vector prediction - RV3 version
599  */
600 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
601 {
602  MpegEncContext *s = &r->s;
603  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
604  int A[2] = {0}, B[2], C[2];
605  int i, j, k;
606  int mx, my;
607  int* avail = r->avail_cache + avail_indexes[0];
608 
609  if(avail[-1]){
610  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
611  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
612  }
613  if(avail[-4]){
614  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
615  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
616  }else{
617  B[0] = A[0];
618  B[1] = A[1];
619  }
620  if(!avail[-4 + 2]){
621  if(avail[-4] && (avail[-1])){
622  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
623  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
624  }else{
625  C[0] = A[0];
626  C[1] = A[1];
627  }
628  }else{
629  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
630  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
631  }
632  mx = mid_pred(A[0], B[0], C[0]);
633  my = mid_pred(A[1], B[1], C[1]);
634  mx += r->dmv[0][0];
635  my += r->dmv[0][1];
636  for(j = 0; j < 2; j++){
637  for(i = 0; i < 2; i++){
638  for(k = 0; k < 2; k++){
639  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
640  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
641  }
642  }
643  }
644 }
645 
646 static const int chroma_coeffs[3] = { 0, 3, 5 };
647 
648 /**
649  * generic motion compensation function
650  *
651  * @param r decoder context
652  * @param block_type type of the current block
653  * @param xoff horizontal offset from the start of the current block
654  * @param yoff vertical offset from the start of the current block
655  * @param mv_off offset to the motion vector information
656  * @param width width of the current partition in 8x8 blocks
657  * @param height height of the current partition in 8x8 blocks
658  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
659  * @param thirdpel motion vectors are specified in 1/3 of pixel
660  * @param qpel_mc a set of functions used to perform luma motion compensation
661  * @param chroma_mc a set of functions used to perform chroma motion compensation
662  */
663 static inline void rv34_mc(RV34DecContext *r, const int block_type,
664  const int xoff, const int yoff, int mv_off,
665  const int width, const int height, int dir,
666  const int thirdpel, int weighted,
667  qpel_mc_func (*qpel_mc)[16],
668  h264_chroma_mc_func (*chroma_mc))
669 {
670  MpegEncContext *s = &r->s;
671  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
672  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
673  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
674  int is16x16 = 1;
675 
676  if(thirdpel){
677  int chroma_mx, chroma_my;
678  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
679  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
680  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
681  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
682  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
683  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
684  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
685  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
686  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
687  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
688  }else{
689  int cx, cy;
690  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
691  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
692  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
693  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
694  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
695  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
696  umx = cx >> 2;
697  umy = cy >> 2;
698  uvmx = (cx & 3) << 1;
699  uvmy = (cy & 3) << 1;
700  //due to some flaw, RV40 uses the same motion compensation routine for the H2V2 and H3V3 cases
701  if(uvmx == 6 && uvmy == 6)
702  uvmx = uvmy = 4;
703  }
704 
705  if(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
706  /* wait for the referenced mb row to be finished */
707  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
708  ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
709  ff_thread_await_progress(f, mb_row, 0);
710  }
711 
712  dxy = ly*4 + lx;
713  srcY = dir ? s->next_picture_ptr->f.data[0] : s->last_picture_ptr->f.data[0];
714  srcU = dir ? s->next_picture_ptr->f.data[1] : s->last_picture_ptr->f.data[1];
715  srcV = dir ? s->next_picture_ptr->f.data[2] : s->last_picture_ptr->f.data[2];
716  src_x = s->mb_x * 16 + xoff + mx;
717  src_y = s->mb_y * 16 + yoff + my;
718  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
719  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
720  srcY += src_y * s->linesize + src_x;
721  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
722  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
723  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
724  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
725  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
726  uint8_t *uvbuf = s->edge_emu_buffer + 22 * s->linesize;
727 
728  srcY -= 2 + 2*s->linesize;
729  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+6, (height<<3)+6,
730  src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
731  srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
732  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1,
733  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
734  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1,
735  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
736  srcU = uvbuf;
737  srcV = uvbuf + 16;
738  }
739  if(!weighted){
740  Y = s->dest[0] + xoff + yoff *s->linesize;
741  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
742  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
743  }else{
744  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
745  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
746  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
747  }
748 
749  if(block_type == RV34_MB_P_16x8){
750  qpel_mc[1][dxy](Y, srcY, s->linesize);
751  Y += 8;
752  srcY += 8;
753  }else if(block_type == RV34_MB_P_8x16){
754  qpel_mc[1][dxy](Y, srcY, s->linesize);
755  Y += 8 * s->linesize;
756  srcY += 8 * s->linesize;
757  }
758  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
759  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
760  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
761  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
762 }
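/* Note on the thirdpel arithmetic above (illustrative numbers): adding 3 << 24
 * before dividing keeps / and % behaving like floor division for negative
 * vectors. For a motion component of -4 thirdpel units:
 *   mx = (-4 + (3 << 24)) / 3 - (1 << 24) == -2,  lx = (-4 + (3 << 24)) % 3 == 2
 * i.e. -2 full pixels plus a 2/3-pel fraction, instead of the
 * round-toward-zero result plain C division would give. */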
763 
764 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
765  const int xoff, const int yoff, int mv_off,
766  const int width, const int height, int dir)
767 {
768  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
769  r->rdsp.put_pixels_tab,
770  r->rdsp.put_chroma_pixels_tab);
771 }
772 
773 static void rv4_weight(RV34DecContext *r)
774 {
775  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
776  r->tmp_b_block_y[0],
777  r->tmp_b_block_y[1],
778  r->weight1,
779  r->weight2,
780  r->s.linesize);
781  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
782  r->tmp_b_block_uv[0],
783  r->tmp_b_block_uv[2],
784  r->weight1,
785  r->weight2,
786  r->s.uvlinesize);
787  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
788  r->tmp_b_block_uv[1],
789  r->tmp_b_block_uv[3],
790  r->weight1,
791  r->weight2,
792  r->s.uvlinesize);
793 }
794 
795 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
796 {
797  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
798 
799  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
800  r->rdsp.put_pixels_tab,
801  r->rdsp.put_chroma_pixels_tab);
802  if(!weighted){
803  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
804  r->rdsp.avg_pixels_tab,
805  r->rdsp.avg_chroma_pixels_tab);
806  }else{
807  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
808  r->rdsp.put_pixels_tab,
809  r->rdsp.put_chroma_pixels_tab);
810  rv4_weight(r);
811  }
812 }
813 
814 static void rv34_mc_2mv_skip(RV34DecContext *r)
815 {
816  int i, j;
817  int weighted = !r->rv30 && r->weight1 != 8192;
818 
819  for(j = 0; j < 2; j++)
820  for(i = 0; i < 2; i++){
821  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
822  weighted,
823  r->rdsp.put_pixels_tab,
824  r->rdsp.put_chroma_pixels_tab);
825  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
826  weighted,
827  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
828  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
829  }
830  if(weighted)
831  rv4_weight(r);
832 }
833 
834 /** number of motion vectors in each macroblock type */
835 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
836 
837 /**
838  * Decode motion vector differences
839  * and perform motion vector reconstruction and motion compensation.
840  */
841 static int rv34_decode_mv(RV34DecContext *r, int block_type)
842 {
843  MpegEncContext *s = &r->s;
844  GetBitContext *gb = &s->gb;
845  int i, j, k, l;
846  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
847  int next_bt;
848 
849  memset(r->dmv, 0, sizeof(r->dmv));
850  for(i = 0; i < num_mvs[block_type]; i++){
851  r->dmv[i][0] = svq3_get_se_golomb(gb);
852  r->dmv[i][1] = svq3_get_se_golomb(gb);
853  }
854  switch(block_type){
855  case RV34_MB_TYPE_INTRA:
856  case RV34_MB_TYPE_INTRA16x16:
857  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
858  return 0;
859  case RV34_MB_SKIP:
860  if(s->pict_type == AV_PICTURE_TYPE_P){
861  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
862  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
863  break;
864  }
865  case RV34_MB_B_DIRECT:
866  //surprisingly, it uses the motion scheme from the next reference frame
867  /* wait for the current mb row to be finished */
870 
871  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
872  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
873  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
874  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
875  }else
876  for(j = 0; j < 2; j++)
877  for(i = 0; i < 2; i++)
878  for(k = 0; k < 2; k++)
879  for(l = 0; l < 2; l++)
880  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
881  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
882  rv34_mc_2mv(r, block_type);
883  else
884  rv34_mc_2mv_skip(r);
885  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
886  break;
887  case RV34_MB_P_16x16:
888  case RV34_MB_P_MIX16x16:
889  rv34_pred_mv(r, block_type, 0, 0);
890  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
891  break;
892  case RV34_MB_B_FORWARD:
893  case RV34_MB_B_BACKWARD:
894  r->dmv[1][0] = r->dmv[0][0];
895  r->dmv[1][1] = r->dmv[0][1];
896  if(r->rv30)
897  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
898  else
899  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
900  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
901  break;
902  case RV34_MB_P_16x8:
903  case RV34_MB_P_8x16:
904  rv34_pred_mv(r, block_type, 0, 0);
905  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
906  if(block_type == RV34_MB_P_16x8){
907  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
908  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
909  }
910  if(block_type == RV34_MB_P_8x16){
911  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
912  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
913  }
914  break;
915  case RV34_MB_B_BIDIR:
916  rv34_pred_mv_b (r, block_type, 0);
917  rv34_pred_mv_b (r, block_type, 1);
918  rv34_mc_2mv (r, block_type);
919  break;
920  case RV34_MB_P_8x8:
921  for(i=0;i< 4;i++){
922  rv34_pred_mv(r, block_type, i, i);
923  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
924  }
925  break;
926  }
927 
928  return 0;
929 }
930 /** @} */ // mv group
931 
932 /**
933  * @name Macroblock reconstruction functions
934  * @{
935  */
936 /** mapping of RV30/40 intra prediction types to standard H.264 types */
937 static const int ittrans[9] = {
940 };
941 
942 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
943 static const int ittrans16[4] = {
945 };
946 
947 /**
948  * Perform 4x4 intra prediction.
949  */
950 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
951 {
952  uint8_t *prev = dst - stride + 4;
953  uint32_t topleft;
954 
955  if(!up && !left)
956  itype = DC_128_PRED;
957  else if(!up){
958  if(itype == VERT_PRED) itype = HOR_PRED;
959  if(itype == DC_PRED) itype = LEFT_DC_PRED;
960  }else if(!left){
961  if(itype == HOR_PRED) itype = VERT_PRED;
962  if(itype == DC_PRED) itype = TOP_DC_PRED;
964  }
965  if(!down){
967  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
968  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
969  }
970  if(!right && up){
971  topleft = dst[-stride + 3] * 0x01010101u;
972  prev = (uint8_t*)&topleft;
973  }
974  r->h.pred4x4[itype](dst, prev, stride);
975 }
976 
977 static inline int adjust_pred16(int itype, int up, int left)
978 {
979  if(!up && !left)
980  itype = DC_128_PRED8x8;
981  else if(!up){
982  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
983  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
984  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
985  }else if(!left){
986  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
987  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
988  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
989  }
990  return itype;
991 }
992 
993 static inline void rv34_process_block(RV34DecContext *r,
994  uint8_t *pdst, int stride,
995  int fc, int sc, int q_dc, int q_ac)
996 {
997  MpegEncContext *s = &r->s;
998  int16_t *ptr = s->block[0];
999  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1000  fc, sc, q_dc, q_ac, q_ac);
1001  if(has_ac){
1002  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1003  }else{
1004  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1005  ptr[0] = 0;
1006  }
1007 }
1008 
1009 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1010 {
1011  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1012  MpegEncContext *s = &r->s;
1013  GetBitContext *gb = &s->gb;
1014  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1015  q_ac = rv34_qscale_tab[s->qscale];
1016  uint8_t *dst = s->dest[0];
1017  int16_t *ptr = s->block[0];
1018  int i, j, itype, has_ac;
1019 
1020  memset(block16, 0, 16 * sizeof(*block16));
1021 
1022  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1023  if(has_ac)
1024  r->rdsp.rv34_inv_transform(block16);
1025  else
1026  r->rdsp.rv34_inv_transform_dc(block16);
1027 
1028  itype = ittrans16[intra_types[0]];
1029  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1030  r->h.pred16x16[itype](dst, s->linesize);
1031 
1032  for(j = 0; j < 4; j++){
1033  for(i = 0; i < 4; i++, cbp >>= 1){
1034  int dc = block16[i + j*4];
1035 
1036  if(cbp & 1){
1037  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1038  }else
1039  has_ac = 0;
1040 
1041  if(has_ac){
1042  ptr[0] = dc;
1043  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1044  }else
1045  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1046  }
1047 
1048  dst += 4*s->linesize;
1049  }
1050 
1051  itype = ittrans16[intra_types[0]];
1052  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1053  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1054 
1055  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1056  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1057 
1058  for(j = 1; j < 3; j++){
1059  dst = s->dest[j];
1060  r->h.pred8x8[itype](dst, s->uvlinesize);
1061  for(i = 0; i < 4; i++, cbp >>= 1){
1062  uint8_t *pdst;
1063  if(!(cbp & 1)) continue;
1064  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1065 
1066  rv34_process_block(r, pdst, s->uvlinesize,
1067  r->chroma_vlc, 1, q_dc, q_ac);
1068  }
1069  }
1070 }
1071 
1072 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1073 {
1074  MpegEncContext *s = &r->s;
1075  uint8_t *dst = s->dest[0];
1076  int avail[6*8] = {0};
1077  int i, j, k;
1078  int idx, q_ac, q_dc;
1079 
1080  // Set neighbour information.
1081  if(r->avail_cache[1])
1082  avail[0] = 1;
1083  if(r->avail_cache[2])
1084  avail[1] = avail[2] = 1;
1085  if(r->avail_cache[3])
1086  avail[3] = avail[4] = 1;
1087  if(r->avail_cache[4])
1088  avail[5] = 1;
1089  if(r->avail_cache[5])
1090  avail[8] = avail[16] = 1;
1091  if(r->avail_cache[9])
1092  avail[24] = avail[32] = 1;
1093 
1094  q_ac = rv34_qscale_tab[s->qscale];
1095  for(j = 0; j < 4; j++){
1096  idx = 9 + j*8;
1097  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1098  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1099  avail[idx] = 1;
1100  if(!(cbp & 1)) continue;
1101 
1102  rv34_process_block(r, dst, s->linesize,
1103  r->luma_vlc, 0, q_ac, q_ac);
1104  }
1105  dst += s->linesize * 4 - 4*4;
1106  intra_types += r->intra_types_stride;
1107  }
1108 
1109  intra_types -= r->intra_types_stride * 4;
1110 
1111  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1112  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1113 
1114  for(k = 0; k < 2; k++){
1115  dst = s->dest[1+k];
1116  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1117 
1118  for(j = 0; j < 2; j++){
1119  int* acache = r->avail_cache + 6 + j*4;
1120  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1121  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1122  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1123  acache[0] = 1;
1124 
1125  if(!(cbp&1)) continue;
1126 
1127  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1128  r->chroma_vlc, 1, q_dc, q_ac);
1129  }
1130 
1131  dst += 4*s->uvlinesize;
1132  }
1133  }
1134 }
1135 
1136 static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
1137 {
1138  int d;
1139  d = motion_val[0][0] - motion_val[-step][0];
1140  if(d < -3 || d > 3)
1141  return 1;
1142  d = motion_val[0][1] - motion_val[-step][1];
1143  if(d < -3 || d > 3)
1144  return 1;
1145  return 0;
1146 }
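/* Example (illustration only): a block with motion vector (10, 2) next to a
 * neighbour with (5, 2) differs by 5 > 3 in the horizontal component, so the
 * shared edge gets flagged for deblocking in rv34_set_deblock_coef() below. */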
1147 
1148 static int rv34_set_deblock_coef(RV34DecContext *r)
1149 {
1150  MpegEncContext *s = &r->s;
1151  int hmvmask = 0, vmvmask = 0, i, j;
1152  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1153  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1154  for(j = 0; j < 16; j += 8){
1155  for(i = 0; i < 2; i++){
1156  if(is_mv_diff_gt_3(motion_val + i, 1))
1157  vmvmask |= 0x11 << (j + i*2);
1158  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1159  hmvmask |= 0x03 << (j + i*2);
1160  }
1161  motion_val += s->b8_stride;
1162  }
1163  if(s->first_slice_line)
1164  hmvmask &= ~0x000F;
1165  if(!s->mb_x)
1166  vmvmask &= ~0x1111;
1167  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1168  vmvmask |= (vmvmask & 0x4444) >> 1;
1169  hmvmask |= (hmvmask & 0x0F00) >> 4;
1170  if(s->mb_x)
1171  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1172  if(!s->first_slice_line)
1173  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1174  }
1175  return hmvmask | vmvmask;
1176 }
1177 
1178 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1179 {
1180  MpegEncContext *s = &r->s;
1181  GetBitContext *gb = &s->gb;
1182  uint8_t *dst = s->dest[0];
1183  int16_t *ptr = s->block[0];
1184  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1185  int cbp, cbp2;
1186  int q_dc, q_ac, has_ac;
1187  int i, j;
1188  int dist;
1189 
1190  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1191  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1192  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1193  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1194  if(s->mb_x && dist)
1195  r->avail_cache[5] =
1196  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1197  if(dist >= s->mb_width)
1198  r->avail_cache[2] =
1199  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1200  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1201  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1202  if(s->mb_x && dist > s->mb_width)
1203  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1204 
1205  s->qscale = r->si.quant;
1206  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1207  r->cbp_luma [mb_pos] = cbp;
1208  r->cbp_chroma[mb_pos] = cbp >> 16;
1209  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1210  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1211 
1212  if(cbp == -1)
1213  return -1;
1214 
1215  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1216  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1217  else rv34_output_intra(r, intra_types, cbp);
1218  return 0;
1219  }
1220 
1221  if(r->is16){
1222  // Only for RV34_MB_P_MIX16x16
1223  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1224  memset(block16, 0, 16 * sizeof(*block16));
1225  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1226  q_ac = rv34_qscale_tab[s->qscale];
1227  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1228  r->rdsp.rv34_inv_transform(block16);
1229  else
1230  r->rdsp.rv34_inv_transform_dc(block16);
1231 
1232  q_ac = rv34_qscale_tab[s->qscale];
1233 
1234  for(j = 0; j < 4; j++){
1235  for(i = 0; i < 4; i++, cbp >>= 1){
1236  int dc = block16[i + j*4];
1237 
1238  if(cbp & 1){
1239  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1240  }else
1241  has_ac = 0;
1242 
1243  if(has_ac){
1244  ptr[0] = dc;
1245  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1246  }else
1247  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1248  }
1249 
1250  dst += 4*s->linesize;
1251  }
1252 
1253  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1254  }else{
1255  q_ac = rv34_qscale_tab[s->qscale];
1256 
1257  for(j = 0; j < 4; j++){
1258  for(i = 0; i < 4; i++, cbp >>= 1){
1259  if(!(cbp & 1)) continue;
1260 
1261  rv34_process_block(r, dst + 4*i, s->linesize,
1262  r->luma_vlc, 0, q_ac, q_ac);
1263  }
1264  dst += 4*s->linesize;
1265  }
1266  }
1267 
1268  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1269  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1270 
1271  for(j = 1; j < 3; j++){
1272  dst = s->dest[j];
1273  for(i = 0; i < 4; i++, cbp >>= 1){
1274  uint8_t *pdst;
1275  if(!(cbp & 1)) continue;
1276  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1277 
1278  rv34_process_block(r, pdst, s->uvlinesize,
1279  r->chroma_vlc, 1, q_dc, q_ac);
1280  }
1281  }
1282 
1283  return 0;
1284 }
1285 
1286 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1287 {
1288  MpegEncContext *s = &r->s;
1289  int cbp, dist;
1290  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1291 
1292  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1293  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1294  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1295  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1296  if(s->mb_x && dist)
1297  r->avail_cache[5] =
1298  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1299  if(dist >= s->mb_width)
1300  r->avail_cache[2] =
1301  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1302  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1303  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1304  if(s->mb_x && dist > s->mb_width)
1305  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1306 
1307  s->qscale = r->si.quant;
1308  cbp = rv34_decode_intra_mb_header(r, intra_types);
1309  r->cbp_luma [mb_pos] = cbp;
1310  r->cbp_chroma[mb_pos] = cbp >> 16;
1311  r->deblock_coefs[mb_pos] = 0xFFFF;
1312  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1313 
1314  if(cbp == -1)
1315  return -1;
1316 
1317  if(r->is16){
1318  rv34_output_i16x16(r, intra_types, cbp);
1319  return 0;
1320  }
1321 
1322  rv34_output_intra(r, intra_types, cbp);
1323  return 0;
1324 }
1325 
1326 static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
1327 {
1328  int bits;
1329  if(s->mb_y >= s->mb_height)
1330  return 1;
1331  if(!s->mb_num_left)
1332  return 1;
1333  if(r->s.mb_skip_run > 1)
1334  return 0;
1335  bits = get_bits_left(&s->gb);
1336  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1337  return 1;
1338  return 0;
1339 }
1340 
1341 
1342 static void rv34_decoder_free(RV34DecContext *r)
1343 {
1344  av_freep(&r->intra_types_hist);
1345  r->intra_types = NULL;
1346  av_freep(&r->tmp_b_block_base);
1347  av_freep(&r->mb_type);
1348  av_freep(&r->cbp_luma);
1349  av_freep(&r->cbp_chroma);
1350  av_freep(&r->deblock_coefs);
1351 }
1352 
1353 
1354 static int rv34_decoder_alloc(RV34DecContext *r)
1355 {
1356  r->intra_types_stride = r->s.mb_width * 4 + 4;
1357 
1358  r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height *
1359  sizeof(*r->cbp_chroma));
1360  r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height *
1361  sizeof(*r->cbp_luma));
1362  r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height *
1363  sizeof(*r->deblock_coefs));
1364  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1365  sizeof(*r->intra_types_hist));
1366  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1367  sizeof(*r->mb_type));
1368 
1369  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1370  r->intra_types_hist && r->mb_type)) {
1371  rv34_decoder_free(r);
1372  return AVERROR(ENOMEM);
1373  }
1374 
1375  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1376 
1377  return 0;
1378 }
1379 
1380 
1381 static int rv34_decoder_realloc(RV34DecContext *r)
1382 {
1383  rv34_decoder_free(r);
1384  return rv34_decoder_alloc(r);
1385 }
1386 
1387 
1388 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1389 {
1390  MpegEncContext *s = &r->s;
1391  GetBitContext *gb = &s->gb;
1392  int mb_pos, slice_type;
1393  int res;
1394 
1395  init_get_bits(&r->s.gb, buf, buf_size*8);
1396  res = r->parse_slice_header(r, gb, &r->si);
1397  if(res < 0){
1398  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1399  return -1;
1400  }
1401 
1402  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1403  if (slice_type != s->pict_type) {
1404  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1405  return AVERROR_INVALIDDATA;
1406  }
1407  if (s->width != r->si.width || s->height != r->si.height) {
1408  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1409  return AVERROR_INVALIDDATA;
1410  }
1411 
1412  r->si.end = end;
1413  s->qscale = r->si.quant;
1414  s->mb_num_left = r->si.end - r->si.start;
1415  r->s.mb_skip_run = 0;
1416 
1417  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1418  if(r->si.start != mb_pos){
1419  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1420  s->mb_x = r->si.start % s->mb_width;
1421  s->mb_y = r->si.start / s->mb_width;
1422  }
1423  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1424  s->first_slice_line = 1;
1425  s->resync_mb_x = s->mb_x;
1426  s->resync_mb_y = s->mb_y;
1427 
1428  ff_init_block_index(s);
1429  while(!check_slice_end(r, s)) {
1430  ff_update_block_index(s);
1431 
1432  if(r->si.type)
1433  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1434  else
1435  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1436  if(res < 0){
1437  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1438  return -1;
1439  }
1440  if (++s->mb_x == s->mb_width) {
1441  s->mb_x = 0;
1442  s->mb_y++;
1443  ff_init_block_index(s);
1444 
1445  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1446  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1447 
1448  if(r->loop_filter && s->mb_y >= 2)
1449  r->loop_filter(r, s->mb_y - 2);
1450 
1451  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1452  ff_thread_report_progress(&s->current_picture_ptr->tf,
1453  s->mb_y - 2, 0);
1454 
1455  }
1456  if(s->mb_x == s->resync_mb_x)
1457  s->first_slice_line=0;
1458  s->mb_num_left--;
1459  }
1460  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1461 
1462  return s->mb_y == s->mb_height;
1463 }
1464 
1465 /** @} */ // recons group end
1466 
1467 /**
1468  * Initialize decoder.
1469  */
1470 av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
1471 {
1472  RV34DecContext *r = avctx->priv_data;
1473  MpegEncContext *s = &r->s;
1474  int ret;
1475 
1476  ff_MPV_decode_defaults(s);
1477  s->avctx = avctx;
1478  s->out_format = FMT_H263;
1479  s->codec_id = avctx->codec_id;
1480 
1481  s->width = avctx->width;
1482  s->height = avctx->height;
1483 
1484  r->s.avctx = avctx;
1485  avctx->flags |= CODEC_FLAG_EMU_EDGE;
1486  r->s.flags |= CODEC_FLAG_EMU_EDGE;
1487  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1488  avctx->has_b_frames = 1;
1489  s->low_delay = 0;
1490 
1491  if ((ret = ff_MPV_common_init(s)) < 0)
1492  return ret;
1493 
1494  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1495 
1496 #if CONFIG_RV30_DECODER
1497  if (avctx->codec_id == AV_CODEC_ID_RV30)
1498  ff_rv30dsp_init(&r->rdsp);
1499 #endif
1500 #if CONFIG_RV40_DECODER
1501  if (avctx->codec_id == AV_CODEC_ID_RV40)
1502  ff_rv40dsp_init(&r->rdsp);
1503 #endif
1504 
1505  if ((ret = rv34_decoder_alloc(r)) < 0)
1506  return ret;
1507 
1508  if(!intra_vlcs[0].cbppattern[0].bits)
1509  rv34_init_tables();
1510 
1511  avctx->internal->allocate_progress = 1;
1512 
1513  return 0;
1514 }
1515 
1516 int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
1517 {
1518  int err;
1519  RV34DecContext *r = avctx->priv_data;
1520 
1521  r->s.avctx = avctx;
1522 
1523  if (avctx->internal->is_copy) {
1524  r->tmp_b_block_base = NULL;
1525  if ((err = ff_MPV_common_init(&r->s)) < 0)
1526  return err;
1527  if ((err = rv34_decoder_alloc(r)) < 0)
1528  return err;
1529  }
1530 
1531  return 0;
1532 }
1533 
1534 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1535 {
1536  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1537  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1538  int err;
1539 
1540  if (dst == src || !s1->context_initialized)
1541  return 0;
1542 
1543  if (s->height != s1->height || s->width != s1->width) {
1544  s->height = s1->height;
1545  s->width = s1->width;
1546  if ((err = ff_MPV_common_frame_size_change(s)) < 0)
1547  return err;
1548  if ((err = rv34_decoder_realloc(r)) < 0)
1549  return err;
1550  }
1551 
1552  if ((err = ff_mpeg_update_thread_context(dst, src)))
1553  return err;
1554 
1555  r->cur_pts = r1->cur_pts;
1556  r->last_pts = r1->last_pts;
1557  r->next_pts = r1->next_pts;
1558 
1559  memset(&r->si, 0, sizeof(r->si));
1560 
1561  return 0;
1562 }
1563 
1564 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
1565 {
1566  if(avctx->slice_count) return avctx->slice_offset[n];
1567  else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1568 }
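/* Layout sketch as implied by the parsing code in the frame decode function
 * below (not a container specification): when avctx->slice_count is unset, the
 * packet starts with a slice-count byte followed by 8-byte table entries, and
 * the offset for slice n sits in the second word of entry n; the word before
 * it acts as an endianness marker (== 1 selects little-endian reads). */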
1569 
1570 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1571 {
1572  RV34DecContext *r = avctx->priv_data;
1573  MpegEncContext *s = &r->s;
1574  int got_picture = 0, ret;
1575 
1576  ff_er_frame_end(&s->er);
1577  ff_MPV_frame_end(s);
1578  s->mb_num_left = 0;
1579 
1582 
1583  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1584  if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
1585  return ret;
1588  got_picture = 1;
1589  } else if (s->last_picture_ptr != NULL) {
1590  if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
1591  return ret;
1594  got_picture = 1;
1595  }
1596 
1597  return got_picture;
1598 }
1599 
1600 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1601 {
1602  // attempt to keep aspect during typical resolution switches
1603  if (!sar.num)
1604  sar = (AVRational){1, 1};
1605 
1606  sar = av_mul_q(sar, (AVRational){new_h * old_w, new_w * old_h});
1607  return sar;
1608 }
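/* Worked example (illustrative): switching from 320x240 with SAR 1:1 to
 * 640x240 multiplies the SAR by (240*320)/(640*240) == 1/2, giving SAR 1:2 so
 * that the display aspect ratio stays 4:3 across the resolution change. */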
1609 
1610 int ff_rv34_decode_frame(AVCodecContext *avctx,
1611  void *data, int *got_picture_ptr,
1612  AVPacket *avpkt)
1613 {
1614  const uint8_t *buf = avpkt->data;
1615  int buf_size = avpkt->size;
1616  RV34DecContext *r = avctx->priv_data;
1617  MpegEncContext *s = &r->s;
1618  AVFrame *pict = data;
1619  SliceInfo si;
1620  int i, ret;
1621  int slice_count;
1622  const uint8_t *slices_hdr = NULL;
1623  int last = 0;
1624 
1625  /* no supplementary picture */
1626  if (buf_size == 0) {
1627  /* special case for last picture */
1628  if (s->low_delay==0 && s->next_picture_ptr) {
1629  if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
1630  return ret;
1631  s->next_picture_ptr = NULL;
1632 
1633  *got_picture_ptr = 1;
1634  }
1635  return 0;
1636  }
1637 
1638  if(!avctx->slice_count){
1639  slice_count = (*buf++) + 1;
1640  slices_hdr = buf + 4;
1641  buf += 8 * slice_count;
1642  buf_size -= 1 + 8 * slice_count;
1643  }else
1644  slice_count = avctx->slice_count;
1645 
1646  //parse first slice header to check whether this frame can be decoded
1647  if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
1648  get_slice_offset(avctx, slices_hdr, 0) > buf_size){
1649  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1650  return AVERROR_INVALIDDATA;
1651  }
1652  init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
1653  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1654  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1655  return AVERROR_INVALIDDATA;
1656  }
1657  if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) &&
1658  si.type == AV_PICTURE_TYPE_B) {
1659  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1660  "reference data.\n");
1661  return AVERROR_INVALIDDATA;
1662  }
1663  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1664  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1665  || avctx->skip_frame >= AVDISCARD_ALL)
1666  return avpkt->size;
1667 
1668  /* first slice */
1669  if (si.start == 0) {
1670  if (s->mb_num_left > 0) {
1671  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1672  s->mb_num_left);
1673  ff_er_frame_end(&s->er);
1674  ff_MPV_frame_end(s);
1675  }
1676 
1677  if (s->width != si.width || s->height != si.height) {
1678  int err;
1679 
1680  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1681  si.width, si.height);
1682 
1683  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1684  return AVERROR_INVALIDDATA;
1685 
1686  s->avctx->sample_aspect_ratio = update_sar(
1687  s->width, s->height, s->avctx->sample_aspect_ratio,
1688  si.width, si.height);
1689  s->width = si.width;
1690  s->height = si.height;
1692  if ((err = ff_MPV_common_frame_size_change(s)) < 0)
1693  return err;
1694  if ((err = rv34_decoder_realloc(r)) < 0)
1695  return err;
1696  }
1697  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1698  if (ff_MPV_frame_start(s, s->avctx) < 0)
1699  return -1;
1701  if (!r->tmp_b_block_base) {
1702  int i;
1703 
1704  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1705  for (i = 0; i < 2; i++)
1706  r->tmp_b_block_y[i] = r->tmp_b_block_base
1707  + i * 16 * s->linesize;
1708  for (i = 0; i < 4; i++)
1709  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1710  + (i >> 1) * 8 * s->uvlinesize
1711  + (i & 1) * 16;
1712  }
1713  r->cur_pts = si.pts;
1714  if (s->pict_type != AV_PICTURE_TYPE_B) {
1715  r->last_pts = r->next_pts;
1716  r->next_pts = r->cur_pts;
1717  } else {
1718  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1719  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1720  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1721 
1722  if(!refdist){
1723  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1724  r->scaled_weight = 0;
1725  }else{
1726  r->mv_weight1 = (dist0 << 14) / refdist;
1727  r->mv_weight2 = (dist1 << 14) / refdist;
1728  if((r->mv_weight1|r->mv_weight2) & 511){
1729  r->weight1 = r->mv_weight1;
1730  r->weight2 = r->mv_weight2;
1731  r->scaled_weight = 0;
1732  }else{
1733  r->weight1 = r->mv_weight1 >> 9;
1734  r->weight2 = r->mv_weight2 >> 9;
1735  r->scaled_weight = 1;
1736  }
1737  }
1738  }
1739  s->mb_x = s->mb_y = 0;
1741  } else if (HAVE_THREADS &&
1742  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1743  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1744  "multithreading mode (start MB is %d).\n", si.start);
1745  return AVERROR_INVALIDDATA;
1746  }
1747 
1748  for(i = 0; i < slice_count; i++){
1749  int offset = get_slice_offset(avctx, slices_hdr, i);
1750  int size;
1751  if(i+1 == slice_count)
1752  size = buf_size - offset;
1753  else
1754  size = get_slice_offset(avctx, slices_hdr, i+1) - offset;
1755 
1756  if(offset < 0 || offset > buf_size){
1757  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1758  break;
1759  }
1760 
1761  r->si.end = s->mb_width * s->mb_height;
1762  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1763 
1764  if(i+1 < slice_count){
1765  if (get_slice_offset(avctx, slices_hdr, i+1) < 0 ||
1766  get_slice_offset(avctx, slices_hdr, i+1) > buf_size) {
1767  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1768  break;
1769  }
1770  init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8);
1771  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1772  if(i+2 < slice_count)
1773  size = get_slice_offset(avctx, slices_hdr, i+2) - offset;
1774  else
1775  size = buf_size - offset;
1776  }else
1777  r->si.end = si.start;
1778  }
1779  if (size < 0 || size > buf_size - offset) {
1780  av_log(avctx, AV_LOG_ERROR, "Slice size is invalid\n");
1781  break;
1782  }
1783  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1784  if(last)
1785  break;
1786  }
1787 
1788  if (s->current_picture_ptr) {
1789  if (last) {
1790  if(r->loop_filter)
1791  r->loop_filter(r, s->mb_height - 1);
1792 
1793  ret = finish_frame(avctx, pict);
1794  if (ret < 0)
1795  return ret;
1796  *got_picture_ptr = ret;
1797  } else if (HAVE_THREADS &&
1798  (avctx->active_thread_type & FF_THREAD_FRAME)) {
1799  av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
1800  /* always mark the current frame as finished, frame-mt supports
1801  * only complete frames */
1802  ff_er_frame_end(&s->er);
1803  ff_MPV_frame_end(s);
1804  s->mb_num_left = 0;
1805  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1806  return AVERROR_INVALIDDATA;
1807  }
1808  }
1809 
1810  return avpkt->size;
1811 }
1812 
1813 av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
1814 {
1815  RV34DecContext *r = avctx->priv_data;
1816 
1817  ff_MPV_common_end(&r->s);
1818  rv34_decoder_free(r);
1819 
1820  return 0;
1821 }
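
The B-frame branch of the slice setup above converts the two reference distances into 0.14 fixed-point fractions (mv_weight1/mv_weight2) and, when both have their nine low bits clear, pre-scales them so the weighted motion compensation can take the cheaper prescaled path (scaled_weight = 1). The standalone sketch below repeats that arithmetic outside the decoder, using hypothetical timestamp values already reduced the way GET_PTS_DIFF() reduces them and no FFmpeg types.

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical, already-wrapped timestamps of the two references and
         * of the current B frame */
        int last_pts = 0, next_pts = 6, cur_pts = 2;

        int refdist = next_pts - last_pts;     /* distance between the references  */
        int dist0   = cur_pts  - last_pts;     /* distance to the past reference   */
        int dist1   = next_pts - cur_pts;      /* distance to the future reference */

        int mv_weight1 = (dist0 << 14) / refdist;   /* 0.14 fixed-point fractions */
        int mv_weight2 = (dist1 << 14) / refdist;
        int weight1, weight2, scaled_weight;

        if ((mv_weight1 | mv_weight2) & 511) {      /* not representable in 0.5     */
            weight1       = mv_weight1;
            weight2       = mv_weight2;
            scaled_weight = 0;
        } else {                                    /* pre-scale to 0.5 fixed point */
            weight1       = mv_weight1 >> 9;
            weight2       = mv_weight2 >> 9;
            scaled_weight = 1;
        }
        printf("w1=%d w2=%d prescaled=%d\n", weight1, weight2, scaled_weight);
        return 0;
    }

With the timestamps 0/2/6 this prints w1=5461 w2=10922 prescaled=0: the weights stay in 0.14 form because their low bits are not all zero. The refdist == 0 case, handled above by forcing both weights to 8192, is omitted here.
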
qpel_mc_func put_pixels_tab[4][16]
Definition: rv34dsp.h:58
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:54
void ff_rv40dsp_init(RV34DSPContext *c)
Definition: rv40dsp.c:533
#define MB_TYPE_INTRA16x16
#define VERT_PRED8x8
Definition: h264pred.h:70
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
#define MB_TYPE_SKIP
int vlc_set
VLCs used for this slice.
Definition: rv34.h:76
VLC second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:67
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3005
const char * s
Definition: avisynth_c.h:668
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
#define DC_128_PRED8x8
Definition: h264pred.h:76
#define VERT_LEFT_PRED
Definition: h264pred.h:45
int last_pts
Definition: rv34.h:107
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:51
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1009
#define IS_SKIP(a)
Definition: mpegvideo.h:140
uint8_t * tmp_b_block_y[2]
temporary blocks for RV4 weighted MC
Definition: rv34.h:120
#define C
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:993
#define B
Definition: dsputil.c:2025
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define ER_MB_END
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:937
B-frame macroblock, forward prediction.
Definition: rv34.h:47
int dmv[4][2]
differential motion vectors for the current macroblock
Definition: rv34.h:102
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:53
MpegEncContext s
Definition: rv34.h:85
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:286
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:352
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:281
void ff_er_frame_end(ERContext *s)
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
int height
coded height
Definition: rv34.h:79
int num
numerator
Definition: rational.h:44
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
enum AVCodecID codec_id
Definition: mpegvideo.h:257
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:50
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation...
Definition: rv34.c:841
#define VLC_TYPE
Definition: get_bits.h:61
mpegvideo header.
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:528
VLC cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:64
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1570
int weight2
B frame distance fractions (0.14) used in motion compensation.
Definition: rv34.h:109
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:513
RV30 and RV40 decoder common data declarations.
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1388
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:950
int start
Definition: rv34.h:77
#define HOR_PRED8x8
Definition: h264pred.h:69
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:52
int stride
Definition: mace.c:144
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
int qscale
QP.
Definition: mpegvideo.h:369
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
const uint8_t * luma_dc_quant_p
luma subblock DC quantizer for interframes
Definition: rv34.h:91
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:264
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
#define FF_QSCALE_TYPE_MPEG1
enum AVDiscard skip_frame
Skip decoding for selected frames.
#define PLANE_PRED8x8
Definition: h264pred.h:71
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
uint8_t * tmp_b_block_base
Definition: rv34.h:122
int mb_num_left
number of MBs left in this video packet (for partitioned Slices only)
Definition: mpegvideo.h:530
#define MB_TYPE_INTRA
Definition: mpegvideo.h:134
static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms, const int num)
Generate VLC from codeword lengths.
Definition: rv34.c:108
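
Since rv34_gen_vlc() is fed only codeword lengths, the actual codewords have to be derived before a table can be built with ff_init_vlc_sparse(). The helper below is a minimal, hypothetical sketch of that canonical-code construction (codes assigned in symbol order, lengths of at most 16 bits assumed); it is not the decoder's own routine.

    #include <stdint.h>

    /* Derive canonical codewords from an array of codeword lengths.
     * bits[i] == 0 marks an unused symbol.  The resulting codes[] and the
     * original bits[] could then be handed to ff_init_vlc_sparse(). */
    static void lengths_to_codes(const uint8_t *bits, int size, uint16_t *codes)
    {
        uint16_t cnt[17] = { 0 };   /* number of codes of each length          */
        uint16_t cw[17]  = { 0 };   /* next canonical codeword for each length */
        int i, len;

        for (i = 0; i < size; i++)
            cnt[bits[i]]++;
        cnt[0] = 0;                                  /* zero length: symbol unused */
        for (len = 1; len <= 16; len++)
            cw[len] = (cw[len - 1] + cnt[len - 1]) << 1;
        for (i = 0; i < size; i++)
            codes[i] = bits[i] ? cw[bits[i]]++ : 0;
    }
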
int ff_MPV_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1171
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:262
uint8_t bits
Definition: crc.c:216
uint8_t
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:993
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
#define av_cold
Definition: attributes.h:78
#define IS_8X16(a)
Definition: mpegvideo.h:147
#define DC_PRED8x8
Definition: h264pred.h:68
int scaled_weight
Definition: rv34.h:108
uint16_t * cbp_luma
CBP values for luma subblocks.
Definition: rv34.h:112
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:119
enum OutputFormat out_format
output format
Definition: mpegvideo.h:249
static const int chroma_coeffs[3]
Definition: rv34.c:646
#define Y
Definition: vf_boxblur.c:76
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:814
#define AV_RB32
int width
coded width
Definition: rv34.h:78
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
#define ER_MB_ERROR
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
#define DIAG_DOWN_LEFT_PRED
Definition: h264pred.h:41
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:36
rv40_weight_func rv40_weight_pixels_tab[2][2]
Biweight functions, first dimension is transform size (16/8), second is whether the weight is prescal...
Definition: rv34dsp.h:67
uint16_t * deblock_coefs
deblock coefficients for each macroblock
Definition: rv34.h:114
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:452
#define TOP_DC_PRED
Definition: h264pred.h:50
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:68
uint8_t * data
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
static const uint8_t bits2[81]
Definition: aactab.c:118
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1136
Skipped block.
Definition: rv34.h:49
static VLC_TYPE table_data[117592][2]
Definition: rv34.c:98
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:277
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:489
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1717
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1072
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
rv34_idct_add_func rv34_idct_add
Definition: rv34dsp.h:70
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:52
#define A(x)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:861
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:84
static const uint8_t rv34_quant_to_vlc_set[2][31]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:258
int slice_count
slice count
ThreadFrame tf
Definition: mpegvideo.h:99
#define U(x)
int quant
quantizer used for this slice
Definition: rv34.h:75
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:557
static const int table_offs[]
Definition: rv34.c:77
qpel_mc_func avg_pixels_tab[4][16]
Definition: rv34dsp.h:59
int has_b_frames
Size of the frame reordering buffer in the decoder.
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:773
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:943
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Multithreading support functions.
static const uint16_t mask[17]
Definition: lzw.c:37
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
rv34_idct_dc_add_func rv34_idct_dc_add
Definition: rv34dsp.h:71
#define DC_128_PRED
Definition: h264pred.h:51
#define LEFT_DC_PRED
Definition: h264pred.h:49
static const struct endianess table[]
av_cold void ff_rv30dsp_init(RV34DSPContext *c)
Definition: rv30dsp.c:264
ERContext er
Definition: mpegvideo.h:742
int active_thread_type
Which multithreading methods are in use by the codec.
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:795
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:464
const char * r
Definition: vf_curves.c:94
int luma_vlc
which VLC set will be used for decoding of luma blocks
Definition: rv34.h:99
uint8_t * edge_emu_buffer
temporary buffer used when MVs point to out-of-frame data
Definition: mpegvideo.h:364
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
int flags
CODEC_FLAG_*.
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1600
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1381
#define IS_INTRA(a)
Definition: mpegvideo.h:138
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:592
GetBitContext gb
Definition: mpegvideo.h:649
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
#define FFMAX(a, b)
Definition: common.h:56
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
external API header
rv34_inv_transform_func rv34_inv_transform_dc
Definition: rv34dsp.h:69
VLC tables used by the decoder.
Definition: rv34.h:63
int size
Definition: get_bits.h:63
int end
start and end macroblocks of the slice
Definition: rv34.h:77
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:527
static int svq3_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:207
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1148
common internal API header
#define HOR_UP_PRED
Definition: h264pred.h:46
useful rectangle filling function
struct AVRational AVRational
rational number numerator/denominator
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:45
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:231
#define IS_8X8(a)
Definition: mpegvideo.h:148
#define V
int(* parse_slice_header)(struct RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
Definition: rv34.h:124
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:2170
void ff_mpeg_er_frame_start(MpegEncContext *s)
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:44
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:548
#define MB_TYPE_DIRECT2
#define FFMIN(a, b)
Definition: common.h:58
int * mb_type
internal macroblock types
Definition: rv34.h:97
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:977
int width
picture width / height.
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:347
Picture.
Definition: mpegvideo.h:97
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:518
H264PredContext h
functions for 4x4 and 16x16 intra block prediction
Definition: rv34.h:94
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:341
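
The entry above only states that the VLC set is picked from the current quantizer, a modifier and the frame type. A plausible shape for such a selector, indexing rv34_quant_to_vlc_set by frame type and clipped quantizer and returning one of the static intra/inter sets, is sketched below; the modifier adjustments are assumptions made for illustration, not a quote of the function at rv34.c:341.

    /* hypothetical sketch of table-driven VLC-set selection */
    static RV34VLC *pick_vlc_set(int quant, int mod, int type)
    {
        if (mod == 2 && quant < 19)        /* assumed modifier handling */
            quant += 10;
        else if (mod && quant < 26)
            quant += 5;
        return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]]
                    : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]];
    }
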
VLC coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:69
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1354
VLC first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:66
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:255
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
int mv_weight1
Definition: rv34.h:110
#define MB_TYPE_L0L1
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:524
#define AV_RL32
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
#define MB_TYPE_L1
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1493
#define INIT_VLC_USE_NEW_STATIC
Definition: get_bits.h:443
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:658
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
#define VERT_PRED
Prediction types.
Definition: h264pred.h:38
#define DIAG_DOWN_RIGHT_PRED
Definition: h264pred.h:42
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:764
#define IS_16X8(a)
Definition: mpegvideo.h:146
int bits
Definition: get_bits.h:64
RV30/40 VLC tables.
int table_allocated
Definition: get_bits.h:66
int(* decode_mb_info)(struct RV34DecContext *r)
Definition: rv34.h:125
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:637
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
Definition: rv34.c:1516
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:663
NULL
Definition: eval.c:55
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1286
static int width
Definition: tests/utils.c:158
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
Definition: rv34.c:1564
AVS_Value src
Definition: avisynth_c.h:523
#define MB_TYPE_8x16
essential slice information
Definition: rv34.h:73
enum AVCodecID codec_id
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:387
main external API structure.
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:449
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
#define HOR_PRED
Definition: h264pred.h:39
RV34VLC * cur_vlcs
VLC set used for current frame decoding.
Definition: rv34.h:93
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:245
#define HOR_DOWN_PRED
Definition: h264pred.h:44
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
SliceInfo si
current slice information
Definition: rv34.h:95
void * buf
Definition: avisynth_c.h:594
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:2164
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:46
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:273
#define FF_THREAD_FRAME
Decode more than one frame at once.
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1342
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:65
rational number numerator/denominator
Definition: rational.h:43
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:144
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1470
#define MB_TYPE_16x16
#define mid_pred
Definition: mathops.h:94
static const double coeff[2][5]
Definition: vf_ow.c:64
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1610
#define s1
Definition: regdef.h:38
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1178
int allocate_progress
Whether to allocate progress for frame threading.
int intra_types_stride
block types array stride
Definition: rv34.h:89
#define type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:377
miscellaneous RV30/40 tables
int(* decode_intra_types)(struct RV34DecContext *r, GetBitContext *gb, int8_t *dst)
Definition: rv34.h:126
const uint8_t * quant
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:600
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1326
int is16
current block has additional 16x16 specific features or not
Definition: rv34.h:101
int8_t * intra_types
block types
Definition: rv34.h:88
static int flags
Definition: cpu.c:23
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by an given frame.
Definition: frame.c:228
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
P-frame macroblock, one motion vector.
Definition: rv34.h:45
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:136
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:279
int cur_pts
Definition: rv34.h:107
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:187
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:410
MpegEncContext.
Definition: mpegvideo.h:241
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:346
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1813
int8_t * qscale_table
Definition: mpegvideo.h:102
struct AVCodecContext * avctx
Definition: mpegvideo.h:243
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
int weight1
Definition: rv34.h:109
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
discard all non reference
#define VERT_RIGHT_PRED
Definition: h264pred.h:43
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
#define CODEC_FLAG_EMU_EDGE
Don't draw edges.
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:455
uint8_t * tmp_b_block_uv[4]
Definition: rv34.h:121
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:278
int mv_weight2
Definition: rv34.h:110
uint8_t * dest[3]
Definition: mpegvideo.h:467
#define MB_TYPE_8x8
B-frame macroblock, backward prediction.
Definition: rv34.h:48
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:124
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1534
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:345
Bi-dir predicted.
Definition: avutil.h:218
VLC third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:68
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:835
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:329
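
ff_rv34_get_start_offset() ties together the two rv34data.h tables listed on this page: rv34_mb_max_sizes gives the largest macroblock count each slice-offset size can address, and rv34_mb_bits_sizes gives the number of bits that size occupies. A reconstruction of the idea, assuming the two tables are indexed in parallel (illustrative, not a verbatim copy of the function):

    /* read the starting macroblock number of a slice for a frame of
     * mb_count macroblocks */
    static int read_start_offset(GetBitContext *gb, int mb_count)
    {
        int i;
        for (i = 0; i < 5; i++)                       /* pick the smallest size that fits */
            if (rv34_mb_max_sizes[i] >= mb_count - 1)
                break;
        return get_bits(gb, rv34_mb_bits_sizes[i]);
    }
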
#define MB_TYPE_16x8
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
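
modulo_three_table is described as precalculated division and modulo by three for values 0-107, which lets the coefficient decoder split a VLC symbol into base-3 digits without run-time divisions. The exact packing of the stored values is not shown on this page, so the sketch below only illustrates the general idea with two separate lookup tables (hypothetical names):

    #include <stdio.h>

    int main(void)
    {
        unsigned char div3[108], mod3[108];
        int i, code = 85;                       /* hypothetical decoded symbol */

        for (i = 0; i < 108; i++) {             /* build the lookups once      */
            div3[i] = i / 3;
            mod3[i] = i % 3;
        }
        /* peel two base-3 digits off the symbol without a division */
        printf("digit0=%d digit1=%d\n", mod3[code], mod3[div3[code]]);
        return 0;
    }
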
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
decoder context
Definition: rv34.h:84
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:58
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:705
VideoDSPContext vdsp
Definition: mpegvideo.h:394
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1244
struct AVCodecInternal * internal
Private context used for internal data.
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:528
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:700
int block_type
current block type
Definition: rv34.h:98
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:283
int next_pts
Definition: rv34.h:107
#define HAVE_THREADS
Definition: config.h:274
const uint8_t * luma_dc_quant_i
luma subblock DC quantizer for intraframes
Definition: rv34.h:90
struct AVFrame f
Definition: mpegvideo.h:98
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
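
rv34_count_ones stores, per 4-bit nibble value, the number of set bits minus one. A small generator for such a table is sketched below; treating the empty nibble as 0 rather than -1 is an assumption made for the unsigned table type, not something stated on this page.

    #include <stdio.h>

    int main(void)
    {
        int n, b, ones;

        for (n = 0; n < 16; n++) {
            for (ones = 0, b = 0; b < 4; b++)
                ones += (n >> b) & 1;           /* popcount of the nibble */
            printf("%d%s", ones ? ones - 1 : 0, n == 15 ? "\n" : ", ");
        }
        return 0;
    }
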
int * slice_offset
slice offsets in the frame in bytes
int8_t * intra_types_hist
old block types, used for prediction
Definition: rv34.h:87
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:260
#define AV_LOG_INFO
Definition: log.h:156
rv34_inv_transform_func rv34_inv_transform
Definition: rv34dsp.h:68
uint32_t * mb_type
Definition: mpegvideo.h:108
#define LOCAL_ALIGNED_16(t, v,...)
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:240
int type
slice type (intra, inter)
Definition: rv34.h:74
h264_chroma_mc_func avg_chroma_pixels_tab[3]
Definition: rv34dsp.h:61
#define DC_PRED
Definition: h264pred.h:40
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:220
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:68
int rv30
indicates which RV variant is currently decoded
Definition: rv34.h:104
exp golomb vlc stuff
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:284
This structure stores compressed data.
h264_chroma_mc_func put_chroma_pixels_tab[3]
Definition: rv34dsp.h:60
Intra macroblock.
Definition: rv34.h:43
void(* loop_filter)(struct RV34DecContext *r, int row)
Definition: rv34.h:127
int chroma_vlc
which VLC set will be used for decoding of chroma blocks
Definition: rv34.h:100
RV34DSPContext rdsp
Definition: rv34.h:86
#define MB_TYPE_L0
Predicted.
Definition: avutil.h:217
int pts
frame timestamp
Definition: rv34.h:80
void ff_MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:826
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
uint8_t * cbp_chroma
CBP values for chroma subblocks.
Definition: rv34.h:113