vp56.c
1 /*
2  * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * VP5 and VP6 compatible video decoder (common features)
24  */
25 
26 #include "avcodec.h"
27 #include "bytestream.h"
28 #include "internal.h"
29 #include "h264chroma.h"
30 #include "vp56.h"
31 #include "vp56data.h"
32 
33 
34 void ff_vp56_init_dequant(VP56Context *s, int quantizer)
35 {
36  s->quantizer = quantizer;
37  s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
38  s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
39 }
40 
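/* Descriptive note (added): scan the 12 candidate neighbour positions for
 * motion vectors that reference ref_frame, keeping the first two distinct
 * non-zero ones in s->vector_candidate[]; the value returned (0..2) is used
 * as the prediction context for the macroblock type model. */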
41 static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
42  VP56Frame ref_frame)
43 {
44  int nb_pred = 0;
45  VP56mv vect[2] = {{0,0}, {0,0}};
46  int pos, offset;
47  VP56mv mvp;
48 
49  for (pos=0; pos<12; pos++) {
50  mvp.x = col + vp56_candidate_predictor_pos[pos][0];
51  mvp.y = row + vp56_candidate_predictor_pos[pos][1];
52  if (mvp.x < 0 || mvp.x >= s->mb_width ||
53  mvp.y < 0 || mvp.y >= s->mb_height)
54  continue;
55  offset = mvp.x + s->mb_width*mvp.y;
56 
57  if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
58  continue;
59  if ((s->macroblocks[offset].mv.x == vect[0].x &&
60  s->macroblocks[offset].mv.y == vect[0].y) ||
61  (s->macroblocks[offset].mv.x == 0 &&
62  s->macroblocks[offset].mv.y == 0))
63  continue;
64 
65  vect[nb_pred++] = s->macroblocks[offset].mv;
66  if (nb_pred > 1) {
67  nb_pred = -1;
68  break;
69  }
70  s->vector_candidate_pos = pos;
71  }
72 
73  s->vector_candidate[0] = vect[0];
74  s->vector_candidate[1] = vect[1];
75 
76  return nb_pred+1;
77 }
78 
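/* Descriptive note (added): refresh the per-context macroblock type
 * statistics from the bitstream and rebuild the binary-tree probabilities
 * later used by vp56_parse_mb_type(). */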
79 static void vp56_parse_mb_type_models(VP56Context *s)
80 {
81  VP56RangeCoder *c = &s->c;
82  VP56Model *model = s->modelp;
83  int i, ctx, type;
84 
85  for (ctx=0; ctx<3; ctx++) {
86  if (vp56_rac_get_prob(c, 174)) {
87  int idx = vp56_rac_gets(c, 4);
88  memcpy(model->mb_types_stats[ctx],
89  vp56_pre_def_mb_type_stats[idx][ctx],
90  sizeof(model->mb_types_stats[ctx]));
91  }
92  if (vp56_rac_get_prob(c, 254)) {
93  for (type=0; type<10; type++) {
94  for(i=0; i<2; i++) {
95  if (vp56_rac_get_prob(c, 205)) {
96  int delta, sign = vp56_rac_get(c);
97 
98  delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
99  vp56_mb_type_model_model);
100  if (!delta)
101  delta = 4 * vp56_rac_gets(c, 7);
102  model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
103  }
104  }
105  }
106  }
107  }
108 
109  /* compute MB type probability tables based on previous MB type */
110  for (ctx=0; ctx<3; ctx++) {
111  int p[10];
112 
113  for (type=0; type<10; type++)
114  p[type] = 100 * model->mb_types_stats[ctx][type][1];
115 
116  for (type=0; type<10; type++) {
117  int p02, p34, p0234, p17, p56, p89, p5689, p156789;
118 
119  /* conservative MB type probability */
120  model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);
121 
122  p[type] = 0; /* same MB type => weight is null */
123 
124  /* binary tree parsing probabilities */
125  p02 = p[0] + p[2];
126  p34 = p[3] + p[4];
127  p0234 = p02 + p34;
128  p17 = p[1] + p[7];
129  p56 = p[5] + p[6];
130  p89 = p[8] + p[9];
131  p5689 = p56 + p89;
132  p156789 = p17 + p5689;
133 
134  model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
135  model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
136  model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
137  model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
138  model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
139  model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
140  model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
141  model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
142  model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
143 
144  /* restore initial value */
145  p[type] = 100 * model->mb_types_stats[ctx][type][1];
146  }
147  }
148 }
149 
150 static VP56mb vp56_parse_mb_type(VP56Context *s,
151  VP56mb prev_type, int ctx)
152 {
153  uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
154  VP56RangeCoder *c = &s->c;
155 
156  if (vp56_rac_get_prob(c, mb_type_model[0]))
157  return prev_type;
158  else
159  return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
160 }
161 
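/* Descriptive note (added): decode the four per-block vectors of a
 * VP56_MB_INTER_4V macroblock and derive the two chroma vectors from the
 * average of the luma ones. */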
162 static void vp56_decode_4mv(VP56Context *s, int row, int col)
163 {
164  VP56mv mv = {0,0};
165  int type[4];
166  int b;
167 
168  /* parse each block type */
169  for (b=0; b<4; b++) {
170  type[b] = vp56_rac_gets(&s->c, 2);
171  if (type[b])
172  type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
173  }
174 
175  /* get vectors */
176  for (b=0; b<4; b++) {
177  switch (type[b]) {
178  case VP56_MB_INTER_NOVEC_PF:
179  s->mv[b] = (VP56mv) {0,0};
180  break;
181  case VP56_MB_INTER_DELTA_PF:
182  s->parse_vector_adjustment(s, &s->mv[b]);
183  break;
184  case VP56_MB_INTER_V1_PF:
185  s->mv[b] = s->vector_candidate[0];
186  break;
187  case VP56_MB_INTER_V2_PF:
188  s->mv[b] = s->vector_candidate[1];
189  break;
190  }
191  mv.x += s->mv[b].x;
192  mv.y += s->mv[b].y;
193  }
194 
195  /* this is the one selected for the whole MB for prediction */
196  s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
197 
198  /* chroma vectors are average luma vectors */
199  if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
200  s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
201  s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
202  } else {
203  s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
204  }
205 }
206 
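/* Descriptive note (added): decode the macroblock type and its motion
 * vector(s), storing the selected vector in s->macroblocks[] for later
 * prediction and in s->mv[] for each of the six blocks. */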
207 static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
208 {
209  VP56mv *mv, vect = {0,0};
210  int ctx, b;
211 
212  ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
213  s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
214  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
215 
216  switch (s->mb_type) {
217  case VP56_MB_INTER_V1_PF:
218  mv = &s->vector_candidate[0];
219  break;
220 
221  case VP56_MB_INTER_V2_PF:
222  mv = &s->vector_candidate[1];
223  break;
224 
225  case VP56_MB_INTER_V1_GF:
226  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
227  mv = &s->vector_candidate[0];
228  break;
229 
230  case VP56_MB_INTER_V2_GF:
231  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
232  mv = &s->vector_candidate[1];
233  break;
234 
235  case VP56_MB_INTER_DELTA_PF:
236  s->parse_vector_adjustment(s, &vect);
237  mv = &vect;
238  break;
239 
240  case VP56_MB_INTER_DELTA_GF:
241  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
242  s->parse_vector_adjustment(s, &vect);
243  mv = &vect;
244  break;
245 
246  case VP56_MB_INTER_4V:
247  vp56_decode_4mv(s, row, col);
248  return s->mb_type;
249 
250  default:
251  mv = &vect;
252  break;
253  }
254 
255  s->macroblocks[row*s->mb_width + col].mv = *mv;
256 
257  /* same vector for all blocks */
258  for (b=0; b<6; b++)
259  s->mv[b] = *mv;
260 
261  return s->mb_type;
262 }
263 
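/* Descriptive note (added): predict the DC coefficient of each block from
 * the left and above neighbours that use the same reference frame, update
 * the stored neighbour DC values, then dequantize the result. */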
264 static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
265 {
266  int idx = s->idct_scantable[0];
267  int b;
268 
269  for (b=0; b<6; b++) {
270  VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
271  VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
272  int count = 0;
273  int dc = 0;
274  int i;
275 
276  if (ref_frame == lb->ref_frame) {
277  dc += lb->dc_coeff;
278  count++;
279  }
280  if (ref_frame == ab->ref_frame) {
281  dc += ab->dc_coeff;
282  count++;
283  }
284  if (s->avctx->codec->id == AV_CODEC_ID_VP5)
285  for (i=0; i<2; i++)
286  if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
287  dc += ab[-1+2*i].dc_coeff;
288  count++;
289  }
290  if (count == 0)
291  dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
292  else if (count == 2)
293  dc /= 2;
294 
295  s->block_coeff[b][idx] += dc;
296  s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
297  ab->dc_coeff = s->block_coeff[b][idx];
298  ab->ref_frame = ref_frame;
299  lb->dc_coeff = s->block_coeff[b][idx];
300  lb->ref_frame = ref_frame;
301  s->block_coeff[b][idx] *= s->dequant_dc;
302  }
303 }
304 
305 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
306  int stride, int dx, int dy)
307 {
308  int t = vp56_filter_threshold[s->quantizer];
309  if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
310  if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
311 }
312 
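/* Descriptive note (added): motion compensation for one 8x8 block; fetch the
 * 12x12 source area (with edge emulation near picture borders), optionally
 * run the deblocking filter on it, then either copy it or blend two source
 * positions (or call the codec-specific filter) when the vector has a
 * sub-pel component. */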
313 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
314  int stride, int x, int y)
315 {
316  uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
317  uint8_t *src_block;
318  int src_offset;
319  int overlap_offset = 0;
320  int mask = s->vp56_coord_div[b] - 1;
321  int deblock_filtering = s->deblock_filtering;
322  int dx;
323  int dy;
324 
325  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
326  (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
327  && !s->frames[VP56_FRAME_CURRENT]->key_frame))
328  deblock_filtering = 0;
329 
330  dx = s->mv[b].x / s->vp56_coord_div[b];
331  dy = s->mv[b].y / s->vp56_coord_div[b];
332 
333  if (b >= 4) {
334  x /= 2;
335  y /= 2;
336  }
337  x += dx - 2;
338  y += dy - 2;
339 
340  if (x<0 || x+12>=s->plane_width[plane] ||
341  y<0 || y+12>=s->plane_height[plane]) {
342  s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
343  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
344  stride, 12, 12, x, y,
345  s->plane_width[plane],
346  s->plane_height[plane]);
347  src_block = s->edge_emu_buffer;
348  src_offset = 2 + 2*stride;
349  } else if (deblock_filtering) {
350  /* only need a 12x12 block, but there is no such dsp function, */
351  /* so copy a 16x12 block */
352  s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
353  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
354  stride, 12);
355  src_block = s->edge_emu_buffer;
356  src_offset = 2 + 2*stride;
357  } else {
358  src_block = src;
359  src_offset = s->block_offset[b] + dy*stride + dx;
360  }
361 
362  if (deblock_filtering)
363  vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
364 
365  if (s->mv[b].x & mask)
366  overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
367  if (s->mv[b].y & mask)
368  overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
369 
370  if (overlap_offset) {
371  if (s->filter)
372  s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
373  stride, s->mv[b], mask, s->filter_selection, b<4);
374  else
375  s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
376  src_block+src_offset+overlap_offset,
377  stride, 8);
378  } else {
379  s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
380  }
381 }
382 
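/* Descriptive note (added): decode one macroblock; get its type and vectors,
 * parse the coefficients, add the DC predictors, and reconstruct the blocks
 * by IDCT (intra) or motion compensation plus IDCT residual (inter). */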
383 static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
384 {
385  AVFrame *frame_current, *frame_ref;
386  VP56mb mb_type;
387  VP56Frame ref_frame;
388  int b, ab, b_max, plane, off;
389 
390  if (s->frames[VP56_FRAME_CURRENT]->key_frame)
391  mb_type = VP56_MB_INTRA;
392  else
393  mb_type = vp56_decode_mv(s, row, col);
394  ref_frame = vp56_reference_frame[mb_type];
395 
396  s->parse_coeff(s);
397 
398  vp56_add_predictors_dc(s, ref_frame);
399 
400  frame_current = s->frames[VP56_FRAME_CURRENT];
401  frame_ref = s->frames[ref_frame];
402  if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
403  return;
404 
405  ab = 6*is_alpha;
406  b_max = 6 - 2*is_alpha;
407 
408  switch (mb_type) {
409  case VP56_MB_INTRA:
410  for (b=0; b<b_max; b++) {
411  plane = ff_vp56_b2p[b+ab];
412  s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
413  s->stride[plane], s->block_coeff[b]);
414  }
415  break;
416 
417  case VP56_MB_INTER_NOVEC_PF:
418  case VP56_MB_INTER_NOVEC_GF:
419  for (b=0; b<b_max; b++) {
420  plane = ff_vp56_b2p[b+ab];
421  off = s->block_offset[b];
422  s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
423  frame_ref->data[plane] + off,
424  s->stride[plane], 8);
425  s->vp3dsp.idct_add(frame_current->data[plane] + off,
426  s->stride[plane], s->block_coeff[b]);
427  }
428  break;
429 
430  case VP56_MB_INTER_DELTA_PF:
431  case VP56_MB_INTER_V1_PF:
432  case VP56_MB_INTER_V2_PF:
433  case VP56_MB_INTER_DELTA_GF:
434  case VP56_MB_INTER_4V:
435  case VP56_MB_INTER_V1_GF:
436  case VP56_MB_INTER_V2_GF:
437  for (b=0; b<b_max; b++) {
438  int x_off = b==1 || b==3 ? 8 : 0;
439  int y_off = b==2 || b==3 ? 8 : 0;
440  plane = ff_vp56_b2p[b+ab];
441  vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
442  16*col+x_off, 16*row+y_off);
443  s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
444  s->stride[plane], s->block_coeff[b]);
445  }
446  break;
447  }
448 
449  if (is_alpha) {
450  s->block_coeff[4][0] = 0;
451  s->block_coeff[5][0] = 0;
452  }
453 }
454 
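/* Descriptive note (added): recompute plane sizes, strides and the
 * per-macroblock buffers after a resolution change signalled by the header
 * parser. */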
455 static int vp56_size_changed(VP56Context *s)
456 {
457  AVCodecContext *avctx = s->avctx;
458  int stride = avctx->coded_width;
459  int i;
460 
461  s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
462  s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
463  s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
464  s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
465 
466  for (i=0; i<4; i++)
467  s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
468 
469  s->mb_width = (avctx->coded_width +15) / 16;
470  s->mb_height = (avctx->coded_height+15) / 16;
471 
472  if (s->mb_width > 1000 || s->mb_height > 1000) {
473  avcodec_set_dimensions(avctx, 0, 0);
474  av_log(avctx, AV_LOG_ERROR, "picture too big\n");
475  return -1;
476  }
477 
478  s->above_blocks = av_realloc(s->above_blocks,
479  (4*s->mb_width+6) * sizeof(*s->above_blocks));
480  s->macroblocks = av_realloc(s->macroblocks,
481  s->mb_width*s->mb_height*sizeof(*s->macroblocks));
482  av_free(s->edge_emu_buffer_alloc);
483  s->edge_emu_buffer_alloc = av_malloc(16*stride);
484  s->edge_emu_buffer = s->edge_emu_buffer_alloc;
485  if (s->flip < 0)
486  s->edge_emu_buffer += 15 * stride;
487 
488  if (s->alpha_context)
489  return vp56_size_changed(s->alpha_context);
490 
491  return 0;
492 }
493 
494 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);
495 
496 int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
497  AVPacket *avpkt)
498 {
499  const uint8_t *buf = avpkt->data;
500  VP56Context *s = avctx->priv_data;
501  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
502  int remaining_buf_size = avpkt->size;
503  int av_uninit(alpha_offset);
504  int i, res;
505  int ret;
506 
507  if (s->has_alpha) {
508  if (remaining_buf_size < 3)
509  return -1;
510  alpha_offset = bytestream_get_be24(&buf);
511  remaining_buf_size -= 3;
512  if (remaining_buf_size < alpha_offset)
513  return -1;
514  }
515 
516  res = s->parse_header(s, buf, remaining_buf_size);
517  if (res < 0)
518  return res;
519 
520  if (res == VP56_SIZE_CHANGE) {
521  for (i = 0; i < 4; i++) {
522  av_frame_unref(s->frames[i]);
523  if (s->alpha_context)
524  av_frame_unref(s->alpha_context->frames[i]);
525  }
526  }
527 
528  if (ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF) < 0)
529  return -1;
530 
531  if (s->has_alpha) {
532  av_frame_unref(s->alpha_context->frames[VP56_FRAME_CURRENT]);
533  if ((ret = av_frame_ref(s->alpha_context->frames[VP56_FRAME_CURRENT], p)) < 0) {
534  av_frame_unref(p);
535  return ret;
536  }
537  }
538 
539  if (res == VP56_SIZE_CHANGE) {
540  if (vp56_size_changed(s)) {
541  av_frame_unref(p);
542  return -1;
543  }
544  }
545 
546  if (s->has_alpha) {
547  int bak_w = avctx->width;
548  int bak_h = avctx->height;
549  int bak_cw = avctx->coded_width;
550  int bak_ch = avctx->coded_height;
551  buf += alpha_offset;
552  remaining_buf_size -= alpha_offset;
553 
554  res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
555  if (res != 0) {
556  if(res==VP56_SIZE_CHANGE) {
557  av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
558  avctx->width = bak_w;
559  avctx->height = bak_h;
560  avctx->coded_width = bak_cw;
561  avctx->coded_height = bak_ch;
562  }
563  av_frame_unref(p);
564  return -1;
565  }
566  }
567 
568  avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, s->has_alpha + 1);
569 
570  if ((res = av_frame_ref(data, p)) < 0)
571  return res;
572  *got_frame = 1;
573 
574  return avpkt->size;
575 }
576 
577 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
578  int jobnr, int threadnr)
579 {
580  VP56Context *s0 = avctx->priv_data;
581  int is_alpha = (jobnr == 1);
582  VP56Context *s = is_alpha ? s0->alpha_context : s0;
583  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
584  int mb_row, mb_col, mb_row_flip, mb_offset = 0;
585  int block, y, uv, stride_y, stride_uv;
586  int res;
587 
588  if (p->key_frame) {
589  p->pict_type = AV_PICTURE_TYPE_I;
590  s->default_models_init(s);
591  for (block=0; block<s->mb_height*s->mb_width; block++)
592  s->macroblocks[block].type = VP56_MB_INTRA;
593  } else {
594  p->pict_type = AV_PICTURE_TYPE_P;
595  vp56_parse_mb_type_models(s);
596  s->parse_vector_models(s);
597  s->mb_type = VP56_MB_INTER_NOVEC_PF;
598  }
599 
600  if (s->parse_coeff_models(s))
601  goto next;
602 
603  memset(s->prev_dc, 0, sizeof(s->prev_dc));
604  s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
605  s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
606 
607  for (block=0; block < 4*s->mb_width+6; block++) {
608  s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
609  s->above_blocks[block].dc_coeff = 0;
610  s->above_blocks[block].not_null_dc = 0;
611  }
612  s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
613  s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
614 
615  stride_y = p->linesize[0];
616  stride_uv = p->linesize[1];
617 
618  if (s->flip < 0)
619  mb_offset = 7;
620 
621  /* main macroblocks loop */
622  for (mb_row=0; mb_row<s->mb_height; mb_row++) {
623  if (s->flip < 0)
624  mb_row_flip = s->mb_height - mb_row - 1;
625  else
626  mb_row_flip = mb_row;
627 
628  for (block=0; block<4; block++) {
629  s->left_block[block].ref_frame = VP56_FRAME_NONE;
630  s->left_block[block].dc_coeff = 0;
631  s->left_block[block].not_null_dc = 0;
632  }
633  memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
634  memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
635 
636  s->above_block_idx[0] = 1;
637  s->above_block_idx[1] = 2;
638  s->above_block_idx[2] = 1;
639  s->above_block_idx[3] = 2;
640  s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
641  s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
642 
643  s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
644  s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
645  s->block_offset[1] = s->block_offset[0] + 8;
646  s->block_offset[3] = s->block_offset[2] + 8;
647  s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
648  s->block_offset[5] = s->block_offset[4];
649 
650  for (mb_col=0; mb_col<s->mb_width; mb_col++) {
651  vp56_decode_mb(s, mb_row, mb_col, is_alpha);
652 
653  for (y=0; y<4; y++) {
654  s->above_block_idx[y] += 2;
655  s->block_offset[y] += 16;
656  }
657 
658  for (uv=4; uv<6; uv++) {
659  s->above_block_idx[uv] += 1;
660  s->block_offset[uv] += 8;
661  }
662  }
663  }
664 
665 next:
666  if (p->key_frame || s->golden_frame) {
667  av_frame_unref(s->frames[VP56_FRAME_GOLDEN]);
668  if ((res = av_frame_ref(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
669  return res;
670  }
671 
671 
672  av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
673  FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
674  s->frames[VP56_FRAME_PREVIOUS]);
675  return 0;
676 }
677 
678 av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
679 {
680  VP56Context *s = avctx->priv_data;
681  return ff_vp56_init_context(avctx, s, flip, has_alpha);
682 }
683 
684 av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
685  int flip, int has_alpha)
686 {
687  int i;
688 
689  s->avctx = avctx;
690  avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
691 
692  ff_h264chroma_init(&s->h264chroma, 8);
693  ff_hpeldsp_init(&s->hdsp, avctx->flags);
694  ff_videodsp_init(&s->vdsp, 8);
695  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
696  ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
697  for (i = 0; i < 64; i++) {
698 #define T(x) (x >> 3) | ((x & 7) << 3)
699  s->idct_scantable[i] = T(ff_zigzag_direct[i]);
700 #undef T
701  }
702 
703  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
704  s->frames[i] = av_frame_alloc();
705  if (!s->frames[i]) {
706  ff_vp56_free(avctx);
707  return AVERROR(ENOMEM);
708  }
709  }
710  s->edge_emu_buffer_alloc = NULL;
711 
712  s->above_blocks = NULL;
713  s->macroblocks = NULL;
714  s->quantizer = -1;
715  s->deblock_filtering = 1;
716  s->golden_frame = 0;
717 
718  s->filter = NULL;
719 
720  s->has_alpha = has_alpha;
721 
722  s->modelp = &s->model;
723 
724  if (flip) {
725  s->flip = -1;
726  s->frbi = 2;
727  s->srbi = 0;
728  } else {
729  s->flip = 1;
730  s->frbi = 0;
731  s->srbi = 2;
732  }
733 
734  return 0;
735 }
736 
737 av_cold int ff_vp56_free(AVCodecContext *avctx)
738 {
739  VP56Context *s = avctx->priv_data;
740  return ff_vp56_free_context(s);
741 }
742 
743 av_cold int ff_vp56_free_context(VP56Context *s)
744 {
745  int i;
746 
747  av_freep(&s->above_blocks);
748  av_freep(&s->macroblocks);
749  av_freep(&s->edge_emu_buffer_alloc);
750 
751  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
752  av_frame_free(&s->frames[i]);
753 
754  return 0;
755 }