vp3.c
1 /*
2  * Copyright (C) 2003-2004 the ffmpeg project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * On2 VP3 Video Decoder
24  *
25  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
26  * For more information about the VP3 coding process, visit:
27  * http://wiki.multimedia.cx/index.php?title=On2_VP3
28  *
29  * Theora decoder by Alex Beregszaszi
30  */
31 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 
36 #include "libavutil/imgutils.h"
37 #include "avcodec.h"
38 #include "internal.h"
39 #include "dsputil.h"
40 #include "get_bits.h"
41 #include "hpeldsp.h"
42 #include "videodsp.h"
43 #include "vp3data.h"
44 #include "vp3dsp.h"
45 #include "xiph.h"
46 #include "thread.h"
47 
48 #define FRAGMENT_PIXELS 8
49 
50 //FIXME split things out into their own arrays
51 typedef struct Vp3Fragment {
52  int16_t dc;
53  uint8_t coding_method;
54  uint8_t qpi;
55 } Vp3Fragment;
56 
57 #define SB_NOT_CODED 0
58 #define SB_PARTIALLY_CODED 1
59 #define SB_FULLY_CODED 2
60 
61 // This is the maximum length of a single long bit run that can be encoded
62 // for superblock coding or block qps. Theora special-cases this to read a
63 // bit instead of flipping the current bit to allow for runs longer than 4129.
64 #define MAXIMUM_LONG_BIT_RUN 4129
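// Worked example: the run-length decoders below read a VLC value of at most
// 33, add 1 (giving at most 34), and a value of 34 appends a further 12-bit
// extension, so the longest representable run is
//     34 + ((1 << 12) - 1) = 34 + 4095 = 4129
// which is where this constant comes from.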
65 
66 #define MODE_INTER_NO_MV 0
67 #define MODE_INTRA 1
68 #define MODE_INTER_PLUS_MV 2
69 #define MODE_INTER_LAST_MV 3
70 #define MODE_INTER_PRIOR_LAST 4
71 #define MODE_USING_GOLDEN 5
72 #define MODE_GOLDEN_MV 6
73 #define MODE_INTER_FOURMV 7
74 #define CODING_MODE_COUNT 8
75 
76 /* special internal mode */
77 #define MODE_COPY 8
78 
79 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
80 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
81 
82 
83 /* There are 6 preset schemes, plus a free-form scheme */
84 static const int ModeAlphabet[6][CODING_MODE_COUNT] =
85 {
86  /* scheme 1: Last motion vector dominates */
91 
92  /* scheme 2 */
96  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
97 
98  /* scheme 3 */
102  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
103 
104  /* scheme 4 */
108  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
109 
110  /* scheme 5: No motion vector dominates */
114  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
115 
116  /* scheme 6 */
120  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
121 
122 };
123 
124 static const uint8_t hilbert_offset[16][2] = {
125  {0,0}, {1,0}, {1,1}, {0,1},
126  {0,2}, {0,3}, {1,3}, {1,2},
127  {2,2}, {2,3}, {3,3}, {3,2},
128  {3,1}, {2,1}, {2,0}, {3,0}
129 };
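/* Illustration: init_block_mapping() below addresses the 16 fragments of a
 * 4x4-fragment superblock as
 *     x = 4*sb_x + hilbert_offset[i][0];
 *     y = 4*sb_y + hilbert_offset[i][1];
 * so i = 0..3 visits (0,0), (1,0), (1,1), (0,1) relative to the superblock
 * origin, i.e. fragments are traversed along a Hilbert curve rather than in
 * raster order. */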
130 
131 #define MIN_DEQUANT_VAL 2
132 
133 typedef struct Vp3DecodeContext {
135  int theora, theora_tables;
136  int version;
137  int width, height;
138  int chroma_x_shift, chroma_y_shift;
142  int keyframe;
143  uint8_t idct_permutation[64];
144  uint8_t idct_scantable[64];
148  DECLARE_ALIGNED(16, int16_t, block)[64];
152 
153  int qps[3];
154  int nqps;
155  int last_qps[3];
156 
166  unsigned char *superblock_coding;
167 
171 
173  int fragment_width[2];
174  int fragment_height[2];
175 
177  int fragment_start[3];
178  int data_offset[3];
179 
180  int8_t (*motion_val[2])[2];
181 
182  /* tables */
183  uint16_t coded_dc_scale_factor[64];
184  uint32_t coded_ac_scale_factor[64];
185  uint8_t base_matrix[384][64];
186  uint8_t qr_count[2][3];
187  uint8_t qr_size [2][3][64];
188  uint16_t qr_base[2][3][64];
189 
190  /**
191  * This is a list of all tokens in bitstream order. Reordering takes place
192  * by pulling from each level during IDCT. As a consequence, IDCT must be
193  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
194  * otherwise. The 32 different tokens with up to 12 bits of extradata are
195  * collapsed into 3 types, packed as follows:
196  * (from the low to high bits)
197  *
198  * 2 bits: type (0,1,2)
199  * 0: EOB run, 14 bits for run length (12 needed)
200  * 1: zero run, 7 bits for run length
201  * 7 bits for the next coefficient (3 needed)
202  * 2: coefficient, 14 bits (11 needed)
203  *
204  * Coefficients are signed, so are packed in the highest bits for automatic
205  * sign extension.
206  */
207  int16_t *dct_tokens[3][64];
208  int16_t *dct_tokens_base;
209 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
210 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
211 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2)
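 /* Worked example of the packing: TOKEN_ZERO_RUN(-3, 5) evaluates to
  *     (-3 << 9) + (5 << 2) + 1 = -1515
  * and vp3_dequant() recovers the fields as
  *     type     = token & 3;           // 1 -> zero run
  *     zero_run = (token >> 2) & 0x7f; // 5
  *     coeff    = token >> 9;          // -3 (arithmetic shift keeps the sign)
  */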
212 
213  /**
214  * number of blocks that contain DCT coefficients at the given level or higher
215  */
216  int num_coded_frags[3][64];
217  int total_num_coded_frags;
218 
219  /* this is a list of indexes into the all_fragments array indicating
220  * which of the fragments are coded */
221  int *coded_fragment_list[3];
222 
223  VLC dc_vlc[16];
224  VLC ac_vlc_1[16];
225  VLC ac_vlc_2[16];
226  VLC ac_vlc_3[16];
227  VLC ac_vlc_4[16];
228 
229  VLC superblock_run_length_vlc;
230  VLC fragment_run_length_vlc;
231  VLC mode_code_vlc;
232  VLC motion_vector_vlc;
233 
234  /* these arrays need to be on 16-byte boundaries since SSE2 operations
235  * index into them */
236  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
237 
238  /* This table contains superblock_count * 16 entries. Each set of 16
239  * numbers corresponds to the fragment indexes 0..15 of the superblock.
240  * An entry will be -1 to indicate that no entry corresponds to that
241  * index. */
242  int *superblock_fragments;
243 
244  /* This is an array that indicates how a particular macroblock
245  * is coded. */
246  unsigned char *macroblock_coding;
247 
248  uint8_t *edge_emu_buffer;
249 
250  /* Huffman decode */
251  int hti;
252  unsigned int hbits;
253  int entries;
255  uint32_t huffman_table[80][32][2];
256 
257  uint8_t filter_limit_values[64];
258  DECLARE_ALIGNED(8, int, bounding_values_array)[256+2];
259 } Vp3DecodeContext;
260 
261 /************************************************************************
262  * VP3 specific functions
263  ************************************************************************/
264 
265 static void vp3_decode_flush(AVCodecContext *avctx)
266 {
267  Vp3DecodeContext *s = avctx->priv_data;
268 
269  if (s->golden_frame.f)
270  ff_thread_release_buffer(avctx, &s->golden_frame);
271  if (s->last_frame.f)
272  ff_thread_release_buffer(avctx, &s->last_frame);
273  if (s->current_frame.f)
274  ff_thread_release_buffer(avctx, &s->current_frame);
275 }
276 
277 static av_cold int vp3_decode_end(AVCodecContext *avctx)
278 {
279  Vp3DecodeContext *s = avctx->priv_data;
280  int i;
281 
282  av_freep(&s->superblock_coding);
283  av_freep(&s->all_fragments);
284  av_freep(&s->coded_fragment_list[0]);
285  av_freep(&s->dct_tokens_base);
286  av_freep(&s->superblock_fragments);
287  av_freep(&s->macroblock_coding);
288  av_freep(&s->motion_val[0]);
289  av_freep(&s->motion_val[1]);
290  av_freep(&s->edge_emu_buffer);
291 
292  s->theora_tables = 0;
293 
294  /* release all frames */
295  vp3_decode_flush(avctx);
296  av_frame_free(&s->current_frame.f);
297  av_frame_free(&s->last_frame.f);
298  av_frame_free(&s->golden_frame.f);
299 
300  if (avctx->internal->is_copy)
301  return 0;
302 
303  for (i = 0; i < 16; i++) {
304  ff_free_vlc(&s->dc_vlc[i]);
305  ff_free_vlc(&s->ac_vlc_1[i]);
306  ff_free_vlc(&s->ac_vlc_2[i]);
307  ff_free_vlc(&s->ac_vlc_3[i]);
308  ff_free_vlc(&s->ac_vlc_4[i]);
309  }
310 
311  ff_free_vlc(&s->superblock_run_length_vlc);
312  ff_free_vlc(&s->fragment_run_length_vlc);
313  ff_free_vlc(&s->mode_code_vlc);
314  ff_free_vlc(&s->motion_vector_vlc);
315 
316 
317  return 0;
318 }
319 
320 /**
321  * This function sets up all of the various block mappings:
322  * superblocks <-> fragments, macroblocks <-> fragments,
323  * superblocks <-> macroblocks
324  *
325  * @return 0 if successful; 1 if *anything* went wrong.
326  */
327 static int init_block_mapping(Vp3DecodeContext *s)
328 {
329  int sb_x, sb_y, plane;
330  int x, y, i, j = 0;
331 
332  for (plane = 0; plane < 3; plane++) {
333  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
334  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
335  int frag_width = s->fragment_width[!!plane];
336  int frag_height = s->fragment_height[!!plane];
337 
338  for (sb_y = 0; sb_y < sb_height; sb_y++)
339  for (sb_x = 0; sb_x < sb_width; sb_x++)
340  for (i = 0; i < 16; i++) {
341  x = 4*sb_x + hilbert_offset[i][0];
342  y = 4*sb_y + hilbert_offset[i][1];
343 
344  if (x < frag_width && y < frag_height)
345  s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
346  else
347  s->superblock_fragments[j++] = -1;
348  }
349  }
350 
351  return 0; /* successful path out */
352 }
353 
354 /*
355  * This function sets up the dequantization tables used for a particular
356  * frame.
357  */
358 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
359 {
360  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
361  int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
362  int i, plane, inter, qri, bmi, bmj, qistart;
363 
364  for(inter=0; inter<2; inter++){
365  for(plane=0; plane<3; plane++){
366  int sum=0;
367  for(qri=0; qri<s->qr_count[inter][plane]; qri++){
368  sum+= s->qr_size[inter][plane][qri];
369  if(s->qps[qpi] <= sum)
370  break;
371  }
372  qistart= sum - s->qr_size[inter][plane][qri];
373  bmi= s->qr_base[inter][plane][qri ];
374  bmj= s->qr_base[inter][plane][qri+1];
375  for(i=0; i<64; i++){
376  int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i]
377  - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
378  + s->qr_size[inter][plane][qri])
379  / (2*s->qr_size[inter][plane][qri]);
380 
381  int qmin= 8<<(inter + !i);
382  int qscale= i ? ac_scale_factor : dc_scale_factor;
383 
384  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
385  av_clip((qscale * coeff) / 100 * 4, qmin, 4096);
386  }
387  // all DC coefficients use the same quant so as not to interfere with DC prediction
388  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
389  }
390  }
391 }
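/* Roughly, the coeff expression above is a rounded linear interpolation
 * between the two base matrices that bound the requested quality index:
 * base_matrix[bmi] applies at index qistart, base_matrix[bmj] at index sum,
 * and s->qps[qpi] selects a point in between.  With the single 63-entry
 * quant range set up for VP3.1 in vp3_decode_init() (where qr_base[..][0] ==
 * qr_base[..][1]), bmi == bmj and the interpolation collapses to the base
 * matrix value itself; Theora tables may specify several ranges. */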
392 
393 /*
394  * This function initializes the loop filter boundary limits if the frame's
395  * quality index is different from the previous frame's.
396  *
397  * The filter_limit_values may not be larger than 127.
398  */
399 static void init_loop_filter(Vp3DecodeContext *s)
400 {
401  int *bounding_values= s->bounding_values_array+127;
402  int filter_limit;
403  int x;
404  int value;
405 
406  filter_limit = s->filter_limit_values[s->qps[0]];
407  av_assert0(filter_limit < 128U);
408 
409  /* set up the bounding values */
410  memset(s->bounding_values_array, 0, 256 * sizeof(int));
411  for (x = 0; x < filter_limit; x++) {
412  bounding_values[-x] = -x;
413  bounding_values[x] = x;
414  }
415  for (x = value = filter_limit; x < 128 && value; x++, value--) {
416  bounding_values[ x] = value;
417  bounding_values[-x] = -value;
418  }
419  if (value)
420  bounding_values[128] = value;
421  bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
422 }
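/* Example of the resulting table: with filter_limit = 4 the positive half of
 * bounding_values[] is 0,1,2,3,4,3,2,1,0,... i.e. a ramp up to the limit
 * followed by a mirrored ramp back down, and everything from 2*filter_limit
 * onwards stays 0 so that strong edges (likely real image edges) are left
 * unfiltered. */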
423 
424 /*
425  * This function unpacks all of the superblock/macroblock/fragment coding
426  * information from the bitstream.
427  */
428 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
429 {
430  int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
431  int bit = 0;
432  int current_superblock = 0;
433  int current_run = 0;
434  int num_partial_superblocks = 0;
435 
436  int i, j;
437  int current_fragment;
438  int plane;
439 
440  if (s->keyframe) {
441  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
442 
443  } else {
444 
445  /* unpack the list of partially-coded superblocks */
446  bit = get_bits1(gb) ^ 1;
447  current_run = 0;
448 
449  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
450  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
451  bit = get_bits1(gb);
452  else
453  bit ^= 1;
454 
455  current_run = get_vlc2(gb,
456  s->superblock_run_length_vlc.table, 6, 2) + 1;
457  if (current_run == 34)
458  current_run += get_bits(gb, 12);
459 
460  if (current_superblock + current_run > s->superblock_count) {
461  av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
462  return -1;
463  }
464 
465  memset(s->superblock_coding + current_superblock, bit, current_run);
466 
467  current_superblock += current_run;
468  if (bit)
469  num_partial_superblocks += current_run;
470  }
471 
472  /* unpack the list of fully coded superblocks if any of the blocks were
473  * not marked as partially coded in the previous step */
474  if (num_partial_superblocks < s->superblock_count) {
475  int superblocks_decoded = 0;
476 
477  current_superblock = 0;
478  bit = get_bits1(gb) ^ 1;
479  current_run = 0;
480 
481  while (superblocks_decoded < s->superblock_count - num_partial_superblocks
482  && get_bits_left(gb) > 0) {
483 
484  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
485  bit = get_bits1(gb);
486  else
487  bit ^= 1;
488 
489  current_run = get_vlc2(gb,
490  s->superblock_run_length_vlc.table, 6, 2) + 1;
491  if (current_run == 34)
492  current_run += get_bits(gb, 12);
493 
494  for (j = 0; j < current_run; current_superblock++) {
495  if (current_superblock >= s->superblock_count) {
496  av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
497  return -1;
498  }
499 
500  /* skip any superblocks already marked as partially coded */
501  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
502  s->superblock_coding[current_superblock] = 2*bit;
503  j++;
504  }
505  }
506  superblocks_decoded += current_run;
507  }
508  }
509 
510  /* if there were partial blocks, initialize bitstream for
511  * unpacking fragment codings */
512  if (num_partial_superblocks) {
513 
514  current_run = 0;
515  bit = get_bits1(gb);
516  /* toggle the bit because as soon as the first run length is
517  * fetched the bit will be toggled again */
518  bit ^= 1;
519  }
520  }
521 
522  /* figure out which fragments are coded; iterate through each
523  * superblock (all planes) */
524  s->total_num_coded_frags = 0;
526 
527  for (plane = 0; plane < 3; plane++) {
528  int sb_start = superblock_starts[plane];
529  int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
530  int num_coded_frags = 0;
531 
532  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
533 
534  /* iterate through all 16 fragments in a superblock */
535  for (j = 0; j < 16; j++) {
536 
537  /* if the fragment is in bounds, check its coding status */
538  current_fragment = s->superblock_fragments[i * 16 + j];
539  if (current_fragment != -1) {
540  int coded = s->superblock_coding[i];
541 
542  if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
543 
544  /* fragment may or may not be coded; this is the case
545  * that cares about the fragment coding runs */
546  if (current_run-- == 0) {
547  bit ^= 1;
548  current_run = get_vlc2(gb,
549  s->fragment_run_length_vlc.table, 5, 2);
550  }
551  coded = bit;
552  }
553 
554  if (coded) {
555  /* default mode; actual mode will be decoded in
556  * the next phase */
557  s->all_fragments[current_fragment].coding_method =
558  MODE_INTER_NO_MV;
559  s->coded_fragment_list[plane][num_coded_frags++] =
560  current_fragment;
561  } else {
562  /* not coded; copy this fragment from the prior frame */
563  s->all_fragments[current_fragment].coding_method =
564  MODE_COPY;
565  }
566  }
567  }
568  }
569  s->total_num_coded_frags += num_coded_frags;
570  for (i = 0; i < 64; i++)
571  s->num_coded_frags[plane][i] = num_coded_frags;
572  if (plane < 2)
573  s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
574  }
575  return 0;
576 }
577 
578 /*
579  * This function unpacks all the coding mode data for individual macroblocks
580  * from the bitstream.
581  */
582 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
583 {
584  int i, j, k, sb_x, sb_y;
585  int scheme;
586  int current_macroblock;
587  int current_fragment;
588  int coding_mode;
589  int custom_mode_alphabet[CODING_MODE_COUNT];
590  const int *alphabet;
591  Vp3Fragment *frag;
592 
593  if (s->keyframe) {
594  for (i = 0; i < s->fragment_count; i++)
595  s->all_fragments[i].coding_method = MODE_INTRA;
596 
597  } else {
598 
599  /* fetch the mode coding scheme for this frame */
600  scheme = get_bits(gb, 3);
601 
602  /* is it a custom coding scheme? */
603  if (scheme == 0) {
604  for (i = 0; i < 8; i++)
605  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
606  for (i = 0; i < 8; i++)
607  custom_mode_alphabet[get_bits(gb, 3)] = i;
608  alphabet = custom_mode_alphabet;
609  } else
610  alphabet = ModeAlphabet[scheme-1];
611 
612  /* iterate through all of the macroblocks that contain 1 or more
613  * coded fragments */
614  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
615  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
616  if (get_bits_left(gb) <= 0)
617  return -1;
618 
619  for (j = 0; j < 4; j++) {
620  int mb_x = 2*sb_x + (j>>1);
621  int mb_y = 2*sb_y + (((j>>1)+j)&1);
622  current_macroblock = mb_y * s->macroblock_width + mb_x;
623 
624  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
625  continue;
626 
627 #define BLOCK_X (2*mb_x + (k&1))
628 #define BLOCK_Y (2*mb_y + (k>>1))
629  /* coding modes are only stored if the macroblock has at least one
630  * luma block coded, otherwise it must be INTER_NO_MV */
631  for (k = 0; k < 4; k++) {
632  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
633  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
634  break;
635  }
636  if (k == 4) {
637  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
638  continue;
639  }
640 
641  /* mode 7 means get 3 bits for each coding mode */
642  if (scheme == 7)
643  coding_mode = get_bits(gb, 3);
644  else
645  coding_mode = alphabet
646  [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
647 
648  s->macroblock_coding[current_macroblock] = coding_mode;
649  for (k = 0; k < 4; k++) {
650  frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
651  if (frag->coding_method != MODE_COPY)
652  frag->coding_method = coding_mode;
653  }
654 
655 #define SET_CHROMA_MODES \
656  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
657  frag[s->fragment_start[1]].coding_method = coding_mode;\
658  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
659  frag[s->fragment_start[2]].coding_method = coding_mode;
660 
661  if (s->chroma_y_shift) {
662  frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
663  SET_CHROMA_MODES
664  } else if (s->chroma_x_shift) {
665  frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
666  for (k = 0; k < 2; k++) {
667  SET_CHROMA_MODES
668  frag += s->fragment_width[1];
669  }
670  } else {
671  for (k = 0; k < 4; k++) {
672  frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
673  SET_CHROMA_MODES
674  }
675  }
676  }
677  }
678  }
679  }
680 
681  return 0;
682 }
683 
684 /*
685  * This function unpacks all the motion vectors for the individual
686  * macroblocks from the bitstream.
687  */
688 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
689 {
690  int j, k, sb_x, sb_y;
691  int coding_mode;
692  int motion_x[4];
693  int motion_y[4];
694  int last_motion_x = 0;
695  int last_motion_y = 0;
696  int prior_last_motion_x = 0;
697  int prior_last_motion_y = 0;
698  int current_macroblock;
699  int current_fragment;
700  int frag;
701 
702  if (s->keyframe)
703  return 0;
704 
705  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
706  coding_mode = get_bits1(gb);
707 
708  /* iterate through all of the macroblocks that contain 1 or more
709  * coded fragments */
710  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
711  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
712  if (get_bits_left(gb) <= 0)
713  return -1;
714 
715  for (j = 0; j < 4; j++) {
716  int mb_x = 2*sb_x + (j>>1);
717  int mb_y = 2*sb_y + (((j>>1)+j)&1);
718  current_macroblock = mb_y * s->macroblock_width + mb_x;
719 
720  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
721  (s->macroblock_coding[current_macroblock] == MODE_COPY))
722  continue;
723 
724  switch (s->macroblock_coding[current_macroblock]) {
725 
726  case MODE_INTER_PLUS_MV:
727  case MODE_GOLDEN_MV:
728  /* all 6 fragments use the same motion vector */
729  if (coding_mode == 0) {
730  motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
731  motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
732  } else {
733  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
734  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
735  }
736 
737  /* vector maintenance, only on MODE_INTER_PLUS_MV */
738  if (s->macroblock_coding[current_macroblock] ==
739  MODE_INTER_PLUS_MV) {
740  prior_last_motion_x = last_motion_x;
741  prior_last_motion_y = last_motion_y;
742  last_motion_x = motion_x[0];
743  last_motion_y = motion_y[0];
744  }
745  break;
746 
747  case MODE_INTER_FOURMV:
748  /* vector maintenance */
749  prior_last_motion_x = last_motion_x;
750  prior_last_motion_y = last_motion_y;
751 
752  /* fetch 4 vectors from the bitstream, one for each
753  * Y fragment, then average for the C fragment vectors */
754  for (k = 0; k < 4; k++) {
755  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
756  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
757  if (coding_mode == 0) {
758  motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
759  motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
760  } else {
761  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
762  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
763  }
764  last_motion_x = motion_x[k];
765  last_motion_y = motion_y[k];
766  } else {
767  motion_x[k] = 0;
768  motion_y[k] = 0;
769  }
770  }
771  break;
772 
773  case MODE_INTER_LAST_MV:
774  /* all 6 fragments use the last motion vector */
775  motion_x[0] = last_motion_x;
776  motion_y[0] = last_motion_y;
777 
778  /* no vector maintenance (last vector remains the
779  * last vector) */
780  break;
781 
782  case MODE_INTER_PRIOR_LAST:
783  /* all 6 fragments use the motion vector prior to the
784  * last motion vector */
785  motion_x[0] = prior_last_motion_x;
786  motion_y[0] = prior_last_motion_y;
787 
788  /* vector maintenance */
789  prior_last_motion_x = last_motion_x;
790  prior_last_motion_y = last_motion_y;
791  last_motion_x = motion_x[0];
792  last_motion_y = motion_y[0];
793  break;
794 
795  default:
796  /* covers intra, inter without MV, golden without MV */
797  motion_x[0] = 0;
798  motion_y[0] = 0;
799 
800  /* no vector maintenance */
801  break;
802  }
803 
804  /* assign the motion vectors to the correct fragments */
805  for (k = 0; k < 4; k++) {
806  current_fragment =
807  BLOCK_Y*s->fragment_width[0] + BLOCK_X;
808  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
809  s->motion_val[0][current_fragment][0] = motion_x[k];
810  s->motion_val[0][current_fragment][1] = motion_y[k];
811  } else {
812  s->motion_val[0][current_fragment][0] = motion_x[0];
813  s->motion_val[0][current_fragment][1] = motion_y[0];
814  }
815  }
816 
817  if (s->chroma_y_shift) {
818  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
819  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
820  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
821  }
822  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
823  motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
824  frag = mb_y*s->fragment_width[1] + mb_x;
825  s->motion_val[1][frag][0] = motion_x[0];
826  s->motion_val[1][frag][1] = motion_y[0];
827  } else if (s->chroma_x_shift) {
828  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
829  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
830  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
831  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
832  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
833  } else {
834  motion_x[1] = motion_x[0];
835  motion_y[1] = motion_y[0];
836  }
837  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
838  motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);
839 
840  frag = 2*mb_y*s->fragment_width[1] + mb_x;
841  for (k = 0; k < 2; k++) {
842  s->motion_val[1][frag][0] = motion_x[k];
843  s->motion_val[1][frag][1] = motion_y[k];
844  frag += s->fragment_width[1];
845  }
846  } else {
847  for (k = 0; k < 4; k++) {
848  frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
849  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
850  s->motion_val[1][frag][0] = motion_x[k];
851  s->motion_val[1][frag][1] = motion_y[k];
852  } else {
853  s->motion_val[1][frag][0] = motion_x[0];
854  s->motion_val[1][frag][1] = motion_y[0];
855  }
856  }
857  }
858  }
859  }
860  }
861 
862  return 0;
863 }
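/* Note on the chroma vectors above: (v >> 1) | (v & 1) halves a luma vector
 * while keeping bit 0 set whenever the luma vector pointed at a half-pel
 * position, e.g. 4 -> 2, 5 -> 3 and -3 -> -1, so the halved vector still
 * carries the half-pel flag used by the motion compensation in
 * render_slice(). */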
864 
865 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
866 {
867  int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
868  int num_blocks = s->total_num_coded_frags;
869 
870  for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
871  i = blocks_decoded = num_blocks_at_qpi = 0;
872 
873  bit = get_bits1(gb) ^ 1;
874  run_length = 0;
875 
876  do {
877  if (run_length == MAXIMUM_LONG_BIT_RUN)
878  bit = get_bits1(gb);
879  else
880  bit ^= 1;
881 
882  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
883  if (run_length == 34)
884  run_length += get_bits(gb, 12);
885  blocks_decoded += run_length;
886 
887  if (!bit)
888  num_blocks_at_qpi += run_length;
889 
890  for (j = 0; j < run_length; i++) {
891  if (i >= s->total_num_coded_frags)
892  return -1;
893 
894  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
895  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
896  j++;
897  }
898  }
899  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
900 
901  num_blocks -= num_blocks_at_qpi;
902  }
903 
904  return 0;
905 }
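/* In effect each pass of the loop above splits the blocks still assigned to
 * qps[qpi] into "stay at this qp" (bit == 0) and "use a later qp" (bit == 1,
 * which increments the fragment's qpi); with nqps == 3 the second pass then
 * only redistributes the blocks pushed onwards by the first pass, which is
 * why num_blocks is reduced by num_blocks_at_qpi. */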
906 
907 /*
908  * This function is called by unpack_dct_coeffs() to extract the VLCs from
909  * the bitstream. The VLCs encode tokens which are used to unpack DCT
910  * data. This function unpacks all the VLCs for either the Y plane or both
911  * C planes, and is called for DC coefficients or different AC coefficient
912  * levels (since different coefficient types require different VLC tables).
913  *
914  * This function returns a residual eob run. E.g., if a particular token gave
915  * instructions to EOB the next 5 fragments and there were only 2 fragments
916  * left in the current fragment range, 3 would be returned so that it could
917  * be passed into the next call to this same function.
918  */
919 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
920  VLC *table, int coeff_index,
921  int plane,
922  int eob_run)
923 {
924  int i, j = 0;
925  int token;
926  int zero_run = 0;
927  int16_t coeff = 0;
928  int bits_to_get;
929  int blocks_ended;
930  int coeff_i = 0;
931  int num_coeffs = s->num_coded_frags[plane][coeff_index];
932  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
933 
934  /* local references to structure members to avoid repeated dereferences */
935  int *coded_fragment_list = s->coded_fragment_list[plane];
936  Vp3Fragment *all_fragments = s->all_fragments;
937  VLC_TYPE (*vlc_table)[2] = table->table;
938 
939  if (num_coeffs < 0)
940  av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficients at level %d\n", coeff_index);
941 
942  if (eob_run > num_coeffs) {
943  coeff_i = blocks_ended = num_coeffs;
944  eob_run -= num_coeffs;
945  } else {
946  coeff_i = blocks_ended = eob_run;
947  eob_run = 0;
948  }
949 
950  // insert fake EOB token to cover the split between planes or zzi
951  if (blocks_ended)
952  dct_tokens[j++] = blocks_ended << 2;
953 
954  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
955  /* decode a VLC into a token */
956  token = get_vlc2(gb, vlc_table, 11, 3);
957  /* use the token to get a zero run, a coefficient, and an eob run */
958  if ((unsigned) token <= 6U) {
959  eob_run = eob_run_base[token];
960  if (eob_run_get_bits[token])
961  eob_run += get_bits(gb, eob_run_get_bits[token]);
962 
963  // record only the number of blocks ended in this plane,
964  // any spill will be recorded in the next plane.
965  if (eob_run > num_coeffs - coeff_i) {
966  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
967  blocks_ended += num_coeffs - coeff_i;
968  eob_run -= num_coeffs - coeff_i;
969  coeff_i = num_coeffs;
970  } else {
971  dct_tokens[j++] = TOKEN_EOB(eob_run);
972  blocks_ended += eob_run;
973  coeff_i += eob_run;
974  eob_run = 0;
975  }
976  } else if (token >= 0) {
977  bits_to_get = coeff_get_bits[token];
978  if (bits_to_get)
979  bits_to_get = get_bits(gb, bits_to_get);
980  coeff = coeff_tables[token][bits_to_get];
981 
982  zero_run = zero_run_base[token];
983  if (zero_run_get_bits[token])
984  zero_run += get_bits(gb, zero_run_get_bits[token]);
985 
986  if (zero_run) {
987  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
988  } else {
989  // Save DC into the fragment structure. DC prediction is
990  // done in raster order, so the actual DC can't be in with
991  // other tokens. We still need the token in dct_tokens[]
992  // however, or else the structure collapses on itself.
993  if (!coeff_index)
994  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
995 
996  dct_tokens[j++] = TOKEN_COEFF(coeff);
997  }
998 
999  if (coeff_index + zero_run > 64) {
1000  av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
1001  " %d coeffs left\n", zero_run, 64-coeff_index);
1002  zero_run = 64 - coeff_index;
1003  }
1004 
1005  // zero runs code multiple coefficients,
1006  // so don't try to decode coeffs for those higher levels
1007  for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
1008  s->num_coded_frags[plane][i]--;
1009  coeff_i++;
1010  } else {
1012  "Invalid token %d\n", token);
1013  return -1;
1014  }
1015  }
1016 
1017  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1018  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1019 
1020  // decrement the number of blocks that have higher coefficients for each
1021  // EOB run at this level
1022  if (blocks_ended)
1023  for (i = coeff_index+1; i < 64; i++)
1024  s->num_coded_frags[plane][i] -= blocks_ended;
1025 
1026  // setup the next buffer
1027  if (plane < 2)
1028  s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
1029  else if (coeff_index < 63)
1030  s->dct_tokens[0][coeff_index+1] = dct_tokens + j;
1031 
1032  return eob_run;
1033 }
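/* Example of the residual EOB run described above: if the last token of the
 * Y-plane DC pass requests an EOB run of 5 but only 2 coded Y fragments
 * remain, this function returns 3; unpack_dct_coeffs() feeds that 3 into the
 * following C-plane call, which then ends its first 3 blocks (the
 * "blocks_ended" fake token) before reading any new bits. */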
1034 
1035 static void reverse_dc_prediction(Vp3DecodeContext *s,
1036  int first_fragment,
1037  int fragment_width,
1038  int fragment_height);
1039 /*
1040  * This function unpacks all of the DCT coefficient data from the
1041  * bitstream.
1042  */
1043 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1044 {
1045  int i;
1046  int dc_y_table;
1047  int dc_c_table;
1048  int ac_y_table;
1049  int ac_c_table;
1050  int residual_eob_run = 0;
1051  VLC *y_tables[64];
1052  VLC *c_tables[64];
1053 
1054  s->dct_tokens[0][0] = s->dct_tokens_base;
1055 
1056  /* fetch the DC table indexes */
1057  dc_y_table = get_bits(gb, 4);
1058  dc_c_table = get_bits(gb, 4);
1059 
1060  /* unpack the Y plane DC coefficients */
1061  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1062  0, residual_eob_run);
1063  if (residual_eob_run < 0)
1064  return residual_eob_run;
1065 
1066  /* reverse prediction of the Y-plane DC coefficients */
1067  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1068 
1069  /* unpack the C plane DC coefficients */
1070  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1071  1, residual_eob_run);
1072  if (residual_eob_run < 0)
1073  return residual_eob_run;
1074  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1075  2, residual_eob_run);
1076  if (residual_eob_run < 0)
1077  return residual_eob_run;
1078 
1079  /* reverse prediction of the C-plane DC coefficients */
1080  if (!(s->avctx->flags & CODEC_FLAG_GRAY))
1081  {
1082  reverse_dc_prediction(s, s->fragment_start[1],
1083  s->fragment_width[1], s->fragment_height[1]);
1084  reverse_dc_prediction(s, s->fragment_start[2],
1085  s->fragment_width[1], s->fragment_height[1]);
1086  }
1087 
1088  /* fetch the AC table indexes */
1089  ac_y_table = get_bits(gb, 4);
1090  ac_c_table = get_bits(gb, 4);
1091 
1092  /* build tables of AC VLC tables */
1093  for (i = 1; i <= 5; i++) {
1094  y_tables[i] = &s->ac_vlc_1[ac_y_table];
1095  c_tables[i] = &s->ac_vlc_1[ac_c_table];
1096  }
1097  for (i = 6; i <= 14; i++) {
1098  y_tables[i] = &s->ac_vlc_2[ac_y_table];
1099  c_tables[i] = &s->ac_vlc_2[ac_c_table];
1100  }
1101  for (i = 15; i <= 27; i++) {
1102  y_tables[i] = &s->ac_vlc_3[ac_y_table];
1103  c_tables[i] = &s->ac_vlc_3[ac_c_table];
1104  }
1105  for (i = 28; i <= 63; i++) {
1106  y_tables[i] = &s->ac_vlc_4[ac_y_table];
1107  c_tables[i] = &s->ac_vlc_4[ac_c_table];
1108  }
1109 
1110  /* decode all AC coefficients */
1111  for (i = 1; i <= 63; i++) {
1112  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1113  0, residual_eob_run);
1114  if (residual_eob_run < 0)
1115  return residual_eob_run;
1116 
1117  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1118  1, residual_eob_run);
1119  if (residual_eob_run < 0)
1120  return residual_eob_run;
1121  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1122  2, residual_eob_run);
1123  if (residual_eob_run < 0)
1124  return residual_eob_run;
1125  }
1126 
1127  return 0;
1128 }
1129 
1130 /*
1131  * This function reverses the DC prediction for each coded fragment in
1132  * the frame. Much of this function is adapted directly from the original
1133  * VP3 source code.
1134  */
1135 #define COMPATIBLE_FRAME(x) \
1136  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1137 #define DC_COEFF(u) s->all_fragments[u].dc
1138 
1139 static void reverse_dc_prediction(Vp3DecodeContext *s,
1140  int first_fragment,
1141  int fragment_width,
1142  int fragment_height)
1143 {
1144 
1145 #define PUL 8
1146 #define PU 4
1147 #define PUR 2
1148 #define PL 1
1149 
1150  int x, y;
1151  int i = first_fragment;
1152 
1153  int predicted_dc;
1154 
1155  /* DC values for the left, up-left, up, and up-right fragments */
1156  int vl, vul, vu, vur;
1157 
1158  /* indexes for the left, up-left, up, and up-right fragments */
1159  int l, ul, u, ur;
1160 
1161  /*
1162  * The 4 fields mean:
1163  * 0: up-left multiplier
1164  * 1: up multiplier
1165  * 2: up-right multiplier
1166  * 3: left multiplier
1167  */
1168  static const int predictor_transform[16][4] = {
1169  { 0, 0, 0, 0},
1170  { 0, 0, 0,128}, // PL
1171  { 0, 0,128, 0}, // PUR
1172  { 0, 0, 53, 75}, // PUR|PL
1173  { 0,128, 0, 0}, // PU
1174  { 0, 64, 0, 64}, // PU|PL
1175  { 0,128, 0, 0}, // PU|PUR
1176  { 0, 0, 53, 75}, // PU|PUR|PL
1177  {128, 0, 0, 0}, // PUL
1178  { 0, 0, 0,128}, // PUL|PL
1179  { 64, 0, 64, 0}, // PUL|PUR
1180  { 0, 0, 53, 75}, // PUL|PUR|PL
1181  { 0,128, 0, 0}, // PUL|PU
1182  {-104,116, 0,116}, // PUL|PU|PL
1183  { 24, 80, 24, 0}, // PUL|PU|PUR
1184  {-104,116, 0,116} // PUL|PU|PUR|PL
1185  };
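 /* Worked example: if only the up and left neighbours are available and
  * compatible (transform == PU|PL == 5), the row { 0, 64, 0, 64} gives
  *     predicted_dc = (64*vu + 64*vl) / 128
  * i.e. the plain average of the two neighbouring DC values; the division by
  * 128 below is the common denominator for all of these fixed-point weights. */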
1186 
1187  /* This table shows which types of blocks can use other blocks for
1188  * prediction. For example, INTRA is the only mode in this table to
1189  * have a frame number of 0. That means INTRA blocks can only predict
1190  * from other INTRA blocks. There are 2 golden frame coding types;
1191  * blocks encoded in these modes can only predict from other blocks
1192  * that were encoded with one of these 2 modes. */
1193  static const unsigned char compatible_frame[9] = {
1194  1, /* MODE_INTER_NO_MV */
1195  0, /* MODE_INTRA */
1196  1, /* MODE_INTER_PLUS_MV */
1197  1, /* MODE_INTER_LAST_MV */
1198  1, /* MODE_INTER_PRIOR_LAST */
1199  2, /* MODE_USING_GOLDEN */
1200  2, /* MODE_GOLDEN_MV */
1201  1, /* MODE_INTER_FOURMV */
1202  3 /* MODE_COPY */
1203  };
1204  int current_frame_type;
1205 
1206  /* there is a last DC predictor for each of the 3 frame types */
1207  short last_dc[3];
1208 
1209  int transform = 0;
1210 
1211  vul = vu = vur = vl = 0;
1212  last_dc[0] = last_dc[1] = last_dc[2] = 0;
1213 
1214  /* for each fragment row... */
1215  for (y = 0; y < fragment_height; y++) {
1216 
1217  /* for each fragment in a row... */
1218  for (x = 0; x < fragment_width; x++, i++) {
1219 
1220  /* reverse prediction if this block was coded */
1221  if (s->all_fragments[i].coding_method != MODE_COPY) {
1222 
1223  current_frame_type =
1224  compatible_frame[s->all_fragments[i].coding_method];
1225 
1226  transform= 0;
1227  if(x){
1228  l= i-1;
1229  vl = DC_COEFF(l);
1230  if(COMPATIBLE_FRAME(l))
1231  transform |= PL;
1232  }
1233  if(y){
1234  u= i-fragment_width;
1235  vu = DC_COEFF(u);
1236  if(COMPATIBLE_FRAME(u))
1237  transform |= PU;
1238  if(x){
1239  ul= i-fragment_width-1;
1240  vul = DC_COEFF(ul);
1241  if(COMPATIBLE_FRAME(ul))
1242  transform |= PUL;
1243  }
1244  if(x + 1 < fragment_width){
1245  ur= i-fragment_width+1;
1246  vur = DC_COEFF(ur);
1247  if(COMPATIBLE_FRAME(ur))
1248  transform |= PUR;
1249  }
1250  }
1251 
1252  if (transform == 0) {
1253 
1254  /* if there were no fragments to predict from, use last
1255  * DC saved */
1256  predicted_dc = last_dc[current_frame_type];
1257  } else {
1258 
1259  /* apply the appropriate predictor transform */
1260  predicted_dc =
1261  (predictor_transform[transform][0] * vul) +
1262  (predictor_transform[transform][1] * vu) +
1263  (predictor_transform[transform][2] * vur) +
1264  (predictor_transform[transform][3] * vl);
1265 
1266  predicted_dc /= 128;
1267 
1268  /* check for outranging on the [ul u l] and
1269  * [ul u ur l] predictors */
1270  if ((transform == 15) || (transform == 13)) {
1271  if (FFABS(predicted_dc - vu) > 128)
1272  predicted_dc = vu;
1273  else if (FFABS(predicted_dc - vl) > 128)
1274  predicted_dc = vl;
1275  else if (FFABS(predicted_dc - vul) > 128)
1276  predicted_dc = vul;
1277  }
1278  }
1279 
1280  /* at long last, apply the predictor */
1281  DC_COEFF(i) += predicted_dc;
1282  /* save the DC */
1283  last_dc[current_frame_type] = DC_COEFF(i);
1284  }
1285  }
1286  }
1287 }
1288 
1289 static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
1290 {
1291  int x, y;
1292  int *bounding_values= s->bounding_values_array+127;
1293 
1294  int width = s->fragment_width[!!plane];
1295  int height = s->fragment_height[!!plane];
1296  int fragment = s->fragment_start [plane] + ystart * width;
1297  int stride = s->current_frame.f->linesize[plane];
1298  uint8_t *plane_data = s->current_frame.f->data [plane];
1299  if (!s->flipped_image) stride = -stride;
1300  plane_data += s->data_offset[plane] + 8*ystart*stride;
1301 
1302  for (y = ystart; y < yend; y++) {
1303 
1304  for (x = 0; x < width; x++) {
1305  /* This code basically just deblocks on the edges of coded blocks.
1306  * However, it has to be much more complicated because of the
1307  * braindamaged deblock ordering used in VP3/Theora. Order matters
1308  * because some pixels get filtered twice. */
1309  if( s->all_fragments[fragment].coding_method != MODE_COPY )
1310  {
1311  /* do not perform left edge filter for left column fragments */
1312  if (x > 0) {
1313  s->vp3dsp.h_loop_filter(
1314  plane_data + 8*x,
1315  stride, bounding_values);
1316  }
1317 
1318  /* do not perform top edge filter for top row fragments */
1319  if (y > 0) {
1320  s->vp3dsp.v_loop_filter(
1321  plane_data + 8*x,
1322  stride, bounding_values);
1323  }
1324 
1325  /* do not perform right edge filter for right column
1326  * fragments or if right fragment neighbor is also coded
1327  * in this frame (it will be filtered in next iteration) */
1328  if ((x < width - 1) &&
1329  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1330  s->vp3dsp.h_loop_filter(
1331  plane_data + 8*x + 8,
1332  stride, bounding_values);
1333  }
1334 
1335  /* do not perform bottom edge filter for bottom row
1336  * fragments or if bottom fragment neighbor is also coded
1337  * in this frame (it will be filtered in the next row) */
1338  if ((y < height - 1) &&
1339  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1340  s->vp3dsp.v_loop_filter(
1341  plane_data + 8*x + 8*stride,
1342  stride, bounding_values);
1343  }
1344  }
1345 
1346  fragment++;
1347  }
1348  plane_data += 8*stride;
1349  }
1350 }
1351 
1352 /**
1353  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1354  * for the next block in coding order
1355  */
1356 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
1357  int plane, int inter, int16_t block[64])
1358 {
1359  int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1360  uint8_t *perm = s->idct_scantable;
1361  int i = 0;
1362 
1363  do {
1364  int token = *s->dct_tokens[plane][i];
1365  switch (token & 3) {
1366  case 0: // EOB
1367  if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
1368  s->dct_tokens[plane][i]++;
1369  else
1370  *s->dct_tokens[plane][i] = token & ~3;
1371  goto end;
1372  case 1: // zero run
1373  s->dct_tokens[plane][i]++;
1374  i += (token >> 2) & 0x7f;
1375  if (i > 63) {
1376  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1377  return i;
1378  }
1379  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1380  i++;
1381  break;
1382  case 2: // coeff
1383  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1384  s->dct_tokens[plane][i++]++;
1385  break;
1386  default: // shouldn't happen
1387  return i;
1388  }
1389  } while (i < 64);
1390  // return value is expected to be a valid level
1391  i--;
1392 end:
1393  // the actual DC+prediction is in the fragment structure
1394  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1395  return i;
1396 }
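/* Example of the EOB handling in case 0 above: an EOB run of 1 is stored as
 * TOKEN_EOB(1) == 4; decrementing gives 3 (< 4, i.e. nothing but the type
 * bits left), so the token is consumed and the pointer advances.  A run of 2
 * is stored as 8; decrementing gives 7, which is written back as 7 & ~3 == 4,
 * so one block of the run is consumed per call and a run of 1 remains. */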
1397 
1398 /**
1399  * called when all pixels up to row y are complete
1400  */
1401 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1402 {
1403  int h, cy, i;
1404  int offset[AV_NUM_DATA_POINTERS];
1405 
1406  if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1407  int y_flipped = s->flipped_image ? s->avctx->height-y : y;
1408 
1409  // At the end of the frame, report INT_MAX instead of the height of the frame.
1410  // This makes the other threads' ff_thread_await_progress() calls cheaper, because
1411  // they don't have to clip their values.
1412  ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
1413  }
1414 
1415  if(s->avctx->draw_horiz_band==NULL)
1416  return;
1417 
1418  h= y - s->last_slice_end;
1419  s->last_slice_end= y;
1420  y -= h;
1421 
1422  if (!s->flipped_image) {
1423  y = s->avctx->height - y - h;
1424  }
1425 
1426  cy = y >> s->chroma_y_shift;
1427  offset[0] = s->current_frame.f->linesize[0]*y;
1428  offset[1] = s->current_frame.f->linesize[1]*cy;
1429  offset[2] = s->current_frame.f->linesize[2]*cy;
1430  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1431  offset[i] = 0;
1432 
1433  emms_c();
1434  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1435 }
1436 
1437 /**
1438  * Wait for the reference frame of the current fragment.
1439  * The progress value is in luma pixel rows.
1440  */
1441 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
1442 {
1443  ThreadFrame *ref_frame;
1444  int ref_row;
1445  int border = motion_y&1;
1446 
1447  if (fragment->coding_method == MODE_USING_GOLDEN ||
1448  fragment->coding_method == MODE_GOLDEN_MV)
1449  ref_frame = &s->golden_frame;
1450  else
1451  ref_frame = &s->last_frame;
1452 
1453  ref_row = y + (motion_y>>1);
1454  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1455 
1456  ff_thread_await_progress(ref_frame, ref_row, 0);
1457 }
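/* Rough illustration: for an 8-line fragment whose top luma row is y and
 * whose vertical motion component is -3, ref_row becomes y - 2 and the call
 * waits until row y + 7 of the reference frame (the 8 fragment rows plus the
 * extra line needed for half-pel interpolation) has been reported as
 * decoded. */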
1458 
1459 /*
1460  * Perform the final rendering for a particular slice of data.
1461  * The slice number ranges from 0..(c_superblock_height - 1).
1462  */
1463 static void render_slice(Vp3DecodeContext *s, int slice)
1464 {
1465  int x, y, i, j, fragment;
1466  int16_t *block = s->block;
1467  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1468  int motion_halfpel_index;
1469  uint8_t *motion_source;
1470  int plane, first_pixel;
1471 
1472  if (slice >= s->c_superblock_height)
1473  return;
1474 
1475  for (plane = 0; plane < 3; plane++) {
1476  uint8_t *output_plane = s->current_frame.f->data [plane] + s->data_offset[plane];
1477  uint8_t * last_plane = s-> last_frame.f->data [plane] + s->data_offset[plane];
1478  uint8_t *golden_plane = s-> golden_frame.f->data [plane] + s->data_offset[plane];
1479  int stride = s->current_frame.f->linesize[plane];
1480  int plane_width = s->width >> (plane && s->chroma_x_shift);
1481  int plane_height = s->height >> (plane && s->chroma_y_shift);
1482  int8_t (*motion_val)[2] = s->motion_val[!!plane];
1483 
1484  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
1485  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
1486  int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;
1487 
1488  int fragment_width = s->fragment_width[!!plane];
1489  int fragment_height = s->fragment_height[!!plane];
1490  int fragment_start = s->fragment_start[plane];
1491  int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
1492 
1493  if (!s->flipped_image) stride = -stride;
1494  if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
1495  continue;
1496 
1497  /* for each superblock row in the slice (both of them)... */
1498  for (; sb_y < slice_height; sb_y++) {
1499 
1500  /* for each superblock in a row... */
1501  for (sb_x = 0; sb_x < slice_width; sb_x++) {
1502 
1503  /* for each block in a superblock... */
1504  for (j = 0; j < 16; j++) {
1505  x = 4*sb_x + hilbert_offset[j][0];
1506  y = 4*sb_y + hilbert_offset[j][1];
1507  fragment = y*fragment_width + x;
1508 
1509  i = fragment_start + fragment;
1510 
1511  // bounds check
1512  if (x >= fragment_width || y >= fragment_height)
1513  continue;
1514 
1515  first_pixel = 8*y*stride + 8*x;
1516 
1517  if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
1518  await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);
1519 
1520  /* transform if this block was coded */
1521  if (s->all_fragments[i].coding_method != MODE_COPY) {
1522  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
1523  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
1524  motion_source= golden_plane;
1525  else
1526  motion_source= last_plane;
1527 
1528  motion_source += first_pixel;
1529  motion_halfpel_index = 0;
1530 
1531  /* sort out the motion vector if this fragment is coded
1532  * using a motion vector method */
1533  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
1534  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
1535  int src_x, src_y;
1536  motion_x = motion_val[fragment][0];
1537  motion_y = motion_val[fragment][1];
1538 
1539  src_x= (motion_x>>1) + 8*x;
1540  src_y= (motion_y>>1) + 8*y;
1541 
1542  motion_halfpel_index = motion_x & 0x01;
1543  motion_source += (motion_x >> 1);
1544 
1545  motion_halfpel_index |= (motion_y & 0x01) << 1;
1546  motion_source += ((motion_y >> 1) * stride);
1547 
1548  if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1549  uint8_t *temp= s->edge_emu_buffer;
1550  if(stride<0) temp -= 8*stride;
1551 
1552  s->vdsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
1553  motion_source= temp;
1554  }
1555  }
1556 
1557 
1558  /* first, take care of copying a block from either the
1559  * previous or the golden frame */
1560  if (s->all_fragments[i].coding_method != MODE_INTRA) {
1561  /* Note, it is possible to implement all MC cases with
1562  put_no_rnd_pixels_l2 which would look more like the
1563  VP3 source but this would be slower as
1564  put_no_rnd_pixels_tab is better optimized */
1565  if(motion_halfpel_index != 3){
1566  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1567  output_plane + first_pixel,
1568  motion_source, stride, 8);
1569  }else{
1570  int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
1571  s->vp3dsp.put_no_rnd_pixels_l2(
1572  output_plane + first_pixel,
1573  motion_source - d,
1574  motion_source + stride + 1 + d,
1575  stride, 8);
1576  }
1577  }
1578 
1579  /* invert DCT and place (or add) in final output */
1580 
1581  if (s->all_fragments[i].coding_method == MODE_INTRA) {
1582  vp3_dequant(s, s->all_fragments + i, plane, 0, block);
1583  s->vp3dsp.idct_put(
1584  output_plane + first_pixel,
1585  stride,
1586  block);
1587  } else {
1588  if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) {
1589  s->vp3dsp.idct_add(
1590  output_plane + first_pixel,
1591  stride,
1592  block);
1593  } else {
1594  s->vp3dsp.idct_dc_add(output_plane + first_pixel, stride, block);
1595  }
1596  }
1597  } else {
1598 
1599  /* copy directly from the previous frame */
1600  s->hdsp.put_pixels_tab[1][0](
1601  output_plane + first_pixel,
1602  last_plane + first_pixel,
1603  stride, 8);
1604 
1605  }
1606  }
1607  }
1608 
1609  // Filter up to the last row in the superblock row
1610  if (!s->skip_loop_filter)
1611  apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
1612  }
1613  }
1614 
1615  /* this looks like a good place for slice dispatch... */
1616  /* algorithm:
1617  * if (slice == s->macroblock_height - 1)
1618  * dispatch (both last slice & 2nd-to-last slice);
1619  * else if (slice > 0)
1620  * dispatch (slice - 1);
1621  */
1622 
1623  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
1624 }
1625 
1626 /// Allocate tables for per-frame data in Vp3DecodeContext
1627 static av_cold int allocate_tables(AVCodecContext *avctx)
1628 {
1629  Vp3DecodeContext *s = avctx->priv_data;
1630  int y_fragment_count, c_fragment_count;
1631 
1632  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1633  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1634 
1637  s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
1638  s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
1639  s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
1640  s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));
1641 
1642  /* work out the block mapping tables */
1643  s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
1645 
1646  if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
1648  !s->motion_val[0] || !s->motion_val[1]) {
1649  vp3_decode_end(avctx);
1650  return -1;
1651  }
1652 
1653  init_block_mapping(s);
1654 
1655  return 0;
1656 }
1657 
1658 static av_cold int init_frames(Vp3DecodeContext *s)
1659 {
1660  s->current_frame.f = av_frame_alloc();
1661  s->last_frame.f = av_frame_alloc();
1662  s->golden_frame.f = av_frame_alloc();
1663 
1664  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f) {
1665  av_frame_free(&s->current_frame.f);
1666  av_frame_free(&s->last_frame.f);
1667  av_frame_free(&s->golden_frame.f);
1668  return AVERROR(ENOMEM);
1669  }
1670 
1671  return 0;
1672 }
1673 
1674 static av_cold int vp3_decode_init(AVCodecContext *avctx)
1675 {
1676  Vp3DecodeContext *s = avctx->priv_data;
1677  int i, inter, plane, ret;
1678  int c_width;
1679  int c_height;
1680  int y_fragment_count, c_fragment_count;
1681 
1682  ret = init_frames(s);
1683  if (ret < 0)
1684  return ret;
1685 
1686  avctx->internal->allocate_progress = 1;
1687 
1688  if (avctx->codec_tag == MKTAG('V','P','3','0'))
1689  s->version = 0;
1690  else
1691  s->version = 1;
1692 
1693  s->avctx = avctx;
1694  s->width = FFALIGN(avctx->width, 16);
1695  s->height = FFALIGN(avctx->height, 16);
1696  if (avctx->codec_id != AV_CODEC_ID_THEORA)
1697  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1700  ff_videodsp_init(&s->vdsp, 8);
1701  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
1702 
1703  for (i = 0; i < 64; i++) {
1704 #define T(x) (x >> 3) | ((x & 7) << 3)
1705  s->idct_permutation[i] = T(i);
1706  s->idct_scantable[i] = T(ff_zigzag_direct[i]);
1707 #undef T
1708  }
1709 
1710  /* initialize to an impossible value which will force a recalculation
1711  * in the first frame decode */
1712  for (i = 0; i < 3; i++)
1713  s->qps[i] = -1;
1714 
1716 
1717  s->y_superblock_width = (s->width + 31) / 32;
1718  s->y_superblock_height = (s->height + 31) / 32;
1719  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
1720 
1721  /* work out the dimensions for the C planes */
1722  c_width = s->width >> s->chroma_x_shift;
1723  c_height = s->height >> s->chroma_y_shift;
1724  s->c_superblock_width = (c_width + 31) / 32;
1725  s->c_superblock_height = (c_height + 31) / 32;
1726  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
1727 
1728  s->superblock_count = s->y_superblock_count + 2 * s->c_superblock_count;
1729  s->u_superblock_start = s->y_superblock_count;
1730  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
1731 
1732  s->macroblock_width = (s->width + 15) / 16;
1733  s->macroblock_height = (s->height + 15) / 16;
1735 
1736  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
1737  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
1738  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
1739  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
1740 
1741  /* fragment count covers all 8x8 blocks for all 3 planes */
1742  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1743  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1744  s->fragment_count = y_fragment_count + 2*c_fragment_count;
1745  s->fragment_start[1] = y_fragment_count;
1746  s->fragment_start[2] = y_fragment_count + c_fragment_count;
1747 
1748  if (!s->theora_tables)
1749  {
1750  for (i = 0; i < 64; i++) {
1751  s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
1752  s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
1753  s->base_matrix[0][i] = vp31_intra_y_dequant[i];
1754  s->base_matrix[1][i] = vp31_intra_c_dequant[i];
1755  s->base_matrix[2][i] = vp31_inter_dequant[i];
1756  s->filter_limit_values[i] = vp31_filter_limit_values[i];
1757  }
1758 
1759  for(inter=0; inter<2; inter++){
1760  for(plane=0; plane<3; plane++){
1761  s->qr_count[inter][plane]= 1;
1762  s->qr_size [inter][plane][0]= 63;
1763  s->qr_base [inter][plane][0]=
1764  s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
1765  }
1766  }
1767 
1768  /* init VLC tables */
1769  for (i = 0; i < 16; i++) {
1770 
1771  /* DC histograms */
1772  init_vlc(&s->dc_vlc[i], 11, 32,
1773  &dc_bias[i][0][1], 4, 2,
1774  &dc_bias[i][0][0], 4, 2, 0);
1775 
1776  /* group 1 AC histograms */
1777  init_vlc(&s->ac_vlc_1[i], 11, 32,
1778  &ac_bias_0[i][0][1], 4, 2,
1779  &ac_bias_0[i][0][0], 4, 2, 0);
1780 
1781  /* group 2 AC histograms */
1782  init_vlc(&s->ac_vlc_2[i], 11, 32,
1783  &ac_bias_1[i][0][1], 4, 2,
1784  &ac_bias_1[i][0][0], 4, 2, 0);
1785 
1786  /* group 3 AC histograms */
1787  init_vlc(&s->ac_vlc_3[i], 11, 32,
1788  &ac_bias_2[i][0][1], 4, 2,
1789  &ac_bias_2[i][0][0], 4, 2, 0);
1790 
1791  /* group 4 AC histograms */
1792  init_vlc(&s->ac_vlc_4[i], 11, 32,
1793  &ac_bias_3[i][0][1], 4, 2,
1794  &ac_bias_3[i][0][0], 4, 2, 0);
1795  }
1796  } else {
1797 
1798  for (i = 0; i < 16; i++) {
1799  /* DC histograms */
1800  if (init_vlc(&s->dc_vlc[i], 11, 32,
1801  &s->huffman_table[i][0][1], 8, 4,
1802  &s->huffman_table[i][0][0], 8, 4, 0) < 0)
1803  goto vlc_fail;
1804 
1805  /* group 1 AC histograms */
1806  if (init_vlc(&s->ac_vlc_1[i], 11, 32,
1807  &s->huffman_table[i+16][0][1], 8, 4,
1808  &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
1809  goto vlc_fail;
1810 
1811  /* group 2 AC histograms */
1812  if (init_vlc(&s->ac_vlc_2[i], 11, 32,
1813  &s->huffman_table[i+16*2][0][1], 8, 4,
1814  &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
1815  goto vlc_fail;
1816 
1817  /* group 3 AC histograms */
1818  if (init_vlc(&s->ac_vlc_3[i], 11, 32,
1819  &s->huffman_table[i+16*3][0][1], 8, 4,
1820  &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
1821  goto vlc_fail;
1822 
1823  /* group 4 AC histograms */
1824  if (init_vlc(&s->ac_vlc_4[i], 11, 32,
1825  &s->huffman_table[i+16*4][0][1], 8, 4,
1826  &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
1827  goto vlc_fail;
1828  }
1829  }
1830 
1831  init_vlc(&s->superblock_run_length_vlc, 6, 34,
1832  &superblock_run_length_vlc_table[0][1], 4, 2,
1833  &superblock_run_length_vlc_table[0][0], 4, 2, 0);
1834 
1835  init_vlc(&s->fragment_run_length_vlc, 5, 30,
1836  &fragment_run_length_vlc_table[0][1], 4, 2,
1837  &fragment_run_length_vlc_table[0][0], 4, 2, 0);
1838 
1839  init_vlc(&s->mode_code_vlc, 3, 8,
1840  &mode_code_vlc_table[0][1], 2, 1,
1841  &mode_code_vlc_table[0][0], 2, 1, 0);
1842 
1843  init_vlc(&s->motion_vector_vlc, 6, 63,
1844  &motion_vector_vlc_table[0][1], 2, 1,
1845  &motion_vector_vlc_table[0][0], 2, 1, 0);
1846 
1847  return allocate_tables(avctx);
1848 
1849 vlc_fail:
1850  av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
1851  return -1;
1852 }
1853 
1854 /// Release and shuffle frames after decode finishes
1855 static int update_frames(AVCodecContext *avctx)
1856 {
1857  Vp3DecodeContext *s = avctx->priv_data;
1858  int ret = 0;
1859 
1860 
1861  /* shuffle frames (last = current) */
1862  ff_thread_release_buffer(avctx, &s->last_frame);
1863  ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
1864  if (ret < 0)
1865  goto fail;
1866 
1867  if (s->keyframe) {
1868  ff_thread_release_buffer(avctx, &s->golden_frame);
1869  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
1870  }
1871 
1872 fail:
1873  ff_thread_release_buffer(avctx, &s->current_frame);
1874  return ret;
1875 }
1876 
1877 static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
1878 {
1879  ff_thread_release_buffer(s->avctx, dst);
1880  if (src->f->data[0])
1881  return ff_thread_ref_frame(dst, src);
1882  return 0;
1883 }
1884 
1885 static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
1886 {
1887  int ret;
1888  if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
1889  (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
1890  (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
1891  return ret;
1892  return 0;
1893 }
1894 
1895 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1896 {
1897  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
1898  int qps_changed = 0, i, err;
1899 
1900 #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
1901 
1902  if (!s1->current_frame.f->data[0]
1903  ||s->width != s1->width
1904  ||s->height!= s1->height) {
1905  if (s != s1)
1906  ref_frames(s, s1);
1907  return -1;
1908  }
1909 
1910  if (s != s1) {
1911  // init tables if the first frame hasn't been decoded
1912  if (!s->current_frame.f->data[0]) {
1913  int y_fragment_count, c_fragment_count;
1914  s->avctx = dst;
1915  err = allocate_tables(dst);
1916  if (err)
1917  return err;
1918  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1919  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1920  memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
1921  memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
1922  }
1923 
1924  // copy previous frame data
1925  if ((err = ref_frames(s, s1)) < 0)
1926  return err;
1927 
1928  s->keyframe = s1->keyframe;
1929 
1930  // copy qscale data if necessary
1931  for (i = 0; i < 3; i++) {
1932  if (s->qps[i] != s1->qps[i]) {
1933  qps_changed = 1;
1934  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
1935  }
1936  }
1937 
1938  if (s->qps[0] != s1->qps[0])
1939  memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));
1940 
1941  if (qps_changed)
1942  copy_fields(s, s1, qps, superblock_count);
1943 #undef copy_fields
1944  }
1945 
1946  return update_frames(dst);
1947 }
1948 
1949 static int vp3_decode_frame(AVCodecContext *avctx,
1950  void *data, int *got_frame,
1951  AVPacket *avpkt)
1952 {
1953  const uint8_t *buf = avpkt->data;
1954  int buf_size = avpkt->size;
1955  Vp3DecodeContext *s = avctx->priv_data;
1956  GetBitContext gb;
1957  int i, ret;
1958 
1959  init_get_bits(&gb, buf, buf_size * 8);
1960 
1961 #if CONFIG_THEORA_DECODER
1962  if (s->theora && get_bits1(&gb))
1963  {
1964  int type = get_bits(&gb, 7);
1965  skip_bits_long(&gb, 6*8); /* "theora" */
1966 
1967  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
1968  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
1969  return AVERROR_PATCHWELCOME;
1970  }
1971  if (type == 0) {
1972  vp3_decode_end(avctx);
1973  ret = theora_decode_header(avctx, &gb);
1974 
1975  if (ret < 0) {
1976  vp3_decode_end(avctx);
1977  } else
1978  ret = vp3_decode_init(avctx);
1979  return ret;
1980  } else if (type == 2) {
1981  ret = theora_decode_tables(avctx, &gb);
1982  if (ret < 0) {
1983  vp3_decode_end(avctx);
1984  } else
1985  ret = vp3_decode_init(avctx);
1986  return ret;
1987  }
1988 
1989  av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
1990  return -1;
1991  }
1992 #endif
1993 
1994  s->keyframe = !get_bits1(&gb);
1995  if (!s->all_fragments) {
1996  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
1997  return -1;
1998  }
1999  if (!s->theora)
2000  skip_bits(&gb, 1);
2001  for (i = 0; i < 3; i++)
2002  s->last_qps[i] = s->qps[i];
2003 
2004  s->nqps=0;
2005  do{
2006  s->qps[s->nqps++]= get_bits(&gb, 6);
2007  } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
2008  for (i = s->nqps; i < 3; i++)
2009  s->qps[i] = -1;
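/* Note: a Theora 3.2+ frame may signal up to three quality indices (the loop above keeps
 * reading while nqps < 3); slots not coded in this frame are set to -1. qps[0] drives the
 * loop-filter strength below, and each valid qps[i] selects a dequantizer. */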
2010 
2011  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2012  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2013  s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);
2014 
2015  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2016  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);
2017 
2018  if (s->qps[0] != s->last_qps[0])
2019  init_loop_filter(s);
2020 
2021  for (i = 0; i < s->nqps; i++)
2022  // reinit all dequantizers if the first one changed, because
2023  // the DC of the first quantizer must be used for all matrices
2024  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2025  init_dequantizer(s, i);
2026 
2027  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2028  return buf_size;
2029 
2030  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
2031  s->current_frame.f->key_frame = s->keyframe;
2032  if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0)
2033  goto error;
2034 
2035  if (!s->edge_emu_buffer)
2036  s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.f->linesize[0]));
2037 
2038  if (s->keyframe) {
2039  if (!s->theora)
2040  {
2041  skip_bits(&gb, 4); /* width code */
2042  skip_bits(&gb, 4); /* height code */
2043  if (s->version)
2044  {
2045  s->version = get_bits(&gb, 5);
2046  if (avctx->frame_number == 0)
2047  av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
2048  }
2049  }
2050  if (s->version || s->theora)
2051  {
2052  if (get_bits1(&gb))
2053  av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
2054  skip_bits(&gb, 2); /* reserved? */
2055  }
2056  } else {
2057  if (!s->golden_frame.f->data[0]) {
2058  av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
2059 
2060  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2061  if (ff_thread_get_buffer(avctx, &s->golden_frame, AV_GET_BUFFER_FLAG_REF) < 0)
2062  goto error;
2063  ff_thread_report_progress(&s->golden_frame, INT_MAX, 0);
2064  if ((ret = ff_thread_ref_frame(&s->last_frame, &s->golden_frame)) < 0)
2065  goto error;
2066  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2067  }
2068  }
2069 
2070  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2071  ff_thread_finish_setup(avctx);
2072 
2073  if (unpack_superblocks(s, &gb)){
2074  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2075  goto error;
2076  }
2077  if (unpack_modes(s, &gb)){
2078  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2079  goto error;
2080  }
2081  if (unpack_vectors(s, &gb)){
2082  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2083  goto error;
2084  }
2085  if (unpack_block_qpis(s, &gb)){
2086  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2087  goto error;
2088  }
2089  if (unpack_dct_coeffs(s, &gb)){
2090  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2091  goto error;
2092  }
2093 
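/* Note: at this point the frame body has been parsed in its fixed bitstream order --
 * coded-block flags (superblocks), macroblock coding modes, motion vectors, per-block
 * qp indices, then the DCT coefficient tokens. What follows is reconstruction: the frame
 * is rendered slice by slice (one chroma superblock row per call to render_slice), with
 * the loop filter applied to the final row afterwards. */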
2094  for (i = 0; i < 3; i++) {
2095  int height = s->height >> (i && s->chroma_y_shift);
2096  if (s->flipped_image)
2097  s->data_offset[i] = 0;
2098  else
2099  s->data_offset[i] = (height-1) * s->current_frame.f->linesize[i];
2100  }
2101 
2102  s->last_slice_end = 0;
2103  for (i = 0; i < s->c_superblock_height; i++)
2104  render_slice(s, i);
2105 
2106  // filter the last row
2107  for (i = 0; i < 3; i++) {
2108  int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
2109  apply_loop_filter(s, i, row, row+1);
2110  }
2111  vp3_draw_horiz_band(s, s->height);
2112 
2113  if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
2114  return ret;
2115  *got_frame = 1;
2116 
2117  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) {
2118  ret = update_frames(avctx);
2119  if (ret < 0)
2120  return ret;
2121  }
2122 
2123  return buf_size;
2124 
2125 error:
2126  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2127 
2128  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
2129  av_frame_unref(s->current_frame.f);
2130 
2131  return -1;
2132 }
2133 
2134 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
2135 {
2136  Vp3DecodeContext *s = avctx->priv_data;
2137 
2138  if (get_bits1(gb)) {
2139  int token;
2140  if (s->entries >= 32) { /* overflow */
2141  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2142  return -1;
2143  }
2144  token = get_bits(gb, 5);
2145  av_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2146  s->hti, s->hbits, token, s->entries, s->huff_code_size);
2147  s->huffman_table[s->hti][token][0] = s->hbits;
2148  s->huffman_table[s->hti][token][1] = s->huff_code_size;
2149  s->entries++;
2150  }
2151  else {
2152  if (s->huff_code_size >= 32) {/* overflow */
2153  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2154  return -1;
2155  }
2156  s->huff_code_size++;
2157  s->hbits <<= 1;
2158  if (read_huffman_tree(avctx, gb))
2159  return -1;
2160  s->hbits |= 1;
2161  if (read_huffman_tree(avctx, gb))
2162  return -1;
2163  s->hbits >>= 1;
2164  s->huff_code_size--;
2165  }
2166  return 0;
2167 }
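/* Note: read_huffman_tree() performs a depth-first walk of the code tree as stored in
 * the header: a 1 bit closes the current branch with a leaf carrying a 5-bit token
 * (code = hbits, length = huff_code_size), while a 0 bit descends into the 0-child and
 * then the 1-child, extending hbits by one bit per level. The resulting (code, length)
 * pairs are what init_vlc() consumes in vp3_decode_init(). */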
2168 
2169 static int vp3_init_thread_copy(AVCodecContext *avctx)
2170 {
2171  Vp3DecodeContext *s = avctx->priv_data;
2172 
2173  s->superblock_coding = NULL;
2174  s->all_fragments = NULL;
2175  s->coded_fragment_list[0] = NULL;
2176  s->dct_tokens_base = NULL;
2177  s->superblock_fragments = NULL;
2178  s->macroblock_coding = NULL;
2179  s->motion_val[0] = NULL;
2180  s->motion_val[1] = NULL;
2181  s->edge_emu_buffer = NULL;
2182 
2183  return init_frames(s);
2184 }
2185 
2186 #if CONFIG_THEORA_DECODER
2187 static const enum AVPixelFormat theora_pix_fmts[4] = {
2188  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2189 };
2190 
2191 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2192 {
2193  Vp3DecodeContext *s = avctx->priv_data;
2194  int visible_width, visible_height, colorspace;
2195  int offset_x = 0, offset_y = 0;
2196  AVRational fps, aspect;
2197 
2198  s->theora = get_bits_long(gb, 24);
2199  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2200 
2201  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2202  /* but previous versions have the image flipped relative to vp3 */
2203  if (s->theora < 0x030200)
2204  {
2205  s->flipped_image = 1;
2206  av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
2207  }
2208 
2209  visible_width = s->width = get_bits(gb, 16) << 4;
2210  visible_height = s->height = get_bits(gb, 16) << 4;
2211 
2212  if(av_image_check_size(s->width, s->height, 0, avctx)){
2213  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
2214  s->width= s->height= 0;
2215  return -1;
2216  }
2217 
2218  if (s->theora >= 0x030200) {
2219  visible_width = get_bits_long(gb, 24);
2220  visible_height = get_bits_long(gb, 24);
2221 
2222  offset_x = get_bits(gb, 8); /* offset x */
2223  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2224  }
2225 
2226  fps.num = get_bits_long(gb, 32);
2227  fps.den = get_bits_long(gb, 32);
2228  if (fps.num>0 && fps.den>0) {
2229  av_reduce(&avctx->time_base.num, &avctx->time_base.den,
2230  fps.den, fps.num, 1<<30);
2231  }
2232 
2233  aspect.num = get_bits_long(gb, 24);
2234  aspect.den = get_bits_long(gb, 24);
2235  if (aspect.num && aspect.den) {
2236  av_reduce(&avctx->sample_aspect_ratio.num,
2237  &avctx->sample_aspect_ratio.den,
2238  aspect.num, aspect.den, 1<<30);
2239  }
2240 
2241  if (s->theora < 0x030200)
2242  skip_bits(gb, 5); /* keyframe frequency force */
2243  colorspace = get_bits(gb, 8);
2244  skip_bits(gb, 24); /* bitrate */
2245 
2246  skip_bits(gb, 6); /* quality hint */
2247 
2248  if (s->theora >= 0x030200)
2249  {
2250  skip_bits(gb, 5); /* keyframe frequency force */
2251  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2252  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2253  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2254  return AVERROR_INVALIDDATA;
2255  }
2256  skip_bits(gb, 3); /* reserved */
2257  }
2258 
2259 // align_get_bits(gb);
2260 
2261  if ( visible_width <= s->width && visible_width > s->width-16
2262  && visible_height <= s->height && visible_height > s->height-16
2263  && !offset_x && (offset_y == s->height - visible_height))
2264  avcodec_set_dimensions(avctx, visible_width, visible_height);
2265  else
2266  avcodec_set_dimensions(avctx, s->width, s->height);
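/* Note: the visible (cropped) Theora dimensions are only exported when they differ from
 * the 16-aligned coded size by less than one macroblock in each direction, offset_x is
 * zero and offset_y equals coded_height - visible_height; anything else falls back to
 * the full coded dimensions. */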
2267 
2268  if (colorspace == 1) {
2269  avctx->color_primaries = AVCOL_PRI_BT470M;
2270  } else if (colorspace == 2) {
2271  avctx->color_primaries = AVCOL_PRI_BT470BG;
2272  }
2273  if (colorspace == 1 || colorspace == 2) {
2274  avctx->colorspace = AVCOL_SPC_BT470BG;
2275  avctx->color_trc = AVCOL_TRC_BT709;
2276  }
2277 
2278  return 0;
2279 }
2280 
2281 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2282 {
2283  Vp3DecodeContext *s = avctx->priv_data;
2284  int i, n, matrices, inter, plane;
2285 
2286  if (s->theora >= 0x030200) {
2287  n = get_bits(gb, 3);
2288  /* loop filter limit values table */
2289  if (n)
2290  for (i = 0; i < 64; i++)
2291  s->filter_limit_values[i] = get_bits(gb, n);
2292  }
2293 
2294  if (s->theora >= 0x030200)
2295  n = get_bits(gb, 4) + 1;
2296  else
2297  n = 16;
2298  /* quality threshold table */
2299  for (i = 0; i < 64; i++)
2300  s->coded_ac_scale_factor[i] = get_bits(gb, n);
2301 
2302  if (s->theora >= 0x030200)
2303  n = get_bits(gb, 4) + 1;
2304  else
2305  n = 16;
2306  /* dc scale factor table */
2307  for (i = 0; i < 64; i++)
2308  s->coded_dc_scale_factor[i] = get_bits(gb, n);
2309 
2310  if (s->theora >= 0x030200)
2311  matrices = get_bits(gb, 9) + 1;
2312  else
2313  matrices = 3;
2314 
2315  if(matrices > 384){
2316  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
2317  return -1;
2318  }
2319 
2320  for(n=0; n<matrices; n++){
2321  for (i = 0; i < 64; i++)
2322  s->base_matrix[n][i]= get_bits(gb, 8);
2323  }
2324 
2325  for (inter = 0; inter <= 1; inter++) {
2326  for (plane = 0; plane <= 2; plane++) {
2327  int newqr= 1;
2328  if (inter || plane > 0)
2329  newqr = get_bits1(gb);
2330  if (!newqr) {
2331  int qtj, plj;
2332  if(inter && get_bits1(gb)){
2333  qtj = 0;
2334  plj = plane;
2335  }else{
2336  qtj= (3*inter + plane - 1) / 3;
2337  plj= (plane + 2) % 3;
2338  }
2339  s->qr_count[inter][plane]= s->qr_count[qtj][plj];
2340  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
2341  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
2342  } else {
2343  int qri= 0;
2344  int qi = 0;
2345 
2346  for(;;){
2347  i= get_bits(gb, av_log2(matrices-1)+1);
2348  if(i>= matrices){
2349  av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
2350  return -1;
2351  }
2352  s->qr_base[inter][plane][qri]= i;
2353  if(qi >= 63)
2354  break;
2355  i = get_bits(gb, av_log2(63-qi)+1) + 1;
2356  s->qr_size[inter][plane][qri++]= i;
2357  qi += i;
2358  }
2359 
2360  if (qi > 63) {
2361  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2362  return -1;
2363  }
2364  s->qr_count[inter][plane]= qri;
2365  }
2366  }
2367  }
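/* Note: for each (inter, plane) pair the quant-range setup either reuses a previously
 * decoded set (newqr == 0 copies qr_count/qr_size/qr_base from an earlier pair) or reads
 * a fresh list of base-matrix indices and range sizes; the sizes must sum to exactly 63
 * so that the ranges cover quality indices 0 through 63. */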
2368 
2369  /* Huffman tables */
2370  for (s->hti = 0; s->hti < 80; s->hti++) {
2371  s->entries = 0;
2372  s->huff_code_size = 1;
2373  if (!get_bits1(gb)) {
2374  s->hbits = 0;
2375  if(read_huffman_tree(avctx, gb))
2376  return -1;
2377  s->hbits = 1;
2378  if(read_huffman_tree(avctx, gb))
2379  return -1;
2380  }
2381  }
2382 
2383  s->theora_tables = 1;
2384 
2385  return 0;
2386 }
2387 
2388 static av_cold int theora_decode_init(AVCodecContext *avctx)
2389 {
2390  Vp3DecodeContext *s = avctx->priv_data;
2391  GetBitContext gb;
2392  int ptype;
2393  uint8_t *header_start[3];
2394  int header_len[3];
2395  int i;
2396 
2397  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2398 
2399  s->theora = 1;
2400 
2401  if (!avctx->extradata_size)
2402  {
2403  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2404  return -1;
2405  }
2406 
2407  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
2408  42, header_start, header_len) < 0) {
2409  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
2410  return -1;
2411  }
2412 
2413  for(i=0;i<3;i++) {
2414  if (header_len[i] <= 0)
2415  continue;
2416  init_get_bits(&gb, header_start[i], header_len[i] * 8);
2417 
2418  ptype = get_bits(&gb, 8);
2419 
2420  if (!(ptype & 0x80))
2421  {
2422  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2423 // return -1;
2424  }
2425 
2426  // FIXME: Check for this as well.
2427  skip_bits_long(&gb, 6*8); /* "theora" */
2428 
2429  switch(ptype)
2430  {
2431  case 0x80:
2432  if (theora_decode_header(avctx, &gb) < 0)
2433  return -1;
2434  break;
2435  case 0x81:
2436 // FIXME: is this needed? it breaks sometimes
2437 // theora_decode_comments(avctx, gb);
2438  break;
2439  case 0x82:
2440  if (theora_decode_tables(avctx, &gb))
2441  return -1;
2442  break;
2443  default:
2444  av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
2445  break;
2446  }
2447  if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
2448  av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
2449  if (s->theora < 0x030200)
2450  break;
2451  }
2452 
2453  return vp3_decode_init(avctx);
2454 }
2455 
2456 AVCodec ff_theora_decoder = {
2457  .name = "theora",
2458  .type = AVMEDIA_TYPE_VIDEO,
2459  .id = AV_CODEC_ID_THEORA,
2460  .priv_data_size = sizeof(Vp3DecodeContext),
2461  .init = theora_decode_init,
2462  .close = vp3_decode_end,
2463  .decode = vp3_decode_frame,
2464  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2465  CODEC_CAP_FRAME_THREADS,
2466  .flush = vp3_decode_flush,
2467  .long_name = NULL_IF_CONFIG_SMALL("Theora"),
2468  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2469  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
2470 };
2471 #endif
2472 
2473 AVCodec ff_vp3_decoder = {
2474  .name = "vp3",
2475  .type = AVMEDIA_TYPE_VIDEO,
2476  .id = AV_CODEC_ID_VP3,
2477  .priv_data_size = sizeof(Vp3DecodeContext),
2478  .init = vp3_decode_init,
2479  .close = vp3_decode_end,
2480  .decode = vp3_decode_frame,
2481  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2482  CODEC_CAP_FRAME_THREADS,
2483  .flush = vp3_decode_flush,
2484  .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
2485  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2486  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
2487 };