svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 #include "internal.h"
43 #include "avcodec.h"
44 #include "mpegvideo.h"
45 #include "h264.h"
46 
47 #include "h264data.h" // FIXME FIXME FIXME
48 
49 #include "h264_mvpred.h"
50 #include "golomb.h"
51 #include "hpeldsp.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
54 
55 #if CONFIG_ZLIB
56 #include <zlib.h>
57 #endif
58 
59 #include "svq1.h"
60 #include "svq3.h"
61 
62 /**
63  * @file
64  * svq3 decoder.
65  */
66 
67 typedef struct {
77  uint32_t watermark_key;
79  int buf_size;
85 } SVQ3Context;
86 
/* per-macroblock motion-compensation precision modes (decoded in svq3_decode_mb) */
#define FULLPEL_MODE  1   /* full-pixel motion vectors */
#define HALFPEL_MODE  2   /* half-pixel motion vectors */
#define THIRDPEL_MODE 3   /* third-pixel motion vectors (SVQ3-specific) */
#define PREDICT_MODE  4   /* MVs scaled from the next frame's vectors (skip prediction) */
91 
/* dual scan (from some older h264 draft)
 *  o-->o-->o   o
 *          |  /|
 *  o   o   o / o
 *  | / |   |/  |
 *  o   o   o   o
 *    /
 *  o-->o-->o-->o
 */
/* 4x4 coefficient scan order (raster position = x + y*4) following the
 * dual-scan pattern illustrated above; selected for inter blocks in
 * svq3_decode_block via scan_patterns[] */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
107 
/* maps a luma-prediction golomb code (0..24) to a pair of indices into the
 * candidate lists of svq3_pred_1 (see svq3_decode_mb); the pairs traverse a
 * 5x5 grid along anti-diagonals */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
119 
/* candidate 4x4 intra prediction modes, indexed [top mode + 1][left mode + 1]
 * (the +1 maps the "unavailable" marker -1 to index 0); -1 entries in the
 * candidate list itself mean "no such candidate" and are rejected as
 * "weird prediction" in svq3_decode_mb */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
134 
/**
 * (run, level) pairs for the first 16 golomb codes of the two DCT coefficient
 * VLC tables, indexed by the `intra` flag in svq3_decode_block; codes >= 16
 * are decoded arithmetically instead of via this table.
 *
 * NOTE(review): the member declarations were dropped by the scrape and are
 * reconstructed from the .run/.level accesses in svq3_decode_block.
 */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
144 
/* dequantization factor per qscale value (0..31) */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

/**
 * Dequantize and inverse-transform the 16 luma DC coefficients of a
 * macroblock.
 *
 * Applies the SVQ3 4x4 transform (13/17/7 butterflies) in two passes, scales
 * by the dequant factor for qp with .20 fixed-point rounding, and scatters
 * the results to the DC slot of each 4x4 sub-block in the stride-16 output
 * layout (row offsets 0/2/8/10, column offsets per x_offset[]).
 *
 * Fix: the products (z * qmul) can exceed INT_MAX for large coefficients,
 * which is signed-overflow undefined behavior. Perform the
 * multiply-accumulate in unsigned arithmetic (well-defined wraparound) and
 * convert back to int only for the final arithmetic shift.
 *
 * @param output destination block (stride-16 layout, DC positions written)
 * @param input  16 luma DC coefficients in raster order
 * @param qp     quantizer index into svq3_dequant_coeff (caller keeps it <= 31)
 */
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
{
    const unsigned qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    /* horizontal pass */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 =  7 *  input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 *  input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }

    /* vertical pass, dequantization and rounding shift */
    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0     = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1     = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2     =  7 *  temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3     = 17 *  temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        /* unsigned multiply avoids signed-overflow UB; value fits after >> 20 */
        output[stride *  0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
        output[stride *  2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
        output[stride *  8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
        output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
    }
}
#undef stride
186 
187 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
188  int stride, int qp, int dc)
189 {
190  const int qmul = svq3_dequant_coeff[qp];
191  int i;
192 
193  if (dc) {
194  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
195  : qmul * (block[0] >> 3) / 2);
196  block[0] = 0;
197  }
198 
199  for (i = 0; i < 4; i++) {
200  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
201  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
202  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
203  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
204 
205  block[0 + 4 * i] = z0 + z3;
206  block[1 + 4 * i] = z1 + z2;
207  block[2 + 4 * i] = z1 - z2;
208  block[3 + 4 * i] = z0 - z3;
209  }
210 
211  for (i = 0; i < 4; i++) {
212  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
213  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
214  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
215  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
216  const int rr = (dc + 0x80000);
217 
218  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
219  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
220  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
221  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
222  }
223 
224  memset(block, 0, 16 * sizeof(int16_t));
225 }
226 
227 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
228  int index, const int type)
229 {
230  static const uint8_t *const scan_patterns[4] =
232 
233  int run, level, sign, limit;
234  unsigned vlc;
235  const int intra = 3 * type >> 2;
236  const uint8_t *const scan = scan_patterns[type];
237 
238  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
239  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
240  if ((int32_t)vlc < 0)
241  return -1;
242 
243  sign = (vlc & 1) ? 0 : -1;
244  vlc = vlc + 1 >> 1;
245 
246  if (type == 3) {
247  if (vlc < 3) {
248  run = 0;
249  level = vlc;
250  } else if (vlc < 4) {
251  run = 1;
252  level = 1;
253  } else {
254  run = vlc & 0x3;
255  level = (vlc + 9 >> 2) - run;
256  }
257  } else {
258  if (vlc < 16U) {
259  run = svq3_dct_tables[intra][vlc].run;
260  level = svq3_dct_tables[intra][vlc].level;
261  } else if (intra) {
262  run = vlc & 0x7;
263  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
264  } else {
265  run = vlc & 0xF;
266  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
267  }
268  }
269 
270 
271  if ((index += run) >= limit)
272  return -1;
273 
274  block[scan[index]] = (level ^ sign) - sign;
275  }
276 
277  if (type != 2) {
278  break;
279  }
280  }
281 
282  return 0;
283 }
284 
285 static inline void svq3_mc_dir_part(SVQ3Context *s,
286  int x, int y, int width, int height,
287  int mx, int my, int dxy,
288  int thirdpel, int dir, int avg)
289 {
290  H264Context *h = &s->h;
291  const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
292  uint8_t *src, *dest;
293  int i, emu = 0;
294  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
295 
296  mx += x;
297  my += y;
298 
299  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
300  my < 0 || my >= s->v_edge_pos - height - 1) {
301  emu = 1;
302  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
303  my = av_clip(my, -16, s->v_edge_pos - height + 15);
304  }
305 
306  /* form component predictions */
307  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
308  src = pic->f.data[0] + mx + my * h->linesize;
309 
310  if (emu) {
312  width + 1, height + 1,
313  mx, my, s->h_edge_pos, s->v_edge_pos);
314  src = h->edge_emu_buffer;
315  }
316  if (thirdpel)
317  (avg ? h->dsp.avg_tpel_pixels_tab
318  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
319  width, height);
320  else
321  (avg ? s->hdsp.avg_pixels_tab
322  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
323  height);
324 
325  if (!(h->flags & CODEC_FLAG_GRAY)) {
326  mx = mx + (mx < (int) x) >> 1;
327  my = my + (my < (int) y) >> 1;
328  width = width >> 1;
329  height = height >> 1;
330  blocksize++;
331 
332  for (i = 1; i < 3; i++) {
333  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
334  src = pic->f.data[i] + mx + my * h->uvlinesize;
335 
336  if (emu) {
338  width + 1, height + 1,
339  mx, my, (s->h_edge_pos >> 1),
340  s->v_edge_pos >> 1);
341  src = h->edge_emu_buffer;
342  }
343  if (thirdpel)
344  (avg ? h->dsp.avg_tpel_pixels_tab
345  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
346  h->uvlinesize,
347  width, height);
348  else
349  (avg ? s->hdsp.avg_pixels_tab
350  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
351  h->uvlinesize,
352  height);
353  }
354  }
355 }
356 
/**
 * Decode motion vectors for all partitions of the current macroblock and
 * perform motion compensation for each.
 *
 * size encodes the partition geometry (16x16 / 16x8 / 8x16 / 8x8 / 8x4 /
 * 4x8 / 4x4 via part_width/part_height below); mode is one of the *_MODE
 * precisions; dir selects forward/backward, avg selects put vs. average MC.
 * Returns 0 on success, -1 on an invalid MV VLC.
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    H264Context *h        = &s->h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    /* PREDICT_MODE widens the clip range (MVs come pre-scaled, see below) */
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * h->mb_x + (j >> 2)) +
                             (4 * h->mb_y + (i >> 2)) * h->b_stride;
            int dxy;
            x = 16 * h->mb_x + j;
            y = 16 * h->mb_y + i;
            /* scan8-style cache index for this 4x4 position */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* scale the co-located MV of the next frame by the temporal
                 * distance ratio (forward vs. backward) */
                mx = s->next_pic->motion_val[0][b_xy][0] << 1;
                my = s->next_pic->motion_val[0][b_xy][1] << 1;

                if (dir == 0) {
                    mx = mx * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&h->gb);
                dx = svq3_get_se_golomb(&h->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector; the prediction is in 1/6-pel units and
             * is rescaled to the precision of the chosen mode */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
                fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
                my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
                my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
470 
471 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
472 {
473  H264Context *h = &s->h;
474  int i, j, k, m, dir, mode;
475  int cbp = 0;
476  uint32_t vlc;
477  int8_t *top, *left;
478  const int mb_xy = h->mb_xy;
479  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
480 
481  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
482  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
483  h->topright_samples_available = 0xFFFF;
484 
485  if (mb_type == 0) { /* SKIP */
486  if (h->pict_type == AV_PICTURE_TYPE_P ||
487  s->next_pic->mb_type[mb_xy] == -1) {
488  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
489  0, 0, 0, 0, 0, 0);
490 
491  if (h->pict_type == AV_PICTURE_TYPE_B)
492  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
493  0, 0, 0, 0, 1, 1);
494 
495  mb_type = MB_TYPE_SKIP;
496  } else {
497  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
498  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
499  return -1;
500  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
501  return -1;
502 
503  mb_type = MB_TYPE_16x16;
504  }
505  } else if (mb_type < 8) { /* INTER */
506  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
507  mode = THIRDPEL_MODE;
508  else if (s->halfpel_flag &&
509  s->thirdpel_flag == !get_bits1(&h->gb))
510  mode = HALFPEL_MODE;
511  else
512  mode = FULLPEL_MODE;
513 
514  /* fill caches */
515  /* note ref_cache should contain here:
516  * ????????
517  * ???11111
518  * N??11111
519  * N??11111
520  * N??11111
521  */
522 
523  for (m = 0; m < 2; m++) {
524  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
525  for (i = 0; i < 4; i++)
526  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
527  h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
528  } else {
529  for (i = 0; i < 4; i++)
530  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
531  }
532  if (h->mb_y > 0) {
533  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
534  h->cur_pic.motion_val[m][b_xy - h->b_stride],
535  4 * 2 * sizeof(int16_t));
536  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
537  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
538 
539  if (h->mb_x < h->mb_width - 1) {
540  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
541  h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
542  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
543  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
544  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
545  } else
546  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
547  if (h->mb_x > 0) {
548  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
549  h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
550  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
551  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
552  } else
553  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
554  } else
555  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
556  PART_NOT_AVAILABLE, 8);
557 
558  if (h->pict_type != AV_PICTURE_TYPE_B)
559  break;
560  }
561 
562  /* decode motion vector(s) and form prediction(s) */
563  if (h->pict_type == AV_PICTURE_TYPE_P) {
564  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
565  return -1;
566  } else { /* AV_PICTURE_TYPE_B */
567  if (mb_type != 2) {
568  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
569  return -1;
570  } else {
571  for (i = 0; i < 4; i++)
572  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
573  0, 4 * 2 * sizeof(int16_t));
574  }
575  if (mb_type != 1) {
576  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
577  return -1;
578  } else {
579  for (i = 0; i < 4; i++)
580  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
581  0, 4 * 2 * sizeof(int16_t));
582  }
583  }
584 
585  mb_type = MB_TYPE_16x16;
586  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
587  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
588 
589  if (mb_type == 8) {
590  if (h->mb_x > 0) {
591  for (i = 0; i < 4; i++)
592  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
593  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
594  h->left_samples_available = 0x5F5F;
595  }
596  if (h->mb_y > 0) {
597  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
598  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
599  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
600  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
601 
602  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
603  h->top_samples_available = 0x33FF;
604  }
605 
606  /* decode prediction codes for luma blocks */
607  for (i = 0; i < 16; i += 2) {
608  vlc = svq3_get_ue_golomb(&h->gb);
609 
610  if (vlc >= 25U) {
611  av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
612  return -1;
613  }
614 
615  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
616  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
617 
618  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
619  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
620 
621  if (left[1] == -1 || left[2] == -1) {
622  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
623  return -1;
624  }
625  }
626  } else { /* mb_type == 33, DC_128_PRED block type */
627  for (i = 0; i < 4; i++)
628  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
629  }
630 
632 
633  if (mb_type == 8) {
635 
636  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
637  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
638  } else {
639  for (i = 0; i < 4; i++)
640  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
641 
642  h->top_samples_available = 0x33FF;
643  h->left_samples_available = 0x5F5F;
644  }
645 
646  mb_type = MB_TYPE_INTRA4x4;
647  } else { /* INTRA16x16 */
648  dir = i_mb_type_info[mb_type - 8].pred_mode;
649  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
650 
651  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
652  av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
653  return -1;
654  }
655 
656  cbp = i_mb_type_info[mb_type - 8].cbp;
657  mb_type = MB_TYPE_INTRA16x16;
658  }
659 
660  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
661  for (i = 0; i < 4; i++)
662  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
663  0, 4 * 2 * sizeof(int16_t));
664  if (h->pict_type == AV_PICTURE_TYPE_B) {
665  for (i = 0; i < 4; i++)
666  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
667  0, 4 * 2 * sizeof(int16_t));
668  }
669  }
670  if (!IS_INTRA4x4(mb_type)) {
671  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
672  }
673  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
674  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
675  }
676 
677  if (!IS_INTRA16x16(mb_type) &&
678  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
679  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
680  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
681  return -1;
682  }
683 
684  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
685  : golomb_to_inter_cbp[vlc];
686  }
687  if (IS_INTRA16x16(mb_type) ||
688  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
689  h->qscale += svq3_get_se_golomb(&h->gb);
690 
691  if (h->qscale > 31u) {
692  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
693  return -1;
694  }
695  }
696  if (IS_INTRA16x16(mb_type)) {
697  AV_ZERO128(h->mb_luma_dc[0] + 0);
698  AV_ZERO128(h->mb_luma_dc[0] + 8);
699  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
701  "error while decoding intra luma dc\n");
702  return -1;
703  }
704  }
705 
706  if (cbp) {
707  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
708  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
709 
710  for (i = 0; i < 4; i++)
711  if ((cbp & (1 << i))) {
712  for (j = 0; j < 4; j++) {
713  k = index ? (1 * (j & 1) + 2 * (i & 1) +
714  2 * (j & 2) + 4 * (i & 2))
715  : (4 * i + j);
716  h->non_zero_count_cache[scan8[k]] = 1;
717 
718  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
720  "error while decoding block\n");
721  return -1;
722  }
723  }
724  }
725 
726  if ((cbp & 0x30)) {
727  for (i = 1; i < 3; ++i)
728  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
730  "error while decoding chroma dc block\n");
731  return -1;
732  }
733 
734  if ((cbp & 0x20)) {
735  for (i = 1; i < 3; i++) {
736  for (j = 0; j < 4; j++) {
737  k = 16 * i + j;
738  h->non_zero_count_cache[scan8[k]] = 1;
739 
740  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
742  "error while decoding chroma ac block\n");
743  return -1;
744  }
745  }
746  }
747  }
748  }
749  }
750 
751  h->cbp = cbp;
752  h->cur_pic.mb_type[mb_xy] = mb_type;
753 
754  if (IS_INTRA(mb_type))
756 
757  return 0;
758 }
759 
761 {
762  SVQ3Context *s = avctx->priv_data;
763  H264Context *h = &s->h;
764  const int mb_xy = h->mb_xy;
765  int i, header;
766  unsigned slice_id;
767 
768  header = get_bits(&h->gb, 8);
769 
770  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
771  /* TODO: what? */
772  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
773  return -1;
774  } else {
775  int length = header >> 5 & 3;
776 
778  8 * show_bits(&h->gb, 8 * length) +
779  8 * length;
780 
781  if (s->next_slice_index > h->gb.size_in_bits) {
782  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
783  return -1;
784  }
785 
786  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
787  skip_bits(&h->gb, 8);
788 
789  if (s->watermark_key) {
790  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
791  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
792  header ^ s->watermark_key);
793  }
794  if (length > 0) {
795  memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
796  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
797  }
798  skip_bits_long(&h->gb, 0);
799  }
800 
801  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
802  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
803  return -1;
804  }
805 
806  h->slice_type = golomb_to_pict_type[slice_id];
807 
808  if ((header & 0x9F) == 2) {
809  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
810  h->mb_skip_run = get_bits(&h->gb, i) -
811  (h->mb_y * h->mb_width + h->mb_x);
812  } else {
813  skip_bits1(&h->gb);
814  h->mb_skip_run = 0;
815  }
816 
817  h->slice_num = get_bits(&h->gb, 8);
818  h->qscale = get_bits(&h->gb, 5);
819  s->adaptive_quant = get_bits1(&h->gb);
820 
821  /* unknown fields */
822  skip_bits1(&h->gb);
823 
824  if (s->unknown_flag)
825  skip_bits1(&h->gb);
826 
827  skip_bits1(&h->gb);
828  skip_bits(&h->gb, 2);
829 
830  while (get_bits1(&h->gb))
831  skip_bits(&h->gb, 8);
832 
833  /* reset intra predictors and invalidate motion vector references */
834  if (h->mb_x > 0) {
835  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
836  -1, 4 * sizeof(int8_t));
837  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
838  -1, 8 * sizeof(int8_t) * h->mb_x);
839  }
840  if (h->mb_y > 0) {
841  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
842  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
843 
844  if (h->mb_x > 0)
845  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
846  }
847 
848  return 0;
849 }
850 
852 {
853  SVQ3Context *s = avctx->priv_data;
854  H264Context *h = &s->h;
855  int m;
856  unsigned char *extradata;
857  unsigned char *extradata_end;
858  unsigned int size;
859  int marker_found = 0;
860 
861  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
862  s->last_pic = av_mallocz(sizeof(*s->last_pic));
863  s->next_pic = av_mallocz(sizeof(*s->next_pic));
864  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
865  av_freep(&s->cur_pic);
866  av_freep(&s->last_pic);
867  av_freep(&s->next_pic);
868  return AVERROR(ENOMEM);
869  }
870 
871  if (ff_h264_decode_init(avctx) < 0)
872  return -1;
873 
874  ff_hpeldsp_init(&s->hdsp, avctx->flags);
875  h->flags = avctx->flags;
876  h->is_complex = 1;
877  h->sps.chroma_format_idc = 1;
879  avctx->pix_fmt = avctx->codec->pix_fmts[0];
880 
881  h->chroma_qp[0] = h->chroma_qp[1] = 4;
882  h->chroma_x_shift = h->chroma_y_shift = 1;
883 
884  s->halfpel_flag = 1;
885  s->thirdpel_flag = 1;
886  s->unknown_flag = 0;
887 
888  /* prowl for the "SEQH" marker in the extradata */
889  extradata = (unsigned char *)avctx->extradata;
890  extradata_end = avctx->extradata + avctx->extradata_size;
891  if (extradata) {
892  for (m = 0; m + 8 < avctx->extradata_size; m++) {
893  if (!memcmp(extradata, "SEQH", 4)) {
894  marker_found = 1;
895  break;
896  }
897  extradata++;
898  }
899  }
900 
901  /* if a match was found, parse the extra data */
902  if (marker_found) {
903  GetBitContext gb;
904  int frame_size_code;
905 
906  size = AV_RB32(&extradata[4]);
907  if (size > extradata_end - extradata - 8)
908  return AVERROR_INVALIDDATA;
909  init_get_bits(&gb, extradata + 8, size * 8);
910 
911  /* 'frame size code' and optional 'width, height' */
912  frame_size_code = get_bits(&gb, 3);
913  switch (frame_size_code) {
914  case 0:
915  avctx->width = 160;
916  avctx->height = 120;
917  break;
918  case 1:
919  avctx->width = 128;
920  avctx->height = 96;
921  break;
922  case 2:
923  avctx->width = 176;
924  avctx->height = 144;
925  break;
926  case 3:
927  avctx->width = 352;
928  avctx->height = 288;
929  break;
930  case 4:
931  avctx->width = 704;
932  avctx->height = 576;
933  break;
934  case 5:
935  avctx->width = 240;
936  avctx->height = 180;
937  break;
938  case 6:
939  avctx->width = 320;
940  avctx->height = 240;
941  break;
942  case 7:
943  avctx->width = get_bits(&gb, 12);
944  avctx->height = get_bits(&gb, 12);
945  break;
946  }
947 
948  s->halfpel_flag = get_bits1(&gb);
949  s->thirdpel_flag = get_bits1(&gb);
950 
951  /* unknown fields */
952  skip_bits1(&gb);
953  skip_bits1(&gb);
954  skip_bits1(&gb);
955  skip_bits1(&gb);
956 
957  h->low_delay = get_bits1(&gb);
958 
959  /* unknown field */
960  skip_bits1(&gb);
961 
962  while (get_bits1(&gb))
963  skip_bits(&gb, 8);
964 
965  s->unknown_flag = get_bits1(&gb);
966  avctx->has_b_frames = !h->low_delay;
967  if (s->unknown_flag) {
968 #if CONFIG_ZLIB
969  unsigned watermark_width = svq3_get_ue_golomb(&gb);
970  unsigned watermark_height = svq3_get_ue_golomb(&gb);
971  int u1 = svq3_get_ue_golomb(&gb);
972  int u2 = get_bits(&gb, 8);
973  int u3 = get_bits(&gb, 2);
974  int u4 = svq3_get_ue_golomb(&gb);
975  unsigned long buf_len = watermark_width *
976  watermark_height * 4;
977  int offset = get_bits_count(&gb) + 7 >> 3;
978  uint8_t *buf;
979 
980  if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
981  return -1;
982 
983  buf = av_malloc(buf_len);
984  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
985  watermark_width, watermark_height);
986  av_log(avctx, AV_LOG_DEBUG,
987  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
988  u1, u2, u3, u4, offset);
989  if (uncompress(buf, &buf_len, extradata + 8 + offset,
990  size - offset) != Z_OK) {
991  av_log(avctx, AV_LOG_ERROR,
992  "could not uncompress watermark logo\n");
993  av_free(buf);
994  return -1;
995  }
996  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
997  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
998  av_log(avctx, AV_LOG_DEBUG,
999  "watermark key %#x\n", s->watermark_key);
1000  av_free(buf);
1001 #else
1002  av_log(avctx, AV_LOG_ERROR,
1003  "this svq3 file contains watermark which need zlib support compiled in\n");
1004  return -1;
1005 #endif
1006  }
1007  }
1008 
1009  h->width = avctx->width;
1010  h->height = avctx->height;
1011  h->mb_width = (h->width + 15) / 16;
1012  h->mb_height = (h->height + 15) / 16;
1013  h->mb_stride = h->mb_width + 1;
1014  h->mb_num = h->mb_width * h->mb_height;
1015  h->b_stride = 4 * h->mb_width;
1016  s->h_edge_pos = h->mb_width * 16;
1017  s->v_edge_pos = h->mb_height * 16;
1018 
1019  if (ff_h264_alloc_tables(h) < 0) {
1020  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1021  return AVERROR(ENOMEM);
1022  }
1023 
1024  return 0;
1025 }
1026 
1027 static void free_picture(AVCodecContext *avctx, Picture *pic)
1028 {
1029  int i;
1030  for (i = 0; i < 2; i++) {
1031  av_buffer_unref(&pic->motion_val_buf[i]);
1032  av_buffer_unref(&pic->ref_index_buf[i]);
1033  }
1035 
1036  av_frame_unref(&pic->f);
1037 }
1038 
1039 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1040 {
1041  SVQ3Context *s = avctx->priv_data;
1042  H264Context *h = &s->h;
1043  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1044  const int mb_array_size = h->mb_stride * h->mb_height;
1045  const int b4_stride = h->mb_width * 4 + 1;
1046  const int b4_array_size = b4_stride * h->mb_height * 4;
1047  int ret;
1048 
1049  if (!pic->motion_val_buf[0]) {
1050  int i;
1051 
1052  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1053  if (!pic->mb_type_buf)
1054  return AVERROR(ENOMEM);
1055  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1056 
1057  for (i = 0; i < 2; i++) {
1058  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1059  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1060  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1061  ret = AVERROR(ENOMEM);
1062  goto fail;
1063  }
1064 
1065  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1066  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1067  }
1068  }
1069  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1070 
1071  ret = ff_get_buffer(avctx, &pic->f,
1072  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1073  if (ret < 0)
1074  goto fail;
1075 
1076  if (!h->edge_emu_buffer) {
1077  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1078  if (!h->edge_emu_buffer)
1079  return AVERROR(ENOMEM);
1080  }
1081 
1082  h->linesize = pic->f.linesize[0];
1083  h->uvlinesize = pic->f.linesize[1];
1084 
1085  return 0;
1086 fail:
1087  free_picture(avctx, pic);
1088  return ret;
1089 }
1090 
1091 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1092  int *got_frame, AVPacket *avpkt)
1093 {
1094  SVQ3Context *s = avctx->priv_data;
1095  H264Context *h = &s->h;
1096  int buf_size = avpkt->size;
1097  int left;
1098  uint8_t *buf;
1099  int ret, m, i;
1100 
1101  /* special case for last picture */
1102  if (buf_size == 0) {
1103  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1104  ret = av_frame_ref(data, &s->next_pic->f);
1105  if (ret < 0)
1106  return ret;
1107  s->last_frame_output = 1;
1108  *got_frame = 1;
1109  }
1110  return 0;
1111  }
1112 
1113  h->mb_x = h->mb_y = h->mb_xy = 0;
1114 
1115  if (s->watermark_key) {
1116  av_fast_malloc(&s->buf, &s->buf_size,
1117  buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1118  if (!s->buf)
1119  return AVERROR(ENOMEM);
1120  memcpy(s->buf, avpkt->data, buf_size);
1121  buf = s->buf;
1122  } else {
1123  buf = avpkt->data;
1124  }
1125 
1126  init_get_bits(&h->gb, buf, 8 * buf_size);
1127 
1128  if (svq3_decode_slice_header(avctx))
1129  return -1;
1130 
1131  h->pict_type = h->slice_type;
1132 
1133  if (h->pict_type != AV_PICTURE_TYPE_B)
1134  FFSWAP(Picture*, s->next_pic, s->last_pic);
1135 
1136  av_frame_unref(&s->cur_pic->f);
1137 
1138  /* for skipping the frame */
1139  s->cur_pic->f.pict_type = h->pict_type;
1141 
1142  ret = get_buffer(avctx, s->cur_pic);
1143  if (ret < 0)
1144  return ret;
1145 
1146  h->cur_pic_ptr = s->cur_pic;
1147  av_frame_unref(&h->cur_pic.f);
1148  h->cur_pic = *s->cur_pic;
1149  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1150  if (ret < 0)
1151  return ret;
1152 
1153  for (i = 0; i < 16; i++) {
1154  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1155  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1156  }
1157  for (i = 0; i < 16; i++) {
1158  h->block_offset[16 + i] =
1159  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1160  h->block_offset[48 + 16 + i] =
1161  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1162  }
1163 
1164  if (h->pict_type != AV_PICTURE_TYPE_I) {
1165  if (!s->last_pic->f.data[0]) {
1166  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1167  ret = get_buffer(avctx, s->last_pic);
1168  if (ret < 0)
1169  return ret;
1170  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1171  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1172  s->last_pic->f.linesize[1]);
1173  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1174  s->last_pic->f.linesize[2]);
1175  }
1176 
1177  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1178  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1179  ret = get_buffer(avctx, s->next_pic);
1180  if (ret < 0)
1181  return ret;
1182  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1183  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1184  s->next_pic->f.linesize[1]);
1185  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1186  s->next_pic->f.linesize[2]);
1187  }
1188  }
1189 
1190  if (avctx->debug & FF_DEBUG_PICT_INFO)
1192  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1194  s->halfpel_flag, s->thirdpel_flag,
1195  s->adaptive_quant, h->qscale, h->slice_num);
1196 
1197  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1199  avctx->skip_frame >= AVDISCARD_ALL)
1200  return 0;
1201 
1202  if (s->next_p_frame_damaged) {
1203  if (h->pict_type == AV_PICTURE_TYPE_B)
1204  return 0;
1205  else
1206  s->next_p_frame_damaged = 0;
1207  }
1208 
1209  if (h->pict_type == AV_PICTURE_TYPE_B) {
1211 
1212  if (h->frame_num_offset < 0)
1213  h->frame_num_offset += 256;
1214  if (h->frame_num_offset == 0 ||
1216  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1217  return -1;
1218  }
1219  } else {
1220  h->prev_frame_num = h->frame_num;
1221  h->frame_num = h->slice_num;
1223 
1224  if (h->prev_frame_num_offset < 0)
1225  h->prev_frame_num_offset += 256;
1226  }
1227 
1228  for (m = 0; m < 2; m++) {
1229  int i;
1230  for (i = 0; i < 4; i++) {
1231  int j;
1232  for (j = -1; j < 4; j++)
1233  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1234  if (i < 3)
1235  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1236  }
1237  }
1238 
1239  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1240  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1241  unsigned mb_type;
1242  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1243 
1244  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1245  ((get_bits_count(&h->gb) & 7) == 0 ||
1246  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1247  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1248  h->gb.size_in_bits = 8 * buf_size;
1249 
1250  if (svq3_decode_slice_header(avctx))
1251  return -1;
1252 
1253  /* TODO: support s->mb_skip_run */
1254  }
1255 
1256  mb_type = svq3_get_ue_golomb(&h->gb);
1257 
1258  if (h->pict_type == AV_PICTURE_TYPE_I)
1259  mb_type += 8;
1260  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1261  mb_type += 4;
1262  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1264  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1265  return -1;
1266  }
1267 
1268  if (mb_type != 0 || h->cbp)
1270 
1271  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1272  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1273  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1274  }
1275 
1276  ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1277  16 * h->mb_y, 16, h->picture_structure, 0, 0,
1278  h->low_delay, h->mb_height * 16, h->mb_width * 16);
1279  }
1280 
1281  left = buf_size*8 - get_bits_count(&h->gb);
1282 
1283  if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1284  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1285  //av_hex_dump(stderr, buf+buf_size-8, 8);
1286  }
1287 
1288  if (left < 0) {
1289  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1290  return -1;
1291  }
1292 
1293  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1294  ret = av_frame_ref(data, &s->cur_pic->f);
1295  else if (s->last_pic->f.data[0])
1296  ret = av_frame_ref(data, &s->last_pic->f);
1297  if (ret < 0)
1298  return ret;
1299 
1300  /* Do not output the last pic after seeking. */
1301  if (s->last_pic->f.data[0] || h->low_delay)
1302  *got_frame = 1;
1303 
1304  if (h->pict_type != AV_PICTURE_TYPE_B) {
1305  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1306  } else {
1307  av_frame_unref(&s->cur_pic->f);
1308  }
1309 
1310  return buf_size;
1311 }
1312 
1314 {
1315  SVQ3Context *s = avctx->priv_data;
1316  H264Context *h = &s->h;
1317 
1318  free_picture(avctx, s->cur_pic);
1319  free_picture(avctx, s->next_pic);
1320  free_picture(avctx, s->last_pic);
1321  av_freep(&s->cur_pic);
1322  av_freep(&s->next_pic);
1323  av_freep(&s->last_pic);
1324 
1325  av_frame_unref(&h->cur_pic.f);
1326 
1328 
1329  av_freep(&s->buf);
1330  s->buf_size = 0;
1332 
1333  return 0;
1334 }
1335 
1337  .name = "svq3",
1338  .type = AVMEDIA_TYPE_VIDEO,
1339  .id = AV_CODEC_ID_SVQ3,
1340  .priv_data_size = sizeof(SVQ3Context),
1344  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1345  CODEC_CAP_DR1 |
1347  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1348  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1349  AV_PIX_FMT_NONE},
1350 };
int chroma_format_idc
Definition: h264.h:154
#define MB_TYPE_INTRA16x16
uint8_t pred_mode
Definition: h264data.h:155
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
const struct AVCodec * codec
#define MB_TYPE_SKIP
discard all frames except keyframes
uint8_t * edge_emu_buffer
Definition: h264.h:648
int8_t * ref_index[2]
Definition: mpegvideo.h:114
const char * s
Definition: avisynth_c.h:668
unsigned int top_samples_available
Definition: h264.h:320
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
GetBitContext gb
Definition: h264.h:268
int low_delay
Definition: h264.h:290
int mb_num
Definition: h264.h:467
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
#define IS_SKIP(a)
Definition: mpegvideo.h:140
static const struct @87 svq3_dct_tables[2][16]
Picture * last_pic
Definition: svq3.c:72
int cbp
Definition: h264.h:435
HpelDSPContext hdsp
Definition: svq3.c:69
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:227
else temp
Definition: vf_mcdeint.c:148
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:198
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1455
int mb_y
Definition: h264.h:461
#define MB_TYPE_INTRA4x4
int chroma_x_shift
Definition: h264.h:284
const uint8_t * buffer
Definition: get_bits.h:55
static unsigned svq3_get_ue_golomb(GetBitContext *gb)
Definition: golomb.h:114
#define INVALID_VLC
Definition: golomb.h:37
int flags
Definition: h264.h:293
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int mb_height
Definition: h264.h:465
static void free_picture(AVCodecContext *avctx, Picture *pic)
Definition: svq3.c:1027
DSPContext dsp
Definition: h264.h:269
mpegvideo header.
int v_edge_pos
Definition: svq3.c:83
H264Context.
Definition: h264.h:260
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:135
uint8_t run
Definition: svq3.c:136
#define FULLPEL_MODE
Definition: svq3.c:87
int picture_structure
Definition: h264.h:382
#define AV_WN32A(p, v)
Definition: intreadwrite.h:530
#define AV_COPY32(d, s)
Definition: intreadwrite.h:578
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:828
int mb_skip_run
Definition: h264.h:464
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:357
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
enum AVDiscard skip_frame
Skip decoding for selected frames.
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_WL32(p, darg)
Definition: intreadwrite.h:282
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
int thirdpel_flag
Definition: svq3.c:74
#define IS_INTER(a)
Definition: mpegvideo.h:139
uint8_t
#define av_cold
Definition: attributes.h:78
int prev_frame_num_offset
for POC type 2
Definition: h264.h:511
#define DC_PRED8x8
Definition: h264pred.h:68
mode
Definition: f_perms.c:27
#define PICT_FRAME
Definition: mpegvideo.h:664
window constants for m
#define AV_RB32
static int get_buffer(AVCodecContext *avctx, Picture *pic)
Definition: svq3.c:1039
int mb_xy
Definition: h264.h:468
static const uint8_t luma_dc_zigzag_scan[16]
Definition: h264data.h:69
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
int uvlinesize
Definition: h264.h:283
int height
Definition: h264.h:282
int mb_x
Definition: h264.h:461
static const IMbInfo i_mb_type_info[26]
Definition: h264data.h:159
uint8_t * data
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:193
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
int chroma_y_shift
Definition: h264.h:284
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:107
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:489
int width
Definition: h264.h:282
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:471
H.264 / AVC / MPEG4 part10 codec.
Discrete Time axis x
#define U(x)
int frame_num
Definition: h264.h:507
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:473
int next_slice_index
Definition: svq3.c:76
int has_b_frames
Size of the frame reordering buffer in the decoder.
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:183
#define HALFPEL_MODE
Definition: svq3.c:88
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: dsputil.h:187
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:2469
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define DC_128_PRED
Definition: h264pred.h:51
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int reference
Definition: mpegvideo.h:178
Picture * next_pic
Definition: svq3.c:71
Spectrum Plot time data
int flags
CODEC_FLAG_*.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame This method is called when a frame is wanted on an output For an input
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
const char * name
Name of the codec implementation.
#define IS_INTRA(a)
Definition: mpegvideo.h:138
#define PREDICT_MODE
Definition: svq3.c:90
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
external API header
int size
Sorenson Vector Quantizer #1 (SVQ1) video codec.
static const uint8_t scan8[16 *3+3]
Definition: h264.h:812
static av_always_inline void pred_motion(H264Context *const h, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: h264_mvpred.h:93
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:37
static int svq3_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:207
int chroma_pred_mode
Definition: h264.h:300
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:187
useful rectangle filling function
unsigned int left_samples_available
Definition: h264.h:322
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:104
Half-pel DSP context.
Definition: hpeldsp.h:45
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
int frame_num_offset
for POC type 2
Definition: h264.h:510
uint32_t * mb2br_xy
Definition: h264.h:353
#define CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
#define FFMIN(a, b)
Definition: common.h:58
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
H264Context h
Definition: svq3.c:68
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1313
ret
Definition: avfilter.c:821
int width
picture width / height.
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:152
Picture.
Definition: mpegvideo.h:97
SPS sps
current sps
Definition: h264.h:360
int size_in_bits
Definition: get_bits.h:57
int32_t
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:255
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:851
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:316
#define AV_RL32
float u
unsigned int topright_samples_available
Definition: h264.h:321
int slice_type
Definition: h264.h:374
static const uint8_t golomb_to_intra4x4_cbp[48]
Definition: h264data.h:43
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, Picture *last, int y, int h, int picture_structure, int first_field, int draw_edges, int low_delay, int v_edge_pos, int h_edge_pos)
Definition: mpegvideo.c:2921
int last_frame_output
Definition: svq3.c:84
#define PART_NOT_AVAILABLE
Definition: h264.h:339
int next_p_frame_damaged
Definition: svq3.c:81
Picture cur_pic
Definition: h264.h:274
static const int8_t mv[256][2]
for k
VideoDSPContext vdsp
Definition: h264.h:262
NULL
Definition: eval.c:55
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1336
static int width
Definition: tests/utils.c:158
int mb_stride
Definition: h264.h:466
dest
Definition: start.py:60
AVCodecContext * avctx
Definition: h264.h:261
AVS_Value src
Definition: avisynth_c.h:523
H264 / AVC / MPEG4 part10 codec data table
static const uint8_t zigzag_scan[16+1]
Definition: h264data.h:55
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:512
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
main external API structure.
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
FIXME Range Coding of cr are mx and my are Motion Vector top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Predicton block[y][x] dc[1]
Definition: snow.txt:392
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:426
uint8_t * data
The data buffer.
Definition: buffer.h:89
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:1235
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:594
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:273
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:298
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:265
int index
Definition: gxfenc.c:89
synthesis window for stochastic i
static const uint8_t chroma_dc_scan[4]
Definition: h264data.h:83
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:285
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
#define MB_TYPE_16x16
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:108
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
#define type
int unknown_flag
Definition: svq3.c:75
uint8_t * buf
Definition: svq3.c:78
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:350
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by an given frame.
Definition: frame.c:228
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4978
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
uint8_t level
Definition: svq3.c:137
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:136
#define FF_DEBUG_PICT_INFO
#define AV_ZERO128(d)
Definition: intreadwrite.h:614
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:186
discard all non reference
int is_complex
Definition: h264.h:470
int qscale
Definition: h264.h:286
uint8_t cbp
Definition: h264data.h:156
common internal api header.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
int h_edge_pos
Definition: svq3.c:82
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:218
these buffered frames must be flushed immediately if a new input produces new output(Example:frame rate-doubling filter:filter_frame must(1) flush the second copy of the previous frame, if it is still there,(2) push the first copy of the incoming frame,(3) keep the second copy for later.) If the input frame is not enough to produce output
#define stride
function y
Definition: D.m:1
int chroma_qp[2]
Definition: h264.h:277
static const uint8_t golomb_to_inter_cbp[48]
Definition: h264data.h:49
static av_always_inline void write_back_intra_pred_mode(H264Context *h)
Definition: h264.h:872
int intra16x16_pred_mode
Definition: h264.h:301
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:145
#define THIRDPEL_MODE
Definition: svq3.c:89
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:58
int linesize
Definition: h264.h:283
Picture * cur_pic_ptr
Definition: h264.h:273
#define avg(d, s)
Definition: dsputil_align.c:52
#define av_log2
Definition: intmath.h:89
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:760
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
#define AV_ZERO32(d)
Definition: intreadwrite.h:606
int mb_width
Definition: h264.h:465
enum AVPictureType pict_type
Definition: h264.h:574
static const uint8_t svq3_scan[16]
Definition: svq3.c:101
Picture * cur_pic
Definition: svq3.c:70
struct AVFrame f
Definition: mpegvideo.h:98
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:120
int frame_number
Frame counter, set by libavcodec.
#define AV_LOG_INFO
Definition: log.h:156
uint32_t * mb_type
Definition: mpegvideo.h:108
uint32_t watermark_key
Definition: svq3.c:77
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
int8_t * intra4x4_pred_mode
Definition: h264.h:317
#define DC_PRED
Definition: h264pred.h:40
#define FFSWAP(type, a, b)
Definition: common.h:61
const char int length
Definition: avisynth_c.h:668
int buf_size
Definition: svq3.c:79
exp golomb vlc stuff
int slice_num
Definition: h264.h:372
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1091
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
for(j=16;j >0;--j)
int b_stride
Definition: h264.h:354
Predicted.
Definition: avutil.h:217
int halfpel_flag
Definition: svq3.c:73
int adaptive_quant
Definition: svq3.c:80
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:113