/* mpegvideo.c — doxygen source listing of FFmpeg's common MPEG-video code.
 * NOTE: the original file's line numbers are embedded at the start of each
 * line below, and several original lines are missing from this extraction. */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "h264chroma.h"
35 #include "internal.h"
36 #include "mathops.h"
37 #include "mpegvideo.h"
38 #include "mjpegenc.h"
39 #include "msmpeg4.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43 
44 //#undef NDEBUG
45 //#include <assert.h>
46 
48  int16_t *block, int n, int qscale);
50  int16_t *block, int n, int qscale);
52  int16_t *block, int n, int qscale);
54  int16_t *block, int n, int qscale);
56  int16_t *block, int n, int qscale);
58  int16_t *block, int n, int qscale);
60  int16_t *block, int n, int qscale);
61 
62 
63 //#define DEBUG
64 
65 
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
69  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
70 };
71 
73 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
74  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 };
83 
84 static const uint8_t mpeg2_dc_scale_table1[128] = {
85 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
86  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 };
95 
96 static const uint8_t mpeg2_dc_scale_table2[128] = {
97 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
98  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 };
107 
108 static const uint8_t mpeg2_dc_scale_table3[128] = {
109 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
110  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 };
119 
120 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
125 };
126 
130 };
131 
/* Error-resilience callback: decode (or conceal) a single macroblock.
 * Called by the ER code with the motion data for MB (mb_x, mb_y); it loads
 * that state into the MpegEncContext and invokes the generic MB decoder.
 * NOTE(review): this listing is missing original lines 146-147 here; the code
 * is reproduced exactly as extracted — verify against the upstream source. */
132 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
133  int (*mv)[2][4][2],
134  int mb_x, int mb_y, int mb_intra, int mb_skipped)
135 {
136  MpegEncContext *s = opaque;
137 
    /* Stage the per-MB decode state in the context. */
138  s->mv_dir = mv_dir;
139  s->mv_type = mv_type;
140  s->mb_intra = mb_intra;
141  s->mb_skipped = mb_skipped;
142  s->mb_x = mb_x;
143  s->mb_y = mb_y;
144  memcpy(s->mv, mv, sizeof(*mv));
145 
148 
    /* Clear residual coefficients: concealment uses motion compensation only. */
149  s->dsp.clear_blocks(s->block[0]);
150 
    /* Destination pointers for luma (16x16) and the two chroma planes,
     * scaled by the chroma subsampling shifts. */
151  s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
152  s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
153  s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
154 
    /* Only forward prediction from reference 0 is supported here. */
155  assert(ref == 0);
156  ff_MPV_decode_mb(s, s->block);
157 }
158 
159 /* init common dct for both encoder and decoder */
161 {
162  ff_dsputil_init(&s->dsp, s->avctx);
163  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
164  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
166 
172  if (s->flags & CODEC_FLAG_BITEXACT)
175 
176 #if ARCH_X86
178 #elif ARCH_ALPHA
180 #elif ARCH_ARM
182 #elif HAVE_ALTIVEC
184 #elif ARCH_BFIN
186 #endif
187 
188  /* load & permutate scantables
189  * note: only wmv uses different ones
190  */
191  if (s->alternate_scan) {
194  } else {
197  }
200 
201  return 0;
202 }
203 
205 {
206  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
207 
208  // edge emu needs blocksize + filter length - 1
209  // (= 17x17 for halfpel / 21x21 for h264)
210  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
211  // at uvlinesize. It supports only YUV420 so 24x24 is enough
212  // linesize * interlaced * MBsize
213  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
214  fail);
215 
216  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
217  fail)
218  s->me.temp = s->me.scratchpad;
219  s->rd_scratchpad = s->me.scratchpad;
220  s->b_scratchpad = s->me.scratchpad;
221  s->obmc_scratchpad = s->me.scratchpad + 16;
222 
223  return 0;
224 fail:
226  return AVERROR(ENOMEM);
227 }
228 
229 /**
230  * Allocate a frame buffer
231  */
233 {
234  int r, ret;
235 
236  pic->tf.f = &pic->f;
237  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
240  r = ff_thread_get_buffer(s->avctx, &pic->tf,
241  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
242  else {
243  pic->f.width = s->avctx->width;
244  pic->f.height = s->avctx->height;
245  pic->f.format = s->avctx->pix_fmt;
246  r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
247  }
248 
249  if (r < 0 || !pic->f.data[0]) {
250  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
251  r, pic->f.data[0]);
252  return -1;
253  }
254 
255  if (s->avctx->hwaccel) {
256  assert(!pic->hwaccel_picture_private);
257  if (s->avctx->hwaccel->priv_data_size) {
259  if (!pic->hwaccel_priv_buf) {
260  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
261  return -1;
262  }
264  }
265  }
266 
267  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
268  s->uvlinesize != pic->f.linesize[1])) {
270  "get_buffer() failed (stride changed)\n");
271  ff_mpeg_unref_picture(s, pic);
272  return -1;
273  }
274 
275  if (pic->f.linesize[1] != pic->f.linesize[2]) {
277  "get_buffer() failed (uv stride mismatch)\n");
278  ff_mpeg_unref_picture(s, pic);
279  return -1;
280  }
281 
282  if (!s->edge_emu_buffer &&
283  (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
285  "get_buffer() failed to allocate context scratch buffers.\n");
286  ff_mpeg_unref_picture(s, pic);
287  return ret;
288  }
289 
290  return 0;
291 }
292 
293 static void free_picture_tables(Picture *pic)
294 {
295  int i;
296 
297  pic->alloc_mb_width =
298  pic->alloc_mb_height = 0;
299 
306 
307  for (i = 0; i < 2; i++) {
309  av_buffer_unref(&pic->ref_index_buf[i]);
310  }
311 }
312 
314 {
315  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
316  const int mb_array_size = s->mb_stride * s->mb_height;
317  const int b8_array_size = s->b8_stride * s->mb_height * 2;
318  int i;
319 
320 
321  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
322  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
323  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
324  sizeof(uint32_t));
325  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
326  return AVERROR(ENOMEM);
327 
328  if (s->encoding) {
329  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
330  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
331  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
332  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
333  return AVERROR(ENOMEM);
334  }
335 
336  if (s->out_format == FMT_H263 || s->encoding ||
337  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
338  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
339  int ref_index_size = 4 * mb_array_size;
340 
341  for (i = 0; mv_size && i < 2; i++) {
342  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
343  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
344  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
345  return AVERROR(ENOMEM);
346  }
347  }
348 
349  pic->alloc_mb_width = s->mb_width;
350  pic->alloc_mb_height = s->mb_height;
351 
352  return 0;
353 }
354 
356 {
357  int ret, i;
358 #define MAKE_WRITABLE(table) \
359 do {\
360  if (pic->table &&\
361  (ret = av_buffer_make_writable(&pic->table)) < 0)\
362  return ret;\
363 } while (0)
364 
365  MAKE_WRITABLE(mb_var_buf);
366  MAKE_WRITABLE(mc_mb_var_buf);
367  MAKE_WRITABLE(mb_mean_buf);
368  MAKE_WRITABLE(mbskip_table_buf);
369  MAKE_WRITABLE(qscale_table_buf);
370  MAKE_WRITABLE(mb_type_buf);
371 
372  for (i = 0; i < 2; i++) {
373  MAKE_WRITABLE(motion_val_buf[i]);
374  MAKE_WRITABLE(ref_index_buf[i]);
375  }
376 
377  return 0;
378 }
379 
380 /**
381  * Allocate a Picture.
382  * The pixels are allocated/set by calling get_buffer() if shared = 0
383  */
384 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
385 {
386  int i, ret;
387 
388  if (pic->qscale_table_buf)
389  if ( pic->alloc_mb_width != s->mb_width
390  || pic->alloc_mb_height != s->mb_height)
391  free_picture_tables(pic);
392 
393  if (shared) {
394  assert(pic->f.data[0]);
395  pic->shared = 1;
396  } else {
397  assert(!pic->f.data[0]);
398 
399  if (alloc_frame_buffer(s, pic) < 0)
400  return -1;
401 
402  s->linesize = pic->f.linesize[0];
403  s->uvlinesize = pic->f.linesize[1];
404  }
405 
406  if (!pic->qscale_table_buf)
407  ret = alloc_picture_tables(s, pic);
408  else
409  ret = make_tables_writable(pic);
410  if (ret < 0)
411  goto fail;
412 
413  if (s->encoding) {
414  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
415  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
416  pic->mb_mean = pic->mb_mean_buf->data;
417  }
418 
419  pic->mbskip_table = pic->mbskip_table_buf->data;
420  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
421  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
422 
423  if (pic->motion_val_buf[0]) {
424  for (i = 0; i < 2; i++) {
425  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
426  pic->ref_index[i] = pic->ref_index_buf[i]->data;
427  }
428  }
429 
430  return 0;
431 fail:
432  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
433  ff_mpeg_unref_picture(s, pic);
434  free_picture_tables(pic);
435  return AVERROR(ENOMEM);
436 }
437 
438 /**
439  * Deallocate a picture.
440  */
442 {
443  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
444 
445  pic->tf.f = &pic->f;
446  /* WM Image / Screen codecs allocate internal buffers with different
447  * dimensions / colorspaces; ignore user-defined callbacks for these. */
448  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
451  ff_thread_release_buffer(s->avctx, &pic->tf);
452  else
453  av_frame_unref(&pic->f);
454 
456 
457  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
458 }
459 
461 {
462  int i;
463 
464 #define UPDATE_TABLE(table)\
465 do {\
466  if (src->table &&\
467  (!dst->table || dst->table->buffer != src->table->buffer)) {\
468  av_buffer_unref(&dst->table);\
469  dst->table = av_buffer_ref(src->table);\
470  if (!dst->table) {\
471  free_picture_tables(dst);\
472  return AVERROR(ENOMEM);\
473  }\
474  }\
475 } while (0)
476 
477  UPDATE_TABLE(mb_var_buf);
478  UPDATE_TABLE(mc_mb_var_buf);
479  UPDATE_TABLE(mb_mean_buf);
480  UPDATE_TABLE(mbskip_table_buf);
481  UPDATE_TABLE(qscale_table_buf);
482  UPDATE_TABLE(mb_type_buf);
483  for (i = 0; i < 2; i++) {
484  UPDATE_TABLE(motion_val_buf[i]);
485  UPDATE_TABLE(ref_index_buf[i]);
486  }
487 
488  dst->mb_var = src->mb_var;
489  dst->mc_mb_var = src->mc_mb_var;
490  dst->mb_mean = src->mb_mean;
491  dst->mbskip_table = src->mbskip_table;
492  dst->qscale_table = src->qscale_table;
493  dst->mb_type = src->mb_type;
494  for (i = 0; i < 2; i++) {
495  dst->motion_val[i] = src->motion_val[i];
496  dst->ref_index[i] = src->ref_index[i];
497  }
498 
499  dst->alloc_mb_width = src->alloc_mb_width;
500  dst->alloc_mb_height = src->alloc_mb_height;
501 
502  return 0;
503 }
504 
506 {
507  int ret;
508 
509  av_assert0(!dst->f.buf[0]);
510  av_assert0(src->f.buf[0]);
511 
512  src->tf.f = &src->f;
513  dst->tf.f = &dst->f;
514  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515  if (ret < 0)
516  goto fail;
517 
518  ret = update_picture_tables(dst, src);
519  if (ret < 0)
520  goto fail;
521 
522  if (src->hwaccel_picture_private) {
524  if (!dst->hwaccel_priv_buf)
525  goto fail;
527  }
528 
529  dst->field_picture = src->field_picture;
530  dst->mb_var_sum = src->mb_var_sum;
531  dst->mc_mb_var_sum = src->mc_mb_var_sum;
532  dst->b_frame_score = src->b_frame_score;
533  dst->needs_realloc = src->needs_realloc;
534  dst->reference = src->reference;
535  dst->shared = src->shared;
536 
537  return 0;
538 fail:
539  ff_mpeg_unref_picture(s, dst);
540  return ret;
541 }
542 
544 {
545  int y_size = s->b8_stride * (2 * s->mb_height + 1);
546  int c_size = s->mb_stride * (s->mb_height + 1);
547  int yc_size = y_size + 2 * c_size;
548  int i;
549 
550  s->edge_emu_buffer =
551  s->me.scratchpad =
552  s->me.temp =
553  s->rd_scratchpad =
554  s->b_scratchpad =
555  s->obmc_scratchpad = NULL;
556 
557  if (s->encoding) {
558  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
559  ME_MAP_SIZE * sizeof(uint32_t), fail)
561  ME_MAP_SIZE * sizeof(uint32_t), fail)
562  if (s->avctx->noise_reduction) {
564  2 * 64 * sizeof(int), fail)
565  }
566  }
567  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
568  s->block = s->blocks[0];
569 
570  for (i = 0; i < 12; i++) {
571  s->pblocks[i] = &s->block[i];
572  }
573 
574  if (s->out_format == FMT_H263) {
575  /* ac values */
577  yc_size * sizeof(int16_t) * 16, fail);
578  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
579  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
580  s->ac_val[2] = s->ac_val[1] + c_size;
581  }
582 
583  return 0;
584 fail:
585  return -1; // free() through ff_MPV_common_end()
586 }
587 
589 {
590  if (s == NULL)
591  return;
592 
594  av_freep(&s->me.scratchpad);
595  s->me.temp =
596  s->rd_scratchpad =
597  s->b_scratchpad =
598  s->obmc_scratchpad = NULL;
599 
600  av_freep(&s->dct_error_sum);
601  av_freep(&s->me.map);
602  av_freep(&s->me.score_map);
603  av_freep(&s->blocks);
604  av_freep(&s->ac_val_base);
605  s->block = NULL;
606 }
607 
609 {
610 #define COPY(a) bak->a = src->a
611  COPY(edge_emu_buffer);
612  COPY(me.scratchpad);
613  COPY(me.temp);
614  COPY(rd_scratchpad);
615  COPY(b_scratchpad);
616  COPY(obmc_scratchpad);
617  COPY(me.map);
618  COPY(me.score_map);
619  COPY(blocks);
620  COPY(block);
621  COPY(start_mb_y);
622  COPY(end_mb_y);
623  COPY(me.map_generation);
624  COPY(pb);
625  COPY(dct_error_sum);
626  COPY(dct_count[0]);
627  COPY(dct_count[1]);
628  COPY(ac_val_base);
629  COPY(ac_val[0]);
630  COPY(ac_val[1]);
631  COPY(ac_val[2]);
632 #undef COPY
633 }
634 
636 {
637  MpegEncContext bak;
638  int i, ret;
639  // FIXME copy only needed parts
640  // START_TIMER
641  backup_duplicate_context(&bak, dst);
642  memcpy(dst, src, sizeof(MpegEncContext));
643  backup_duplicate_context(dst, &bak);
644  for (i = 0; i < 12; i++) {
645  dst->pblocks[i] = &dst->block[i];
646  }
647  if (!dst->edge_emu_buffer &&
648  (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
649  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
650  "scratch buffers.\n");
651  return ret;
652  }
653  // STOP_TIMER("update_duplicate_context")
654  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
655  return 0;
656 }
657 
659  const AVCodecContext *src)
660 {
661  int i, ret;
662  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
663 
664  if (dst == src)
665  return 0;
666 
667  av_assert0(s != s1);
668 
669  // FIXME can parameters change on I-frames?
670  // in that case dst may need a reinit
671  if (!s->context_initialized) {
672  memcpy(s, s1, sizeof(MpegEncContext));
673 
674  s->avctx = dst;
675  s->bitstream_buffer = NULL;
677 
678  if (s1->context_initialized){
679 // s->picture_range_start += MAX_PICTURE_COUNT;
680 // s->picture_range_end += MAX_PICTURE_COUNT;
681  if((ret = ff_MPV_common_init(s)) < 0){
682  memset(s, 0, sizeof(MpegEncContext));
683  s->avctx = dst;
684  return ret;
685  }
686  }
687  }
688 
689  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
690  s->context_reinit = 0;
691  s->height = s1->height;
692  s->width = s1->width;
693  if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
694  return ret;
695  }
696 
697  s->avctx->coded_height = s1->avctx->coded_height;
698  s->avctx->coded_width = s1->avctx->coded_width;
699  s->avctx->width = s1->avctx->width;
700  s->avctx->height = s1->avctx->height;
701 
702  s->coded_picture_number = s1->coded_picture_number;
703  s->picture_number = s1->picture_number;
704  s->input_picture_number = s1->input_picture_number;
705 
706  av_assert0(!s->picture || s->picture != s1->picture);
707  if(s->picture)
708  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
709  ff_mpeg_unref_picture(s, &s->picture[i]);
710  if (s1->picture[i].f.data[0] &&
711  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
712  return ret;
713  }
714 
715 #define UPDATE_PICTURE(pic)\
716 do {\
717  ff_mpeg_unref_picture(s, &s->pic);\
718  if (s1->pic.f.data[0])\
719  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
720  else\
721  ret = update_picture_tables(&s->pic, &s1->pic);\
722  if (ret < 0)\
723  return ret;\
724 } while (0)
725 
726  UPDATE_PICTURE(current_picture);
727  UPDATE_PICTURE(last_picture);
728  UPDATE_PICTURE(next_picture);
729 
730  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
731  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
732  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
733 
734  // Error/bug resilience
735  s->next_p_frame_damaged = s1->next_p_frame_damaged;
736  s->workaround_bugs = s1->workaround_bugs;
737  s->padding_bug_score = s1->padding_bug_score;
738 
739  // MPEG4 timing info
740  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
741  (char *) &s1->shape - (char *) &s1->time_increment_bits);
742 
743  // B-frame info
744  s->max_b_frames = s1->max_b_frames;
745  s->low_delay = s1->low_delay;
746  s->droppable = s1->droppable;
747 
748  // DivX handling (doesn't work)
749  s->divx_packed = s1->divx_packed;
750 
751  if (s1->bitstream_buffer) {
752  if (s1->bitstream_buffer_size +
756  s1->allocated_bitstream_buffer_size);
757  s->bitstream_buffer_size = s1->bitstream_buffer_size;
758  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
759  s1->bitstream_buffer_size);
760  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
762  }
763 
764  // linesize dependend scratch buffer allocation
765  if (!s->edge_emu_buffer)
766  if (s1->linesize) {
767  if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
768  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
769  "scratch buffers.\n");
770  return AVERROR(ENOMEM);
771  }
772  } else {
773  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
774  "be allocated due to unknown size.\n");
775  }
776 
777  // MPEG2/interlacing info
778  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
779  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
780 
781  if (!s1->first_field) {
782  s->last_pict_type = s1->pict_type;
783  if (s1->current_picture_ptr)
784  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
785 
786  if (s1->pict_type != AV_PICTURE_TYPE_B) {
787  s->last_non_b_pict_type = s1->pict_type;
788  }
789  }
790 
791  return 0;
792 }
793 
794 /**
795  * Set the given MpegEncContext to common defaults
796  * (same for encoding and decoding).
797  * The changed fields will not depend upon the
798  * prior state of the MpegEncContext.
799  */
801 {
802  s->y_dc_scale_table =
805  s->progressive_frame = 1;
806  s->progressive_sequence = 1;
808 
809  s->coded_picture_number = 0;
810  s->picture_number = 0;
811  s->input_picture_number = 0;
812 
813  s->picture_in_gop_number = 0;
814 
815  s->f_code = 1;
816  s->b_code = 1;
817 
818  s->slice_context_count = 1;
819 }
820 
821 /**
822  * Set the given MpegEncContext to defaults for decoding.
823  * the changed fields will not depend upon
824  * the prior state of the MpegEncContext.
825  */
827 {
829 }
830 
832 {
833  ERContext *er = &s->er;
834  int mb_array_size = s->mb_height * s->mb_stride;
835  int i;
836 
837  er->avctx = s->avctx;
838  er->dsp = &s->dsp;
839 
840  er->mb_index2xy = s->mb_index2xy;
841  er->mb_num = s->mb_num;
842  er->mb_width = s->mb_width;
843  er->mb_height = s->mb_height;
844  er->mb_stride = s->mb_stride;
845  er->b8_stride = s->b8_stride;
846 
848  er->error_status_table = av_mallocz(mb_array_size);
849  if (!er->er_temp_buffer || !er->error_status_table)
850  goto fail;
851 
852  er->mbskip_table = s->mbskip_table;
853  er->mbintra_table = s->mbintra_table;
854 
855  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
856  er->dc_val[i] = s->dc_val[i];
857 
859  er->opaque = s;
860 
861  return 0;
862 fail:
863  av_freep(&er->er_temp_buffer);
865  return AVERROR(ENOMEM);
866 }
867 
868 /**
869  * Initialize and allocates MpegEncContext fields dependent on the resolution.
870  */
872 {
873  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
874 
875  s->mb_width = (s->width + 15) / 16;
876  s->mb_stride = s->mb_width + 1;
877  s->b8_stride = s->mb_width * 2 + 1;
878  s->b4_stride = s->mb_width * 4 + 1;
879  mb_array_size = s->mb_height * s->mb_stride;
880  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
881 
882  /* set default edge pos, will be overriden
883  * in decode_header if needed */
884  s->h_edge_pos = s->mb_width * 16;
885  s->v_edge_pos = s->mb_height * 16;
886 
887  s->mb_num = s->mb_width * s->mb_height;
888 
889  s->block_wrap[0] =
890  s->block_wrap[1] =
891  s->block_wrap[2] =
892  s->block_wrap[3] = s->b8_stride;
893  s->block_wrap[4] =
894  s->block_wrap[5] = s->mb_stride;
895 
896  y_size = s->b8_stride * (2 * s->mb_height + 1);
897  c_size = s->mb_stride * (s->mb_height + 1);
898  yc_size = y_size + 2 * c_size;
899 
900  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
901  for (y = 0; y < s->mb_height; y++)
902  for (x = 0; x < s->mb_width; x++)
903  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
904 
905  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
906 
907  if (s->encoding) {
908  /* Allocate MV tables */
909  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
911  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
912  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
914  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
915  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
921 
922  /* Allocate MB type table */
923  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
924 
925  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
926 
928  mb_array_size * sizeof(float), fail);
930  mb_array_size * sizeof(float), fail);
931 
932  }
933 
934  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
936  /* interlaced direct mode decoding tables */
937  for (i = 0; i < 2; i++) {
938  int j, k;
939  for (j = 0; j < 2; j++) {
940  for (k = 0; k < 2; k++) {
942  s->b_field_mv_table_base[i][j][k],
943  mv_table_size * 2 * sizeof(int16_t),
944  fail);
945  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
946  s->mb_stride + 1;
947  }
948  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
949  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
950  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
951  }
952  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
953  }
954  }
955  if (s->out_format == FMT_H263) {
956  /* cbp values */
957  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
958  s->coded_block = s->coded_block_base + s->b8_stride + 1;
959 
960  /* cbp, ac_pred, pred_dir */
961  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
962  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
963  }
964 
965  if (s->h263_pred || s->h263_plus || !s->encoding) {
966  /* dc values */
967  // MN: we need these for error resilience of intra-frames
968  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
969  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
970  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
971  s->dc_val[2] = s->dc_val[1] + c_size;
972  for (i = 0; i < yc_size; i++)
973  s->dc_val_base[i] = 1024;
974  }
975 
976  /* which mb is a intra block */
977  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
978  memset(s->mbintra_table, 1, mb_array_size);
979 
980  /* init macroblock skip table */
981  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
982  // Note the + 1 is for a quicker mpeg4 slice_end detection
983 
984  return init_er(s);
985 fail:
986  return AVERROR(ENOMEM);
987 }
988 
989 /**
990  * init common structure for both encoder and decoder.
991  * this assumes that some variables like width/height are already set
992  */
994 {
995  int i;
996  int nb_slices = (HAVE_THREADS &&
998  s->avctx->thread_count : 1;
999 
1000  if (s->encoding && s->avctx->slices)
1001  nb_slices = s->avctx->slices;
1002 
1004  s->mb_height = (s->height + 31) / 32 * 2;
1005  else
1006  s->mb_height = (s->height + 15) / 16;
1007 
1008  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1010  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1011  return -1;
1012  }
1013 
1014  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1015  int max_slices;
1016  if (s->mb_height)
1017  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1018  else
1019  max_slices = MAX_THREADS;
1020  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1021  " reducing to %d\n", nb_slices, max_slices);
1022  nb_slices = max_slices;
1023  }
1024 
1025  if ((s->width || s->height) &&
1026  av_image_check_size(s->width, s->height, 0, s->avctx))
1027  return -1;
1028 
1029  ff_dct_common_init(s);
1030 
1031  s->flags = s->avctx->flags;
1032  s->flags2 = s->avctx->flags2;
1033 
1034  /* set chroma shifts */
1036 
1037  /* convert fourcc to upper case */
1040 
1041  s->avctx->coded_frame = &s->current_picture.f;
1042 
1043  if (s->encoding) {
1044  if (s->msmpeg4_version) {
1046  2 * 2 * (MAX_LEVEL + 1) *
1047  (MAX_RUN + 1) * 2 * sizeof(int), fail);
1048  }
1049  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1050 
1051  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1052  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1053  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1054  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1055  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1056  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1059 
1060  if (s->avctx->noise_reduction) {
1061  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1062  }
1063  }
1064 
1066  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1067  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1069  }
1070  memset(&s->next_picture, 0, sizeof(s->next_picture));
1071  memset(&s->last_picture, 0, sizeof(s->last_picture));
1072  memset(&s->current_picture, 0, sizeof(s->current_picture));
1076 
1077  if (init_context_frame(s))
1078  goto fail;
1079 
1080  s->parse_context.state = -1;
1081 
1082  s->context_initialized = 1;
1083  s->thread_context[0] = s;
1084 
1085 // if (s->width && s->height) {
1086  if (nb_slices > 1) {
1087  for (i = 1; i < nb_slices; i++) {
1088  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1089  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1090  }
1091 
1092  for (i = 0; i < nb_slices; i++) {
1093  if (init_duplicate_context(s->thread_context[i]) < 0)
1094  goto fail;
1095  s->thread_context[i]->start_mb_y =
1096  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1097  s->thread_context[i]->end_mb_y =
1098  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1099  }
1100  } else {
1101  if (init_duplicate_context(s) < 0)
1102  goto fail;
1103  s->start_mb_y = 0;
1104  s->end_mb_y = s->mb_height;
1105  }
1106  s->slice_context_count = nb_slices;
1107 // }
1108 
1109  return 0;
1110  fail:
1111  ff_MPV_common_end(s);
1112  return -1;
1113 }
1114 
1115 /**
1116  * Frees and resets MpegEncContext fields depending on the resolution.
1117  * Is used during resolution changes to avoid a full reinitialization of the
1118  * codec.
1119  */
/* NOTE(review): the signature line was lost in this extract; in upstream
 * FFmpeg this appears to be static int free_context_frame(MpegEncContext *s).
 * Several av_freep() lines are also missing here (extraction gaps in the
 * numbering below) — do not assume the visible frees are exhaustive. */
1121 {
1122  int i, j, k;
1123 
     /* free the per-frame macroblock metadata tables */
1124  av_freep(&s->mb_type);
1131  s->p_mv_table = NULL;
1132  s->b_forw_mv_table = NULL;
1133  s->b_back_mv_table = NULL;
1136  s->b_direct_mv_table = NULL;
     /* field-MV tables are indexed [list][field-of-cur][field-of-ref];
      * the _base pointers own the storage, the non-base ones are views */
1137  for (i = 0; i < 2; i++) {
1138  for (j = 0; j < 2; j++) {
1139  for (k = 0; k < 2; k++) {
1140  av_freep(&s->b_field_mv_table_base[i][j][k]);
1141  s->b_field_mv_table[i][j][k] = NULL;
1142  }
1143  av_freep(&s->b_field_select_table[i][j]);
1144  av_freep(&s->p_field_mv_table_base[i][j]);
1145  s->p_field_mv_table[i][j] = NULL;
1146  }
1148  }
1149 
1150  av_freep(&s->dc_val_base);
1152  av_freep(&s->mbintra_table);
1153  av_freep(&s->cbp_table);
1154  av_freep(&s->pred_dir_table);
1155 
1156  av_freep(&s->mbskip_table);
1157 
1159  av_freep(&s->er.er_temp_buffer);
1160  av_freep(&s->mb_index2xy);
1161  av_freep(&s->lambda_table);
1162 
1163  av_freep(&s->cplx_tab);
1164  av_freep(&s->bits_tab);
1165 
     /* zero linesizes so a later re-init recomputes them */
1166  s->linesize = s->uvlinesize = 0;
1167 
1168  return 0;
1169 }
1170 
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be int ff_MPV_common_frame_size_change(MpegEncContext *s).
 * Reinitializes the resolution-dependent parts of the context (frees the old
 * frame tables, then allocates new ones and re-splits slice thread contexts)
 * without a full codec reinit. Some lines are missing from the extract. */
1172 {
1173  int i, err = 0;
1174 
     /* tear down per-slice duplicate contexts (index 0 is the main context
      * and is not freed) */
1175  if (s->slice_context_count > 1) {
1176  for (i = 0; i < s->slice_context_count; i++) {
1178  }
1179  for (i = 1; i < s->slice_context_count; i++) {
1180  av_freep(&s->thread_context[i]);
1181  }
1182  } else
1184 
1185  if ((err = free_context_frame(s)) < 0)
1186  return err;
1187 
     /* force reallocation of every picture at the new size */
1188  if (s->picture)
1189  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1190  s->picture[i].needs_realloc = 1;
1191  }
1192 
1193  s->last_picture_ptr =
1194  s->next_picture_ptr =
1196 
1197  // init
     /* interlaced MPEG-2 needs an even number of 16-pixel MB rows; the
      * conditional line selecting this branch was lost in the extract */
1199  s->mb_height = (s->height + 31) / 32 * 2;
1200  else
1201  s->mb_height = (s->height + 15) / 16;
1202 
1203  if ((s->width || s->height) &&
1204  av_image_check_size(s->width, s->height, 0, s->avctx))
1205  return AVERROR_INVALIDDATA;
1206 
1207  if ((err = init_context_frame(s)))
1208  goto fail;
1209 
1210  s->thread_context[0] = s;
1211 
1212  if (s->width && s->height) {
1213  int nb_slices = s->slice_context_count;
1214  if (nb_slices > 1) {
      /* clone the main context for each extra slice thread ... */
1215  for (i = 1; i < nb_slices; i++) {
1216  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1217  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1218  }
1219 
      /* ... then give each an even share of the MB rows (rounded) */
1220  for (i = 0; i < nb_slices; i++) {
1221  if (init_duplicate_context(s->thread_context[i]) < 0)
1222  goto fail;
1223  s->thread_context[i]->start_mb_y =
1224  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1225  s->thread_context[i]->end_mb_y =
1226  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1227  }
1228  } else {
1229  if (init_duplicate_context(s) < 0)
1230  goto fail;
1231  s->start_mb_y = 0;
1232  s->end_mb_y = s->mb_height;
1233  }
1234  s->slice_context_count = nb_slices;
1235  }
1236 
1237  return 0;
1238  fail:
1239  ff_MPV_common_end(s);
1240  return err;
1241 }
1242 
1243 /* free common structure for both encoder and decoder
       (NOTE(review): original comment said "init", but this function tears
       the context down; the signature line — upstream apparently
       void ff_MPV_common_end(MpegEncContext *s) — was lost in this extract,
       as were several av_freep() lines) */
1245 {
1246  int i;
1247 
     /* release extra slice-thread contexts; index 0 is the main context */
1248  if (s->slice_context_count > 1) {
1249  for (i = 0; i < s->slice_context_count; i++) {
1251  }
1252  for (i = 1; i < s->slice_context_count; i++) {
1253  av_freep(&s->thread_context[i]);
1254  }
1255  s->slice_context_count = 1;
1256  } else free_duplicate_context(s);
1257 
1259  s->parse_context.buffer_size = 0;
1260 
1263 
1264  av_freep(&s->avctx->stats_out);
1265  av_freep(&s->ac_stats);
1266 
1271  av_freep(&s->q_intra_matrix);
1272  av_freep(&s->q_inter_matrix);
1275  av_freep(&s->input_picture);
1277  av_freep(&s->dct_offset);
1278 
     /* unreference every picture slot before freeing the array itself */
1279  if (s->picture) {
1280  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1281  free_picture_tables(&s->picture[i]);
1282  ff_mpeg_unref_picture(s, &s->picture[i]);
1283  }
1284  }
1285  av_freep(&s->picture);
1294 
1295  free_context_frame(s);
1296 
1297  s->context_initialized = 0;
     /* clear dangling pointers so a stale context cannot be misused */
1298  s->last_picture_ptr =
1299  s->next_picture_ptr =
1301  s->linesize = s->uvlinesize = 0;
1302 }
1303 
/* NOTE(review): the first signature line was lost in this extract; upstream
 * this appears to be void ff_init_rl(RLTable *rl, ...) — it derives the
 * max_level[], max_run[] and index_run[] lookup tables from the raw
 * run/level tables, once for "not last" (last==0) and once for "last"
 * (last==1) coefficients. */
1305  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1306 {
1307  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1308  uint8_t index_run[MAX_RUN + 1];
1309  int last, run, level, start, end, i;
1310 
1311  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1312  if (static_store && rl->max_level[0])
1313  return;
1314 
1315  /* compute max_level[], max_run[] and index_run[] */
1316  for (last = 0; last < 2; last++) {
     /* entries [0, rl->last) are "not last" codes, [rl->last, rl->n) are
      * "last" codes */
1317  if (last == 0) {
1318  start = 0;
1319  end = rl->last;
1320  } else {
1321  start = rl->last;
1322  end = rl->n;
1323  }
1324 
     /* rl->n marks "no code for this run" in index_run */
1325  memset(max_level, 0, MAX_RUN + 1);
1326  memset(max_run, 0, MAX_LEVEL + 1);
1327  memset(index_run, rl->n, MAX_RUN + 1);
1328  for (i = start; i < end; i++) {
1329  run = rl->table_run[i];
1330  level = rl->table_level[i];
1331  if (index_run[run] == rl->n)
1332  index_run[run] = i;
1333  if (level > max_level[run])
1334  max_level[run] = level;
1335  if (run > max_run[level])
1336  max_run[level] = run;
1337  }
     /* the three tables either share one static backing array (carved up
      * by offsets) or are heap-allocated individually */
1338  if (static_store)
1339  rl->max_level[last] = static_store[last];
1340  else
1341  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1342  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1343  if (static_store)
1344  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1345  else
1346  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1347  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1348  if (static_store)
1349  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1350  else
1351  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1352  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1353  }
1354 }
1355 
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be void ff_init_vlc_rl(RLTable *rl). Pre-computes, for every quantizer
 * q in [0,32), an RL_VLC_ELEM table that folds dequantization
 * (level * qmul + qadd) into the VLC decode step. */
1357 {
1358  int i, q;
1359 
1360  for (q = 0; q < 32; q++) {
1361  int qmul = q * 2;
1362  int qadd = (q - 1) | 1;
1363 
     /* q == 0 means "no dequantization": identity multiply, no offset */
1364  if (q == 0) {
1365  qmul = 1;
1366  qadd = 0;
1367  }
1368  for (i = 0; i < rl->vlc.table_size; i++) {
1369  int code = rl->vlc.table[i][0];
1370  int len = rl->vlc.table[i][1];
1371  int level, run;
1372 
1373  if (len == 0) { // illegal code
1374  run = 66;
1375  level = MAX_LEVEL;
1376  } else if (len < 0) { // more bits needed
1377  run = 0;
1378  level = code;
1379  } else {
1380  if (code == rl->n) { // esc
1381  run = 66;
1382  level = 0;
1383  } else {
1384  run = rl->table_run[code] + 1;
1385  level = rl->table_level[code] * qmul + qadd;
      /* "last" codes get run += 192 so the decoder can tell them apart */
1386  if (code >= rl->last) run += 192;
1387  }
1388  }
1389  rl->rl_vlc[q][i].len = len;
1390  rl->rl_vlc[q][i].level = level;
1391  rl->rl_vlc[q][i].run = run;
1392  }
1393  }
1394 }
1395 
/* NOTE(review): signature line lost in this extract; from the body this
 * appears to be the "release unused pictures" helper taking
 * (MpegEncContext *s, int remove_current) — confirm against upstream.
 * Unreferences every non-reference picture slot; the current picture is
 * kept unless remove_current is set. */
1397 {
1398  int i;
1399 
1400  /* release non reference frames */
1401  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1402  if (!s->picture[i].reference &&
1403  (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1404  ff_mpeg_unref_picture(s, &s->picture[i]);
1405  }
1406  }
1407 }
1408 
1409 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1410 {
1411  if (pic == s->last_picture_ptr)
1412  return 0;
1413  if (pic->f.data[0] == NULL)
1414  return 1;
1415  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1416  return 1;
1417  return 0;
1418 }
1419 
/* Locate a free slot in s->picture[]; "shared" slots only require empty
 * data[0]. NOTE(review): the av_log() call opening (before the string on
 * line 1437) was lost in this extract. */
1420 static int find_unused_picture(MpegEncContext *s, int shared)
1421 {
1422  int i;
1423 
1424  if (shared) {
1425  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1426  if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1427  return i;
1428  }
1429  } else {
1430  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1431  if (pic_is_unused(s, &s->picture[i]))
1432  return i;
1433  }
1434  }
1435 
1437  "Internal error, picture buffer overflow\n");
1438  /* We could return -1, but the codec would crash trying to draw into a
1439  * non-existing frame anyway. This is safer than waiting for a random crash.
1440  * Also the return of this is never useful, an encoder must only allocate
1441  * as much as allowed in the specification. This has no relationship to how
1442  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1443  * enough for such valid streams).
1444  * Plus, a decoder has to check stream validity and remove frames if too
1445  * many reference frames are around. Waiting for "OOM" is not correct at
1446  * all. Similarly, missing reference frames have to be replaced by
1447  * interpolated/MC frames, anything else is a bug in the codec ...
1448  */
1449  abort();
1450  return -1;
1451 }
1452 
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be int ff_find_unused_picture(MpegEncContext *s, int shared).
 * Public wrapper around find_unused_picture() that also clears a pending
 * realloc flag by freeing the slot's old tables and buffers. */
1454 {
1455  int ret = find_unused_picture(s, shared);
1456 
1457  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1458  if (s->picture[ret].needs_realloc) {
1459  s->picture[ret].needs_realloc = 0;
1460  free_picture_tables(&s->picture[ret]);
1461  ff_mpeg_unref_picture(s, &s->picture[ret]);
1463  }
1464  }
1465  return ret;
1466 }
1467 
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be static void update_noise_reduction(MpegEncContext *s).
 * Refreshes the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks. */
1469 {
1470  int intra, i;
1471 
1472  for (intra = 0; intra < 2; intra++) {
     /* halve the accumulators once they get large, so the statistics
      * track recent frames instead of growing without bound */
1473  if (s->dct_count[intra] > (1 << 16)) {
1474  for (i = 0; i < 64; i++) {
1475  s->dct_error_sum[intra][i] >>= 1;
1476  }
1477  s->dct_count[intra] >>= 1;
1478  }
1479 
     /* offset ~= noise_reduction * count / error_sum, rounded */
1480  for (i = 0; i < 64; i++) {
1481  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1482  s->dct_count[intra] +
1483  s->dct_error_sum[intra][i] / 2) /
1484  (s->dct_error_sum[intra][i] + 1);
1485  }
1486  }
1487 }
1488 
1489 /**
1490  * generic function for encode/decode called after coding/decoding
1491  * the header and before a frame is coded/decoded.
1492  */
/* NOTE(review): the signature line was lost in this extract; upstream this
 * appears to be int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx).
 * Many interior lines are also missing (extraction gaps visible in the
 * numbering) — notably several conditions, assignments and function-call
 * openings. Treat the visible code as incomplete. */
1494 {
1495  int i, ret;
1496  Picture *pic;
1497  s->mb_skipped = 0;
1498 
1499  if (!ff_thread_can_start_frame(avctx)) {
1500  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1501  return -1;
1502  }
1503 
1504  /* mark & release old frames */
1505  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1507  s->last_picture_ptr->f.data[0]) {
1509  }
1510 
1511  /* release forgotten pictures */
1512  /* if (mpeg124/h263) */
1513  if (!s->encoding) {
1514  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1515  if (&s->picture[i] != s->last_picture_ptr &&
1516  &s->picture[i] != s->next_picture_ptr &&
1517  s->picture[i].reference && !s->picture[i].needs_realloc) {
1518  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1519  av_log(avctx, AV_LOG_ERROR,
1520  "releasing zombie picture\n");
1521  ff_mpeg_unref_picture(s, &s->picture[i]);
1522  }
1523  }
1524  }
1525 
1526  if (!s->encoding) {
1528 
     /* pick the buffer for the frame being decoded: reuse a pre-set
      * current picture if it is still empty, else grab a free slot */
1529  if (s->current_picture_ptr &&
1530  s->current_picture_ptr->f.data[0] == NULL) {
1531  // we already have a unused image
1532  // (maybe it was set before reading the header)
1533  pic = s->current_picture_ptr;
1534  } else {
1535  i = ff_find_unused_picture(s, 0);
1536  if (i < 0) {
1537  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1538  return i;
1539  }
1540  pic = &s->picture[i];
1541  }
1542 
     /* reference == 3 marks both fields as reference */
1543  pic->reference = 0;
1544  if (!s->droppable) {
1545  if (s->pict_type != AV_PICTURE_TYPE_B)
1546  pic->reference = 3;
1547  }
1548 
1550 
1551  if (ff_alloc_picture(s, pic, 0) < 0)
1552  return -1;
1553 
1554  s->current_picture_ptr = pic;
1555  // FIXME use only the vars from current_pic
1557  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1559  if (s->picture_structure != PICT_FRAME)
1562  }
1566  }
1567 
1569  // if (s->flags && CODEC_FLAG_QSCALE)
1570  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1572 
1574  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1575  s->current_picture_ptr)) < 0)
1576  return ret;
1577 
     /* rotate the reference pictures for non-B frames (assignments lost
      * in the extract) */
1578  if (s->pict_type != AV_PICTURE_TYPE_B) {
1580  if (!s->droppable)
1582  }
1583  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1588  s->pict_type, s->droppable);
1589 
     /* a missing last picture for a P/B frame (or a field-coded first
      * keyframe) gets a grey dummy frame so motion compensation has
      * something to reference */
1590  if ((s->last_picture_ptr == NULL ||
1591  s->last_picture_ptr->f.data[0] == NULL) &&
1592  (s->pict_type != AV_PICTURE_TYPE_I ||
1593  s->picture_structure != PICT_FRAME)) {
1594  int h_chroma_shift, v_chroma_shift;
1596  &h_chroma_shift, &v_chroma_shift);
1597  if (s->pict_type != AV_PICTURE_TYPE_I)
1598  av_log(avctx, AV_LOG_ERROR,
1599  "warning: first frame is no keyframe\n");
1600  else if (s->picture_structure != PICT_FRAME)
1601  av_log(avctx, AV_LOG_INFO,
1602  "allocate dummy last picture for field based first keyframe\n");
1603 
1604  /* Allocate a dummy frame */
1605  i = ff_find_unused_picture(s, 0);
1606  if (i < 0) {
1607  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1608  return i;
1609  }
1610  s->last_picture_ptr = &s->picture[i];
1611  s->last_picture_ptr->f.key_frame = 0;
1612  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1613  s->last_picture_ptr = NULL;
1614  return -1;
1615  }
1616 
     /* fill the dummy frame with mid-grey (0x80 luma and chroma) */
1617  memset(s->last_picture_ptr->f.data[0], 0x80,
1618  avctx->height * s->last_picture_ptr->f.linesize[0]);
1619  memset(s->last_picture_ptr->f.data[1], 0x80,
1620  (avctx->height >> v_chroma_shift) *
1621  s->last_picture_ptr->f.linesize[1]);
1622  memset(s->last_picture_ptr->f.data[2], 0x80,
1623  (avctx->height >> v_chroma_shift) *
1624  s->last_picture_ptr->f.linesize[2]);
1625 
     /* (guarding condition lost in the extract) use luma 16 instead */
1627  for(i=0; i<avctx->height; i++)
1628  memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1629  }
1630 
1631  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1632  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1633  }
     /* same dummy-frame trick for a missing next picture of a B frame */
1634  if ((s->next_picture_ptr == NULL ||
1635  s->next_picture_ptr->f.data[0] == NULL) &&
1636  s->pict_type == AV_PICTURE_TYPE_B) {
1637  /* Allocate a dummy frame */
1638  i = ff_find_unused_picture(s, 0);
1639  if (i < 0) {
1640  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1641  return i;
1642  }
1643  s->next_picture_ptr = &s->picture[i];
1644  s->next_picture_ptr->f.key_frame = 0;
1645  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1646  s->next_picture_ptr = NULL;
1647  return -1;
1648  }
1649  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1650  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1651  }
1652 
1653 #if 0 // BUFREF-FIXME
1654  memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1655  memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1656 #endif
1657  if (s->last_picture_ptr) {
1659  if (s->last_picture_ptr->f.data[0] &&
1660  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1661  s->last_picture_ptr)) < 0)
1662  return ret;
1663  }
1664  if (s->next_picture_ptr) {
1666  if (s->next_picture_ptr->f.data[0] &&
1667  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1668  s->next_picture_ptr)) < 0)
1669  return ret;
1670  }
1671 
1672  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1673  s->last_picture_ptr->f.data[0]));
1674 
     /* for field pictures: double the linesizes and offset data pointers
      * so the code addresses a single field (offset lines lost in extract) */
1675  if (s->picture_structure!= PICT_FRAME) {
1676  int i;
1677  for (i = 0; i < 4; i++) {
1679  s->current_picture.f.data[i] +=
1681  }
1682  s->current_picture.f.linesize[i] *= 2;
1683  s->last_picture.f.linesize[i] *= 2;
1684  s->next_picture.f.linesize[i] *= 2;
1685  }
1686  }
1687 
1688  s->err_recognition = avctx->err_recognition;
1689 
1690  /* set dequantizer, we can't do it during init as
1691  * it might change for mpeg4 and we can't do it in the header
1692  * decode as init is not called for mpeg4 there yet */
1693  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1696  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1699  } else {
1702  }
1703 
1704  if (s->dct_error_sum) {
1707  }
1708 
1710  return ff_xvmc_field_start(s, avctx);
1711 
1712  return 0;
1713 }
1714 
1715 /* generic function for encode/decode called after a
1716  * frame has been coded/decoded. */
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be void ff_MPV_frame_end(MpegEncContext *s). Several interior lines
 * (conditions and draw_edges() call openings) are also missing. */
1718 {
1719  int i;
1720  /* redraw edges for the frame if decoding didn't complete */
1721  // just to make sure that all data is rendered.
1723  ff_xvmc_field_end(s);
1724  } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1725  !s->avctx->hwaccel &&
1727  s->unrestricted_mv &&
1729  !s->intra_only &&
1730  !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1731  !s->avctx->lowres
1732  ) {
     /* pad luma and both chroma planes out to the edge area so that
      * unrestricted MVs can reference beyond the picture */
1734  int hshift = desc->log2_chroma_w;
1735  int vshift = desc->log2_chroma_h;
1737  s->h_edge_pos, s->v_edge_pos,
1739  EDGE_TOP | EDGE_BOTTOM);
1741  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1742  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1743  EDGE_TOP | EDGE_BOTTOM);
1745  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1746  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1747  EDGE_TOP | EDGE_BOTTOM);
1748  }
1749 
1750  emms_c();
1751 
1752  s->last_pict_type = s->pict_type;
1754  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1756  }
1757 #if 0
1758  /* copy back current_picture variables */
1759  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1760  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1761  s->picture[i] = s->current_picture;
1762  break;
1763  }
1764  }
1765  assert(i < MAX_PICTURE_COUNT);
1766 #endif
1767 
1768  if (s->encoding) {
1769  /* release non-reference frames */
1770  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1771  if (!s->picture[i].reference)
1772  ff_mpeg_unref_picture(s, &s->picture[i]);
1773  }
1774  }
1775  // clear copies, to avoid confusion
1776 #if 0
1777  memset(&s->last_picture, 0, sizeof(Picture));
1778  memset(&s->next_picture, 0, sizeof(Picture));
1779  memset(&s->current_picture, 0, sizeof(Picture));
1780 #endif
1782 
1783  if (s->current_picture.reference)
1785 }
1786 
1787 /**
1788  * Draw a line from (ex, ey) -> (sx, sy).
1789  * @param w width of the image
1790  * @param h height of the image
1791  * @param stride stride/linesize of the image
1792  * @param color color of the arrow
1793  */
1794 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1795  int w, int h, int stride, int color)
1796 {
1797  int x, y, fr, f;
1798 
1799  sx = av_clip(sx, 0, w - 1);
1800  sy = av_clip(sy, 0, h - 1);
1801  ex = av_clip(ex, 0, w - 1);
1802  ey = av_clip(ey, 0, h - 1);
1803 
1804  buf[sy * stride + sx] += color;
1805 
1806  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1807  if (sx > ex) {
1808  FFSWAP(int, sx, ex);
1809  FFSWAP(int, sy, ey);
1810  }
1811  buf += sx + sy * stride;
1812  ex -= sx;
1813  f = ((ey - sy) << 16) / ex;
1814  for (x = 0; x <= ex; x++) {
1815  y = (x * f) >> 16;
1816  fr = (x * f) & 0xFFFF;
1817  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1818  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1819  }
1820  } else {
1821  if (sy > ey) {
1822  FFSWAP(int, sx, ex);
1823  FFSWAP(int, sy, ey);
1824  }
1825  buf += sx + sy * stride;
1826  ey -= sy;
1827  if (ey)
1828  f = ((ex - sx) << 16) / ey;
1829  else
1830  f = 0;
1831  for(y= 0; y <= ey; y++){
1832  x = (y*f) >> 16;
1833  fr = (y*f) & 0xFFFF;
1834  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1835  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1836  }
1837  }
1838 }
1839 
1840 /**
1841  * Draw an arrow from (ex, ey) -> (sx, sy).
1842  * @param w width of the image
1843  * @param h height of the image
1844  * @param stride stride/linesize of the image
1845  * @param color color of the arrow
1846  */
1847 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1848  int ey, int w, int h, int stride, int color)
1849 {
1850  int dx,dy;
1851 
1852  sx = av_clip(sx, -100, w + 100);
1853  sy = av_clip(sy, -100, h + 100);
1854  ex = av_clip(ex, -100, w + 100);
1855  ey = av_clip(ey, -100, h + 100);
1856 
1857  dx = ex - sx;
1858  dy = ey - sy;
1859 
1860  if (dx * dx + dy * dy > 3 * 3) {
1861  int rx = dx + dy;
1862  int ry = -dx + dy;
1863  int length = ff_sqrt((rx * rx + ry * ry) << 8);
1864 
1865  // FIXME subpixel accuracy
1866  rx = ROUNDED_DIV(rx * 3 << 4, length);
1867  ry = ROUNDED_DIV(ry * 3 << 4, length);
1868 
1869  draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1870  draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1871  }
1872  draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1873 }
1874 
1875 /**
1876  * Print debugging info for the given picture.
1877  */
/* NOTE(review): several interior lines were lost in this extract (e.g. the
 * continuation of the guard condition after line 1882 and the argument of
 * the "New frame" av_log on line 1890). The visible code is incomplete. */
1878 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1879  int *low_delay,
1880  int mb_width, int mb_height, int mb_stride, int quarter_sample)
1881 {
1882  if (avctx->hwaccel || !p || !p->mb_type
1884  return;
1885 
1886 
     /* textual per-MB dump: skip count, QP and/or MB type as ASCII art */
1887  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1888  int x,y;
1889 
1890  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1892  for (y = 0; y < mb_height; y++) {
1893  for (x = 0; x < mb_width; x++) {
1894  if (avctx->debug & FF_DEBUG_SKIP) {
1895  int count = mbskip_table[x + y * mb_stride];
1896  if (count > 9)
1897  count = 9;
1898  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1899  }
1900  if (avctx->debug & FF_DEBUG_QP) {
1901  av_log(avctx, AV_LOG_DEBUG, "%2d",
1902  p->qscale_table[x + y * mb_stride]);
1903  }
1904  if (avctx->debug & FF_DEBUG_MB_TYPE) {
1905  int mb_type = p->mb_type[x + y * mb_stride];
1906  // Type & MV direction
1907  if (IS_PCM(mb_type))
1908  av_log(avctx, AV_LOG_DEBUG, "P");
1909  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1910  av_log(avctx, AV_LOG_DEBUG, "A");
1911  else if (IS_INTRA4x4(mb_type))
1912  av_log(avctx, AV_LOG_DEBUG, "i");
1913  else if (IS_INTRA16x16(mb_type))
1914  av_log(avctx, AV_LOG_DEBUG, "I");
1915  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1916  av_log(avctx, AV_LOG_DEBUG, "d");
1917  else if (IS_DIRECT(mb_type))
1918  av_log(avctx, AV_LOG_DEBUG, "D");
1919  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1920  av_log(avctx, AV_LOG_DEBUG, "g");
1921  else if (IS_GMC(mb_type))
1922  av_log(avctx, AV_LOG_DEBUG, "G");
1923  else if (IS_SKIP(mb_type))
1924  av_log(avctx, AV_LOG_DEBUG, "S");
1925  else if (!USES_LIST(mb_type, 1))
1926  av_log(avctx, AV_LOG_DEBUG, ">");
1927  else if (!USES_LIST(mb_type, 0))
1928  av_log(avctx, AV_LOG_DEBUG, "<");
1929  else {
1930  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1931  av_log(avctx, AV_LOG_DEBUG, "X");
1932  }
1933 
1934  // segmentation
1935  if (IS_8X8(mb_type))
1936  av_log(avctx, AV_LOG_DEBUG, "+");
1937  else if (IS_16X8(mb_type))
1938  av_log(avctx, AV_LOG_DEBUG, "-");
1939  else if (IS_8X16(mb_type))
1940  av_log(avctx, AV_LOG_DEBUG, "|");
1941  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1942  av_log(avctx, AV_LOG_DEBUG, " ");
1943  else
1944  av_log(avctx, AV_LOG_DEBUG, "?");
1945 
1946 
1947  if (IS_INTERLACED(mb_type))
1948  av_log(avctx, AV_LOG_DEBUG, "=");
1949  else
1950  av_log(avctx, AV_LOG_DEBUG, " ");
1951  }
1952  }
1953  av_log(avctx, AV_LOG_DEBUG, "\n");
1954  }
1955  }
1956 
     /* visual overlays: draw MVs / QP / MB type directly into the frame */
1957  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1958  (avctx->debug_mv)) {
1959  const int shift = 1 + quarter_sample;
1960  int mb_y;
1961  uint8_t *ptr;
1962  int i;
1963  int h_chroma_shift, v_chroma_shift, block_height;
1964  const int width = avctx->width;
1965  const int height = avctx->height;
1966  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1967  const int mv_stride = (mb_width << mv_sample_log2) +
1968  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1969 
1970  *low_delay = 0; // needed to see the vectors without trashing the buffers
1971 
1972  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1973 
1974  av_frame_make_writable(pict);
1975 
1976  pict->opaque = NULL;
1977  ptr = pict->data[0];
1978  block_height = 16 >> v_chroma_shift;
1979 
1980  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1981  int mb_x;
1982  for (mb_x = 0; mb_x < mb_width; mb_x++) {
1983  const int mb_index = mb_x + mb_y * mb_stride;
      /* motion vectors: type 0 = P forward, 1 = B forward, 2 = B backward */
1984  if ((avctx->debug_mv) && p->motion_val[0]) {
1985  int type;
1986  for (type = 0; type < 3; type++) {
1987  int direction = 0;
1988  switch (type) {
1989  case 0:
1990  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1991  (pict->pict_type!= AV_PICTURE_TYPE_P))
1992  continue;
1993  direction = 0;
1994  break;
1995  case 1:
1996  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1997  (pict->pict_type!= AV_PICTURE_TYPE_B))
1998  continue;
1999  direction = 0;
2000  break;
2001  case 2:
2002  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2003  (pict->pict_type!= AV_PICTURE_TYPE_B))
2004  continue;
2005  direction = 1;
2006  break;
2007  }
2008  if (!USES_LIST(p->mb_type[mb_index], direction))
2009  continue;
2010 
      /* one arrow per partition; arrow bases sit at partition centers */
2011  if (IS_8X8(p->mb_type[mb_index])) {
2012  int i;
2013  for (i = 0; i < 4; i++) {
2014  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2015  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2016  int xy = (mb_x * 2 + (i & 1) +
2017  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2018  int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2019  int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2020  draw_arrow(ptr, sx, sy, mx, my, width,
2021  height, pict->linesize[0], 100);
2022  }
2023  } else if (IS_16X8(p->mb_type[mb_index])) {
2024  int i;
2025  for (i = 0; i < 2; i++) {
2026  int sx = mb_x * 16 + 8;
2027  int sy = mb_y * 16 + 4 + 8 * i;
2028  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2029  int mx = (p->motion_val[direction][xy][0] >> shift);
2030  int my = (p->motion_val[direction][xy][1] >> shift);
2031 
2032  if (IS_INTERLACED(p->mb_type[mb_index]))
2033  my *= 2;
2034 
2035  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2036  height, pict->linesize[0], 100);
2037  }
2038  } else if (IS_8X16(p->mb_type[mb_index])) {
2039  int i;
2040  for (i = 0; i < 2; i++) {
2041  int sx = mb_x * 16 + 4 + 8 * i;
2042  int sy = mb_y * 16 + 8;
2043  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2044  int mx = p->motion_val[direction][xy][0] >> shift;
2045  int my = p->motion_val[direction][xy][1] >> shift;
2046 
2047  if (IS_INTERLACED(p->mb_type[mb_index]))
2048  my *= 2;
2049 
2050  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2051  height, pict->linesize[0], 100);
2052  }
2053  } else {
2054  int sx= mb_x * 16 + 8;
2055  int sy= mb_y * 16 + 8;
2056  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2057  int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2058  int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2059  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2060  }
2061  }
2062  }
      /* QP visualization: paint the chroma block with a grey level
       * proportional to the MB quantizer */
2063  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2064  uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2065  0x0101010101010101ULL;
2066  int y;
2067  for (y = 0; y < block_height; y++) {
2068  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2069  (block_height * mb_y + y) *
2070  pict->linesize[1]) = c;
2071  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2072  (block_height * mb_y + y) *
2073  pict->linesize[2]) = c;
2074  }
2075  }
      /* MB-type visualization: tint chroma per type, overlay partition
       * split lines on luma */
2076  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2077  p->motion_val[0]) {
2078  int mb_type = p->mb_type[mb_index];
2079  uint64_t u,v;
2080  int y;
2081 #define COLOR(theta, r) \
2082  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2083  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2084 
2085 
2086  u = v = 128;
2087  if (IS_PCM(mb_type)) {
2088  COLOR(120, 48)
2089  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2090  IS_INTRA16x16(mb_type)) {
2091  COLOR(30, 48)
2092  } else if (IS_INTRA4x4(mb_type)) {
2093  COLOR(90, 48)
2094  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2095  // COLOR(120, 48)
2096  } else if (IS_DIRECT(mb_type)) {
2097  COLOR(150, 48)
2098  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2099  COLOR(170, 48)
2100  } else if (IS_GMC(mb_type)) {
2101  COLOR(190, 48)
2102  } else if (IS_SKIP(mb_type)) {
2103  // COLOR(180, 48)
2104  } else if (!USES_LIST(mb_type, 1)) {
2105  COLOR(240, 48)
2106  } else if (!USES_LIST(mb_type, 0)) {
2107  COLOR(0, 48)
2108  } else {
2109  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2110  COLOR(300,48)
2111  }
2112 
2113  u *= 0x0101010101010101ULL;
2114  v *= 0x0101010101010101ULL;
2115  for (y = 0; y < block_height; y++) {
2116  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2117  (block_height * mb_y + y) * pict->linesize[1]) = u;
2118  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2119  (block_height * mb_y + y) * pict->linesize[2]) = v;
2120  }
2121 
2122  // segmentation
2123  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2124  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2125  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2126  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2127  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2128  }
2129  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2130  for (y = 0; y < 16; y++)
2131  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2132  pict->linesize[0]] ^= 0x80;
2133  }
2134  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2135  int dm = 1 << (mv_sample_log2 - 2);
2136  for (i = 0; i < 4; i++) {
2137  int sx = mb_x * 16 + 8 * (i & 1);
2138  int sy = mb_y * 16 + 8 * (i >> 1);
2139  int xy = (mb_x * 2 + (i & 1) +
2140  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2141  // FIXME bidir
2142  int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2143  if (mv[0] != mv[dm] ||
2144  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2145  for (y = 0; y < 8; y++)
2146  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2147  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2148  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2149  pict->linesize[0]) ^= 0x8080808080808080ULL;
2150  }
2151  }
2152 
2153  if (IS_INTERLACED(mb_type) &&
2154  avctx->codec->id == AV_CODEC_ID_H264) {
2155  // hmm
2156  }
2157  }
2158  mbskip_table[mb_index] = 0;
2159  }
2160  }
2161  }
2162 }
2163 
/* NOTE(review): signature line lost in this extract; upstream this appears to
 * be void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict).
 * Thin convenience wrapper forwarding the MpegEncContext fields to
 * ff_print_debug_info2(). */
2165 {
2166  ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2167  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2168 }
2169 
/* NOTE(review): the signature line (upstream apparently
 * int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p,
 * int qp_type)) and the line creating `ref` — presumably an
 * av_buffer_ref() of the picture's qscale-table buffer — were lost in
 * this extract. Attaches the per-MB QP table to the output frame,
 * skipping the leading padding rows/column of the table. */
2171 {
2173  int offset = 2*s->mb_stride + 1;
2174  if(!ref)
2175  return AVERROR(ENOMEM);
2176  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2177  ref->size -= offset;
2178  ref->data += offset;
2179  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2180 }
2181 
/* NOTE(review): the first signature line was lost in this extract; upstream
 * this appears to be static int hpel_motion_lowres(MpegEncContext *s, ...).
 * Half-pel motion compensation for one block in lowres decoding; returns
 * nonzero if the edge-emulation buffer had to be used. */
2183  uint8_t *dest, uint8_t *src,
2184  int field_based, int field_select,
2185  int src_x, int src_y,
2186  int width, int height, int stride,
2187  int h_edge_pos, int v_edge_pos,
2188  int w, int h, h264_chroma_mc_func *pix_op,
2189  int motion_x, int motion_y)
2190 {
2191  const int lowres = s->avctx->lowres;
2192  const int op_index = FFMIN(lowres, 2);
2193  const int s_mask = (2 << lowres) - 1;
2194  int emu = 0;
2195  int sx, sy;
2196 
2197  if (s->quarter_sample) {
2198  motion_x /= 2;
2199  motion_y /= 2;
2200  }
2201 
     /* split MV into subpel fraction (sx/sy) and integer position;
      * note ">> lowres + 1" binds as ">> (lowres + 1)" — intentional */
2202  sx = motion_x & s_mask;
2203  sy = motion_y & s_mask;
2204  src_x += motion_x >> lowres + 1;
2205  src_y += motion_y >> lowres + 1;
2206 
2207  src += src_y * stride + src_x;
2208 
     /* fall back to the emulated-edge buffer when the block reaches
      * outside the decoded picture area */
2209  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2210  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2211  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2212  (h + 1) << field_based, src_x,
2213  src_y << field_based,
2214  h_edge_pos,
2215  v_edge_pos);
2216  src = s->edge_emu_buffer;
2217  emu = 1;
2218  }
2219 
     /* rescale the subpel fraction to the 1/8-pel units pix_op expects */
2220  sx = (sx << 2) >> lowres;
2221  sy = (sy << 2) >> lowres;
2222  if (field_select)
2223  src += s->linesize;
2224  pix_op[op_index](dest, src, stride, h, sx, sy);
2225  return emu;
2226 }
2227 
/* apply one mpeg motion vector to the three components */
/* NOTE(review): the opening signature line of this function (doxygen line
 * 2229) was lost when this listing was extracted; it should read roughly
 * "static av_always_inline void mpeg_motion_lowres(MpegEncContext *s," —
 * confirm against the repository before compiling. */
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based,
                               int bottom_field,
                               int field_select,
                               uint8_t **ref_picture,
                               h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y,
                               int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    /* in lowres mode every pixel coordinate is scaled down by 1 << lowres */
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
    const int block_s = 8>>lowres;              /* luma block size at this resolution */
    const int s_mask = (2 << lowres) - 1;       /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    /* split the MV into an integer source position (src_x/src_y) and a
     * sub-pel fraction (sx/sy) */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* chroma subsampled in both directions (4:2:0) */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* if the reference block reaches outside the padded picture, MC from an
     * edge-emulated copy instead */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 linesize >> field_based, 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
                                     9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
                                     9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale sub-pel fractions to the 1/8-pel range expected by pix_op */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
2360 
/* NOTE(review): the opening signature line (doxygen 2361, presumably
 * "static inline void chroma_4mv_motion_lowres(MpegEncContext *s,") was lost
 * in extraction of this listing — restore from the repository. */
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    /* chroma planes are half size here, hence the extra ">> 1" (lowres + 1) */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            /* NOTE(review): the opening of the s->vdsp.emulated_edge_mc(...)
             * call (doxygen 2396) is missing here in this listing; only its
             * trailing argument line survives below. */
                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr reuses the same offset and edge-emulation decision as Cb */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
2414 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses one combined vector (below) */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* the opposite-parity field of the current frame may be the
             * actual reference for the second field */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* NOTE(review): a line is missing here (doxygen 2531) —
                 * presumably switching pix_op to the averaging variant so the
                 * second pass blends with the first; confirm in the repo. */
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                /* NOTE(review): a line is missing here (doxygen 2542) —
                 * presumably the avg pix_op assignment; confirm in the repo. */

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
2556 
/**
 * find the lowest MB row referenced in the MVs
 */
/* NOTE(review): the signature line (doxygen 2560) is missing from this
 * listing; presumably "static int lowest_referenced_row(MpegEncContext *s,
 * int dir)" — confirm against the repository. */
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* only plain frame pictures without global MC are analyzed */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* find the extreme vertical displacements, normalized to 1/4-pel */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* worst-case displacement in MB rows (64 quarter-pels per 16-pel row) */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
unhandled:
    /* conservatively assume the whole picture is referenced */
    return s->mb_height-1;
}
2594 
2595 /* put block[] to dest[] */
2596 static inline void put_dct(MpegEncContext *s,
2597  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2598 {
2599  s->dct_unquantize_intra(s, block, i, qscale);
2600  s->dsp.idct_put (dest, line_size, block);
2601 }
2602 
2603 /* add block[] to dest[] */
2604 static inline void add_dct(MpegEncContext *s,
2605  int16_t *block, int i, uint8_t *dest, int line_size)
2606 {
2607  if (s->block_last_index[i] >= 0) {
2608  s->dsp.idct_add (dest, line_size, block);
2609  }
2610 }
2611 
2612 static inline void add_dequant_dct(MpegEncContext *s,
2613  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2614 {
2615  if (s->block_last_index[i] >= 0) {
2616  s->dct_unquantize_inter(s, block, i, qscale);
2617 
2618  s->dsp.idct_add (dest, line_size, block);
2619  }
2620 }
2621 
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
/* NOTE(review): the signature line (doxygen 2625) is missing from this
 * listing; presumably "void ff_clean_intra_table_entries(MpegEncContext *s)"
 * — confirm against the repository. */
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* reset the DC predictors of all four luma 8x8 blocks to the
     * "no prediction" value 1024 */
    s->dc_val[0][xy ] =
    s->dc_val[0][xy + 1 ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy ] =
        s->coded_block[xy + 1 ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    /* mark this MB as non-intra for future predictions */
    s->mbintra_table[xy]= 0;
}
2654 
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* NOTE(review): a line is missing here (doxygen 2670) — presumably the
     * XvMC-acceleration guard opening the following if block. */
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        int i,j;
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                /* NOTE(review): the statement line is missing here (doxygen
                 * 2693) — presumably ff_clean_intra_table_entries(s). */
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                /* NOTE(review): a line is missing here (doxygen 2720). */
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y= s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B frames without draw_horiz_band render into a scratchpad and
             * are copied out at the end (see skip_idct tail) */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                /* NOTE(review): a line is missing here (doxygen 2747) —
                 * presumably the frame-threading guard around the waits. */
                if (s->mv_dir & MV_DIR_FORWARD) {
                    /* NOTE(review): lines missing here (doxygen 2749-2750) —
                     * presumably a ff_thread_await_progress() call whose
                     * trailing argument survives below. */
                                              0);
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    /* NOTE(review): lines missing here (doxygen 2754-2755) —
                     * see the forward case above. */
                                              0);
                }
            }

            if(lowres_flag){
                /* NOTE(review): a line is missing here (doxygen 2761) —
                 * presumably the initial "put" chroma pix_op assignment. */

                if (s->mv_dir & MV_DIR_FORWARD) {
                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                    /* NOTE(review): a line is missing here (doxygen 2765) —
                     * presumably switching pix_op to the "avg" variant. */
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                }
            }else{
                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                }else{
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* backward pass averages on top of the forward result */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }
        }

        /* skip dequant / idct if we are really late ;) */
        if(s->avctx->skip_idct){
            /* NOTE(review): a line is missing here (doxygen 2791) — the
             * first half of the skip condition. */
               || s->avctx->skip_idct >= AVDISCARD_ALL)
                goto skip_idct;
        }

        /* add dct residue */
        /* NOTE(review): a line is missing here (doxygen 2797) — the opening
         * of the condition selecting the dequantizing add path. */
                        || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
            add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if (s->chroma_y_shift){
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                }else{
                    dct_linesize >>= 1;
                    dct_offset >>=1;
                    add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
            add_dct(s, block[0], 0, dest_y , dct_linesize);
            add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){//Chroma420
                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
                }else{
                    //chroma422
                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                    add_dct(s, block[4], 4, dest_cb, dct_linesize);
                    add_dct(s, block[5], 5, dest_cr, dct_linesize);
                    add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                    add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                    if(!s->chroma_x_shift){//Chroma444
                        add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                        add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                        add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                        add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                    }
                }
            }//fi gray
        }
        /* NOTE(review): a line is missing here (doxygen 2845) — presumably
         * the "else if" branch head guarding the WMV2 path below. */
            ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
        }
    } else {
        /* dct only in intra block */
        /* NOTE(review): a line is missing here (doxygen 2850) — the
         * condition choosing the dequantizing put_dct path. */
            put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                }else{
                    dct_offset >>=1;
                    dct_linesize >>=1;
                    put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        }else{
            /* blocks are already dequantized: straight IDCT */
            s->dsp.idct_put(dest_y , dct_linesize, block[0]);
            s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
            s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
            s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){
                    s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                    s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                }else{

                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                    s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                    s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                    s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                    s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                    if(!s->chroma_x_shift){//Chroma444
                        s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                        s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                        s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                        s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                    }
                }
            }//gray
        }
    }
skip_idct:
    /* copy the scratchpad result into the real picture */
    if(!readable){
        s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
        s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
        s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
    }
    }
}
2906 
2907 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2908 #if !CONFIG_SMALL
2909  if(s->out_format == FMT_MPEG1) {
2910  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2911  else MPV_decode_mb_internal(s, block, 0, 1);
2912  } else
2913 #endif
2914  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2915  else MPV_decode_mb_internal(s, block, 0, 0);
2916 }
2917 
/**
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
/* NOTE(review): the opening signature line (doxygen 2921) is missing from
 * this listing; it should declare ff_draw_horiz_band(AVCodecContext *avctx,
 * DSPContext *dsp, Picture *cur, ...) matching the parameters used below —
 * confirm against the repository. */
                          Picture *last, int y, int h, int picture_structure,
                          int first_field, int draw_edges, int low_delay,
                          int v_edge_pos, int h_edge_pos)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int hshift = desc->log2_chroma_w;
    int vshift = desc->log2_chroma_h;
    const int field_pic = picture_structure != PICT_FRAME;
    if(field_pic){
        /* a field covers every second line, so double the band extent */
        h <<= 1;
        y <<= 1;
    }

    if (!avctx->hwaccel &&
        /* NOTE(review): a condition line is missing here (doxygen 2936). */
        draw_edges &&
        cur->reference &&
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int *linesize = cur->f.linesize;
        int sides = 0, edge_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= v_edge_pos)
            sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, v_edge_pos - y);

        /* pad the reference picture borders for unrestricted MC */
        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
                        linesize[0], h_edge_pos, edge_h,
                        EDGE_WIDTH, EDGE_WIDTH, sides);
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
    }

    h = FFMIN(h, avctx->height - y);

    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        /* NOTE(review): a declaration line is missing here (doxygen 2965) —
         * presumably "int offset[AV_NUM_DATA_POINTERS];" used below. */
        int i;

        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
           /* NOTE(review): the rest of this condition is missing (doxygen
            * 2969). */
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
            picture_structure == PICT_FRAME &&
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * src->linesize[0];
            offset[1]=
            offset[2]= (y >> vshift) * src->linesize[1];
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, picture_structure, h);
    }
}
2995 
/* NOTE(review): the signature line (doxygen 2996) is missing from this
 * listing; presumably "void ff_mpeg_draw_horiz_band(MpegEncContext *s,
 * int y, int h)" — confirm against the repository. */
{
    int draw_edges = s->unrestricted_mv && !s->intra_only;
    /* NOTE(review): the opening of the ff_draw_horiz_band(...) call
     * (doxygen 2999) is missing; only its trailing argument lines remain. */
                        &s->last_picture, y, h, s->picture_structure,
                        s->first_field, draw_edges, s->low_delay,
                        s->v_edge_pos, s->h_edge_pos);
}
3004 
/* Set up per-macroblock 8x8 block indices and the dest[] plane pointers for
 * the current MB position. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;

    /* indices 0-3: the four luma 8x8 blocks; 4-5: the chroma blocks */
    s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* NOTE(review): a condition line is missing here (doxygen 3022) that
     * guards the row-offset adjustment below — confirm in the repository. */
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y * linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: each field uses every second MB row */
            s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            /* NOTE(review): a line is missing here (doxygen 3032). */
        }
    }
}
3036 
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 * (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    /* nothing to do for an empty block */
    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* pass 1: save every coefficient reachable through the scantable and
     * clear its original slot, so overlapping source/target positions in
     * pass 2 cannot clobber values not yet read */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* pass 2: scatter the saved coefficients to their permuted positions */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
3065 
/* NOTE(review): the signature line (doxygen 3066) is missing from this
 * listing; presumably "void ff_mpeg_flush(AVCodecContext *avctx){" —
 * confirm against the repository. */
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    /* drop all references held in the picture pool */
    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s, &s->picture[i]);
    /* NOTE(review): a line is missing here (doxygen 3075) — presumably
     * clearing the current/last/next picture pointers. */

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    /* reset the bitstream parser state */
    s->parse_context.state= -1;
    /* NOTE(review): a line is missing here (doxygen 3081). */
    s->parse_context.overread= 0;
    /* NOTE(review): a line is missing here (doxygen 3083). */
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
3089 
/* MPEG-1 intra dequantizer. NOTE(review): the signature line (doxygen 3090)
 * is missing from this listing; presumably
 * "static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s," — confirm
 * against the repository. */
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    /* DC uses its own scale: luma for blocks 0-3, chroma otherwise */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                /* "(level - 1) | 1" forces the reconstructed value odd */
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
3118 
/* MPEG-1 inter dequantizer. NOTE(review): the signature line (doxygen 3119)
 * is missing from this listing; presumably
 * "static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s," — confirm
 * against the repository. */
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                /* (2*|level| + 1) * qscale * matrix / 16, then forced odd */
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
3147 
/* MPEG-2 intra dequantizer (no mismatch control). NOTE(review): the
 * signature line (doxygen 3148) is missing from this listing; presumably
 * "static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s," — confirm
 * against the repository. */
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* with alternate scan all 64 coefficients must be processed */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}
3174 
/* Bit-exact MPEG-2 intra dequantizer with mismatch control. NOTE(review):
 * the signature line (doxygen 3175) is missing from this listing; presumably
 * "static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s," —
 * confirm against the repository. */
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;     /* parity accumulator, seeded so an even coeff sum flips block[63] */

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* toggle the LSB of the last coefficient depending on the parity of the
     * coefficient sum (MPEG-2 mismatch control) */
    block[63]^=sum&1;
}
3205 
/* MPEG-2 inter dequantizer with mismatch control. NOTE(review): the
 * signature line (doxygen 3206) is missing from this listing; presumably
 * "static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s," — confirm
 * against the repository. */
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;     /* parity accumulator for the mismatch-control step */

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                /* (2*|level| + 1) * qscale * matrix / 16 */
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* MPEG-2 mismatch control: flip LSB of last coeff by sum parity */
    block[63]^=sum&1;
}
3236 
/* H.263 intra dequantizer: level' = 2*qscale*level +/- qadd. NOTE(review):
 * the signature line (doxygen 3237) is missing from this listing; presumably
 * "static void dct_unquantize_h263_intra_c(MpegEncContext *s," — confirm
 * against the repository. */
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;   /* odd rounding offset */
    }else{
        qadd = 0;                  /* advanced intra coding: no offset */
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    /* coefficients are processed in raster order, no scantable needed */
    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
3270 
/* H.263 inter dequantizer: level' = 2*qscale*level +/- qadd, including DC.
 * NOTE(review): the signature line (doxygen 3271) is missing from this
 * listing; presumably "static void dct_unquantize_h263_inter_c(MpegEncContext *s,"
 * — confirm against the repository. */
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;   /* odd rounding offset */
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    /* raster-order loop, starting at the DC coefficient */
    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
3296 
/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    /* clamp to the legal 1..31 quantizer range */
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale= s->chroma_qscale_table[qscale];

    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
    /* NOTE(review): a line is missing here (doxygen 3311) — presumably the
     * matching chroma DC scale update (s->c_dc_scale from the chroma
     * qscale); confirm in the repository. */
}
3313 
/* NOTE(review): both the signature line (doxygen 3314) and the body lines
 * (3316-3317 — presumably a ff_thread_report_progress() call on the current
 * picture) of this function were lost in extraction of this listing; restore
 * them from the repository. */
{
}
3319 
#if CONFIG_ERROR_RESILIENCE
/* NOTE(review): the signature line (doxygen 3321) is missing from this
 * listing; presumably "void ff_mpeg_er_frame_start(MpegEncContext *s)" —
 * confirm against the repository. */
{
    ERContext *er = &s->er;

    /* mirror the decoder's picture state into the error-resilience context */
    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time = s->pp_time;
    er->pb_time = s->pb_time;
    er->quarter_sample = s->quarter_sample;
    /* NOTE(review): a line is missing here (doxygen 3332). */

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */
int bitstream_buffer_size
Definition: mpegvideo.h:611
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:190
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:663
int last
number of values for last = 0
Definition: rl.h:40
#define FF_DEBUG_DCT_COEFF
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
Definition: mpegvideo.c:505
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:543
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread.c:959
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
Definition: mpegvideo.h:401
int table_size
Definition: get_bits.h:66
#define PICT_TOP_FIELD
Definition: mpegvideo.h:662
discard all frames except keyframes
int8_t * ref_index[2]
Definition: mpegvideo.h:114
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3005
unsigned int stream_codec_tag
fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A')...
float v
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2675
int picture_number
Definition: mpegvideo.h:275
const char * s
Definition: avisynth_c.h:668
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:66
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define CONFIG_MPEG_XVMC_DECODER
Definition: config.h:527
#define AV_NUM_DATA_POINTERS
Definition: frame.h:77
ScanTable intra_v_scantable
Definition: mpegvideo.h:298
AVBufferRef * mb_var_buf
Definition: mpegvideo.h:116
static int shift(int a, int b)
Definition: sonic.c:86
#define CONFIG_WMV2_ENCODER
Definition: config.h:1082
#define FF_DEBUG_MV
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
Definition: mpegvideo.c:1396
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpegvideo.h:555
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
#define IS_SKIP(a)
Definition: mpegvideo.h:140
FIXME Range Coding of cr are ref
Definition: snow.txt:367
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Allocate a Picture.
Definition: mpegvideo.c:384
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:405
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegvideo.h:365
#define FF_DEBUG_VIS_QP
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:316
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:424
#define MAKE_WRITABLE(table)
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:351
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideo.h:126
int coded_width
Bitstream width / height, may be different from width/height e.g.
av_cold int ff_dct_common_init(MpegEncContext *s)
Definition: mpegvideo.c:160
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:993
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:718
misc image utilities
void ff_MPV_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:3314
AVFrame * f
Definition: thread.h:36
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:2361
uint8_t * coded_block_base
Definition: mpegvideo.h:354
else temp
Definition: vf_mcdeint.c:148
static int update_picture_tables(Picture *dst, Picture *src)
Definition: mpegvideo.c:460
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:343
#define EDGE_TOP
Definition: dsputil.h:265
AVFrame * coded_frame
the picture in the bitstream
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:317
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideo.h:117
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:497
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
Definition: mpegvideo.c:3045
void ff_MPV_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
int16_t(*[3] ac_val)[16]
used for for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:357
MJPEG encoder.
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:281
void * opaque
for some private data of the user
Definition: frame.h:249
#define me
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:639
static const uint8_t mpeg2_dc_scale_table3[128]
Definition: mpegvideo.c:108
void ff_xvmc_field_end(MpegEncContext *s)
Complete frame/field rendering by passing any remaining blocks.
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:176
uint8_t * bitstream_buffer
Definition: mpegvideo.h:610
enum AVCodecID codec_id
Definition: mpegvideo.h:257
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
void ff_init_rl(RLTable *rl, uint8_t static_store[2][2 *MAX_RUN+MAX_LEVEL+3])
Definition: mpegvideo.c:1304
Sinusoidal phase f
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:169
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:411
int16_t(* p_mv_table_base)[2]
Definition: mpegvideo.h:397
static int make_tables_writable(Picture *pic)
Definition: mpegvideo.c:355
uint8_t raster_end[64]
Definition: dsputil.h:117
#define wrap(func)
Definition: w64xmmtest.h:70
uint32_t * score_map
map to store the scores
Definition: mpegvideo.h:196
mpegvideo header.
#define FF_ARRAY_ELEMS(a)
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
#define FF_DEBUG_VIS_MV_B_BACK
uint8_t permutated[64]
Definition: dsputil.h:116
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:135
const int8_t * table_level
Definition: rl.h:43
uint8_t run
Definition: svq3.c:136
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:588
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define FF_DEBUG_VIS_MB_TYPE
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:601
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:282
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
int stride
Definition: mace.c:144
int frame_start_found
Definition: parser.h:34
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
Find and store the surfaces that are used as reference frames.
int qscale
QP.
Definition: mpegvideo.h:369
RLTable.
Definition: rl.h:38
int h263_aic
Advanded INTRA Coding (AIC)
Definition: mpegvideo.h:292
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:407
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
int chroma_x_shift
Definition: mpegvideo.h:679
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:259
output residual component w
int field_select[2][2]
Definition: mpegvideo.h:432
av_cold void ff_MPV_common_init_axp(MpegEncContext *s)
int block_wrap[6]
Definition: mpegvideo.h:466
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3119
#define FFALIGN(x, a)
Definition: common.h:63
int16_t(* b_back_mv_table_base)[2]
Definition: mpegvideo.h:399
uint16_t pp_time
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:608
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:156
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2625
#define COLOR(theta, r)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
#define FF_DEBUG_QP
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
int b_frame_score
Definition: mpegvideo.h:175
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegvideo.h:122
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
struct AVHWAccel * hwaccel
Hardware accelerator in use.
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:276
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2996
int8_t * max_run[2]
encoding & decoding
Definition: rl.h:46
void ff_MPV_common_init_altivec(MpegEncContext *s)
int context_reinit
Definition: mpegvideo.h:740
const uint8_t ff_alternate_vertical_scan[64]
Definition: dsputil.c:85
static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:2666
int16_t * dc_val_base
Definition: mpegvideo.h:349
int ff_MPV_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1171
uint8_t
#define av_cold
Definition: attributes.h:78
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
#define IS_8X16(a)
Definition: mpegvideo.h:147
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:286
#define PICT_FRAME
Definition: mpegvideo.h:664
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with as it s useful too and the implementation is trivial when you re doing this Note that draw_edges() needs to be called before reporting progress.Before accessing a reference frame or its MVs
enum OutputFormat out_format
output format
Definition: mpegvideo.h:249
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:77
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:503
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Definition: mpegvideo.h:363
end end
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:228
#define emms_c()
#define IS_GMC(a)
Definition: mpegvideo.h:144
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:439
int interlaced_dct
Definition: mpegvideo.h:684
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:493
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:343
int intra_dc_precision
Definition: mpegvideo.h:666
static int pic_is_unused(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:1409
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:142
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:313
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:408
float * cplx_tab
Definition: mpegvideo.h:736
int8_t * max_level[2]
encoding & decoding
Definition: rl.h:45
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:560
uint8_t idct_permutation[64]
idct input permutation.
Definition: dsputil.h:249
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:107
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
Definition: mpegvideo.h:367
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: mpegvideo.c:132
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:261
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, int stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:2182
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:277
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
enum AVPixelFormat ff_pixfmt_list_420[]
Definition: mpegvideo.c:127
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1717
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:267
char * stats_out
pass1 encoding statistics output buffer
int16_t(*[2][2] p_field_mv_table_base)[2]
Definition: mpegvideo.h:403
static int free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
Definition: mpegvideo.c:1120
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo.c:1468
#define MAX_LEVEL
Definition: rl.h:35
#define IS_INTERLACED(a)
Definition: mpegvideo.h:142
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:38
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:861
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:3300
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:84
#define ROUNDED_DIV(a, b)
Definition: common.h:50
AVBufferRef * mb_mean_buf
Definition: mpegvideo.h:125
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:494
void ff_xvmc_decode_mb(MpegEncContext *s)
Synthesize the data needed by XvMC to render one macroblock of data.
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:492
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:377
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:247
Discrete Time axis x
ThreadFrame tf
Definition: mpegvideo.h:99
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:350
enum AVCodecID id
int h263_plus
h263 plus headers
Definition: mpegvideo.h:254
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:319
int last_non_b_pict_type
used for mpeg4 gmc b-frames & ratecontrol
Definition: mpegvideo.h:380
unsigned int buffer_size
Definition: parser.h:32
int width
width and height of the video frame
Definition: frame.h:122
uint8_t * mbskip_table
Definition: mpegvideo.h:111
int stream_codec_tag
internal stream_codec_tag upper case converted from avctx stream_codec_tag
Definition: mpegvideo.h:268
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:348
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:2604
static int ff_h263_round_chroma(int x)
Definition: mpegvideo.h:884
Multithreading support functions.
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:358
int chroma_y_shift
Definition: mpegvideo.h:680
static int find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1420
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:589
const uint8_t ff_alternate_horizontal_scan[64]
Definition: dsputil.c:74
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1806
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:388
ERContext er
Definition: mpegvideo.h:742
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:384
int reference
Definition: mpegvideo.h:178
const char * r
Definition: vf_curves.c:94
#define FF_DEBUG_VIS_MV_B_FOR
int capabilities
Codec capabilities.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:364
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3175
int flags
CODEC_FLAG_*.
struct Picture * next_pic
void(* clear_blocks)(int16_t *blocks)
Definition: dsputil.h:146
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:2596
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3090
#define FF_DEBUG_SKIP
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:579
uint16_t * mb_type
Table for candidate MB types for encoding.
Definition: mpegvideo.h:446
#define IS_INTRA(a)
Definition: mpegvideo.h:138
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -> (sx, sy).
Definition: mpegvideo.c:1794
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:592
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
Deallocate a picture.
Definition: mpegvideo.c:441
VLC vlc
decoding only deprecated FIXME remove
Definition: rl.h:47
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:414
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
#define FFMAX(a, b)
Definition: common.h:56
external API header
int8_t len
Definition: get_bits.h:71
uint8_t * mbintra_table
int * mb_index2xy
int priv_data_size
Size of HW accelerator private data.
static const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideo.c:66
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:3066
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:37
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:274
int * lambda_table
Definition: mpegvideo.h:373
uint8_t * error_status_table
AVBufferRef * hwaccel_priv_buf
Definition: mpegvideo.h:128
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
#define MAX_THREADS
int n
number of entries of table_vlc minus 1
Definition: rl.h:39
#define IS_8X8(a)
Definition: mpegvideo.h:148
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:231
int err_recognition
Definition: mpegvideo.h:532
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:104
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:2170
int progressive_frame
Definition: mpegvideo.h:682
void ff_mpeg_er_frame_start(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
struct Picture * last_pic
#define UPDATE_PICTURE(pic)
int top_field_first
Definition: mpegvideo.h:668
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
uint8_t * er_temp_buffer
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:58
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:498
int last_index
Definition: parser.h:31
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:531
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3206
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:337
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:708
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
Definition: dsputil.h:263
ret
Definition: avfilter.c:821
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:359
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:347
Picture.
Definition: mpegvideo.h:97
int alternate_scan
Definition: mpegvideo.h:672
unsigned int allocated_bitstream_buffer_size
Definition: mpegvideo.h:612
void * hwaccel_picture_private
hardware accelerator private data
Definition: mpegvideo.h:132
uint16_t pb_time
int16_t(* ac_val_base)[16]
Definition: mpegvideo.h:356
int32_t
DSPContext * dsp
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:496
const int8_t * table_run
Definition: rl.h:42
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
#define FFABS(a)
Definition: common.h:53
int16_t(*[2][2][2] b_field_mv_table_base)[2]
Definition: mpegvideo.h:404
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:185
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:644
int16_t(* b_forw_mv_table_base)[2]
Definition: mpegvideo.h:398
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:698
#define CONFIG_GRAY
Definition: config.h:376
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:291
MotionEstContext me
Definition: mpegvideo.h:437
float u
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
Definition: mpegvideo.c:204
#define EDGE_BOTTOM
Definition: dsputil.h:266
int mb_decision
macroblock decision mode
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:28
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:361
FIXME Range Coding of cr are level
Definition: snow.txt:367
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: imgconvert.c:65
#define ME_MAP_SIZE
Definition: mpegvideo.h:68
#define FF_DEBUG_MB_TYPE
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1493
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:714
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, Picture *last, int y, int h, int picture_structure, int first_field, int draw_edges, int low_delay, int v_edge_pos, int h_edge_pos)
Definition: mpegvideo.c:2921
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:658
RL_VLC_ELEM * rl_vlc[32]
decoding only
Definition: rl.h:48
preferred ID for MPEG-1/2 video decoding
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:465
#define IS_16X8(a)
Definition: mpegvideo.h:146
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
int xvmc_acceleration
XVideo Motion Acceleration.
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:469
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:686
static const int8_t mv[256][2]
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:134
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:421
for k
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:76
NULL
Definition: eval.c:55
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideo.h:120
AVBufferRef * qscale_table_buf
Definition: mpegvideo.h:101
or the Software in violation of any applicable export control laws in any jurisdiction Except as provided by mandatorily applicable UPF has no obligation to provide you with source code to the Software In the event Software contains any source code
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:418
struct Picture * cur_pic
int16_t(* b_bidir_forw_mv_table_base)[2]
Definition: mpegvideo.h:400
void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table, int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1878
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideo.c:120
static int width
Definition: tests/utils.c:158
int coded_picture_number
picture number in bitstream order
Definition: frame.h:176
dest
Definition: start.py:60
uint16_t inter_matrix[64]
Definition: mpegvideo.h:474
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegvideo.h:123
uint8_t * buffer
Definition: parser.h:29
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:318
AVS_Value src
Definition: avisynth_c.h:523
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with ff_thread_release_buffer().Otherwise leave it at zero and decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
enum AVCodecID codec_id
static av_const unsigned int ff_sqrt(unsigned int a)
Definition: mathops.h:198
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
void ff_MPV_common_init_x86(MpegEncContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Definition: dsputil.h:235
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
main external API structure.
ScanTable intra_scantable
Definition: mpegvideo.h:296
uint8_t * data
The data buffer.
Definition: buffer.h:89
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:355
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:245
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideo.c:72
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3148
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:712
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:423
void * buf
Definition: avisynth_c.h:594
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:2164
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
Picture * picture
main picture buffer
Definition: mpegvideo.h:285
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int progressive_sequence
Definition: mpegvideo.h:658
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
#define FF_THREAD_FRAME
Decode more than one frame at once.
int slice_flags
slice flags
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2907
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
ScanTable intra_h_scantable
Definition: mpegvideo.h:297
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:412
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
Definition: mpegvideo.h:362
int closed_gop
MPEG1/2 GOP is closed.
Definition: mpegvideo.h:376
#define UPDATE_TABLE(table)
synthesis window for stochastic i
unsigned int avpriv_toupper4(unsigned int x)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:716
uint8_t * index_run[2]
encoding only
Definition: rl.h:44
int context_initialized
Definition: mpegvideo.h:272
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:273
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:391
#define s1
Definition: regdef.h:38
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread.c:1066
int f_code
forward MV resolution
Definition: mpegvideo.h:395
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
#define COPY(a)
#define type
AVCodecContext * avctx
#define MV_DIR_FORWARD
Definition: mpegvideo.h:417
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:262
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:377
int size
Size of data in bytes.
Definition: buffer.h:93
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:250
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:409
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:871
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:413
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:410
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:352
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:136
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:229
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:431
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:406
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:279
int noise_reduction
noise reduction strength
#define IS_ACPRED(a)
Definition: mpegvideo.h:153
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3237
MpegEncContext.
Definition: mpegvideo.h:241
uint8_t run
Definition: get_bits.h:72
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:346
int8_t * qscale_table
Definition: mpegvideo.h:102
#define MAX_RUN
Definition: rl.h:34
struct AVCodecContext * avctx
Definition: mpegvideo.h:243
A reference to a data buffer.
Definition: buffer.h:81
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw an arrow from (ex, ey) -> (sx, sy).
Definition: mpegvideo.c:1847
discard all non reference
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:726
av_cold void ff_MPV_common_init_bfin(MpegEncContext *s)
#define CODEC_FLAG_EMU_EDGE
Don&#39;t draw edges.
int(* dct_error_sum)[64]
Definition: mpegvideo.h:501
int partitioned_frame
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
void ff_init_vlc_rl(RLTable *rl)
Definition: mpegvideo.c:1356
#define FF_MB_DECISION_RD
rate distortion
common internal api header.
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:115
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:278
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
AVBufferRef * mbskip_table_buf
Definition: mpegvideo.h:110
void ff_MPV_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:800
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define IS_PCM(a)
Definition: mpegvideo.h:137
uint8_t * dest[3]
Definition: mpegvideo.h:467
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
int shared
Definition: mpegvideo.h:179
static double c[64]
int last_pict_type
Definition: mpegvideo.h:379
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:3271
int b4_stride
4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
Definition: mpegvideo.h:280
int16_t * dc_val[3]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:325
uint8_t * obmc_scratchpad
Definition: mpegvideo.h:366
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
Allocate a frame buffer.
Definition: mpegvideo.c:232
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:345
Bi-dir predicted.
Definition: avutil.h:218
int index
Definition: parser.h:30
function y
Definition: D.m:1
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
Definition: mpegvideo.h:353
uint32_t * map
map to avoid duplicate evaluations
Definition: mpegvideo.h:195
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:635
DSP utils.
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display
static int lowres
Definition: ffplay.c:298
H264ChromaContext h264chroma
Definition: mpegvideo.h:392
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:701
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: dsputil.h:229
int slices
Number of slices.
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:58
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:705
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:27
int picture_structure
Definition: mpegvideo.h:660
VideoDSPContext vdsp
Definition: mpegvideo.h:394
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:275
#define IS_DIRECT(a)
Definition: mpegvideo.h:143
int len
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1244
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:724
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:425
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: dsputil.c:110
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:2560
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:700
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
ParseContext parse_context
Definition: mpegvideo.h:534
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:2612
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:331
#define EDGE_WIDTH
Definition: dsputil.h:264
AVBufferRef * mc_mb_var_buf
Definition: mpegvideo.h:119
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
static const uint8_t mpeg2_dc_scale_table1[128]
Definition: mpegvideo.c:84
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:283
#define CONFIG_WMV2_DECODER
Definition: config.h:625
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:2229
int16_t level
Definition: get_bits.h:70
#define IS_16X16(a)
Definition: mpegvideo.h:145
Picture ** reordered_input_picture
pointer to the next pictures in codedorder for encoding
Definition: mpegvideo.h:287
int flags2
CODEC_FLAG2_*.
static void free_picture_tables(Picture *pic)
Definition: mpegvideo.c:293
#define HAVE_THREADS
Definition: config.h:274
static int init_er(MpegEncContext *s)
Definition: mpegvideo.c:831
static const uint8_t mpeg2_dc_scale_table2[128]
Definition: mpegvideo.c:96
int chroma_qscale
chroma QP
Definition: mpegvideo.h:370
struct AVFrame f
Definition: mpegvideo.h:98
int mb_var_sum
sum of MB variance for current frame
Definition: mpegvideo.h:172
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264.c:1539
void ff_er_frame_start(ERContext *s)
int height
Definition: frame.h:122
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:260
#define AV_LOG_INFO
Definition: log.h:156
int mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideo.h:173
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:472
uint32_t * mb_type
Definition: mpegvideo.h:108
void INT64 INT64 count
Definition: avisynth_c.h:594
void INT64 start
Definition: avisynth_c.h:594
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:266
ScanTable inter_scantable
if inter == intra then intra should be used to reduce tha cache usage
Definition: mpegvideo.h:295
#define av_always_inline
Definition: attributes.h:41
uint8_t * temp
Definition: mpegvideo.h:193
#define FFSWAP(type, a, b)
Definition: common.h:61
const char int length
Definition: avisynth_c.h:668
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
Definition: h264.c:225
static int first_field(int fd)
Definition: v4l2.c:262
av_cold void ff_MPV_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:41
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1453
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:422
#define FF_DEBUG_VIS_MV_P_FOR
int16_t(* b_direct_mv_table_base)[2]
Definition: mpegvideo.h:402
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:396
float * bits_tab
Definition: mpegvideo.h:736
int dct_count[2]
Definition: mpegvideo.h:502
uint8_t * mbskip_table
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:710
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:284
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:2426
for(j=16;j >0;--j)
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:51
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Predicted.
Definition: avutil.h:217
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
DSPContext.
Definition: dsputil.h:127
void ff_MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:826
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:561
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:113
HpelDSPContext hdsp
Definition: mpegvideo.h:393