yading@10
|
1 /*
|
yading@10
|
2 * The simplest mpeg encoder (well, it was the simplest!)
|
yading@10
|
3 * Copyright (c) 2000,2001 Fabrice Bellard
|
yading@10
|
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
|
yading@10
|
5 *
|
yading@10
|
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
|
yading@10
|
7 *
|
yading@10
|
8 * This file is part of FFmpeg.
|
yading@10
|
9 *
|
yading@10
|
10 * FFmpeg is free software; you can redistribute it and/or
|
yading@10
|
11 * modify it under the terms of the GNU Lesser General Public
|
yading@10
|
12 * License as published by the Free Software Foundation; either
|
yading@10
|
13 * version 2.1 of the License, or (at your option) any later version.
|
yading@10
|
14 *
|
yading@10
|
15 * FFmpeg is distributed in the hope that it will be useful,
|
yading@10
|
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
yading@10
|
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
yading@10
|
18 * Lesser General Public License for more details.
|
yading@10
|
19 *
|
yading@10
|
20 * You should have received a copy of the GNU Lesser General Public
|
yading@10
|
21 * License along with FFmpeg; if not, write to the Free Software
|
yading@10
|
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
yading@10
|
23 */
|
yading@10
|
24
|
yading@10
|
25 /**
|
yading@10
|
26 * @file
|
yading@10
|
27 * The simplest mpeg encoder (well, it was the simplest!).
|
yading@10
|
28 */
|
yading@10
|
29
|
yading@10
|
30 #include "libavutil/avassert.h"
|
yading@10
|
31 #include "libavutil/imgutils.h"
|
yading@10
|
32 #include "avcodec.h"
|
yading@10
|
33 #include "dsputil.h"
|
yading@10
|
34 #include "h264chroma.h"
|
yading@10
|
35 #include "internal.h"
|
yading@10
|
36 #include "mathops.h"
|
yading@10
|
37 #include "mpegvideo.h"
|
yading@10
|
38 #include "mjpegenc.h"
|
yading@10
|
39 #include "msmpeg4.h"
|
yading@10
|
40 #include "xvmc_internal.h"
|
yading@10
|
41 #include "thread.h"
|
yading@10
|
42 #include <limits.h>
|
yading@10
|
43
|
yading@10
|
44 //#undef NDEBUG
|
yading@10
|
45 //#include <assert.h>
|
yading@10
|
46
|
yading@10
|
/* Forward declarations of the per-codec inverse-quantization ("unquantize")
 * implementations defined later in this file; ff_dct_common_init() installs
 * them into the MpegEncContext function pointers. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
|
yading@10
|
61
|
yading@10
|
62
|
yading@10
|
63 //#define DEBUG
|
yading@10
|
64
|
yading@10
|
65
|
yading@10
|
/* Default chroma qscale table: the identity mapping, i.e. chroma uses the
 * same quantizer scale as luma. Codecs with a nonlinear chroma qscale
 * (e.g. H.263+) install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
|
yading@10
|
71
|
yading@10
|
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value
 * (MPEG-1 has no qscale-dependent intra-DC precision). Indexed by qscale. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
|
yading@10
|
83
|
yading@10
|
/* MPEG-2 DC scale for 9-bit intra-DC precision: constant 4.
 * Selected via ff_mpeg2_dc_scale_table[intra_dc_precision]. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
|
yading@10
|
95
|
yading@10
|
/* MPEG-2 DC scale for 10-bit intra-DC precision: constant 2.
 * Selected via ff_mpeg2_dc_scale_table[intra_dc_precision]. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
|
yading@10
|
107
|
yading@10
|
/* MPEG-2 DC scale for 11-bit intra-DC precision: constant 1 (no scaling).
 * Selected via ff_mpeg2_dc_scale_table[intra_dc_precision]. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
|
yading@10
|
119
|
yading@10
|
/* DC scale tables indexed by MPEG-2 intra_dc_precision (0..3);
 * entry 0 (8-bit precision) reuses the MPEG-1 table. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
|
yading@10
|
126
|
yading@10
|
/* Pixel formats supported by the plain YUV 4:2:0 mpegvideo codecs;
 * AV_PIX_FMT_NONE terminates the list. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
|
yading@10
|
131
|
yading@10
|
/**
 * Error-resilience callback: re-decode one macroblock using the motion
 * parameters chosen by the error concealment code.
 *
 * @param opaque     the MpegEncContext (passed as void* by the ER framework)
 * @param ref        reference index; only 0 is supported here (asserted)
 * @param mv         motion vectors to use, copied into s->mv
 * @param mb_x,mb_y  macroblock position
 */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Install the caller-supplied macroblock parameters into the context
     * before invoking the regular MB decode path. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    /* Point dest[] at this MB inside the current frame; luma MBs are 16x16,
     * chroma offsets are reduced by the pixel format's chroma shifts. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    assert(ref == 0);
    ff_MPV_decode_mb(s, s->block);
}
|
yading@10
|
158
|
yading@10
|
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    /* Initialize the shared DSP helper contexts. */
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* Install the C reference unquantizers; arch-specific init below may
     * override them with optimized versions. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        /* bit-exact variant needed for reproducible (e.g. regression) output */
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
|
yading@10
|
203
|
yading@10
|
/**
 * Allocate the per-context scratch buffers (edge emulation and motion
 * estimation scratchpads) sized for the given line size.
 *
 * @param linesize luma line size; may be negative (flipped image), hence FFABS
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    /* +64 guard for filter overread, rounded up to a 32-byte alignment. */
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    /* The remaining scratch pointers alias regions of me.scratchpad;
     * they are not separately allocated or freed. */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
|
yading@10
|
228
|
yading@10
|
/**
 * Allocate a frame buffer
 *
 * Uses the (possibly user-supplied) get_buffer path via ff_thread_get_buffer,
 * except for the WM image/screen codecs which allocate internal buffers with
 * avcodec_default_get_buffer2. Also validates that strides did not change and
 * lazily allocates the context scratch buffers.
 *
 * @return 0 on success, a negative value on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    else {
        /* These codecs use internal buffer dimensions/formats, so bypass
         * any user-defined get_buffer callback. */
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* The rest of the context assumes constant strides; reject buffers
     * whose strides differ from what was negotiated earlier. */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    /* Lazily (re)allocate scratch buffers for the now-known line size. */
    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
|
yading@10
|
292
|
yading@10
|
293 static void free_picture_tables(Picture *pic)
|
yading@10
|
294 {
|
yading@10
|
295 int i;
|
yading@10
|
296
|
yading@10
|
297 pic->alloc_mb_width =
|
yading@10
|
298 pic->alloc_mb_height = 0;
|
yading@10
|
299
|
yading@10
|
300 av_buffer_unref(&pic->mb_var_buf);
|
yading@10
|
301 av_buffer_unref(&pic->mc_mb_var_buf);
|
yading@10
|
302 av_buffer_unref(&pic->mb_mean_buf);
|
yading@10
|
303 av_buffer_unref(&pic->mbskip_table_buf);
|
yading@10
|
304 av_buffer_unref(&pic->qscale_table_buf);
|
yading@10
|
305 av_buffer_unref(&pic->mb_type_buf);
|
yading@10
|
306
|
yading@10
|
307 for (i = 0; i < 2; i++) {
|
yading@10
|
308 av_buffer_unref(&pic->motion_val_buf[i]);
|
yading@10
|
309 av_buffer_unref(&pic->ref_index_buf[i]);
|
yading@10
|
310 }
|
yading@10
|
311 }
|
yading@10
|
312
|
yading@10
|
/**
 * Allocate the per-picture side-data tables (skip/qscale/mb-type always;
 * variance/mean when encoding; motion vectors and ref indices when needed).
 *
 * On partial failure the already-allocated buffers are left in place;
 * the caller (ff_alloc_picture) frees them via free_picture_tables().
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    /* big_mb_num includes one extra row plus one entry of padding for
     * edge macroblocks. */
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        /* Encoder-only statistics tables. */
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    /* Record the dimensions so ff_alloc_picture can detect stale tables. */
    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}
|
yading@10
|
354
|
yading@10
|
/**
 * Ensure every per-picture table buffer is writable (not shared with
 * another reference), copying it if needed.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
/* Make one (possibly NULL) buffer writable; propagate failure. */
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
|
yading@10
|
379
|
yading@10
|
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 *
 * Also (re)allocates the side-data tables when missing or sized for
 * different dimensions, and sets up the convenience pointers into them.
 *
 * @param shared if nonzero, f.data[] is already set by the caller
 * @return 0 on success, a negative value on failure
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    /* Drop stale tables if the frame dimensions changed. */
    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            free_picture_tables(pic);

    if (shared) {
        assert(pic->f.data[0]);
        pic->shared = 1;
    } else {
        assert(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        /* Remember the negotiated strides for later consistency checks. */
        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    /* The +2*mb_stride+1 offset skips the padding row/column so index 0
     * corresponds to the first real macroblock. */
    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
|
yading@10
|
437
|
yading@10
|
/**
 * Deallocate a picture.
 *
 * Releases the frame buffer (through the threading layer where applicable)
 * and the hwaccel private buffer, then zeroes all Picture fields past the
 * buffer-reference members so the struct can be reused.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* Everything up to and including mb_mean holds AVBufferRefs/pointers
     * that must survive the memset below; only the tail is cleared. */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
|
yading@10
|
459
|
yading@10
|
/**
 * Make dst's per-picture tables reference the same buffers as src's,
 * replacing (unref + ref) only those that differ, then copy the plain
 * table pointers and allocation dimensions.
 *
 * On allocation failure all of dst's tables are freed.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

/* Re-point one buffer ref at src's buffer unless it already shares it. */
#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* Raw pointers into the (now shared) buffers can be copied directly. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}
|
yading@10
|
504
|
yading@10
|
505 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
|
yading@10
|
506 {
|
yading@10
|
507 int ret;
|
yading@10
|
508
|
yading@10
|
509 av_assert0(!dst->f.buf[0]);
|
yading@10
|
510 av_assert0(src->f.buf[0]);
|
yading@10
|
511
|
yading@10
|
512 src->tf.f = &src->f;
|
yading@10
|
513 dst->tf.f = &dst->f;
|
yading@10
|
514 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
|
yading@10
|
515 if (ret < 0)
|
yading@10
|
516 goto fail;
|
yading@10
|
517
|
yading@10
|
518 ret = update_picture_tables(dst, src);
|
yading@10
|
519 if (ret < 0)
|
yading@10
|
520 goto fail;
|
yading@10
|
521
|
yading@10
|
522 if (src->hwaccel_picture_private) {
|
yading@10
|
523 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
|
yading@10
|
524 if (!dst->hwaccel_priv_buf)
|
yading@10
|
525 goto fail;
|
yading@10
|
526 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
|
yading@10
|
527 }
|
yading@10
|
528
|
yading@10
|
529 dst->field_picture = src->field_picture;
|
yading@10
|
530 dst->mb_var_sum = src->mb_var_sum;
|
yading@10
|
531 dst->mc_mb_var_sum = src->mc_mb_var_sum;
|
yading@10
|
532 dst->b_frame_score = src->b_frame_score;
|
yading@10
|
533 dst->needs_realloc = src->needs_realloc;
|
yading@10
|
534 dst->reference = src->reference;
|
yading@10
|
535 dst->shared = src->shared;
|
yading@10
|
536
|
yading@10
|
537 return 0;
|
yading@10
|
538 fail:
|
yading@10
|
539 ff_mpeg_unref_picture(s, dst);
|
yading@10
|
540 return ret;
|
yading@10
|
541 }
|
yading@10
|
542
|
yading@10
|
/**
 * Allocate the per-thread (duplicate) context buffers: DCT blocks,
 * encoder motion-estimation maps, and H.263 AC prediction values.
 * The frame-size-dependent scratch buffers are allocated lazily later
 * (see ff_mpv_frame_size_alloc), so they are just cleared here.
 *
 * @return 0 on success, -1 on allocation failure (buffers already
 *         allocated are freed later through ff_MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s)
{
    /* Sizes of the AC prediction planes: one luma plane (y_size) plus
     * two chroma planes (c_size each). */
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* Frame-size-dependent scratch buffers: allocated on demand once the
     * line size is known. */
    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* +stride+1 offsets skip the padding row/column of each plane. */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
|
yading@10
|
587
|
yading@10
|
588 static void free_duplicate_context(MpegEncContext *s)
|
yading@10
|
589 {
|
yading@10
|
590 if (s == NULL)
|
yading@10
|
591 return;
|
yading@10
|
592
|
yading@10
|
593 av_freep(&s->edge_emu_buffer);
|
yading@10
|
594 av_freep(&s->me.scratchpad);
|
yading@10
|
595 s->me.temp =
|
yading@10
|
596 s->rd_scratchpad =
|
yading@10
|
597 s->b_scratchpad =
|
yading@10
|
598 s->obmc_scratchpad = NULL;
|
yading@10
|
599
|
yading@10
|
600 av_freep(&s->dct_error_sum);
|
yading@10
|
601 av_freep(&s->me.map);
|
yading@10
|
602 av_freep(&s->me.score_map);
|
yading@10
|
603 av_freep(&s->blocks);
|
yading@10
|
604 av_freep(&s->ac_val_base);
|
yading@10
|
605 s->block = NULL;
|
yading@10
|
606 }
|
yading@10
|
607
|
yading@10
|
/**
 * Copy into bak exactly those MpegEncContext fields that are private to a
 * duplicate (per-thread) context — buffer pointers and per-slice state —
 * so they survive the wholesale memcpy in ff_update_duplicate_context().
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
|
yading@10
|
634
|
yading@10
|
/**
 * Synchronize a duplicate (per-thread) context with the master context:
 * bulk-copy src over dst while preserving dst's own buffers via
 * backup_duplicate_context, then fix up the internal pointers.
 *
 * @return 0 on success, a negative AVERROR on scratch-buffer allocation failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    /* Save dst's private buffers, overwrite dst with src, restore them. */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
    return 0;
}
|
yading@10
|
657
|
yading@10
|
658 int ff_mpeg_update_thread_context(AVCodecContext *dst,
|
yading@10
|
659 const AVCodecContext *src)
|
yading@10
|
660 {
|
yading@10
|
661 int i, ret;
|
yading@10
|
662 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
|
yading@10
|
663
|
yading@10
|
664 if (dst == src)
|
yading@10
|
665 return 0;
|
yading@10
|
666
|
yading@10
|
667 av_assert0(s != s1);
|
yading@10
|
668
|
yading@10
|
669 // FIXME can parameters change on I-frames?
|
yading@10
|
670 // in that case dst may need a reinit
|
yading@10
|
671 if (!s->context_initialized) {
|
yading@10
|
672 memcpy(s, s1, sizeof(MpegEncContext));
|
yading@10
|
673
|
yading@10
|
674 s->avctx = dst;
|
yading@10
|
675 s->bitstream_buffer = NULL;
|
yading@10
|
676 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
|
yading@10
|
677
|
yading@10
|
678 if (s1->context_initialized){
|
yading@10
|
679 // s->picture_range_start += MAX_PICTURE_COUNT;
|
yading@10
|
680 // s->picture_range_end += MAX_PICTURE_COUNT;
|
yading@10
|
681 if((ret = ff_MPV_common_init(s)) < 0){
|
yading@10
|
682 memset(s, 0, sizeof(MpegEncContext));
|
yading@10
|
683 s->avctx = dst;
|
yading@10
|
684 return ret;
|
yading@10
|
685 }
|
yading@10
|
686 }
|
yading@10
|
687 }
|
yading@10
|
688
|
yading@10
|
689 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
|
yading@10
|
690 s->context_reinit = 0;
|
yading@10
|
691 s->height = s1->height;
|
yading@10
|
692 s->width = s1->width;
|
yading@10
|
693 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
|
yading@10
|
694 return ret;
|
yading@10
|
695 }
|
yading@10
|
696
|
yading@10
|
697 s->avctx->coded_height = s1->avctx->coded_height;
|
yading@10
|
698 s->avctx->coded_width = s1->avctx->coded_width;
|
yading@10
|
699 s->avctx->width = s1->avctx->width;
|
yading@10
|
700 s->avctx->height = s1->avctx->height;
|
yading@10
|
701
|
yading@10
|
702 s->coded_picture_number = s1->coded_picture_number;
|
yading@10
|
703 s->picture_number = s1->picture_number;
|
yading@10
|
704 s->input_picture_number = s1->input_picture_number;
|
yading@10
|
705
|
yading@10
|
706 av_assert0(!s->picture || s->picture != s1->picture);
|
yading@10
|
707 if(s->picture)
|
yading@10
|
708 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
709 ff_mpeg_unref_picture(s, &s->picture[i]);
|
yading@10
|
710 if (s1->picture[i].f.data[0] &&
|
yading@10
|
711 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
|
yading@10
|
712 return ret;
|
yading@10
|
713 }
|
yading@10
|
714
|
yading@10
|
715 #define UPDATE_PICTURE(pic)\
|
yading@10
|
716 do {\
|
yading@10
|
717 ff_mpeg_unref_picture(s, &s->pic);\
|
yading@10
|
718 if (s1->pic.f.data[0])\
|
yading@10
|
719 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
|
yading@10
|
720 else\
|
yading@10
|
721 ret = update_picture_tables(&s->pic, &s1->pic);\
|
yading@10
|
722 if (ret < 0)\
|
yading@10
|
723 return ret;\
|
yading@10
|
724 } while (0)
|
yading@10
|
725
|
yading@10
|
726 UPDATE_PICTURE(current_picture);
|
yading@10
|
727 UPDATE_PICTURE(last_picture);
|
yading@10
|
728 UPDATE_PICTURE(next_picture);
|
yading@10
|
729
|
yading@10
|
730 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
|
yading@10
|
731 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
|
yading@10
|
732 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
|
yading@10
|
733
|
yading@10
|
734 // Error/bug resilience
|
yading@10
|
735 s->next_p_frame_damaged = s1->next_p_frame_damaged;
|
yading@10
|
736 s->workaround_bugs = s1->workaround_bugs;
|
yading@10
|
737 s->padding_bug_score = s1->padding_bug_score;
|
yading@10
|
738
|
yading@10
|
739 // MPEG4 timing info
|
yading@10
|
740 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
|
yading@10
|
741 (char *) &s1->shape - (char *) &s1->time_increment_bits);
|
yading@10
|
742
|
yading@10
|
743 // B-frame info
|
yading@10
|
744 s->max_b_frames = s1->max_b_frames;
|
yading@10
|
745 s->low_delay = s1->low_delay;
|
yading@10
|
746 s->droppable = s1->droppable;
|
yading@10
|
747
|
yading@10
|
748 // DivX handling (doesn't work)
|
yading@10
|
749 s->divx_packed = s1->divx_packed;
|
yading@10
|
750
|
yading@10
|
751 if (s1->bitstream_buffer) {
|
yading@10
|
752 if (s1->bitstream_buffer_size +
|
yading@10
|
753 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
|
yading@10
|
754 av_fast_malloc(&s->bitstream_buffer,
|
yading@10
|
755 &s->allocated_bitstream_buffer_size,
|
yading@10
|
756 s1->allocated_bitstream_buffer_size);
|
yading@10
|
757 s->bitstream_buffer_size = s1->bitstream_buffer_size;
|
yading@10
|
758 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
|
yading@10
|
759 s1->bitstream_buffer_size);
|
yading@10
|
760 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
|
yading@10
|
761 FF_INPUT_BUFFER_PADDING_SIZE);
|
yading@10
|
762 }
|
yading@10
|
763
|
yading@10
|
764 // linesize dependend scratch buffer allocation
|
yading@10
|
765 if (!s->edge_emu_buffer)
|
yading@10
|
766 if (s1->linesize) {
|
yading@10
|
767 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
|
yading@10
|
768 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
|
yading@10
|
769 "scratch buffers.\n");
|
yading@10
|
770 return AVERROR(ENOMEM);
|
yading@10
|
771 }
|
yading@10
|
772 } else {
|
yading@10
|
773 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
|
yading@10
|
774 "be allocated due to unknown size.\n");
|
yading@10
|
775 }
|
yading@10
|
776
|
yading@10
|
777 // MPEG2/interlacing info
|
yading@10
|
778 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
|
yading@10
|
779 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
|
yading@10
|
780
|
yading@10
|
781 if (!s1->first_field) {
|
yading@10
|
782 s->last_pict_type = s1->pict_type;
|
yading@10
|
783 if (s1->current_picture_ptr)
|
yading@10
|
784 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
|
yading@10
|
785
|
yading@10
|
786 if (s1->pict_type != AV_PICTURE_TYPE_B) {
|
yading@10
|
787 s->last_non_b_pict_type = s1->pict_type;
|
yading@10
|
788 }
|
yading@10
|
789 }
|
yading@10
|
790
|
yading@10
|
791 return 0;
|
yading@10
|
792 }
|
yading@10
|
793
|
yading@10
|
794 /**
|
yading@10
|
795 * Set the given MpegEncContext to common defaults
|
yading@10
|
796 * (same for encoding and decoding).
|
yading@10
|
797 * The changed fields will not depend upon the
|
yading@10
|
798 * prior state of the MpegEncContext.
|
yading@10
|
799 */
|
yading@10
|
800 void ff_MPV_common_defaults(MpegEncContext *s)
|
yading@10
|
801 {
|
yading@10
|
802 s->y_dc_scale_table =
|
yading@10
|
803 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
|
yading@10
|
804 s->chroma_qscale_table = ff_default_chroma_qscale_table;
|
yading@10
|
805 s->progressive_frame = 1;
|
yading@10
|
806 s->progressive_sequence = 1;
|
yading@10
|
807 s->picture_structure = PICT_FRAME;
|
yading@10
|
808
|
yading@10
|
809 s->coded_picture_number = 0;
|
yading@10
|
810 s->picture_number = 0;
|
yading@10
|
811 s->input_picture_number = 0;
|
yading@10
|
812
|
yading@10
|
813 s->picture_in_gop_number = 0;
|
yading@10
|
814
|
yading@10
|
815 s->f_code = 1;
|
yading@10
|
816 s->b_code = 1;
|
yading@10
|
817
|
yading@10
|
818 s->slice_context_count = 1;
|
yading@10
|
819 }
|
yading@10
|
820
|
yading@10
|
/**
 * Set the given MpegEncContext to defaults suitable for decoding.
 * The changed fields do not depend upon the prior state of the context.
 *
 * Currently identical to the common (encoder+decoder) defaults; kept as a
 * separate entry point so decoder-only defaults can be added later.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
|
yading@10
|
830
|
yading@10
|
831 static int init_er(MpegEncContext *s)
|
yading@10
|
832 {
|
yading@10
|
833 ERContext *er = &s->er;
|
yading@10
|
834 int mb_array_size = s->mb_height * s->mb_stride;
|
yading@10
|
835 int i;
|
yading@10
|
836
|
yading@10
|
837 er->avctx = s->avctx;
|
yading@10
|
838 er->dsp = &s->dsp;
|
yading@10
|
839
|
yading@10
|
840 er->mb_index2xy = s->mb_index2xy;
|
yading@10
|
841 er->mb_num = s->mb_num;
|
yading@10
|
842 er->mb_width = s->mb_width;
|
yading@10
|
843 er->mb_height = s->mb_height;
|
yading@10
|
844 er->mb_stride = s->mb_stride;
|
yading@10
|
845 er->b8_stride = s->b8_stride;
|
yading@10
|
846
|
yading@10
|
847 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
|
yading@10
|
848 er->error_status_table = av_mallocz(mb_array_size);
|
yading@10
|
849 if (!er->er_temp_buffer || !er->error_status_table)
|
yading@10
|
850 goto fail;
|
yading@10
|
851
|
yading@10
|
852 er->mbskip_table = s->mbskip_table;
|
yading@10
|
853 er->mbintra_table = s->mbintra_table;
|
yading@10
|
854
|
yading@10
|
855 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
|
yading@10
|
856 er->dc_val[i] = s->dc_val[i];
|
yading@10
|
857
|
yading@10
|
858 er->decode_mb = mpeg_er_decode_mb;
|
yading@10
|
859 er->opaque = s;
|
yading@10
|
860
|
yading@10
|
861 return 0;
|
yading@10
|
862 fail:
|
yading@10
|
863 av_freep(&er->er_temp_buffer);
|
yading@10
|
864 av_freep(&er->error_status_table);
|
yading@10
|
865 return AVERROR(ENOMEM);
|
yading@10
|
866 }
|
yading@10
|
867
|
yading@10
|
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * Derives the macroblock geometry from width/height and allocates every
 * per-frame table (MV tables, cbp/dc/pred tables, skip/intra maps, ...).
 * On failure, already-allocated tables are left for the caller to free
 * (ff_MPV_common_end() / free_context_frame() handle partial state).
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* macroblock geometry; strides include one extra column of padding */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    /* MV tables carry an extra border of two rows plus one entry */
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* row strides for the four luma 8x8 blocks (0-3) and two chroma blocks (4-5) */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    /* sizes (in elements) of the luma 8x8 grid, the chroma MB grid, and both */
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        /* the working pointers skip the padding row/column of the _base buffers */
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* per-MB complexity / bit statistics used by rate control */
        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor value (128 << 3) */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
|
yading@10
|
988
|
yading@10
|
989 /**
|
yading@10
|
990 * init common structure for both encoder and decoder.
|
yading@10
|
991 * this assumes that some variables like width/height are already set
|
yading@10
|
992 */
|
yading@10
|
993 av_cold int ff_MPV_common_init(MpegEncContext *s)
|
yading@10
|
994 {
|
yading@10
|
995 int i;
|
yading@10
|
996 int nb_slices = (HAVE_THREADS &&
|
yading@10
|
997 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
|
yading@10
|
998 s->avctx->thread_count : 1;
|
yading@10
|
999
|
yading@10
|
1000 if (s->encoding && s->avctx->slices)
|
yading@10
|
1001 nb_slices = s->avctx->slices;
|
yading@10
|
1002
|
yading@10
|
1003 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
|
yading@10
|
1004 s->mb_height = (s->height + 31) / 32 * 2;
|
yading@10
|
1005 else
|
yading@10
|
1006 s->mb_height = (s->height + 15) / 16;
|
yading@10
|
1007
|
yading@10
|
1008 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
|
yading@10
|
1009 av_log(s->avctx, AV_LOG_ERROR,
|
yading@10
|
1010 "decoding to AV_PIX_FMT_NONE is not supported.\n");
|
yading@10
|
1011 return -1;
|
yading@10
|
1012 }
|
yading@10
|
1013
|
yading@10
|
1014 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
|
yading@10
|
1015 int max_slices;
|
yading@10
|
1016 if (s->mb_height)
|
yading@10
|
1017 max_slices = FFMIN(MAX_THREADS, s->mb_height);
|
yading@10
|
1018 else
|
yading@10
|
1019 max_slices = MAX_THREADS;
|
yading@10
|
1020 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
|
yading@10
|
1021 " reducing to %d\n", nb_slices, max_slices);
|
yading@10
|
1022 nb_slices = max_slices;
|
yading@10
|
1023 }
|
yading@10
|
1024
|
yading@10
|
1025 if ((s->width || s->height) &&
|
yading@10
|
1026 av_image_check_size(s->width, s->height, 0, s->avctx))
|
yading@10
|
1027 return -1;
|
yading@10
|
1028
|
yading@10
|
1029 ff_dct_common_init(s);
|
yading@10
|
1030
|
yading@10
|
1031 s->flags = s->avctx->flags;
|
yading@10
|
1032 s->flags2 = s->avctx->flags2;
|
yading@10
|
1033
|
yading@10
|
1034 /* set chroma shifts */
|
yading@10
|
1035 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
|
yading@10
|
1036
|
yading@10
|
1037 /* convert fourcc to upper case */
|
yading@10
|
1038 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
|
yading@10
|
1039 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
|
yading@10
|
1040
|
yading@10
|
1041 s->avctx->coded_frame = &s->current_picture.f;
|
yading@10
|
1042
|
yading@10
|
1043 if (s->encoding) {
|
yading@10
|
1044 if (s->msmpeg4_version) {
|
yading@10
|
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
|
yading@10
|
1046 2 * 2 * (MAX_LEVEL + 1) *
|
yading@10
|
1047 (MAX_RUN + 1) * 2 * sizeof(int), fail);
|
yading@10
|
1048 }
|
yading@10
|
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
|
yading@10
|
1050
|
yading@10
|
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
|
yading@10
|
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
|
yading@10
|
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
|
yading@10
|
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
|
yading@10
|
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
|
yading@10
|
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
|
yading@10
|
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
|
yading@10
|
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
|
yading@10
|
1059
|
yading@10
|
1060 if (s->avctx->noise_reduction) {
|
yading@10
|
1061 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
|
yading@10
|
1062 }
|
yading@10
|
1063 }
|
yading@10
|
1064
|
yading@10
|
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
|
yading@10
|
1066 MAX_PICTURE_COUNT * sizeof(Picture), fail);
|
yading@10
|
1067 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1068 avcodec_get_frame_defaults(&s->picture[i].f);
|
yading@10
|
1069 }
|
yading@10
|
1070 memset(&s->next_picture, 0, sizeof(s->next_picture));
|
yading@10
|
1071 memset(&s->last_picture, 0, sizeof(s->last_picture));
|
yading@10
|
1072 memset(&s->current_picture, 0, sizeof(s->current_picture));
|
yading@10
|
1073 avcodec_get_frame_defaults(&s->next_picture.f);
|
yading@10
|
1074 avcodec_get_frame_defaults(&s->last_picture.f);
|
yading@10
|
1075 avcodec_get_frame_defaults(&s->current_picture.f);
|
yading@10
|
1076
|
yading@10
|
1077 if (init_context_frame(s))
|
yading@10
|
1078 goto fail;
|
yading@10
|
1079
|
yading@10
|
1080 s->parse_context.state = -1;
|
yading@10
|
1081
|
yading@10
|
1082 s->context_initialized = 1;
|
yading@10
|
1083 s->thread_context[0] = s;
|
yading@10
|
1084
|
yading@10
|
1085 // if (s->width && s->height) {
|
yading@10
|
1086 if (nb_slices > 1) {
|
yading@10
|
1087 for (i = 1; i < nb_slices; i++) {
|
yading@10
|
1088 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
|
yading@10
|
1089 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
|
yading@10
|
1090 }
|
yading@10
|
1091
|
yading@10
|
1092 for (i = 0; i < nb_slices; i++) {
|
yading@10
|
1093 if (init_duplicate_context(s->thread_context[i]) < 0)
|
yading@10
|
1094 goto fail;
|
yading@10
|
1095 s->thread_context[i]->start_mb_y =
|
yading@10
|
1096 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
|
yading@10
|
1097 s->thread_context[i]->end_mb_y =
|
yading@10
|
1098 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
|
yading@10
|
1099 }
|
yading@10
|
1100 } else {
|
yading@10
|
1101 if (init_duplicate_context(s) < 0)
|
yading@10
|
1102 goto fail;
|
yading@10
|
1103 s->start_mb_y = 0;
|
yading@10
|
1104 s->end_mb_y = s->mb_height;
|
yading@10
|
1105 }
|
yading@10
|
1106 s->slice_context_count = nb_slices;
|
yading@10
|
1107 // }
|
yading@10
|
1108
|
yading@10
|
1109 return 0;
|
yading@10
|
1110 fail:
|
yading@10
|
1111 ff_MPV_common_end(s);
|
yading@10
|
1112 return -1;
|
yading@10
|
1113 }
|
yading@10
|
1114
|
yading@10
|
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
 *
 * Inverse of init_context_frame(); safe on partially-initialized state since
 * av_freep() tolerates NULL pointers.
 *
 * @return 0 (always succeeds).
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    /* encoder MV tables allocated in init_context_frame() */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-_base pointers were offsets into the buffers just freed */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    /* interlaced field MV / select tables */
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* forces the linesize-dependent scratch buffers to be reallocated at
     * the new resolution */
    s->linesize = s->uvlinesize = 0;

    return 0;
}
|
yading@10
|
1170
|
yading@10
|
1171 int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
yading@10
|
1172 {
|
yading@10
|
1173 int i, err = 0;
|
yading@10
|
1174
|
yading@10
|
1175 if (s->slice_context_count > 1) {
|
yading@10
|
1176 for (i = 0; i < s->slice_context_count; i++) {
|
yading@10
|
1177 free_duplicate_context(s->thread_context[i]);
|
yading@10
|
1178 }
|
yading@10
|
1179 for (i = 1; i < s->slice_context_count; i++) {
|
yading@10
|
1180 av_freep(&s->thread_context[i]);
|
yading@10
|
1181 }
|
yading@10
|
1182 } else
|
yading@10
|
1183 free_duplicate_context(s);
|
yading@10
|
1184
|
yading@10
|
1185 if ((err = free_context_frame(s)) < 0)
|
yading@10
|
1186 return err;
|
yading@10
|
1187
|
yading@10
|
1188 if (s->picture)
|
yading@10
|
1189 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1190 s->picture[i].needs_realloc = 1;
|
yading@10
|
1191 }
|
yading@10
|
1192
|
yading@10
|
1193 s->last_picture_ptr =
|
yading@10
|
1194 s->next_picture_ptr =
|
yading@10
|
1195 s->current_picture_ptr = NULL;
|
yading@10
|
1196
|
yading@10
|
1197 // init
|
yading@10
|
1198 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
|
yading@10
|
1199 s->mb_height = (s->height + 31) / 32 * 2;
|
yading@10
|
1200 else
|
yading@10
|
1201 s->mb_height = (s->height + 15) / 16;
|
yading@10
|
1202
|
yading@10
|
1203 if ((s->width || s->height) &&
|
yading@10
|
1204 av_image_check_size(s->width, s->height, 0, s->avctx))
|
yading@10
|
1205 return AVERROR_INVALIDDATA;
|
yading@10
|
1206
|
yading@10
|
1207 if ((err = init_context_frame(s)))
|
yading@10
|
1208 goto fail;
|
yading@10
|
1209
|
yading@10
|
1210 s->thread_context[0] = s;
|
yading@10
|
1211
|
yading@10
|
1212 if (s->width && s->height) {
|
yading@10
|
1213 int nb_slices = s->slice_context_count;
|
yading@10
|
1214 if (nb_slices > 1) {
|
yading@10
|
1215 for (i = 1; i < nb_slices; i++) {
|
yading@10
|
1216 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
|
yading@10
|
1217 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
|
yading@10
|
1218 }
|
yading@10
|
1219
|
yading@10
|
1220 for (i = 0; i < nb_slices; i++) {
|
yading@10
|
1221 if (init_duplicate_context(s->thread_context[i]) < 0)
|
yading@10
|
1222 goto fail;
|
yading@10
|
1223 s->thread_context[i]->start_mb_y =
|
yading@10
|
1224 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
|
yading@10
|
1225 s->thread_context[i]->end_mb_y =
|
yading@10
|
1226 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
|
yading@10
|
1227 }
|
yading@10
|
1228 } else {
|
yading@10
|
1229 if (init_duplicate_context(s) < 0)
|
yading@10
|
1230 goto fail;
|
yading@10
|
1231 s->start_mb_y = 0;
|
yading@10
|
1232 s->end_mb_y = s->mb_height;
|
yading@10
|
1233 }
|
yading@10
|
1234 s->slice_context_count = nb_slices;
|
yading@10
|
1235 }
|
yading@10
|
1236
|
yading@10
|
1237 return 0;
|
yading@10
|
1238 fail:
|
yading@10
|
1239 ff_MPV_common_end(s);
|
yading@10
|
1240 return err;
|
yading@10
|
1241 }
|
yading@10
|
1242
|
yading@10
|
/* free the common structure set up by ff_MPV_common_init(); shared by
 * encoder and decoder (the old comment saying "init" was misleading) */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free the slice-thread duplicates (index 0 is s itself, not freed) */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    /* the chroma matrices may alias the luma ones; free them only when
     * they are separate allocations, then clear the (possibly dangling)
     * alias pointers */
    if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix= NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release every picture slot and its side-data tables */
    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);

    free_context_frame(s);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
|
yading@10
|
1303
|
yading@10
|
/**
 * Initialize the derived lookup tables (max_level[], max_run[], index_run[])
 * of an RLTable from its table_run[] / table_level[] definition.
 *
 * @param static_store if non-NULL, the three derived tables for each "last"
 *                     class are packed back-to-back into this buffer and the
 *                     function becomes a no-op once they are filled;
 *                     otherwise they are av_malloc()ed (no NULL check —
 *                     callers assume the tiny allocations succeed).
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* pass 0: codes before rl->last (not-last coefficients);
         * pass 1: the remaining "last coefficient" codes */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n == "no code with this run yet" */
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) /* first code with this run value */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* copy the results into the static buffer (three sub-arrays packed
         * at offsets 0, MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2) or into fresh
         * heap allocations */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
|
yading@10
|
1355
|
yading@10
|
1356 void ff_init_vlc_rl(RLTable *rl)
|
yading@10
|
1357 {
|
yading@10
|
1358 int i, q;
|
yading@10
|
1359
|
yading@10
|
1360 for (q = 0; q < 32; q++) {
|
yading@10
|
1361 int qmul = q * 2;
|
yading@10
|
1362 int qadd = (q - 1) | 1;
|
yading@10
|
1363
|
yading@10
|
1364 if (q == 0) {
|
yading@10
|
1365 qmul = 1;
|
yading@10
|
1366 qadd = 0;
|
yading@10
|
1367 }
|
yading@10
|
1368 for (i = 0; i < rl->vlc.table_size; i++) {
|
yading@10
|
1369 int code = rl->vlc.table[i][0];
|
yading@10
|
1370 int len = rl->vlc.table[i][1];
|
yading@10
|
1371 int level, run;
|
yading@10
|
1372
|
yading@10
|
1373 if (len == 0) { // illegal code
|
yading@10
|
1374 run = 66;
|
yading@10
|
1375 level = MAX_LEVEL;
|
yading@10
|
1376 } else if (len < 0) { // more bits needed
|
yading@10
|
1377 run = 0;
|
yading@10
|
1378 level = code;
|
yading@10
|
1379 } else {
|
yading@10
|
1380 if (code == rl->n) { // esc
|
yading@10
|
1381 run = 66;
|
yading@10
|
1382 level = 0;
|
yading@10
|
1383 } else {
|
yading@10
|
1384 run = rl->table_run[code] + 1;
|
yading@10
|
1385 level = rl->table_level[code] * qmul + qadd;
|
yading@10
|
1386 if (code >= rl->last) run += 192;
|
yading@10
|
1387 }
|
yading@10
|
1388 }
|
yading@10
|
1389 rl->rl_vlc[q][i].len = len;
|
yading@10
|
1390 rl->rl_vlc[q][i].level = level;
|
yading@10
|
1391 rl->rl_vlc[q][i].run = run;
|
yading@10
|
1392 }
|
yading@10
|
1393 }
|
yading@10
|
1394 }
|
yading@10
|
1395
|
yading@10
|
1396 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
|
yading@10
|
1397 {
|
yading@10
|
1398 int i;
|
yading@10
|
1399
|
yading@10
|
1400 /* release non reference frames */
|
yading@10
|
1401 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1402 if (!s->picture[i].reference &&
|
yading@10
|
1403 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
|
yading@10
|
1404 ff_mpeg_unref_picture(s, &s->picture[i]);
|
yading@10
|
1405 }
|
yading@10
|
1406 }
|
yading@10
|
1407 }
|
yading@10
|
1408
|
yading@10
|
1409 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
|
yading@10
|
1410 {
|
yading@10
|
1411 if (pic == s->last_picture_ptr)
|
yading@10
|
1412 return 0;
|
yading@10
|
1413 if (pic->f.data[0] == NULL)
|
yading@10
|
1414 return 1;
|
yading@10
|
1415 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
|
yading@10
|
1416 return 1;
|
yading@10
|
1417 return 0;
|
yading@10
|
1418 }
|
yading@10
|
1419
|
yading@10
|
1420 static int find_unused_picture(MpegEncContext *s, int shared)
|
yading@10
|
1421 {
|
yading@10
|
1422 int i;
|
yading@10
|
1423
|
yading@10
|
1424 if (shared) {
|
yading@10
|
1425 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1426 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
|
yading@10
|
1427 return i;
|
yading@10
|
1428 }
|
yading@10
|
1429 } else {
|
yading@10
|
1430 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1431 if (pic_is_unused(s, &s->picture[i]))
|
yading@10
|
1432 return i;
|
yading@10
|
1433 }
|
yading@10
|
1434 }
|
yading@10
|
1435
|
yading@10
|
1436 av_log(s->avctx, AV_LOG_FATAL,
|
yading@10
|
1437 "Internal error, picture buffer overflow\n");
|
yading@10
|
1438 /* We could return -1, but the codec would crash trying to draw into a
|
yading@10
|
1439 * non-existing frame anyway. This is safer than waiting for a random crash.
|
yading@10
|
1440 * Also the return of this is never useful, an encoder must only allocate
|
yading@10
|
1441 * as much as allowed in the specification. This has no relationship to how
|
yading@10
|
1442 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
|
yading@10
|
1443 * enough for such valid streams).
|
yading@10
|
1444 * Plus, a decoder has to check stream validity and remove frames if too
|
yading@10
|
1445 * many reference frames are around. Waiting for "OOM" is not correct at
|
yading@10
|
1446 * all. Similarly, missing reference frames have to be replaced by
|
yading@10
|
1447 * interpolated/MC frames, anything else is a bug in the codec ...
|
yading@10
|
1448 */
|
yading@10
|
1449 abort();
|
yading@10
|
1450 return -1;
|
yading@10
|
1451 }
|
yading@10
|
1452
|
yading@10
|
1453 int ff_find_unused_picture(MpegEncContext *s, int shared)
|
yading@10
|
1454 {
|
yading@10
|
1455 int ret = find_unused_picture(s, shared);
|
yading@10
|
1456
|
yading@10
|
1457 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
|
yading@10
|
1458 if (s->picture[ret].needs_realloc) {
|
yading@10
|
1459 s->picture[ret].needs_realloc = 0;
|
yading@10
|
1460 free_picture_tables(&s->picture[ret]);
|
yading@10
|
1461 ff_mpeg_unref_picture(s, &s->picture[ret]);
|
yading@10
|
1462 avcodec_get_frame_defaults(&s->picture[ret].f);
|
yading@10
|
1463 }
|
yading@10
|
1464 }
|
yading@10
|
1465 return ret;
|
yading@10
|
1466 }
|
yading@10
|
1467
|
yading@10
|
1468 static void update_noise_reduction(MpegEncContext *s)
|
yading@10
|
1469 {
|
yading@10
|
1470 int intra, i;
|
yading@10
|
1471
|
yading@10
|
1472 for (intra = 0; intra < 2; intra++) {
|
yading@10
|
1473 if (s->dct_count[intra] > (1 << 16)) {
|
yading@10
|
1474 for (i = 0; i < 64; i++) {
|
yading@10
|
1475 s->dct_error_sum[intra][i] >>= 1;
|
yading@10
|
1476 }
|
yading@10
|
1477 s->dct_count[intra] >>= 1;
|
yading@10
|
1478 }
|
yading@10
|
1479
|
yading@10
|
1480 for (i = 0; i < 64; i++) {
|
yading@10
|
1481 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
|
yading@10
|
1482 s->dct_count[intra] +
|
yading@10
|
1483 s->dct_error_sum[intra][i] / 2) /
|
yading@10
|
1484 (s->dct_error_sum[intra][i] + 1);
|
yading@10
|
1485 }
|
yading@10
|
1486 }
|
yading@10
|
1487 }
|
yading@10
|
1488
|
yading@10
|
1489 /**
|
yading@10
|
1490 * generic function for encode/decode called after coding/decoding
|
yading@10
|
1491 * the header and before a frame is coded/decoded.
|
yading@10
|
1492 */
|
yading@10
|
1493 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
yading@10
|
1494 {
|
yading@10
|
1495 int i, ret;
|
yading@10
|
1496 Picture *pic;
|
yading@10
|
1497 s->mb_skipped = 0;
|
yading@10
|
1498
|
yading@10
|
1499 if (!ff_thread_can_start_frame(avctx)) {
|
yading@10
|
1500 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
|
yading@10
|
1501 return -1;
|
yading@10
|
1502 }
|
yading@10
|
1503
|
yading@10
|
1504 /* mark & release old frames */
|
yading@10
|
1505 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
|
yading@10
|
1506 s->last_picture_ptr != s->next_picture_ptr &&
|
yading@10
|
1507 s->last_picture_ptr->f.data[0]) {
|
yading@10
|
1508 ff_mpeg_unref_picture(s, s->last_picture_ptr);
|
yading@10
|
1509 }
|
yading@10
|
1510
|
yading@10
|
1511 /* release forgotten pictures */
|
yading@10
|
1512 /* if (mpeg124/h263) */
|
yading@10
|
1513 if (!s->encoding) {
|
yading@10
|
1514 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1515 if (&s->picture[i] != s->last_picture_ptr &&
|
yading@10
|
1516 &s->picture[i] != s->next_picture_ptr &&
|
yading@10
|
1517 s->picture[i].reference && !s->picture[i].needs_realloc) {
|
yading@10
|
1518 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
|
yading@10
|
1519 av_log(avctx, AV_LOG_ERROR,
|
yading@10
|
1520 "releasing zombie picture\n");
|
yading@10
|
1521 ff_mpeg_unref_picture(s, &s->picture[i]);
|
yading@10
|
1522 }
|
yading@10
|
1523 }
|
yading@10
|
1524 }
|
yading@10
|
1525
|
yading@10
|
1526 if (!s->encoding) {
|
yading@10
|
1527 ff_release_unused_pictures(s, 1);
|
yading@10
|
1528
|
yading@10
|
1529 if (s->current_picture_ptr &&
|
yading@10
|
1530 s->current_picture_ptr->f.data[0] == NULL) {
|
yading@10
|
1531 // we already have a unused image
|
yading@10
|
1532 // (maybe it was set before reading the header)
|
yading@10
|
1533 pic = s->current_picture_ptr;
|
yading@10
|
1534 } else {
|
yading@10
|
1535 i = ff_find_unused_picture(s, 0);
|
yading@10
|
1536 if (i < 0) {
|
yading@10
|
1537 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
|
yading@10
|
1538 return i;
|
yading@10
|
1539 }
|
yading@10
|
1540 pic = &s->picture[i];
|
yading@10
|
1541 }
|
yading@10
|
1542
|
yading@10
|
1543 pic->reference = 0;
|
yading@10
|
1544 if (!s->droppable) {
|
yading@10
|
1545 if (s->pict_type != AV_PICTURE_TYPE_B)
|
yading@10
|
1546 pic->reference = 3;
|
yading@10
|
1547 }
|
yading@10
|
1548
|
yading@10
|
1549 pic->f.coded_picture_number = s->coded_picture_number++;
|
yading@10
|
1550
|
yading@10
|
1551 if (ff_alloc_picture(s, pic, 0) < 0)
|
yading@10
|
1552 return -1;
|
yading@10
|
1553
|
yading@10
|
1554 s->current_picture_ptr = pic;
|
yading@10
|
1555 // FIXME use only the vars from current_pic
|
yading@10
|
1556 s->current_picture_ptr->f.top_field_first = s->top_field_first;
|
yading@10
|
1557 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
|
yading@10
|
1558 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
yading@10
|
1559 if (s->picture_structure != PICT_FRAME)
|
yading@10
|
1560 s->current_picture_ptr->f.top_field_first =
|
yading@10
|
1561 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
|
yading@10
|
1562 }
|
yading@10
|
1563 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
|
yading@10
|
1564 !s->progressive_sequence;
|
yading@10
|
1565 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
|
yading@10
|
1566 }
|
yading@10
|
1567
|
yading@10
|
1568 s->current_picture_ptr->f.pict_type = s->pict_type;
|
yading@10
|
1569 // if (s->flags && CODEC_FLAG_QSCALE)
|
yading@10
|
1570 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
|
yading@10
|
1571 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
|
yading@10
|
1572
|
yading@10
|
1573 ff_mpeg_unref_picture(s, &s->current_picture);
|
yading@10
|
1574 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
|
yading@10
|
1575 s->current_picture_ptr)) < 0)
|
yading@10
|
1576 return ret;
|
yading@10
|
1577
|
yading@10
|
1578 if (s->pict_type != AV_PICTURE_TYPE_B) {
|
yading@10
|
1579 s->last_picture_ptr = s->next_picture_ptr;
|
yading@10
|
1580 if (!s->droppable)
|
yading@10
|
1581 s->next_picture_ptr = s->current_picture_ptr;
|
yading@10
|
1582 }
|
yading@10
|
1583 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
|
yading@10
|
1584 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
|
yading@10
|
1585 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
|
yading@10
|
1586 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
|
yading@10
|
1587 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
|
yading@10
|
1588 s->pict_type, s->droppable);
|
yading@10
|
1589
|
yading@10
|
1590 if ((s->last_picture_ptr == NULL ||
|
yading@10
|
1591 s->last_picture_ptr->f.data[0] == NULL) &&
|
yading@10
|
1592 (s->pict_type != AV_PICTURE_TYPE_I ||
|
yading@10
|
1593 s->picture_structure != PICT_FRAME)) {
|
yading@10
|
1594 int h_chroma_shift, v_chroma_shift;
|
yading@10
|
1595 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
|
yading@10
|
1596 &h_chroma_shift, &v_chroma_shift);
|
yading@10
|
1597 if (s->pict_type != AV_PICTURE_TYPE_I)
|
yading@10
|
1598 av_log(avctx, AV_LOG_ERROR,
|
yading@10
|
1599 "warning: first frame is no keyframe\n");
|
yading@10
|
1600 else if (s->picture_structure != PICT_FRAME)
|
yading@10
|
1601 av_log(avctx, AV_LOG_INFO,
|
yading@10
|
1602 "allocate dummy last picture for field based first keyframe\n");
|
yading@10
|
1603
|
yading@10
|
1604 /* Allocate a dummy frame */
|
yading@10
|
1605 i = ff_find_unused_picture(s, 0);
|
yading@10
|
1606 if (i < 0) {
|
yading@10
|
1607 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
|
yading@10
|
1608 return i;
|
yading@10
|
1609 }
|
yading@10
|
1610 s->last_picture_ptr = &s->picture[i];
|
yading@10
|
1611 s->last_picture_ptr->f.key_frame = 0;
|
yading@10
|
1612 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
|
yading@10
|
1613 s->last_picture_ptr = NULL;
|
yading@10
|
1614 return -1;
|
yading@10
|
1615 }
|
yading@10
|
1616
|
yading@10
|
1617 memset(s->last_picture_ptr->f.data[0], 0x80,
|
yading@10
|
1618 avctx->height * s->last_picture_ptr->f.linesize[0]);
|
yading@10
|
1619 memset(s->last_picture_ptr->f.data[1], 0x80,
|
yading@10
|
1620 (avctx->height >> v_chroma_shift) *
|
yading@10
|
1621 s->last_picture_ptr->f.linesize[1]);
|
yading@10
|
1622 memset(s->last_picture_ptr->f.data[2], 0x80,
|
yading@10
|
1623 (avctx->height >> v_chroma_shift) *
|
yading@10
|
1624 s->last_picture_ptr->f.linesize[2]);
|
yading@10
|
1625
|
yading@10
|
1626 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
|
yading@10
|
1627 for(i=0; i<avctx->height; i++)
|
yading@10
|
1628 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
|
yading@10
|
1629 }
|
yading@10
|
1630
|
yading@10
|
1631 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
|
yading@10
|
1632 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
|
yading@10
|
1633 }
|
yading@10
|
1634 if ((s->next_picture_ptr == NULL ||
|
yading@10
|
1635 s->next_picture_ptr->f.data[0] == NULL) &&
|
yading@10
|
1636 s->pict_type == AV_PICTURE_TYPE_B) {
|
yading@10
|
1637 /* Allocate a dummy frame */
|
yading@10
|
1638 i = ff_find_unused_picture(s, 0);
|
yading@10
|
1639 if (i < 0) {
|
yading@10
|
1640 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
|
yading@10
|
1641 return i;
|
yading@10
|
1642 }
|
yading@10
|
1643 s->next_picture_ptr = &s->picture[i];
|
yading@10
|
1644 s->next_picture_ptr->f.key_frame = 0;
|
yading@10
|
1645 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
|
yading@10
|
1646 s->next_picture_ptr = NULL;
|
yading@10
|
1647 return -1;
|
yading@10
|
1648 }
|
yading@10
|
1649 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
|
yading@10
|
1650 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
|
yading@10
|
1651 }
|
yading@10
|
1652
|
yading@10
|
1653 #if 0 // BUFREF-FIXME
|
yading@10
|
1654 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
|
yading@10
|
1655 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
|
yading@10
|
1656 #endif
|
yading@10
|
1657 if (s->last_picture_ptr) {
|
yading@10
|
1658 ff_mpeg_unref_picture(s, &s->last_picture);
|
yading@10
|
1659 if (s->last_picture_ptr->f.data[0] &&
|
yading@10
|
1660 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
|
yading@10
|
1661 s->last_picture_ptr)) < 0)
|
yading@10
|
1662 return ret;
|
yading@10
|
1663 }
|
yading@10
|
1664 if (s->next_picture_ptr) {
|
yading@10
|
1665 ff_mpeg_unref_picture(s, &s->next_picture);
|
yading@10
|
1666 if (s->next_picture_ptr->f.data[0] &&
|
yading@10
|
1667 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
|
yading@10
|
1668 s->next_picture_ptr)) < 0)
|
yading@10
|
1669 return ret;
|
yading@10
|
1670 }
|
yading@10
|
1671
|
yading@10
|
1672 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
|
yading@10
|
1673 s->last_picture_ptr->f.data[0]));
|
yading@10
|
1674
|
yading@10
|
1675 if (s->picture_structure!= PICT_FRAME) {
|
yading@10
|
1676 int i;
|
yading@10
|
1677 for (i = 0; i < 4; i++) {
|
yading@10
|
1678 if (s->picture_structure == PICT_BOTTOM_FIELD) {
|
yading@10
|
1679 s->current_picture.f.data[i] +=
|
yading@10
|
1680 s->current_picture.f.linesize[i];
|
yading@10
|
1681 }
|
yading@10
|
1682 s->current_picture.f.linesize[i] *= 2;
|
yading@10
|
1683 s->last_picture.f.linesize[i] *= 2;
|
yading@10
|
1684 s->next_picture.f.linesize[i] *= 2;
|
yading@10
|
1685 }
|
yading@10
|
1686 }
|
yading@10
|
1687
|
yading@10
|
1688 s->err_recognition = avctx->err_recognition;
|
yading@10
|
1689
|
yading@10
|
1690 /* set dequantizer, we can't do it during init as
|
yading@10
|
1691 * it might change for mpeg4 and we can't do it in the header
|
yading@10
|
1692 * decode as init is not called for mpeg4 there yet */
|
yading@10
|
1693 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
yading@10
|
1694 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
|
yading@10
|
1695 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
|
yading@10
|
1696 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
|
yading@10
|
1697 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
|
yading@10
|
1698 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
|
yading@10
|
1699 } else {
|
yading@10
|
1700 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
|
yading@10
|
1701 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
|
yading@10
|
1702 }
|
yading@10
|
1703
|
yading@10
|
1704 if (s->dct_error_sum) {
|
yading@10
|
1705 av_assert2(s->avctx->noise_reduction && s->encoding);
|
yading@10
|
1706 update_noise_reduction(s);
|
yading@10
|
1707 }
|
yading@10
|
1708
|
yading@10
|
1709 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
|
yading@10
|
1710 return ff_xvmc_field_start(s, avctx);
|
yading@10
|
1711
|
yading@10
|
1712 return 0;
|
yading@10
|
1713 }
|
yading@10
|
1714
|
yading@10
|
1715 /* generic function for encode/decode called after a
|
yading@10
|
1716 * frame has been coded/decoded. */
|
yading@10
|
1717 void ff_MPV_frame_end(MpegEncContext *s)
|
yading@10
|
1718 {
|
yading@10
|
1719 int i;
|
yading@10
|
1720 /* redraw edges for the frame if decoding didn't complete */
|
yading@10
|
1721 // just to make sure that all data is rendered.
|
yading@10
|
1722 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
|
yading@10
|
1723 ff_xvmc_field_end(s);
|
yading@10
|
1724 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
|
yading@10
|
1725 !s->avctx->hwaccel &&
|
yading@10
|
1726 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
|
yading@10
|
1727 s->unrestricted_mv &&
|
yading@10
|
1728 s->current_picture.reference &&
|
yading@10
|
1729 !s->intra_only &&
|
yading@10
|
1730 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
|
yading@10
|
1731 !s->avctx->lowres
|
yading@10
|
1732 ) {
|
yading@10
|
1733 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
|
yading@10
|
1734 int hshift = desc->log2_chroma_w;
|
yading@10
|
1735 int vshift = desc->log2_chroma_h;
|
yading@10
|
1736 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
|
yading@10
|
1737 s->h_edge_pos, s->v_edge_pos,
|
yading@10
|
1738 EDGE_WIDTH, EDGE_WIDTH,
|
yading@10
|
1739 EDGE_TOP | EDGE_BOTTOM);
|
yading@10
|
1740 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
|
yading@10
|
1741 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
|
yading@10
|
1742 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
|
yading@10
|
1743 EDGE_TOP | EDGE_BOTTOM);
|
yading@10
|
1744 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
|
yading@10
|
1745 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
|
yading@10
|
1746 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
|
yading@10
|
1747 EDGE_TOP | EDGE_BOTTOM);
|
yading@10
|
1748 }
|
yading@10
|
1749
|
yading@10
|
1750 emms_c();
|
yading@10
|
1751
|
yading@10
|
1752 s->last_pict_type = s->pict_type;
|
yading@10
|
1753 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
|
yading@10
|
1754 if (s->pict_type!= AV_PICTURE_TYPE_B) {
|
yading@10
|
1755 s->last_non_b_pict_type = s->pict_type;
|
yading@10
|
1756 }
|
yading@10
|
1757 #if 0
|
yading@10
|
1758 /* copy back current_picture variables */
|
yading@10
|
1759 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1760 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
|
yading@10
|
1761 s->picture[i] = s->current_picture;
|
yading@10
|
1762 break;
|
yading@10
|
1763 }
|
yading@10
|
1764 }
|
yading@10
|
1765 assert(i < MAX_PICTURE_COUNT);
|
yading@10
|
1766 #endif
|
yading@10
|
1767
|
yading@10
|
1768 if (s->encoding) {
|
yading@10
|
1769 /* release non-reference frames */
|
yading@10
|
1770 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
|
yading@10
|
1771 if (!s->picture[i].reference)
|
yading@10
|
1772 ff_mpeg_unref_picture(s, &s->picture[i]);
|
yading@10
|
1773 }
|
yading@10
|
1774 }
|
yading@10
|
1775 // clear copies, to avoid confusion
|
yading@10
|
1776 #if 0
|
yading@10
|
1777 memset(&s->last_picture, 0, sizeof(Picture));
|
yading@10
|
1778 memset(&s->next_picture, 0, sizeof(Picture));
|
yading@10
|
1779 memset(&s->current_picture, 0, sizeof(Picture));
|
yading@10
|
1780 #endif
|
yading@10
|
1781 s->avctx->coded_frame = &s->current_picture_ptr->f;
|
yading@10
|
1782
|
yading@10
|
1783 if (s->current_picture.reference)
|
yading@10
|
1784 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
|
yading@10
|
1785 }
|
yading@10
|
1786
|
yading@10
|
1787 /**
|
yading@10
|
1788 * Draw a line from (ex, ey) -> (sx, sy).
|
yading@10
|
1789 * @param w width of the image
|
yading@10
|
1790 * @param h height of the image
|
yading@10
|
1791 * @param stride stride/linesize of the image
|
yading@10
|
1792 * @param color color of the arrow
|
yading@10
|
1793 */
|
yading@10
|
1794 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
|
yading@10
|
1795 int w, int h, int stride, int color)
|
yading@10
|
1796 {
|
yading@10
|
1797 int x, y, fr, f;
|
yading@10
|
1798
|
yading@10
|
1799 sx = av_clip(sx, 0, w - 1);
|
yading@10
|
1800 sy = av_clip(sy, 0, h - 1);
|
yading@10
|
1801 ex = av_clip(ex, 0, w - 1);
|
yading@10
|
1802 ey = av_clip(ey, 0, h - 1);
|
yading@10
|
1803
|
yading@10
|
1804 buf[sy * stride + sx] += color;
|
yading@10
|
1805
|
yading@10
|
1806 if (FFABS(ex - sx) > FFABS(ey - sy)) {
|
yading@10
|
1807 if (sx > ex) {
|
yading@10
|
1808 FFSWAP(int, sx, ex);
|
yading@10
|
1809 FFSWAP(int, sy, ey);
|
yading@10
|
1810 }
|
yading@10
|
1811 buf += sx + sy * stride;
|
yading@10
|
1812 ex -= sx;
|
yading@10
|
1813 f = ((ey - sy) << 16) / ex;
|
yading@10
|
1814 for (x = 0; x <= ex; x++) {
|
yading@10
|
1815 y = (x * f) >> 16;
|
yading@10
|
1816 fr = (x * f) & 0xFFFF;
|
yading@10
|
1817 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
|
yading@10
|
1818 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
|
yading@10
|
1819 }
|
yading@10
|
1820 } else {
|
yading@10
|
1821 if (sy > ey) {
|
yading@10
|
1822 FFSWAP(int, sx, ex);
|
yading@10
|
1823 FFSWAP(int, sy, ey);
|
yading@10
|
1824 }
|
yading@10
|
1825 buf += sx + sy * stride;
|
yading@10
|
1826 ey -= sy;
|
yading@10
|
1827 if (ey)
|
yading@10
|
1828 f = ((ex - sx) << 16) / ey;
|
yading@10
|
1829 else
|
yading@10
|
1830 f = 0;
|
yading@10
|
1831 for(y= 0; y <= ey; y++){
|
yading@10
|
1832 x = (y*f) >> 16;
|
yading@10
|
1833 fr = (y*f) & 0xFFFF;
|
yading@10
|
1834 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
|
yading@10
|
1835 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
|
yading@10
|
1836 }
|
yading@10
|
1837 }
|
yading@10
|
1838 }
|
yading@10
|
1839
|
yading@10
|
1840 /**
|
yading@10
|
1841 * Draw an arrow from (ex, ey) -> (sx, sy).
|
yading@10
|
1842 * @param w width of the image
|
yading@10
|
1843 * @param h height of the image
|
yading@10
|
1844 * @param stride stride/linesize of the image
|
yading@10
|
1845 * @param color color of the arrow
|
yading@10
|
1846 */
|
yading@10
|
1847 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
|
yading@10
|
1848 int ey, int w, int h, int stride, int color)
|
yading@10
|
1849 {
|
yading@10
|
1850 int dx,dy;
|
yading@10
|
1851
|
yading@10
|
1852 sx = av_clip(sx, -100, w + 100);
|
yading@10
|
1853 sy = av_clip(sy, -100, h + 100);
|
yading@10
|
1854 ex = av_clip(ex, -100, w + 100);
|
yading@10
|
1855 ey = av_clip(ey, -100, h + 100);
|
yading@10
|
1856
|
yading@10
|
1857 dx = ex - sx;
|
yading@10
|
1858 dy = ey - sy;
|
yading@10
|
1859
|
yading@10
|
1860 if (dx * dx + dy * dy > 3 * 3) {
|
yading@10
|
1861 int rx = dx + dy;
|
yading@10
|
1862 int ry = -dx + dy;
|
yading@10
|
1863 int length = ff_sqrt((rx * rx + ry * ry) << 8);
|
yading@10
|
1864
|
yading@10
|
1865 // FIXME subpixel accuracy
|
yading@10
|
1866 rx = ROUNDED_DIV(rx * 3 << 4, length);
|
yading@10
|
1867 ry = ROUNDED_DIV(ry * 3 << 4, length);
|
yading@10
|
1868
|
yading@10
|
1869 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
|
yading@10
|
1870 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
|
yading@10
|
1871 }
|
yading@10
|
1872 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
|
yading@10
|
1873 }
|
yading@10
|
1874
|
yading@10
|
1875 /**
|
yading@10
|
1876 * Print debugging info for the given picture.
|
yading@10
|
1877 */
|
yading@10
|
1878 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
|
yading@10
|
1879 int *low_delay,
|
yading@10
|
1880 int mb_width, int mb_height, int mb_stride, int quarter_sample)
|
yading@10
|
1881 {
|
yading@10
|
1882 if (avctx->hwaccel || !p || !p->mb_type
|
yading@10
|
1883 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
|
yading@10
|
1884 return;
|
yading@10
|
1885
|
yading@10
|
1886
|
yading@10
|
1887 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
|
yading@10
|
1888 int x,y;
|
yading@10
|
1889
|
yading@10
|
1890 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
|
yading@10
|
1891 av_get_picture_type_char(pict->pict_type));
|
yading@10
|
1892 for (y = 0; y < mb_height; y++) {
|
yading@10
|
1893 for (x = 0; x < mb_width; x++) {
|
yading@10
|
1894 if (avctx->debug & FF_DEBUG_SKIP) {
|
yading@10
|
1895 int count = mbskip_table[x + y * mb_stride];
|
yading@10
|
1896 if (count > 9)
|
yading@10
|
1897 count = 9;
|
yading@10
|
1898 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
|
yading@10
|
1899 }
|
yading@10
|
1900 if (avctx->debug & FF_DEBUG_QP) {
|
yading@10
|
1901 av_log(avctx, AV_LOG_DEBUG, "%2d",
|
yading@10
|
1902 p->qscale_table[x + y * mb_stride]);
|
yading@10
|
1903 }
|
yading@10
|
1904 if (avctx->debug & FF_DEBUG_MB_TYPE) {
|
yading@10
|
1905 int mb_type = p->mb_type[x + y * mb_stride];
|
yading@10
|
1906 // Type & MV direction
|
yading@10
|
1907 if (IS_PCM(mb_type))
|
yading@10
|
1908 av_log(avctx, AV_LOG_DEBUG, "P");
|
yading@10
|
1909 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
|
yading@10
|
1910 av_log(avctx, AV_LOG_DEBUG, "A");
|
yading@10
|
1911 else if (IS_INTRA4x4(mb_type))
|
yading@10
|
1912 av_log(avctx, AV_LOG_DEBUG, "i");
|
yading@10
|
1913 else if (IS_INTRA16x16(mb_type))
|
yading@10
|
1914 av_log(avctx, AV_LOG_DEBUG, "I");
|
yading@10
|
1915 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
|
yading@10
|
1916 av_log(avctx, AV_LOG_DEBUG, "d");
|
yading@10
|
1917 else if (IS_DIRECT(mb_type))
|
yading@10
|
1918 av_log(avctx, AV_LOG_DEBUG, "D");
|
yading@10
|
1919 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
|
yading@10
|
1920 av_log(avctx, AV_LOG_DEBUG, "g");
|
yading@10
|
1921 else if (IS_GMC(mb_type))
|
yading@10
|
1922 av_log(avctx, AV_LOG_DEBUG, "G");
|
yading@10
|
1923 else if (IS_SKIP(mb_type))
|
yading@10
|
1924 av_log(avctx, AV_LOG_DEBUG, "S");
|
yading@10
|
1925 else if (!USES_LIST(mb_type, 1))
|
yading@10
|
1926 av_log(avctx, AV_LOG_DEBUG, ">");
|
yading@10
|
1927 else if (!USES_LIST(mb_type, 0))
|
yading@10
|
1928 av_log(avctx, AV_LOG_DEBUG, "<");
|
yading@10
|
1929 else {
|
yading@10
|
1930 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
|
yading@10
|
1931 av_log(avctx, AV_LOG_DEBUG, "X");
|
yading@10
|
1932 }
|
yading@10
|
1933
|
yading@10
|
1934 // segmentation
|
yading@10
|
1935 if (IS_8X8(mb_type))
|
yading@10
|
1936 av_log(avctx, AV_LOG_DEBUG, "+");
|
yading@10
|
1937 else if (IS_16X8(mb_type))
|
yading@10
|
1938 av_log(avctx, AV_LOG_DEBUG, "-");
|
yading@10
|
1939 else if (IS_8X16(mb_type))
|
yading@10
|
1940 av_log(avctx, AV_LOG_DEBUG, "|");
|
yading@10
|
1941 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
|
yading@10
|
1942 av_log(avctx, AV_LOG_DEBUG, " ");
|
yading@10
|
1943 else
|
yading@10
|
1944 av_log(avctx, AV_LOG_DEBUG, "?");
|
yading@10
|
1945
|
yading@10
|
1946
|
yading@10
|
1947 if (IS_INTERLACED(mb_type))
|
yading@10
|
1948 av_log(avctx, AV_LOG_DEBUG, "=");
|
yading@10
|
1949 else
|
yading@10
|
1950 av_log(avctx, AV_LOG_DEBUG, " ");
|
yading@10
|
1951 }
|
yading@10
|
1952 }
|
yading@10
|
1953 av_log(avctx, AV_LOG_DEBUG, "\n");
|
yading@10
|
1954 }
|
yading@10
|
1955 }
|
yading@10
|
1956
|
yading@10
|
1957 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
|
yading@10
|
1958 (avctx->debug_mv)) {
|
yading@10
|
1959 const int shift = 1 + quarter_sample;
|
yading@10
|
1960 int mb_y;
|
yading@10
|
1961 uint8_t *ptr;
|
yading@10
|
1962 int i;
|
yading@10
|
1963 int h_chroma_shift, v_chroma_shift, block_height;
|
yading@10
|
1964 const int width = avctx->width;
|
yading@10
|
1965 const int height = avctx->height;
|
yading@10
|
1966 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
|
yading@10
|
1967 const int mv_stride = (mb_width << mv_sample_log2) +
|
yading@10
|
1968 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
|
yading@10
|
1969
|
yading@10
|
1970 *low_delay = 0; // needed to see the vectors without trashing the buffers
|
yading@10
|
1971
|
yading@10
|
1972 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
|
yading@10
|
1973
|
yading@10
|
1974 av_frame_make_writable(pict);
|
yading@10
|
1975
|
yading@10
|
1976 pict->opaque = NULL;
|
yading@10
|
1977 ptr = pict->data[0];
|
yading@10
|
1978 block_height = 16 >> v_chroma_shift;
|
yading@10
|
1979
|
yading@10
|
1980 for (mb_y = 0; mb_y < mb_height; mb_y++) {
|
yading@10
|
1981 int mb_x;
|
yading@10
|
1982 for (mb_x = 0; mb_x < mb_width; mb_x++) {
|
yading@10
|
1983 const int mb_index = mb_x + mb_y * mb_stride;
|
yading@10
|
1984 if ((avctx->debug_mv) && p->motion_val[0]) {
|
yading@10
|
1985 int type;
|
yading@10
|
1986 for (type = 0; type < 3; type++) {
|
yading@10
|
1987 int direction = 0;
|
yading@10
|
1988 switch (type) {
|
yading@10
|
1989 case 0:
|
yading@10
|
1990 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
|
yading@10
|
1991 (pict->pict_type!= AV_PICTURE_TYPE_P))
|
yading@10
|
1992 continue;
|
yading@10
|
1993 direction = 0;
|
yading@10
|
1994 break;
|
yading@10
|
1995 case 1:
|
yading@10
|
1996 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
|
yading@10
|
1997 (pict->pict_type!= AV_PICTURE_TYPE_B))
|
yading@10
|
1998 continue;
|
yading@10
|
1999 direction = 0;
|
yading@10
|
2000 break;
|
yading@10
|
2001 case 2:
|
yading@10
|
2002 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
|
yading@10
|
2003 (pict->pict_type!= AV_PICTURE_TYPE_B))
|
yading@10
|
2004 continue;
|
yading@10
|
2005 direction = 1;
|
yading@10
|
2006 break;
|
yading@10
|
2007 }
|
yading@10
|
2008 if (!USES_LIST(p->mb_type[mb_index], direction))
|
yading@10
|
2009 continue;
|
yading@10
|
2010
|
yading@10
|
2011 if (IS_8X8(p->mb_type[mb_index])) {
|
yading@10
|
2012 int i;
|
yading@10
|
2013 for (i = 0; i < 4; i++) {
|
yading@10
|
2014 int sx = mb_x * 16 + 4 + 8 * (i & 1);
|
yading@10
|
2015 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
|
yading@10
|
2016 int xy = (mb_x * 2 + (i & 1) +
|
yading@10
|
2017 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
|
yading@10
|
2018 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
|
yading@10
|
2019 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
|
yading@10
|
2020 draw_arrow(ptr, sx, sy, mx, my, width,
|
yading@10
|
2021 height, pict->linesize[0], 100);
|
yading@10
|
2022 }
|
yading@10
|
2023 } else if (IS_16X8(p->mb_type[mb_index])) {
|
yading@10
|
2024 int i;
|
yading@10
|
2025 for (i = 0; i < 2; i++) {
|
yading@10
|
2026 int sx = mb_x * 16 + 8;
|
yading@10
|
2027 int sy = mb_y * 16 + 4 + 8 * i;
|
yading@10
|
2028 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
|
yading@10
|
2029 int mx = (p->motion_val[direction][xy][0] >> shift);
|
yading@10
|
2030 int my = (p->motion_val[direction][xy][1] >> shift);
|
yading@10
|
2031
|
yading@10
|
2032 if (IS_INTERLACED(p->mb_type[mb_index]))
|
yading@10
|
2033 my *= 2;
|
yading@10
|
2034
|
yading@10
|
2035 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
|
yading@10
|
2036 height, pict->linesize[0], 100);
|
yading@10
|
2037 }
|
yading@10
|
2038 } else if (IS_8X16(p->mb_type[mb_index])) {
|
yading@10
|
2039 int i;
|
yading@10
|
2040 for (i = 0; i < 2; i++) {
|
yading@10
|
2041 int sx = mb_x * 16 + 4 + 8 * i;
|
yading@10
|
2042 int sy = mb_y * 16 + 8;
|
yading@10
|
2043 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
|
yading@10
|
2044 int mx = p->motion_val[direction][xy][0] >> shift;
|
yading@10
|
2045 int my = p->motion_val[direction][xy][1] >> shift;
|
yading@10
|
2046
|
yading@10
|
2047 if (IS_INTERLACED(p->mb_type[mb_index]))
|
yading@10
|
2048 my *= 2;
|
yading@10
|
2049
|
yading@10
|
2050 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
|
yading@10
|
2051 height, pict->linesize[0], 100);
|
yading@10
|
2052 }
|
yading@10
|
2053 } else {
|
yading@10
|
2054 int sx= mb_x * 16 + 8;
|
yading@10
|
2055 int sy= mb_y * 16 + 8;
|
yading@10
|
2056 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
|
yading@10
|
2057 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
|
yading@10
|
2058 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
|
yading@10
|
2059 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
|
yading@10
|
2060 }
|
yading@10
|
2061 }
|
yading@10
|
2062 }
|
yading@10
|
2063 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
|
yading@10
|
2064 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
|
yading@10
|
2065 0x0101010101010101ULL;
|
yading@10
|
2066 int y;
|
yading@10
|
2067 for (y = 0; y < block_height; y++) {
|
yading@10
|
2068 *(uint64_t *)(pict->data[1] + 8 * mb_x +
|
yading@10
|
2069 (block_height * mb_y + y) *
|
yading@10
|
2070 pict->linesize[1]) = c;
|
yading@10
|
2071 *(uint64_t *)(pict->data[2] + 8 * mb_x +
|
yading@10
|
2072 (block_height * mb_y + y) *
|
yading@10
|
2073 pict->linesize[2]) = c;
|
yading@10
|
2074 }
|
yading@10
|
2075 }
|
yading@10
|
2076 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
|
yading@10
|
2077 p->motion_val[0]) {
|
yading@10
|
2078 int mb_type = p->mb_type[mb_index];
|
yading@10
|
2079 uint64_t u,v;
|
yading@10
|
2080 int y;
|
yading@10
|
2081 #define COLOR(theta, r) \
|
yading@10
|
2082 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
|
yading@10
|
2083 v = (int)(128 + r * sin(theta * 3.141592 / 180));
|
yading@10
|
2084
|
yading@10
|
2085
|
yading@10
|
2086 u = v = 128;
|
yading@10
|
2087 if (IS_PCM(mb_type)) {
|
yading@10
|
2088 COLOR(120, 48)
|
yading@10
|
2089 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
|
yading@10
|
2090 IS_INTRA16x16(mb_type)) {
|
yading@10
|
2091 COLOR(30, 48)
|
yading@10
|
2092 } else if (IS_INTRA4x4(mb_type)) {
|
yading@10
|
2093 COLOR(90, 48)
|
yading@10
|
2094 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
|
yading@10
|
2095 // COLOR(120, 48)
|
yading@10
|
2096 } else if (IS_DIRECT(mb_type)) {
|
yading@10
|
2097 COLOR(150, 48)
|
yading@10
|
2098 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
|
yading@10
|
2099 COLOR(170, 48)
|
yading@10
|
2100 } else if (IS_GMC(mb_type)) {
|
yading@10
|
2101 COLOR(190, 48)
|
yading@10
|
2102 } else if (IS_SKIP(mb_type)) {
|
yading@10
|
2103 // COLOR(180, 48)
|
yading@10
|
2104 } else if (!USES_LIST(mb_type, 1)) {
|
yading@10
|
2105 COLOR(240, 48)
|
yading@10
|
2106 } else if (!USES_LIST(mb_type, 0)) {
|
yading@10
|
2107 COLOR(0, 48)
|
yading@10
|
2108 } else {
|
yading@10
|
2109 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
|
yading@10
|
2110 COLOR(300,48)
|
yading@10
|
2111 }
|
yading@10
|
2112
|
yading@10
|
2113 u *= 0x0101010101010101ULL;
|
yading@10
|
2114 v *= 0x0101010101010101ULL;
|
yading@10
|
2115 for (y = 0; y < block_height; y++) {
|
yading@10
|
2116 *(uint64_t *)(pict->data[1] + 8 * mb_x +
|
yading@10
|
2117 (block_height * mb_y + y) * pict->linesize[1]) = u;
|
yading@10
|
2118 *(uint64_t *)(pict->data[2] + 8 * mb_x +
|
yading@10
|
2119 (block_height * mb_y + y) * pict->linesize[2]) = v;
|
yading@10
|
2120 }
|
yading@10
|
2121
|
yading@10
|
2122 // segmentation
|
yading@10
|
2123 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
|
yading@10
|
2124 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
|
yading@10
|
2125 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
|
yading@10
|
2126 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
|
yading@10
|
2127 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
|
yading@10
|
2128 }
|
yading@10
|
2129 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
|
yading@10
|
2130 for (y = 0; y < 16; y++)
|
yading@10
|
2131 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
|
yading@10
|
2132 pict->linesize[0]] ^= 0x80;
|
yading@10
|
2133 }
|
yading@10
|
2134 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
|
yading@10
|
2135 int dm = 1 << (mv_sample_log2 - 2);
|
yading@10
|
2136 for (i = 0; i < 4; i++) {
|
yading@10
|
2137 int sx = mb_x * 16 + 8 * (i & 1);
|
yading@10
|
2138 int sy = mb_y * 16 + 8 * (i >> 1);
|
yading@10
|
2139 int xy = (mb_x * 2 + (i & 1) +
|
yading@10
|
2140 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
|
yading@10
|
2141 // FIXME bidir
|
yading@10
|
2142 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
|
yading@10
|
2143 if (mv[0] != mv[dm] ||
|
yading@10
|
2144 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
|
yading@10
|
2145 for (y = 0; y < 8; y++)
|
yading@10
|
2146 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
|
yading@10
|
2147 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
|
yading@10
|
2148 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
|
yading@10
|
2149 pict->linesize[0]) ^= 0x8080808080808080ULL;
|
yading@10
|
2150 }
|
yading@10
|
2151 }
|
yading@10
|
2152
|
yading@10
|
2153 if (IS_INTERLACED(mb_type) &&
|
yading@10
|
2154 avctx->codec->id == AV_CODEC_ID_H264) {
|
yading@10
|
2155 // hmm
|
yading@10
|
2156 }
|
yading@10
|
2157 }
|
yading@10
|
2158 mbskip_table[mb_index] = 0;
|
yading@10
|
2159 }
|
yading@10
|
2160 }
|
yading@10
|
2161 }
|
yading@10
|
2162 }
|
yading@10
|
2163
|
yading@10
|
/**
 * Print per-macroblock debug information for the given picture.
 *
 * Thin wrapper that forwards the relevant MpegEncContext fields
 * (mb skip table, geometry, stride, quarter-sample flag) to
 * ff_print_debug_info2().
 *
 * @param s    codec context supplying geometry and debug tables
 * @param p    decoded Picture whose side data is dumped/visualized
 * @param pict output frame the visualization is drawn into
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
|
yading@10
|
2169
|
yading@10
|
/**
 * Export the picture's qscale table as QP side data on the frame.
 *
 * Takes a new reference to the qscale table buffer, advances it past the
 * leading padding (two stride rows plus one entry, matching the table's
 * edge layout) and hands the trimmed reference to av_frame_set_qp_table(),
 * which takes ownership of it.
 *
 * @param s       context providing mb_stride
 * @param f       frame that receives the QP table side data
 * @param p       picture owning the qscale table buffer
 * @param qp_type QP table type tag forwarded to av_frame_set_qp_table()
 * @return 0 on success, AVERROR(ENOMEM) if the buffer ref fails
 */
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
    int offset = 2*s->mb_stride + 1;  // skip the padding rows before the first real MB row
    if(!ref)
        return AVERROR(ENOMEM);
    // the trimmed buffer must still cover every MB row of the frame
    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
    ref->size -= offset;
    ref->data += offset;
    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
}
|
yading@10
|
2181
|
yading@10
|
/**
 * Half-pel motion compensation of one block for lowres decoding.
 *
 * Splits the motion vector into an integer part (used to locate the source
 * block) and a sub-pel fraction (passed to the chroma-style MC function as
 * sx/sy weights). Falls back to the edge emulation buffer when the source
 * block would read outside the picture.
 *
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);        // pix_op table has 3 entries
    const int s_mask = (2 << lowres) - 1;         // mask selecting the sub-pel fraction
    int emu = 0;
    int sx, sy;

    /* quarter-pel vectors are only handled at half-pel precision in lowres */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>', so this is motion_x >> (lowres + 1):
     * the integer-pel part of a half-pel vector, scaled down by lowres. */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* if the (w+1)x(h+1) read window crosses the picture edge, build a
     * padded copy in the edge emulation buffer and read from that */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                 (h + 1) << field_based, src_x,
                                 src_y << field_based,
                                 h_edge_pos,
                                 v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel fraction to the 1/8-pel units pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
|
yading@10
|
2227
|
yading@10
|
2228 /* apply one mpeg motion vector to the three components */
|
yading@10
|
/**
 * Apply one MPEG motion vector to all three components (lowres path).
 *
 * Computes luma and chroma source positions and sub-pel fractions from the
 * motion vector, with chroma derivation depending on the output format
 * (H.263, H.261, or generic 4:2:0 / 4:2:2 / 4:4:4 as indicated by the
 * chroma shift flags), handles picture-edge emulation, field selection,
 * and finally runs the MC functions.
 *
 * @param field_based  1 when operating on individual fields (doubles strides)
 * @param bottom_field 1 to write the bottom field lines of the destination
 * @param field_select 1 to read from the bottom field of the reference
 * @param ref_picture  array[3] of pointers to the reference planes
 * @param pix_op       MC function table (put or avg)
 * @param h            height of the luma block in lowres pixels
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);  // chroma MC table index
    const int block_s = 8>>lowres;
    const int s_mask = (2 << lowres) - 1;                       // sub-pel fraction mask
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        /* compensate for the vertical offset between the two fields */
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>': these are shifts by (lowres + 1),
     * extracting the integer-pel part of a half-pel vector */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        /* H.263 chroma rounding: keep the luma sub-pel LSB */
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 - chroma subsampled in both directions */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* fall back to the edge emulation buffer when the read crosses the edge */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 linesize >> field_based, 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
                                     9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
                                     9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale sub-pel fractions to the 1/8-pel units the MC functions expect */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
|
yading@10
|
2360
|
yading@10
|
/**
 * Chroma motion compensation for 8x8 (4MV) macroblocks in lowres mode.
 *
 * The four luma vectors have already been summed by the caller; this
 * function derives a single chroma vector from that sum using the H.263
 * rounding rule and applies it to both chroma planes, with optional edge
 * emulation.
 *
 * @param mx sum of the four luma x vectors
 * @param my sum of the four luma y vectors
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    /* NOTE: '+' binds tighter than '>>': shifts by (lowres + 1), i.e. the
     * chroma plane edge positions at lowres scale */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale sub-pel fraction to 1/8-pel units for the MC function */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* the Cr plane reuses the same offset; re-run edge emulation if needed */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
|
yading@10
|
2414
|
yading@10
|
/**
 * motion compensation of a single macroblock (lowres path)
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; their sum drives a single chroma vector */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a same-parity reference in the second field
             * lives in the current picture, not the reference picture */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: put the first prediction, then average the second */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
|
yading@10
|
2556
|
yading@10
|
2557 /**
|
yading@10
|
2558 * find the lowest MB row referenced in the MVs
|
yading@10
|
2559 */
|
yading@10
|
2560 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
|
yading@10
|
2561 {
|
yading@10
|
2562 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
|
yading@10
|
2563 int my, off, i, mvs;
|
yading@10
|
2564
|
yading@10
|
2565 if (s->picture_structure != PICT_FRAME || s->mcsel)
|
yading@10
|
2566 goto unhandled;
|
yading@10
|
2567
|
yading@10
|
2568 switch (s->mv_type) {
|
yading@10
|
2569 case MV_TYPE_16X16:
|
yading@10
|
2570 mvs = 1;
|
yading@10
|
2571 break;
|
yading@10
|
2572 case MV_TYPE_16X8:
|
yading@10
|
2573 mvs = 2;
|
yading@10
|
2574 break;
|
yading@10
|
2575 case MV_TYPE_8X8:
|
yading@10
|
2576 mvs = 4;
|
yading@10
|
2577 break;
|
yading@10
|
2578 default:
|
yading@10
|
2579 goto unhandled;
|
yading@10
|
2580 }
|
yading@10
|
2581
|
yading@10
|
2582 for (i = 0; i < mvs; i++) {
|
yading@10
|
2583 my = s->mv[dir][i][1]<<qpel_shift;
|
yading@10
|
2584 my_max = FFMAX(my_max, my);
|
yading@10
|
2585 my_min = FFMIN(my_min, my);
|
yading@10
|
2586 }
|
yading@10
|
2587
|
yading@10
|
2588 off = (FFMAX(-my_min, my_max) + 63) >> 6;
|
yading@10
|
2589
|
yading@10
|
2590 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
|
yading@10
|
2591 unhandled:
|
yading@10
|
2592 return s->mb_height-1;
|
yading@10
|
2593 }
|
yading@10
|
2594
|
yading@10
|
/* put block[] to dest[]: dequantize an intra block with the context's
 * unquantizer, then write (not add) the IDCT result to dest */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
|
yading@10
|
2602
|
yading@10
|
/* add block[] to dest[]: add the IDCT of an already-dequantized block to
 * dest; skipped entirely when the block has no coded coefficients */
static inline void add_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->dsp.idct_add (dest, line_size, block);
    }
}
|
yading@10
|
2611
|
yading@10
|
/* Dequantize an inter block and add its IDCT to dest; skipped entirely
 * when the block has no coded coefficients (last index < 0). */
static inline void add_dequant_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);

        s->dsp.idct_add (dest, line_size, block);
    }
}
|
yading@10
|
2621
|
yading@10
|
2622 /**
|
yading@10
|
2623 * Clean dc, ac, coded_block for the current non-intra MB.
|
yading@10
|
2624 */
|
yading@10
|
2625 void ff_clean_intra_table_entries(MpegEncContext *s)
|
yading@10
|
2626 {
|
yading@10
|
2627 int wrap = s->b8_stride;
|
yading@10
|
2628 int xy = s->block_index[0];
|
yading@10
|
2629
|
yading@10
|
2630 s->dc_val[0][xy ] =
|
yading@10
|
2631 s->dc_val[0][xy + 1 ] =
|
yading@10
|
2632 s->dc_val[0][xy + wrap] =
|
yading@10
|
2633 s->dc_val[0][xy + 1 + wrap] = 1024;
|
yading@10
|
2634 /* ac pred */
|
yading@10
|
2635 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
|
yading@10
|
2636 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
|
yading@10
|
2637 if (s->msmpeg4_version>=3) {
|
yading@10
|
2638 s->coded_block[xy ] =
|
yading@10
|
2639 s->coded_block[xy + 1 ] =
|
yading@10
|
2640 s->coded_block[xy + wrap] =
|
yading@10
|
2641 s->coded_block[xy + 1 + wrap] = 0;
|
yading@10
|
2642 }
|
yading@10
|
2643 /* chroma */
|
yading@10
|
2644 wrap = s->mb_stride;
|
yading@10
|
2645 xy = s->mb_x + s->mb_y * wrap;
|
yading@10
|
2646 s->dc_val[1][xy] =
|
yading@10
|
2647 s->dc_val[2][xy] = 1024;
|
yading@10
|
2648 /* ac pred */
|
yading@10
|
2649 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
|
yading@10
|
2650 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
|
yading@10
|
2651
|
yading@10
|
2652 s->mbintra_table[xy]= 0;
|
yading@10
|
2653 }
|
yading@10
|
2654
|
yading@10
|
2655 /* generic function called after a macroblock has been parsed by the
|
yading@10
|
2656 decoder or after it has been encoded by the encoder.
|
yading@10
|
2657
|
yading@10
|
2658 Important variables used:
|
yading@10
|
2659 s->mb_intra : true if intra macroblock
|
yading@10
|
2660 s->mv_dir : motion vector direction
|
yading@10
|
2661 s->mv_type : motion vector type
|
yading@10
|
2662 s->mv : motion vector
|
yading@10
|
2663 s->interlaced_dct : true if interlaced dct used (mpeg2)
|
yading@10
|
2664 */
|
yading@10
|
2665 static av_always_inline
|
yading@10
|
2666 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
|
yading@10
|
2667 int lowres_flag, int is_mpeg12)
|
yading@10
|
2668 {
|
yading@10
|
2669 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
|
yading@10
|
2670 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
|
yading@10
|
2671 ff_xvmc_decode_mb(s);//xvmc uses pblocks
|
yading@10
|
2672 return;
|
yading@10
|
2673 }
|
yading@10
|
2674
|
yading@10
|
2675 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
|
yading@10
|
2676 /* print DCT coefficients */
|
yading@10
|
2677 int i,j;
|
yading@10
|
2678 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
|
yading@10
|
2679 for(i=0; i<6; i++){
|
yading@10
|
2680 for(j=0; j<64; j++){
|
yading@10
|
2681 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
|
yading@10
|
2682 }
|
yading@10
|
2683 av_log(s->avctx, AV_LOG_DEBUG, "\n");
|
yading@10
|
2684 }
|
yading@10
|
2685 }
|
yading@10
|
2686
|
yading@10
|
2687 s->current_picture.qscale_table[mb_xy] = s->qscale;
|
yading@10
|
2688
|
yading@10
|
2689 /* update DC predictors for P macroblocks */
|
yading@10
|
2690 if (!s->mb_intra) {
|
yading@10
|
2691 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
|
yading@10
|
2692 if(s->mbintra_table[mb_xy])
|
yading@10
|
2693 ff_clean_intra_table_entries(s);
|
yading@10
|
2694 } else {
|
yading@10
|
2695 s->last_dc[0] =
|
yading@10
|
2696 s->last_dc[1] =
|
yading@10
|
2697 s->last_dc[2] = 128 << s->intra_dc_precision;
|
yading@10
|
2698 }
|
yading@10
|
2699 }
|
yading@10
|
2700 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
|
yading@10
|
2701 s->mbintra_table[mb_xy]=1;
|
yading@10
|
2702
|
yading@10
|
2703 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
|
yading@10
|
2704 uint8_t *dest_y, *dest_cb, *dest_cr;
|
yading@10
|
2705 int dct_linesize, dct_offset;
|
yading@10
|
2706 op_pixels_func (*op_pix)[4];
|
yading@10
|
2707 qpel_mc_func (*op_qpix)[16];
|
yading@10
|
2708 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
|
yading@10
|
2709 const int uvlinesize = s->current_picture.f.linesize[1];
|
yading@10
|
2710 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
|
yading@10
|
2711 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
|
yading@10
|
2712
|
yading@10
|
2713 /* avoid copy if macroblock skipped in last frame too */
|
yading@10
|
2714 /* skip only during decoding as we might trash the buffers during encoding a bit */
|
yading@10
|
2715 if(!s->encoding){
|
yading@10
|
2716 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
|
yading@10
|
2717
|
yading@10
|
2718 if (s->mb_skipped) {
|
yading@10
|
2719 s->mb_skipped= 0;
|
yading@10
|
2720 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
|
yading@10
|
2721 *mbskip_ptr = 1;
|
yading@10
|
2722 } else if(!s->current_picture.reference) {
|
yading@10
|
2723 *mbskip_ptr = 1;
|
yading@10
|
2724 } else{
|
yading@10
|
2725 *mbskip_ptr = 0; /* not skipped */
|
yading@10
|
2726 }
|
yading@10
|
2727 }
|
yading@10
|
2728
|
yading@10
|
2729 dct_linesize = linesize << s->interlaced_dct;
|
yading@10
|
2730 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
|
yading@10
|
2731
|
yading@10
|
2732 if(readable){
|
yading@10
|
2733 dest_y= s->dest[0];
|
yading@10
|
2734 dest_cb= s->dest[1];
|
yading@10
|
2735 dest_cr= s->dest[2];
|
yading@10
|
2736 }else{
|
yading@10
|
2737 dest_y = s->b_scratchpad;
|
yading@10
|
2738 dest_cb= s->b_scratchpad+16*linesize;
|
yading@10
|
2739 dest_cr= s->b_scratchpad+32*linesize;
|
yading@10
|
2740 }
|
yading@10
|
2741
|
yading@10
|
2742 if (!s->mb_intra) {
|
yading@10
|
2743 /* motion handling */
|
yading@10
|
2744 /* decoding or more than one mb_type (MC was already done otherwise) */
|
yading@10
|
2745 if(!s->encoding){
|
yading@10
|
2746
|
yading@10
|
2747 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
|
yading@10
|
2748 if (s->mv_dir & MV_DIR_FORWARD) {
|
yading@10
|
2749 ff_thread_await_progress(&s->last_picture_ptr->tf,
|
yading@10
|
2750 ff_MPV_lowest_referenced_row(s, 0),
|
yading@10
|
2751 0);
|
yading@10
|
2752 }
|
yading@10
|
2753 if (s->mv_dir & MV_DIR_BACKWARD) {
|
yading@10
|
2754 ff_thread_await_progress(&s->next_picture_ptr->tf,
|
yading@10
|
2755 ff_MPV_lowest_referenced_row(s, 1),
|
yading@10
|
2756 0);
|
yading@10
|
2757 }
|
yading@10
|
2758 }
|
yading@10
|
2759
|
yading@10
|
2760 if(lowres_flag){
|
yading@10
|
2761 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
|
yading@10
|
2762
|
yading@10
|
2763 if (s->mv_dir & MV_DIR_FORWARD) {
|
yading@10
|
2764 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
|
yading@10
|
2765 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
|
yading@10
|
2766 }
|
yading@10
|
2767 if (s->mv_dir & MV_DIR_BACKWARD) {
|
yading@10
|
2768 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
|
yading@10
|
2769 }
|
yading@10
|
2770 }else{
|
yading@10
|
2771 op_qpix= s->me.qpel_put;
|
yading@10
|
2772 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
|
yading@10
|
2773 op_pix = s->hdsp.put_pixels_tab;
|
yading@10
|
2774 }else{
|
yading@10
|
2775 op_pix = s->hdsp.put_no_rnd_pixels_tab;
|
yading@10
|
2776 }
|
yading@10
|
2777 if (s->mv_dir & MV_DIR_FORWARD) {
|
yading@10
|
2778 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
|
yading@10
|
2779 op_pix = s->hdsp.avg_pixels_tab;
|
yading@10
|
2780 op_qpix= s->me.qpel_avg;
|
yading@10
|
2781 }
|
yading@10
|
2782 if (s->mv_dir & MV_DIR_BACKWARD) {
|
yading@10
|
2783 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
|
yading@10
|
2784 }
|
yading@10
|
2785 }
|
yading@10
|
2786 }
|
yading@10
|
2787
|
yading@10
|
2788 /* skip dequant / idct if we are really late ;) */
|
yading@10
|
2789 if(s->avctx->skip_idct){
|
yading@10
|
2790 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
|
yading@10
|
2791 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|
yading@10
|
2792 || s->avctx->skip_idct >= AVDISCARD_ALL)
|
yading@10
|
2793 goto skip_idct;
|
yading@10
|
2794 }
|
yading@10
|
2795
|
yading@10
|
2796 /* add dct residue */
|
yading@10
|
2797 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
|
yading@10
|
2798 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
|
yading@10
|
2799 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
|
yading@10
|
2800 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
|
yading@10
|
2801 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
|
yading@10
|
2802 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
|
yading@10
|
2803
|
yading@10
|
2804 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
|
yading@10
|
2805 if (s->chroma_y_shift){
|
yading@10
|
2806 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
|
yading@10
|
2807 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
|
yading@10
|
2808 }else{
|
yading@10
|
2809 dct_linesize >>= 1;
|
yading@10
|
2810 dct_offset >>=1;
|
yading@10
|
2811 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
|
yading@10
|
2812 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
|
yading@10
|
2813 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
|
yading@10
|
2814 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
|
yading@10
|
2815 }
|
yading@10
|
2816 }
|
yading@10
|
2817 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
|
yading@10
|
2818 add_dct(s, block[0], 0, dest_y , dct_linesize);
|
yading@10
|
2819 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
|
yading@10
|
2820 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
|
yading@10
|
2821 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
|
yading@10
|
2822
|
yading@10
|
2823 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
|
yading@10
|
2824 if(s->chroma_y_shift){//Chroma420
|
yading@10
|
2825 add_dct(s, block[4], 4, dest_cb, uvlinesize);
|
yading@10
|
2826 add_dct(s, block[5], 5, dest_cr, uvlinesize);
|
yading@10
|
2827 }else{
|
yading@10
|
2828 //chroma422
|
yading@10
|
2829 dct_linesize = uvlinesize << s->interlaced_dct;
|
yading@10
|
2830 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
|
yading@10
|
2831
|
yading@10
|
2832 add_dct(s, block[4], 4, dest_cb, dct_linesize);
|
yading@10
|
2833 add_dct(s, block[5], 5, dest_cr, dct_linesize);
|
yading@10
|
2834 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
|
yading@10
|
2835 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
|
yading@10
|
2836 if(!s->chroma_x_shift){//Chroma444
|
yading@10
|
2837 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
|
yading@10
|
2838 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
|
yading@10
|
2839 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
|
yading@10
|
2840 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
|
yading@10
|
2841 }
|
yading@10
|
2842 }
|
yading@10
|
2843 }//fi gray
|
yading@10
|
2844 }
|
yading@10
|
2845 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
|
yading@10
|
2846 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
|
yading@10
|
2847 }
|
yading@10
|
2848 } else {
|
yading@10
|
2849 /* dct only in intra block */
|
yading@10
|
2850 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
|
yading@10
|
2851 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
|
yading@10
|
2852 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
|
yading@10
|
2853 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
|
yading@10
|
2854 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
|
yading@10
|
2855
|
yading@10
|
2856 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
|
yading@10
|
2857 if(s->chroma_y_shift){
|
yading@10
|
2858 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
|
yading@10
|
2859 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
|
yading@10
|
2860 }else{
|
yading@10
|
2861 dct_offset >>=1;
|
yading@10
|
2862 dct_linesize >>=1;
|
yading@10
|
2863 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
|
yading@10
|
2864 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
|
yading@10
|
2865 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
|
yading@10
|
2866 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
|
yading@10
|
2867 }
|
yading@10
|
2868 }
|
yading@10
|
2869 }else{
|
yading@10
|
2870 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
|
yading@10
|
2871 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
|
yading@10
|
2872 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
|
yading@10
|
2873 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
|
yading@10
|
2874
|
yading@10
|
2875 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
|
yading@10
|
2876 if(s->chroma_y_shift){
|
yading@10
|
2877 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
|
yading@10
|
2878 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
|
yading@10
|
2879 }else{
|
yading@10
|
2880
|
yading@10
|
2881 dct_linesize = uvlinesize << s->interlaced_dct;
|
yading@10
|
2882 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
|
yading@10
|
2883
|
yading@10
|
2884 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
|
yading@10
|
2885 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
|
yading@10
|
2886 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
|
yading@10
|
2887 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
|
yading@10
|
2888 if(!s->chroma_x_shift){//Chroma444
|
yading@10
|
2889 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
|
yading@10
|
2890 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
|
yading@10
|
2891 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
|
yading@10
|
2892 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
|
yading@10
|
2893 }
|
yading@10
|
2894 }
|
yading@10
|
2895 }//gray
|
yading@10
|
2896 }
|
yading@10
|
2897 }
|
yading@10
|
2898 skip_idct:
|
yading@10
|
2899 if(!readable){
|
yading@10
|
2900 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
|
yading@10
|
2901 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
|
yading@10
|
2902 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
|
yading@10
|
2903 }
|
yading@10
|
2904 }
|
yading@10
|
2905 }
|
yading@10
|
2906
|
yading@10
|
2907 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
|
yading@10
|
2908 #if !CONFIG_SMALL
|
yading@10
|
2909 if(s->out_format == FMT_MPEG1) {
|
yading@10
|
2910 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
|
yading@10
|
2911 else MPV_decode_mb_internal(s, block, 0, 1);
|
yading@10
|
2912 } else
|
yading@10
|
2913 #endif
|
yading@10
|
2914 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
|
yading@10
|
2915 else MPV_decode_mb_internal(s, block, 0, 0);
|
yading@10
|
2916 }
|
yading@10
|
2917
|
yading@10
|
2918 /**
|
yading@10
|
2919 * @param h is the normal height, this will be reduced automatically if needed for the last row
|
yading@10
|
2920 */
|
yading@10
|
2921 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
|
yading@10
|
2922 Picture *last, int y, int h, int picture_structure,
|
yading@10
|
2923 int first_field, int draw_edges, int low_delay,
|
yading@10
|
2924 int v_edge_pos, int h_edge_pos)
|
yading@10
|
2925 {
|
yading@10
|
2926 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
|
yading@10
|
2927 int hshift = desc->log2_chroma_w;
|
yading@10
|
2928 int vshift = desc->log2_chroma_h;
|
yading@10
|
2929 const int field_pic = picture_structure != PICT_FRAME;
|
yading@10
|
2930 if(field_pic){
|
yading@10
|
2931 h <<= 1;
|
yading@10
|
2932 y <<= 1;
|
yading@10
|
2933 }
|
yading@10
|
2934
|
yading@10
|
2935 if (!avctx->hwaccel &&
|
yading@10
|
2936 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
|
yading@10
|
2937 draw_edges &&
|
yading@10
|
2938 cur->reference &&
|
yading@10
|
2939 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
|
yading@10
|
2940 int *linesize = cur->f.linesize;
|
yading@10
|
2941 int sides = 0, edge_h;
|
yading@10
|
2942 if (y==0) sides |= EDGE_TOP;
|
yading@10
|
2943 if (y + h >= v_edge_pos)
|
yading@10
|
2944 sides |= EDGE_BOTTOM;
|
yading@10
|
2945
|
yading@10
|
2946 edge_h= FFMIN(h, v_edge_pos - y);
|
yading@10
|
2947
|
yading@10
|
2948 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
|
yading@10
|
2949 linesize[0], h_edge_pos, edge_h,
|
yading@10
|
2950 EDGE_WIDTH, EDGE_WIDTH, sides);
|
yading@10
|
2951 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
|
yading@10
|
2952 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
|
yading@10
|
2953 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
|
yading@10
|
2954 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
|
yading@10
|
2955 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
|
yading@10
|
2956 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
|
yading@10
|
2957 }
|
yading@10
|
2958
|
yading@10
|
2959 h = FFMIN(h, avctx->height - y);
|
yading@10
|
2960
|
yading@10
|
2961 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
|
yading@10
|
2962
|
yading@10
|
2963 if (avctx->draw_horiz_band) {
|
yading@10
|
2964 AVFrame *src;
|
yading@10
|
2965 int offset[AV_NUM_DATA_POINTERS];
|
yading@10
|
2966 int i;
|
yading@10
|
2967
|
yading@10
|
2968 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
|
yading@10
|
2969 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
|
yading@10
|
2970 src = &cur->f;
|
yading@10
|
2971 else if (last)
|
yading@10
|
2972 src = &last->f;
|
yading@10
|
2973 else
|
yading@10
|
2974 return;
|
yading@10
|
2975
|
yading@10
|
2976 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
|
yading@10
|
2977 picture_structure == PICT_FRAME &&
|
yading@10
|
2978 avctx->codec_id != AV_CODEC_ID_SVQ3) {
|
yading@10
|
2979 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
|
yading@10
|
2980 offset[i] = 0;
|
yading@10
|
2981 }else{
|
yading@10
|
2982 offset[0]= y * src->linesize[0];
|
yading@10
|
2983 offset[1]=
|
yading@10
|
2984 offset[2]= (y >> vshift) * src->linesize[1];
|
yading@10
|
2985 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
|
yading@10
|
2986 offset[i] = 0;
|
yading@10
|
2987 }
|
yading@10
|
2988
|
yading@10
|
2989 emms_c();
|
yading@10
|
2990
|
yading@10
|
2991 avctx->draw_horiz_band(avctx, src, offset,
|
yading@10
|
2992 y, picture_structure, h);
|
yading@10
|
2993 }
|
yading@10
|
2994 }
|
yading@10
|
2995
|
yading@10
|
2996 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
|
yading@10
|
2997 {
|
yading@10
|
2998 int draw_edges = s->unrestricted_mv && !s->intra_only;
|
yading@10
|
2999 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
|
yading@10
|
3000 &s->last_picture, y, h, s->picture_structure,
|
yading@10
|
3001 s->first_field, draw_edges, s->low_delay,
|
yading@10
|
3002 s->v_edge_pos, s->h_edge_pos);
|
yading@10
|
3003 }
|
yading@10
|
3004
|
yading@10
|
3005 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
|
yading@10
|
3006 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
|
yading@10
|
3007 const int uvlinesize = s->current_picture.f.linesize[1];
|
yading@10
|
3008 const int mb_size= 4 - s->avctx->lowres;
|
yading@10
|
3009
|
yading@10
|
3010 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
|
yading@10
|
3011 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
|
yading@10
|
3012 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
|
yading@10
|
3013 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
|
yading@10
|
3014 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
|
yading@10
|
3015 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
|
yading@10
|
3016 //block_index is not used by mpeg2, so it is not affected by chroma_format
|
yading@10
|
3017
|
yading@10
|
3018 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
|
yading@10
|
3019 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
|
yading@10
|
3020 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
|
yading@10
|
3021
|
yading@10
|
3022 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
|
yading@10
|
3023 {
|
yading@10
|
3024 if(s->picture_structure==PICT_FRAME){
|
yading@10
|
3025 s->dest[0] += s->mb_y * linesize << mb_size;
|
yading@10
|
3026 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
|
yading@10
|
3027 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
|
yading@10
|
3028 }else{
|
yading@10
|
3029 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
|
yading@10
|
3030 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
|
yading@10
|
3031 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
|
yading@10
|
3032 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
|
yading@10
|
3033 }
|
yading@10
|
3034 }
|
yading@10
|
3035 }
|
yading@10
|
3036
|
yading@10
|
/**
 * Permute an 8x8 block of coefficients.
 * @param block       the coefficients, modified in place
 * @param permutation maps each raster position to its permuted position
 * @param scantable   the scan order in use; only used to limit the work to
 *                    the coded coefficients — the block is NOT (inverse)
 *                    permuted to scantable order!
 * @param last        index of the last nonzero coefficient in scantable order
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int i;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* first clear every coded position while remembering its value ... */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        saved[pos] = block[pos];
        block[pos] = 0;
    }
    /* ... then scatter the saved values to their permuted destinations */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = saved[pos];
    }
}
|
yading@10
|
3065
|
yading@10
|
3066 void ff_mpeg_flush(AVCodecContext *avctx){
|
yading@10
|
3067 int i;
|
yading@10
|
3068 MpegEncContext *s = avctx->priv_data;
|
yading@10
|
3069
|
yading@10
|
3070 if(s==NULL || s->picture==NULL)
|
yading@10
|
3071 return;
|
yading@10
|
3072
|
yading@10
|
3073 for (i = 0; i < MAX_PICTURE_COUNT; i++)
|
yading@10
|
3074 ff_mpeg_unref_picture(s, &s->picture[i]);
|
yading@10
|
3075 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
|
yading@10
|
3076
|
yading@10
|
3077 s->mb_x= s->mb_y= 0;
|
yading@10
|
3078 s->closed_gop= 0;
|
yading@10
|
3079
|
yading@10
|
3080 s->parse_context.state= -1;
|
yading@10
|
3081 s->parse_context.frame_start_found= 0;
|
yading@10
|
3082 s->parse_context.overread= 0;
|
yading@10
|
3083 s->parse_context.overread_index= 0;
|
yading@10
|
3084 s->parse_context.index= 0;
|
yading@10
|
3085 s->parse_context.last_index= 0;
|
yading@10
|
3086 s->bitstream_buffer_size=0;
|
yading@10
|
3087 s->pp_time=0;
|
yading@10
|
3088 }
|
yading@10
|
3089
|
yading@10
|
3090 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
|
yading@10
|
3091 int16_t *block, int n, int qscale)
|
yading@10
|
3092 {
|
yading@10
|
3093 int i, level, nCoeffs;
|
yading@10
|
3094 const uint16_t *quant_matrix;
|
yading@10
|
3095
|
yading@10
|
3096 nCoeffs= s->block_last_index[n];
|
yading@10
|
3097
|
yading@10
|
3098 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
|
yading@10
|
3099 /* XXX: only mpeg1 */
|
yading@10
|
3100 quant_matrix = s->intra_matrix;
|
yading@10
|
3101 for(i=1;i<=nCoeffs;i++) {
|
yading@10
|
3102 int j= s->intra_scantable.permutated[i];
|
yading@10
|
3103 level = block[j];
|
yading@10
|
3104 if (level) {
|
yading@10
|
3105 if (level < 0) {
|
yading@10
|
3106 level = -level;
|
yading@10
|
3107 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3108 level = (level - 1) | 1;
|
yading@10
|
3109 level = -level;
|
yading@10
|
3110 } else {
|
yading@10
|
3111 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3112 level = (level - 1) | 1;
|
yading@10
|
3113 }
|
yading@10
|
3114 block[j] = level;
|
yading@10
|
3115 }
|
yading@10
|
3116 }
|
yading@10
|
3117 }
|
yading@10
|
3118
|
yading@10
|
3119 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
|
yading@10
|
3120 int16_t *block, int n, int qscale)
|
yading@10
|
3121 {
|
yading@10
|
3122 int i, level, nCoeffs;
|
yading@10
|
3123 const uint16_t *quant_matrix;
|
yading@10
|
3124
|
yading@10
|
3125 nCoeffs= s->block_last_index[n];
|
yading@10
|
3126
|
yading@10
|
3127 quant_matrix = s->inter_matrix;
|
yading@10
|
3128 for(i=0; i<=nCoeffs; i++) {
|
yading@10
|
3129 int j= s->intra_scantable.permutated[i];
|
yading@10
|
3130 level = block[j];
|
yading@10
|
3131 if (level) {
|
yading@10
|
3132 if (level < 0) {
|
yading@10
|
3133 level = -level;
|
yading@10
|
3134 level = (((level << 1) + 1) * qscale *
|
yading@10
|
3135 ((int) (quant_matrix[j]))) >> 4;
|
yading@10
|
3136 level = (level - 1) | 1;
|
yading@10
|
3137 level = -level;
|
yading@10
|
3138 } else {
|
yading@10
|
3139 level = (((level << 1) + 1) * qscale *
|
yading@10
|
3140 ((int) (quant_matrix[j]))) >> 4;
|
yading@10
|
3141 level = (level - 1) | 1;
|
yading@10
|
3142 }
|
yading@10
|
3143 block[j] = level;
|
yading@10
|
3144 }
|
yading@10
|
3145 }
|
yading@10
|
3146 }
|
yading@10
|
3147
|
yading@10
|
3148 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
|
yading@10
|
3149 int16_t *block, int n, int qscale)
|
yading@10
|
3150 {
|
yading@10
|
3151 int i, level, nCoeffs;
|
yading@10
|
3152 const uint16_t *quant_matrix;
|
yading@10
|
3153
|
yading@10
|
3154 if(s->alternate_scan) nCoeffs= 63;
|
yading@10
|
3155 else nCoeffs= s->block_last_index[n];
|
yading@10
|
3156
|
yading@10
|
3157 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
|
yading@10
|
3158 quant_matrix = s->intra_matrix;
|
yading@10
|
3159 for(i=1;i<=nCoeffs;i++) {
|
yading@10
|
3160 int j= s->intra_scantable.permutated[i];
|
yading@10
|
3161 level = block[j];
|
yading@10
|
3162 if (level) {
|
yading@10
|
3163 if (level < 0) {
|
yading@10
|
3164 level = -level;
|
yading@10
|
3165 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3166 level = -level;
|
yading@10
|
3167 } else {
|
yading@10
|
3168 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3169 }
|
yading@10
|
3170 block[j] = level;
|
yading@10
|
3171 }
|
yading@10
|
3172 }
|
yading@10
|
3173 }
|
yading@10
|
3174
|
yading@10
|
3175 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
|
yading@10
|
3176 int16_t *block, int n, int qscale)
|
yading@10
|
3177 {
|
yading@10
|
3178 int i, level, nCoeffs;
|
yading@10
|
3179 const uint16_t *quant_matrix;
|
yading@10
|
3180 int sum=-1;
|
yading@10
|
3181
|
yading@10
|
3182 if(s->alternate_scan) nCoeffs= 63;
|
yading@10
|
3183 else nCoeffs= s->block_last_index[n];
|
yading@10
|
3184
|
yading@10
|
3185 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
|
yading@10
|
3186 sum += block[0];
|
yading@10
|
3187 quant_matrix = s->intra_matrix;
|
yading@10
|
3188 for(i=1;i<=nCoeffs;i++) {
|
yading@10
|
3189 int j= s->intra_scantable.permutated[i];
|
yading@10
|
3190 level = block[j];
|
yading@10
|
3191 if (level) {
|
yading@10
|
3192 if (level < 0) {
|
yading@10
|
3193 level = -level;
|
yading@10
|
3194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3195 level = -level;
|
yading@10
|
3196 } else {
|
yading@10
|
3197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
|
yading@10
|
3198 }
|
yading@10
|
3199 block[j] = level;
|
yading@10
|
3200 sum+=level;
|
yading@10
|
3201 }
|
yading@10
|
3202 }
|
yading@10
|
3203 block[63]^=sum&1;
|
yading@10
|
3204 }
|
yading@10
|
3205
|
yading@10
|
3206 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
|
yading@10
|
3207 int16_t *block, int n, int qscale)
|
yading@10
|
3208 {
|
yading@10
|
3209 int i, level, nCoeffs;
|
yading@10
|
3210 const uint16_t *quant_matrix;
|
yading@10
|
3211 int sum=-1;
|
yading@10
|
3212
|
yading@10
|
3213 if(s->alternate_scan) nCoeffs= 63;
|
yading@10
|
3214 else nCoeffs= s->block_last_index[n];
|
yading@10
|
3215
|
yading@10
|
3216 quant_matrix = s->inter_matrix;
|
yading@10
|
3217 for(i=0; i<=nCoeffs; i++) {
|
yading@10
|
3218 int j= s->intra_scantable.permutated[i];
|
yading@10
|
3219 level = block[j];
|
yading@10
|
3220 if (level) {
|
yading@10
|
3221 if (level < 0) {
|
yading@10
|
3222 level = -level;
|
yading@10
|
3223 level = (((level << 1) + 1) * qscale *
|
yading@10
|
3224 ((int) (quant_matrix[j]))) >> 4;
|
yading@10
|
3225 level = -level;
|
yading@10
|
3226 } else {
|
yading@10
|
3227 level = (((level << 1) + 1) * qscale *
|
yading@10
|
3228 ((int) (quant_matrix[j]))) >> 4;
|
yading@10
|
3229 }
|
yading@10
|
3230 block[j] = level;
|
yading@10
|
3231 sum+=level;
|
yading@10
|
3232 }
|
yading@10
|
3233 }
|
yading@10
|
3234 block[63]^=sum&1;
|
yading@10
|
3235 }
|
yading@10
|
3236
|
yading@10
|
3237 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
|
yading@10
|
3238 int16_t *block, int n, int qscale)
|
yading@10
|
3239 {
|
yading@10
|
3240 int i, level, qmul, qadd;
|
yading@10
|
3241 int nCoeffs;
|
yading@10
|
3242
|
yading@10
|
3243 av_assert2(s->block_last_index[n]>=0);
|
yading@10
|
3244
|
yading@10
|
3245 qmul = qscale << 1;
|
yading@10
|
3246
|
yading@10
|
3247 if (!s->h263_aic) {
|
yading@10
|
3248 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
|
yading@10
|
3249 qadd = (qscale - 1) | 1;
|
yading@10
|
3250 }else{
|
yading@10
|
3251 qadd = 0;
|
yading@10
|
3252 }
|
yading@10
|
3253 if(s->ac_pred)
|
yading@10
|
3254 nCoeffs=63;
|
yading@10
|
3255 else
|
yading@10
|
3256 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
|
yading@10
|
3257
|
yading@10
|
3258 for(i=1; i<=nCoeffs; i++) {
|
yading@10
|
3259 level = block[i];
|
yading@10
|
3260 if (level) {
|
yading@10
|
3261 if (level < 0) {
|
yading@10
|
3262 level = level * qmul - qadd;
|
yading@10
|
3263 } else {
|
yading@10
|
3264 level = level * qmul + qadd;
|
yading@10
|
3265 }
|
yading@10
|
3266 block[i] = level;
|
yading@10
|
3267 }
|
yading@10
|
3268 }
|
yading@10
|
3269 }
|
yading@10
|
3270
|
yading@10
|
3271 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
|
yading@10
|
3272 int16_t *block, int n, int qscale)
|
yading@10
|
3273 {
|
yading@10
|
3274 int i, level, qmul, qadd;
|
yading@10
|
3275 int nCoeffs;
|
yading@10
|
3276
|
yading@10
|
3277 av_assert2(s->block_last_index[n]>=0);
|
yading@10
|
3278
|
yading@10
|
3279 qadd = (qscale - 1) | 1;
|
yading@10
|
3280 qmul = qscale << 1;
|
yading@10
|
3281
|
yading@10
|
3282 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
|
yading@10
|
3283
|
yading@10
|
3284 for(i=0; i<=nCoeffs; i++) {
|
yading@10
|
3285 level = block[i];
|
yading@10
|
3286 if (level) {
|
yading@10
|
3287 if (level < 0) {
|
yading@10
|
3288 level = level * qmul - qadd;
|
yading@10
|
3289 } else {
|
yading@10
|
3290 level = level * qmul + qadd;
|
yading@10
|
3291 }
|
yading@10
|
3292 block[i] = level;
|
yading@10
|
3293 }
|
yading@10
|
3294 }
|
yading@10
|
3295 }
|
yading@10
|
3296
|
yading@10
|
3297 /**
|
yading@10
|
3298 * set qscale and update qscale dependent variables.
|
yading@10
|
3299 */
|
yading@10
|
3300 void ff_set_qscale(MpegEncContext * s, int qscale)
|
yading@10
|
3301 {
|
yading@10
|
3302 if (qscale < 1)
|
yading@10
|
3303 qscale = 1;
|
yading@10
|
3304 else if (qscale > 31)
|
yading@10
|
3305 qscale = 31;
|
yading@10
|
3306
|
yading@10
|
3307 s->qscale = qscale;
|
yading@10
|
3308 s->chroma_qscale= s->chroma_qscale_table[qscale];
|
yading@10
|
3309
|
yading@10
|
3310 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
|
yading@10
|
3311 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
|
yading@10
|
3312 }
|
yading@10
|
3313
|
yading@10
|
3314 void ff_MPV_report_decode_progress(MpegEncContext *s)
|
yading@10
|
3315 {
|
yading@10
|
3316 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
|
yading@10
|
3317 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
|
yading@10
|
3318 }
|
yading@10
|
3319
|
yading@10
|
#if CONFIG_ERROR_RESILIENCE
/* Mirror the decoder state into the error-resilience context and start a
 * new ER frame. */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */
|