/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

/* stream duration in seconds */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams-1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt = AV_SAMPLE_FMT_S16;
        c->bit_rate = 64000;
        c->sample_rate = 44100;
        c->channels = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
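        /* For example, with STREAM_FRAME_RATE = 25 the time base is 1/25 s,
         * so a frame whose pts is N is presented N/25 seconds into the
         * stream, and successive frames simply increment pts by 1. */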
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
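
/* add_stream() only fills in the codec parameters; the encoders themselves
 * are opened later, with avcodec_open2(), in open_audio() and open_video(). */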

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
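
    /* Pick how many samples we feed to the encoder per call: codecs that
     * accept a variable frame size can take an arbitrary amount (10000 here),
     * otherwise we must supply exactly c->frame_size samples at a time. */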
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);
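    /* avcodec_fill_audio_frame() only points the frame's data pointers at the
     * samples buffer; the audio data itself is not copied. */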

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet)
        return;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
    avcodec_free_frame(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}

/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = dst_picture.data[0];
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (!ret && got_packet && pkt.size) {
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    if (frame)
        frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;
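        /* st->pts.val is expressed in st->time_base units, so the divisions
         * above yield the current audio and video positions in seconds. */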

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
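            /* advance the frame pts by one tick of the codec time base
             * (1/STREAM_FRAME_RATE s), rescaled to the stream time base
             * used by the muxer */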
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}