ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #if HAVE_ISATTY
34 #if HAVE_IO_H
35 #include <io.h>
36 #endif
37 #if HAVE_UNISTD_H
38 #include <unistd.h>
39 #endif
40 #endif
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/colorspace.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavformat/os_support.h"
62 
63 #include "libavformat/ffm.h" // not public API
64 
65 # include "libavfilter/avcodec.h"
66 # include "libavfilter/avfilter.h"
67 # include "libavfilter/buffersrc.h"
68 # include "libavfilter/buffersink.h"
69 
70 #if HAVE_SYS_RESOURCE_H
71 #include <sys/time.h>
72 #include <sys/types.h>
73 #include <sys/resource.h>
74 #elif HAVE_GETPROCESSTIMES
75 #include <windows.h>
76 #endif
77 #if HAVE_GETPROCESSMEMORYINFO
78 #include <windows.h>
79 #include <psapi.h>
80 #endif
81 
82 #if HAVE_SYS_SELECT_H
83 #include <sys/select.h>
84 #endif
85 
86 #if HAVE_TERMIOS_H
87 #include <fcntl.h>
88 #include <sys/ioctl.h>
89 #include <sys/time.h>
90 #include <termios.h>
91 #elif HAVE_KBHIT
92 #include <conio.h>
93 #endif
94 
95 #if HAVE_PTHREADS
96 #include <pthread.h>
97 #endif
98 
99 #include <time.h>
100 
101 #include "ffmpeg.h"
102 #include "cmdutils.h"
103 
104 #include "libavutil/avassert.h"
105 
106 const char program_name[] = "ffmpeg";
107 const int program_birth_year = 2000;
108 
109 static FILE *vstats_file;
110 
111 const char *const forced_keyframes_const_names[] = {
112  "n",
113  "n_forced",
114  "prev_forced_n",
115  "prev_forced_t",
116  "t",
117  NULL
118 };
119 
120 static void do_video_stats(OutputStream *ost, int frame_size);
121 static int64_t getutime(void);
122 static int64_t getmaxrss(void);
123 
124 static int run_as_daemon = 0;
125 static int64_t video_size = 0;
126 static int64_t audio_size = 0;
127 static int64_t subtitle_size = 0;
128 static int64_t extra_size = 0;
129 static int nb_frames_dup = 0;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
132 
133 static int current_time;
134 AVIOContext *progress_avio = NULL;
135 
136 static uint8_t *subtitle_out;
137 
138 #if HAVE_PTHREADS
139 /* signal to input threads that they should exit; set by the main thread */
140 static int transcoding_finished;
141 #endif
142 
143 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
144 
145 InputStream **input_streams = NULL;
146 int        nb_input_streams = 0;
147 InputFile   **input_files   = NULL;
148 int        nb_input_files   = 0;
149 
150 OutputStream **output_streams = NULL;
151 int         nb_output_streams = 0;
152 OutputFile   **output_files   = NULL;
153 int         nb_output_files   = 0;
154 
155 FilterGraph **filtergraphs;
156 int        nb_filtergraphs;
157 
158 #if HAVE_TERMIOS_H
159 
160 /* init terminal so that we can grab keys */
161 static struct termios oldtty;
162 static int restore_tty;
163 #endif
164 
165 
166 /* sub2video hack:
167  Convert subtitles to video with alpha to insert them in filter graphs.
168  This is a temporary solution until libavfilter gets real subtitles support.
169  */
170 
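/* For example, a command such as
   ffmpeg -i in.mkv -filter_complex "[0:v][0:s]overlay" out.mkv
   exercises this path: each decoded AVSubtitle is rendered into an RGB32 frame
   with an alpha channel and pushed into the buffer source feeding the overlay
   filter input. */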
171 static int sub2video_get_blank_frame(InputStream *ist)
172 {
173  int ret;
174  AVFrame *frame = ist->sub2video.frame;
175 
176  av_frame_unref(frame);
177  ist->sub2video.frame->width = ist->sub2video.w;
178  ist->sub2video.frame->height = ist->sub2video.h;
179  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
180  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
181  return ret;
182  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
183  return 0;
184 }
185 
186 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187  AVSubtitleRect *r)
188 {
189  uint32_t *pal, *dst2;
190  uint8_t *src, *src2;
191  int x, y;
192 
193  if (r->type != SUBTITLE_BITMAP) {
194  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195  return;
196  }
197  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
198  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->pict.data[0];
204  pal = (uint32_t *)r->pict.data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->pict.linesize[0];
212  }
213 }
214 
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
223  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224  AV_BUFFERSRC_FLAG_KEEP_REF |
225  AV_BUFFERSRC_FLAG_PUSH);
226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  int w = ist->sub2video.w, h = ist->sub2video.h;
231  AVFrame *frame = ist->sub2video.frame;
232  uint8_t *dst;
233  int dst_linesize;
234  int num_rects, i;
235  int64_t pts, end_pts;
236 
237  if (!frame)
238  return;
239  if (sub) {
240  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000,
241  AV_TIME_BASE_Q, ist->st->time_base);
242  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000,
243  AV_TIME_BASE_Q, ist->st->time_base);
244  num_rects = sub->num_rects;
245  } else {
246  pts = ist->sub2video.end_pts;
247  end_pts = INT64_MAX;
248  num_rects = 0;
249  }
250  if (sub2video_get_blank_frame(ist) < 0) {
251  av_log(ist->st->codec, AV_LOG_ERROR,
252  "Impossible to get a blank canvas.\n");
253  return;
254  }
255  dst = frame->data [0];
256  dst_linesize = frame->linesize[0];
257  for (i = 0; i < num_rects; i++)
258  sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
259  sub2video_push_ref(ist, pts);
260  ist->sub2video.end_pts = end_pts;
261 }
262 
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 {
265  InputFile *infile = input_files[ist->file_index];
266  int i, j, nb_reqs;
267  int64_t pts2;
268 
269  /* When a frame is read from a file, examine all sub2video streams in
270  the same file and send the sub2video frame again. Otherwise, decoded
271  video frames could be accumulating in the filter graph while a filter
272  (possibly overlay) is desperately waiting for a subtitle frame. */
273  for (i = 0; i < infile->nb_streams; i++) {
274  InputStream *ist2 = input_streams[infile->ist_index + i];
275  if (!ist2->sub2video.frame)
276  continue;
277  /* subtitles usually seem to be muxed ahead of other streams;
278  if not, subtracting a larger time here is necessary */
279  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280  /* do not send the heartbeat frame if the subtitle is already ahead */
281  if (pts2 <= ist2->sub2video.last_pts)
282  continue;
283  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284  sub2video_update(ist2, NULL);
285  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287  if (nb_reqs)
288  sub2video_push_ref(ist2, pts2);
289  }
290 }
291 
292 static void sub2video_flush(InputStream *ist)
293 {
294  int i;
295 
296  for (i = 0; i < ist->nb_filters; i++)
297  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
298 }
299 
300 /* end of sub2video hack */
301 
302 void term_exit(void)
303 {
304  av_log(NULL, AV_LOG_QUIET, "%s", "");
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
311 static volatile int received_sigterm = 0;
312 static volatile int received_nb_signals = 0;
313 
314 static void
315 sigterm_handler(int sig)
316 {
317  received_sigterm = sig;
318  received_nb_signals++;
319  term_exit();
320  if(received_nb_signals > 3)
321  exit(123);
322 }
323 
324 void term_init(void)
325 {
326 #if HAVE_TERMIOS_H
327  if(!run_as_daemon){
328  struct termios tty;
329  int istty = 1;
330 #if HAVE_ISATTY
331  istty = isatty(0) && isatty(2);
332 #endif
333  if (istty && tcgetattr (0, &tty) == 0) {
334  oldtty = tty;
335  restore_tty = 1;
336  atexit(term_exit);
337 
338  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
339  |INLCR|IGNCR|ICRNL|IXON);
340  tty.c_oflag |= OPOST;
341  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
342  tty.c_cflag &= ~(CSIZE|PARENB);
343  tty.c_cflag |= CS8;
344  tty.c_cc[VMIN] = 1;
345  tty.c_cc[VTIME] = 0;
346 
347  tcsetattr (0, TCSANOW, &tty);
348  }
349  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
350  }
351 #endif
353 
354  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
355  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
356 #ifdef SIGXCPU
357  signal(SIGXCPU, sigterm_handler);
358 #endif
359 }
360 
361 /* read a key without blocking */
362 static int read_key(void)
363 {
364  unsigned char ch;
365 #if HAVE_TERMIOS_H
366  int n = 1;
367  struct timeval tv;
368  fd_set rfds;
369 
370  FD_ZERO(&rfds);
371  FD_SET(0, &rfds);
372  tv.tv_sec = 0;
373  tv.tv_usec = 0;
374  n = select(1, &rfds, NULL, NULL, &tv);
375  if (n > 0) {
376  n = read(0, &ch, 1);
377  if (n == 1)
378  return ch;
379 
380  return n;
381  }
382 #elif HAVE_KBHIT
383 # if HAVE_PEEKNAMEDPIPE
384  static int is_pipe;
385  static HANDLE input_handle;
386  DWORD dw, nchars;
387  if(!input_handle){
388  input_handle = GetStdHandle(STD_INPUT_HANDLE);
389  is_pipe = !GetConsoleMode(input_handle, &dw);
390  }
391 
392  if (stdin->_cnt > 0) {
393  read(0, &ch, 1);
394  return ch;
395  }
396  if (is_pipe) {
397  /* When running under a GUI, you will end up here. */
398  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
399  // input pipe may have been closed by the program that ran ffmpeg
400  return -1;
401  }
402  //Read it
403  if(nchars != 0) {
404  read(0, &ch, 1);
405  return ch;
406  }else{
407  return -1;
408  }
409  }
410 # endif
411  if(kbhit())
412  return(getch());
413 #endif
414  return -1;
415 }
416 
417 static int decode_interrupt_cb(void *ctx)
418 {
419  return received_nb_signals > 1;
420 }
421 
422 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
423 
424 static void exit_program(void)
425 {
426  int i, j;
427 
428  if (do_benchmark) {
429  int maxrss = getmaxrss() / 1024;
430  printf("bench: maxrss=%ikB\n", maxrss);
431  }
432 
433  for (i = 0; i < nb_filtergraphs; i++) {
434  avfilter_graph_free(&filtergraphs[i]->graph);
435  for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
436  av_freep(&filtergraphs[i]->inputs[j]->name);
437  av_freep(&filtergraphs[i]->inputs[j]);
438  }
439  av_freep(&filtergraphs[i]->inputs);
440  for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
441  av_freep(&filtergraphs[i]->outputs[j]->name);
442  av_freep(&filtergraphs[i]->outputs[j]);
443  }
444  av_freep(&filtergraphs[i]->outputs);
445  av_freep(&filtergraphs[i]->graph_desc);
446  av_freep(&filtergraphs[i]);
447  }
448  av_freep(&filtergraphs);
449 
450  av_freep(&subtitle_out);
451 
452  /* close files */
453  for (i = 0; i < nb_output_files; i++) {
454  AVFormatContext *s = output_files[i]->ctx;
455  if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
456  avio_close(s->pb);
457  avformat_free_context(s);
458  av_dict_free(&output_files[i]->opts);
459  av_freep(&output_files[i]);
460  }
461  for (i = 0; i < nb_output_streams; i++) {
462  AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
463  while (bsfc) {
464  AVBitStreamFilterContext *next = bsfc->next;
465  av_bitstream_filter_close(bsfc);
466  bsfc = next;
467  }
468  output_streams[i]->bitstream_filters = NULL;
469  avcodec_free_frame(&output_streams[i]->filtered_frame);
470 
471  av_freep(&output_streams[i]->forced_keyframes);
472  av_expr_free(output_streams[i]->forced_keyframes_pexpr);
473  av_freep(&output_streams[i]->avfilter);
474  av_freep(&output_streams[i]->logfile_prefix);
475  av_freep(&output_streams[i]);
476  }
477  for (i = 0; i < nb_input_files; i++) {
478  avformat_close_input(&input_files[i]->ctx);
479  av_freep(&input_files[i]);
480  }
481  for (i = 0; i < nb_input_streams; i++) {
482  av_frame_free(&input_streams[i]->decoded_frame);
483  av_frame_free(&input_streams[i]->filter_frame);
484  av_dict_free(&input_streams[i]->opts);
485  avsubtitle_free(&input_streams[i]->prev_sub.subtitle);
486  av_frame_free(&input_streams[i]->sub2video.frame);
487  av_freep(&input_streams[i]->filters);
488  av_freep(&input_streams[i]);
489  }
490 
491  if (vstats_file)
492  fclose(vstats_file);
494 
495  av_freep(&input_streams);
496  av_freep(&input_files);
497  av_freep(&output_streams);
498  av_freep(&output_files);
499 
500  uninit_opts();
501 
502  avformat_network_deinit();
503 
504  if (received_sigterm) {
505  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
506  (int) received_sigterm);
507  }
508 }
509 
510 static void assert_avoptions(AVDictionary *m)
511 {
512  AVDictionaryEntry *t;
513  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
514  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
515  exit(1);
516  }
517 }
518 
519 static void abort_codec_experimental(AVCodec *c, int encoder)
520 {
521  exit(1);
522 }
523 
524 static void update_benchmark(const char *fmt, ...)
525 {
526  if (do_benchmark_all) {
527  int64_t t = getutime();
528  va_list va;
529  char buf[1024];
530 
531  if (fmt) {
532  va_start(va, fmt);
533  vsnprintf(buf, sizeof(buf), fmt, va);
534  va_end(va);
535  printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
536  }
537  current_time = t;
538  }
539 }
540 
541 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
542 {
543  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
544  AVCodecContext *avctx = ost->st->codec;
545  int ret;
546 
547  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
548  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
549  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
550 
551  if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {
552  int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
553  if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE && max > pkt->dts) {
554  av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,
555  "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);
556  if(pkt->pts >= pkt->dts)
557  pkt->pts = FFMAX(pkt->pts, max);
558  pkt->dts = max;
559  }
560  }
561 
562  /*
563  * Audio encoders may split the packets -- #frames in != #packets out.
564  * But there is no reordering, so we can limit the number of output packets
565  * by simply dropping them here.
566  * Counting encoded video frames needs to be done separately because of
567  * reordering, see do_video_out()
568  */
569  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
570  if (ost->frame_number >= ost->max_frames) {
571  av_free_packet(pkt);
572  return;
573  }
574  ost->frame_number++;
575  }
576 
577  while (bsfc) {
578  AVPacket new_pkt = *pkt;
579  int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
580  &new_pkt.data, &new_pkt.size,
581  pkt->data, pkt->size,
582  pkt->flags & AV_PKT_FLAG_KEY);
583  if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
584  uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
585  if(t) {
586  memcpy(t, new_pkt.data, new_pkt.size);
587  memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
588  new_pkt.data = t;
589  new_pkt.buf = NULL;
590  a = 1;
591  } else
592  a = AVERROR(ENOMEM);
593  }
594  if (a > 0) {
595  av_free_packet(pkt);
596  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
597  av_buffer_default_free, NULL, 0);
598  if (!new_pkt.buf)
599  exit(1);
600  } else if (a < 0) {
601  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
602  bsfc->filter->name, pkt->stream_index,
603  avctx->codec ? avctx->codec->name : "copy");
604  print_error("", a);
605  if (exit_on_error)
606  exit(1);
607  }
608  *pkt = new_pkt;
609 
610  bsfc = bsfc->next;
611  }
612 
613  pkt->stream_index = ost->index;
614 
615  if (debug_ts) {
616  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
617  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
618  av_get_media_type_string(ost->st->codec->codec_type),
619  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
620  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
621  pkt->size
622  );
623  }
624 
625  ret = av_interleaved_write_frame(s, pkt);
626  if (ret < 0) {
627  print_error("av_interleaved_write_frame()", ret);
628  exit(1);
629  }
630 }
631 
632 static void close_output_stream(OutputStream *ost)
633 {
634  OutputFile *of = output_files[ost->file_index];
635 
636  ost->finished = 1;
637  if (of->shortest) {
638  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
639  of->recording_time = FFMIN(of->recording_time, end);
640  }
641 }
642 
643 static int check_recording_time(OutputStream *ost)
644 {
645  OutputFile *of = output_files[ost->file_index];
646 
647  if (of->recording_time != INT64_MAX &&
648  av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
649  AV_TIME_BASE_Q) >= 0) {
650  close_output_stream(ost);
651  return 0;
652  }
653  return 1;
654 }
655 
656 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
657  AVFrame *frame)
658 {
659  AVCodecContext *enc = ost->st->codec;
660  AVPacket pkt;
661  int got_packet = 0;
662 
663  av_init_packet(&pkt);
664  pkt.data = NULL;
665  pkt.size = 0;
666 
667  if (!check_recording_time(ost))
668  return;
669 
670  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
671  frame->pts = ost->sync_opts;
672  ost->sync_opts = frame->pts + frame->nb_samples;
673 
674  av_assert0(pkt.size || !pkt.data);
675  update_benchmark(NULL);
676  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
677  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
678  exit(1);
679  }
680  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
681 
682  if (got_packet) {
683  if (pkt.pts != AV_NOPTS_VALUE)
684  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
685  if (pkt.dts != AV_NOPTS_VALUE)
686  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
687  if (pkt.duration > 0)
688  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
689 
690  if (debug_ts) {
691  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
692  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
693  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
694  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
695  }
696 
697  audio_size += pkt.size;
698  write_frame(s, &pkt, ost);
699 
700  av_free_packet(&pkt);
701  }
702 }
703 
704 static void do_subtitle_out(AVFormatContext *s,
705  OutputStream *ost,
706  InputStream *ist,
707  AVSubtitle *sub)
708 {
709  int subtitle_out_max_size = 1024 * 1024;
710  int subtitle_out_size, nb, i;
711  AVCodecContext *enc;
712  AVPacket pkt;
713  int64_t pts;
714 
715  if (sub->pts == AV_NOPTS_VALUE) {
716  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
717  if (exit_on_error)
718  exit(1);
719  return;
720  }
721 
722  enc = ost->st->codec;
723 
724  if (!subtitle_out) {
725  subtitle_out = av_malloc(subtitle_out_max_size);
726  }
727 
728  /* Note: DVB subtitles need one packet to draw them and another
729  packet to clear them */
730  /* XXX: signal it in the codec context ? */
731  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
732  nb = 2;
733  else
734  nb = 1;
735 
736  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
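 /* Illustrative example: with "-ss 10" on the output file, a subtitle whose
    sub->pts corresponds to 12s ends up with pts = 12s - 10s = 2s below, i.e.
    2s into the output timeline. */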
737  pts = sub->pts - output_files[ost->file_index]->start_time;
738  for (i = 0; i < nb; i++) {
739  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
740  if (!check_recording_time(ost))
741  return;
742 
743  sub->pts = pts;
744  // start_display_time is required to be 0
745  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
746  sub->end_display_time -= sub->start_display_time;
747  sub->start_display_time = 0;
748  if (i == 1)
749  sub->num_rects = 0;
750  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
751  subtitle_out_max_size, sub);
752  if (subtitle_out_size < 0) {
753  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
754  exit(1);
755  }
756 
757  av_init_packet(&pkt);
758  pkt.data = subtitle_out;
759  pkt.size = subtitle_out_size;
760  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
761  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
762  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
763  /* XXX: the pts correction is handled here. Maybe handling
764  it in the codec would be better */
765  if (i == 0)
766  pkt.pts += 90 * sub->start_display_time;
767  else
768  pkt.pts += 90 * sub->end_display_time;
769  }
770  subtitle_size += pkt.size;
771  write_frame(s, &pkt, ost);
772  }
773 }
774 
775 static void do_video_out(AVFormatContext *s,
776  OutputStream *ost,
777  AVFrame *in_picture)
778 {
779  int ret, format_video_sync;
780  AVPacket pkt;
781  AVCodecContext *enc = ost->st->codec;
782  int nb_frames, i;
783  double sync_ipts, delta;
784  double duration = 0;
785  int frame_size = 0;
786  InputStream *ist = NULL;
787 
788  if (ost->source_index >= 0)
789  ist = input_streams[ost->source_index];
790 
791  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
792  duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
793 
794  sync_ipts = in_picture->pts;
795  delta = sync_ipts - ost->sync_opts + duration;
796 
797  /* by default, we output a single frame */
798  nb_frames = 1;
799 
800  format_video_sync = video_sync_method;
801  if (format_video_sync == VSYNC_AUTO)
802  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
803 
804  switch (format_video_sync) {
805  case VSYNC_CFR:
806  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
807  if (delta < -1.1)
808  nb_frames = 0;
809  else if (delta > 1.1)
810  nb_frames = lrintf(delta);
811  break;
812  case VSYNC_VFR:
813  if (delta <= -0.6)
814  nb_frames = 0;
815  else if (delta > 0.6)
816  ost->sync_opts = lrint(sync_ipts);
817  break;
818  case VSYNC_DROP:
819  case VSYNC_PASSTHROUGH:
820  ost->sync_opts = lrint(sync_ipts);
821  break;
822  default:
823  av_assert0(0);
824  }
825 
826  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
827  if (nb_frames == 0) {
828  nb_frames_drop++;
829  av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
830  return;
831  } else if (nb_frames > 1) {
832  if (nb_frames > dts_error_threshold * 30) {
833  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
834  nb_frames_drop++;
835  return;
836  }
837  nb_frames_dup += nb_frames - 1;
838  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
839  }
840 
841  /* duplicates frame if needed */
842  for (i = 0; i < nb_frames; i++) {
843  av_init_packet(&pkt);
844  pkt.data = NULL;
845  pkt.size = 0;
846 
847  in_picture->pts = ost->sync_opts;
848 
849  if (!check_recording_time(ost))
850  return;
851 
852  if (s->oformat->flags & AVFMT_RAWPICTURE &&
853  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
854  /* raw pictures are written as AVPicture structures to
855  avoid any copies; we temporarily support the older
856  method. */
857  enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
858  enc->coded_frame->top_field_first = in_picture->top_field_first;
859  if (enc->coded_frame->interlaced_frame)
860  enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
861  else
862  enc->field_order = AV_FIELD_PROGRESSIVE;
863  pkt.data = (uint8_t *)in_picture;
864  pkt.size = sizeof(AVPicture);
865  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
866  pkt.flags |= AV_PKT_FLAG_KEY;
867 
868  video_size += pkt.size;
869  write_frame(s, &pkt, ost);
870  } else {
871  int got_packet, forced_keyframe = 0;
872  double pts_time;
873 
875  ost->top_field_first >= 0)
876  in_picture->top_field_first = !!ost->top_field_first;
877 
878  if (in_picture->interlaced_frame) {
879  if (enc->codec->id == AV_CODEC_ID_MJPEG)
880  enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
881  else
882  enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
883  } else
884  enc->field_order = AV_FIELD_PROGRESSIVE;
885 
886  in_picture->quality = ost->st->codec->global_quality;
887  if (!enc->me_threshold)
888  in_picture->pict_type = 0;
889 
890  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
891  in_picture->pts * av_q2d(enc->time_base) : NAN;
892  if (ost->forced_kf_index < ost->forced_kf_count &&
893  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
894  ost->forced_kf_index++;
895  forced_keyframe = 1;
896  } else if (ost->forced_keyframes_pexpr) {
897  double res;
898  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
899  res = av_expr_eval(ost->forced_keyframes_pexpr,
900  ost->forced_keyframes_expr_const_values, NULL);
901  av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
902  ost->forced_keyframes_expr_const_values[FKF_N],
903  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
904  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
905  ost->forced_keyframes_expr_const_values[FKF_T],
906  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
907  res);
908  if (res) {
909  forced_keyframe = 1;
910  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
911  ost->forced_keyframes_expr_const_values[FKF_N];
912  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
913  ost->forced_keyframes_expr_const_values[FKF_T];
914  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
915  }
916 
917  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
918  }
919  if (forced_keyframe) {
920  in_picture->pict_type = AV_PICTURE_TYPE_I;
921  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
922  }
923 
924  update_benchmark(NULL);
925  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
926  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
927  if (ret < 0) {
928  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
929  exit(1);
930  }
931 
932  if (got_packet) {
933  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
934  pkt.pts = ost->sync_opts;
935 
936  if (pkt.pts != AV_NOPTS_VALUE)
937  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
938  if (pkt.dts != AV_NOPTS_VALUE)
939  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
940 
941  if (debug_ts) {
942  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
943  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
944  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
945  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
946  }
947 
948  frame_size = pkt.size;
949  video_size += pkt.size;
950  write_frame(s, &pkt, ost);
951  av_free_packet(&pkt);
952 
953  /* if two pass, output log */
954  if (ost->logfile && enc->stats_out) {
955  fprintf(ost->logfile, "%s", enc->stats_out);
956  }
957  }
958  }
959  ost->sync_opts++;
960  /*
961  * For video, number of frames in == number of packets out.
962  * But there may be reordering, so we can't throw away frames on encoder
963  * flush, we need to limit them here, before they go into encoder.
964  */
965  ost->frame_number++;
966 
967  if (vstats_filename && frame_size)
968  do_video_stats(ost, frame_size);
969  }
970 }
971 
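/* psnr() expects a mean squared error already normalized to the [0,1] range:
   callers pass error / (width * height * 255.0 * 255.0), so the usual
   PSNR = -10 * log10(MSE / MAX^2) reduces to -10 * log10(d) here. */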
972 static double psnr(double d)
973 {
974  return -10.0 * log(d) / log(10.0);
975 }
976 
977 static void do_video_stats(OutputStream *ost, int frame_size)
978 {
979  AVCodecContext *enc;
980  int frame_number;
981  double ti1, bitrate, avg_bitrate;
982 
983  /* this is executed just the first time do_video_stats is called */
984  if (!vstats_file) {
985  vstats_file = fopen(vstats_filename, "w");
986  if (!vstats_file) {
987  perror("fopen");
988  exit(1);
989  }
990  }
991 
992  enc = ost->st->codec;
993  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
994  frame_number = ost->st->nb_frames;
995  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
996  if (enc->flags&CODEC_FLAG_PSNR)
997  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
998 
999  fprintf(vstats_file,"f_size= %6d ", frame_size);
1000  /* compute pts value */
1001  ti1 = ost->st->pts.val * av_q2d(enc->time_base);
1002  if (ti1 < 0.01)
1003  ti1 = 0.01;
1004 
1005  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1006  avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1007  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1008  (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1009  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1010  }
1011 }
1012 
1013 /**
1014  * Get and encode new output from any of the filtergraphs, without causing
1015  * activity.
1016  *
1017  * @return 0 for success, <0 for severe errors
1018  */
1019 static int reap_filters(void)
1020 {
1021  AVFrame *filtered_frame = NULL;
1022  int i;
1023  int64_t frame_pts;
1024 
1025  /* Reap all buffers present in the buffer sinks */
1026  for (i = 0; i < nb_output_streams; i++) {
1027  OutputStream *ost = output_streams[i];
1028  OutputFile *of = output_files[ost->file_index];
1029  int ret = 0;
1030 
1031  if (!ost->filter)
1032  continue;
1033 
1034  if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1035  return AVERROR(ENOMEM);
1036  } else
1037  avcodec_get_frame_defaults(ost->filtered_frame);
1038  filtered_frame = ost->filtered_frame;
1039 
1040  while (1) {
1041  ret = av_buffersink_get_frame_flags(ost->filter->filter, filtered_frame,
1042  AV_BUFFERSINK_FLAG_NO_REQUEST);
1043  if (ret < 0) {
1044  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1045  av_log(NULL, AV_LOG_WARNING,
1046  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1047  }
1048  break;
1049  }
1050  frame_pts = AV_NOPTS_VALUE;
1051  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1052  filtered_frame->pts = frame_pts = av_rescale_q(filtered_frame->pts,
1053  ost->filter->filter->inputs[0]->time_base,
1054  ost->st->codec->time_base) -
1055  av_rescale_q(of->start_time,
1056  AV_TIME_BASE_Q,
1057  ost->st->codec->time_base);
1058 
1059  if (of->start_time && filtered_frame->pts < 0) {
1060  av_frame_unref(filtered_frame);
1061  continue;
1062  }
1063  }
1064  //if (ost->source_index >= 0)
1065  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1066 
1067 
1068  switch (ost->filter->filter->inputs[0]->type) {
1069  case AVMEDIA_TYPE_VIDEO:
1070  filtered_frame->pts = frame_pts;
1071  if (!ost->frame_aspect_ratio.num)
1072  ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1073 
1074  do_video_out(of->ctx, ost, filtered_frame);
1075  break;
1076  case AVMEDIA_TYPE_AUDIO:
1077  filtered_frame->pts = frame_pts;
1078  if (!(ost->st->codec->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1079  ost->st->codec->channels != av_frame_get_channels(filtered_frame)) {
1080  av_log(NULL, AV_LOG_ERROR,
1081  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1082  break;
1083  }
1084  do_audio_out(of->ctx, ost, filtered_frame);
1085  break;
1086  default:
1087  // TODO support subtitle filters
1088  av_assert0(0);
1089  }
1090 
1091  av_frame_unref(filtered_frame);
1092  }
1093  }
1094 
1095  return 0;
1096 }
1097 
1098 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1099 {
1100  char buf[1024];
1101  AVBPrint buf_script;
1102  OutputStream *ost;
1103  AVFormatContext *oc;
1104  int64_t total_size;
1105  AVCodecContext *enc;
1106  int frame_number, vid, i;
1107  double bitrate;
1108  int64_t pts = INT64_MIN;
1109  static int64_t last_time = -1;
1110  static int qp_histogram[52];
1111  int hours, mins, secs, us;
1112 
1113  if (!print_stats && !is_last_report && !progress_avio)
1114  return;
1115 
1116  if (!is_last_report) {
1117  if (last_time == -1) {
1118  last_time = cur_time;
1119  return;
1120  }
1121  if ((cur_time - last_time) < 500000)
1122  return;
1123  last_time = cur_time;
1124  }
1125 
1126 
1127  oc = output_files[0]->ctx;
1128 
1129  total_size = avio_size(oc->pb);
1130  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1131  total_size = avio_tell(oc->pb);
1132 
1133  buf[0] = '\0';
1134  vid = 0;
1135  av_bprint_init(&buf_script, 0, 1);
1136  for (i = 0; i < nb_output_streams; i++) {
1137  float q = -1;
1138  ost = output_streams[i];
1139  enc = ost->st->codec;
1140  if (!ost->stream_copy && enc->coded_frame)
1141  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1142  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1143  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1144  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1145  ost->file_index, ost->index, q);
1146  }
1147  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1148  float fps, t = (cur_time-timer_start) / 1000000.0;
1149 
1150  frame_number = ost->frame_number;
1151  fps = t > 1 ? frame_number / t : 0;
1152  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1153  frame_number, fps < 9.95, fps, q);
1154  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1155  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1156  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1157  ost->file_index, ost->index, q);
1158  if (is_last_report)
1159  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1160  if (qp_hist) {
1161  int j;
1162  int qp = lrintf(q);
1163  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1164  qp_histogram[qp]++;
1165  for (j = 0; j < 32; j++)
1166  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1167  }
1168  if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1169  int j;
1170  double error, error_sum = 0;
1171  double scale, scale_sum = 0;
1172  double p;
1173  char type[3] = { 'Y','U','V' };
1174  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1175  for (j = 0; j < 3; j++) {
1176  if (is_last_report) {
1177  error = enc->error[j];
1178  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1179  } else {
1180  error = enc->coded_frame->error[j];
1181  scale = enc->width * enc->height * 255.0 * 255.0;
1182  }
1183  if (j)
1184  scale /= 4;
1185  error_sum += error;
1186  scale_sum += scale;
1187  p = psnr(error / scale);
1188  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1189  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1190  ost->file_index, ost->index, type[j] | 32, p);
1191  }
1192  p = psnr(error_sum / scale_sum);
1193  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1194  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1195  ost->file_index, ost->index, p);
1196  }
1197  vid = 1;
1198  }
1199  /* compute min output value */
1200  if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
1201  pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
1202  ost->st->time_base, AV_TIME_BASE_Q));
1203  }
1204 
1205  secs = pts / AV_TIME_BASE;
1206  us = pts % AV_TIME_BASE;
1207  mins = secs / 60;
1208  secs %= 60;
1209  hours = mins / 60;
1210  mins %= 60;
1211 
1212  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1213 
1214  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1215  "size=N/A time=");
1216  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1217  "size=%8.0fkB time=", total_size / 1024.0);
1218  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1219  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1220  (100 * us) / AV_TIME_BASE);
1221  if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1222  "bitrate=N/A");
1223  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1224  "bitrate=%6.1fkbits/s", bitrate);
1225  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1226  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1227  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1228  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1229  hours, mins, secs, us);
1230 
1231  if (nb_frames_dup || nb_frames_drop)
1232  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1233  nb_frames_dup, nb_frames_drop);
1234  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1235  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1236 
1237  if (print_stats || is_last_report) {
1238  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1239  fprintf(stderr, "%s \r", buf);
1240  } else
1241  av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1242 
1243  fflush(stderr);
1244  }
1245 
1246  if (progress_avio) {
1247  av_bprintf(&buf_script, "progress=%s\n",
1248  is_last_report ? "end" : "continue");
1249  avio_write(progress_avio, buf_script.str,
1250  FFMIN(buf_script.len, buf_script.size - 1));
1251  avio_flush(progress_avio);
1252  av_bprint_finalize(&buf_script, NULL);
1253  if (is_last_report) {
1254  avio_close(progress_avio);
1255  progress_avio = NULL;
1256  }
1257  }
1258 
1259  if (is_last_report) {
1260  int64_t raw= audio_size + video_size + subtitle_size + extra_size;
1261  av_log(NULL, AV_LOG_INFO, "\n");
1262  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
1263  video_size / 1024.0,
1264  audio_size / 1024.0,
1265  subtitle_size / 1024.0,
1266  extra_size / 1024.0,
1267  100.0 * (total_size - raw) / raw
1268  );
1269  if(video_size + audio_size + subtitle_size + extra_size == 0){
1270  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
1271  }
1272  }
1273 }
1274 
1275 static void flush_encoders(void)
1276 {
1277  int i, ret;
1278 
1279  for (i = 0; i < nb_output_streams; i++) {
1280  OutputStream *ost = output_streams[i];
1281  AVCodecContext *enc = ost->st->codec;
1282  AVFormatContext *os = output_files[ost->file_index]->ctx;
1283  int stop_encoding = 0;
1284 
1285  if (!ost->encoding_needed)
1286  continue;
1287 
1288  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1289  continue;
1290  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1291  continue;
1292 
1293  for (;;) {
1294  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1295  const char *desc;
1296  int64_t *size;
1297 
1298  switch (ost->st->codec->codec_type) {
1299  case AVMEDIA_TYPE_AUDIO:
1300  encode = avcodec_encode_audio2;
1301  desc = "Audio";
1302  size = &audio_size;
1303  break;
1304  case AVMEDIA_TYPE_VIDEO:
1305  encode = avcodec_encode_video2;
1306  desc = "Video";
1307  size = &video_size;
1308  break;
1309  default:
1310  stop_encoding = 1;
1311  }
1312 
1313  if (encode) {
1314  AVPacket pkt;
1315  int got_packet;
1316  av_init_packet(&pkt);
1317  pkt.data = NULL;
1318  pkt.size = 0;
1319 
1320  update_benchmark(NULL);
1321  ret = encode(enc, &pkt, NULL, &got_packet);
1322  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1323  if (ret < 0) {
1324  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1325  exit(1);
1326  }
1327  *size += pkt.size;
1328  if (ost->logfile && enc->stats_out) {
1329  fprintf(ost->logfile, "%s", enc->stats_out);
1330  }
1331  if (!got_packet) {
1332  stop_encoding = 1;
1333  break;
1334  }
1335  if (pkt.pts != AV_NOPTS_VALUE)
1336  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1337  if (pkt.dts != AV_NOPTS_VALUE)
1338  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1339  if (pkt.duration > 0)
1340  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1341  write_frame(os, &pkt, ost);
1342  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename)
1343  do_video_stats(ost, pkt.size);
1344  }
1345  }
1346 
1347  if (stop_encoding)
1348  break;
1349  }
1350  }
1351 }
1352 
1353 /*
1354  * Check whether a packet from ist should be written into ost at this time
1355  */
1356 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1357 {
1358  OutputFile *of = output_files[ost->file_index];
1359  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1360 
1361  if (ost->source_index != ist_index)
1362  return 0;
1363 
1364  if (of->start_time && ist->pts < of->start_time)
1365  return 0;
1366 
1367  return 1;
1368 }
1369 
1370 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1371 {
1372  OutputFile *of = output_files[ost->file_index];
1373  int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1374  AVPicture pict;
1375  AVPacket opkt;
1376 
1377  av_init_packet(&opkt);
1378 
1379  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1380  !ost->copy_initial_nonkeyframes)
1381  return;
1382 
1383  if (!ost->frame_number && ist->pts < of->start_time &&
1384  !ost->copy_prior_start)
1385  return;
1386 
1387  if (of->recording_time != INT64_MAX &&
1388  ist->pts >= of->recording_time + of->start_time) {
1389  close_output_stream(ost);
1390  return;
1391  }
1392 
1393  /* force the input stream PTS */
1394  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1395  audio_size += pkt->size;
1396  else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1397  video_size += pkt->size;
1398  ost->sync_opts++;
1399  } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1400  subtitle_size += pkt->size;
1401  }
1402 
1403  if (pkt->pts != AV_NOPTS_VALUE)
1404  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1405  else
1406  opkt.pts = AV_NOPTS_VALUE;
1407 
1408  if (pkt->dts == AV_NOPTS_VALUE)
1409  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1410  else
1411  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1412  opkt.dts -= ost_tb_start_time;
1413 
1414  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1415  int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);
1416  if(!duration)
1417  duration = ist->st->codec->frame_size;
1418  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1419  (AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1420  ost->st->time_base) - ost_tb_start_time;
1421  }
1422 
1423  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1424  opkt.flags = pkt->flags;
1425 
1426  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1427  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1428  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1429  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1430  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1431  ) {
1432  if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
1433  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1434  if (!opkt.buf)
1435  exit(1);
1436  }
1437  } else {
1438  opkt.data = pkt->data;
1439  opkt.size = pkt->size;
1440  }
1441 
1442  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1443  /* store AVPicture in AVPacket, as expected by the output format */
1444  avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1445  opkt.data = (uint8_t *)&pict;
1446  opkt.size = sizeof(AVPicture);
1447  opkt.flags |= AV_PKT_FLAG_KEY;
1448  }
1449 
1450  write_frame(of->ctx, &opkt, ost);
1451  ost->st->codec->frame_number++;
1452 }
1453 
1454 static void rate_emu_sleep(InputStream *ist)
1455 {
1456  if (input_files[ist->file_index]->rate_emu) {
1457  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
1458  int64_t now = av_gettime() - ist->start;
1459  if (pts > now)
1460  av_usleep(pts - now);
1461  }
1462 }
1463 
1464 static int guess_input_channel_layout(InputStream *ist)
1465 {
1466  AVCodecContext *dec = ist->st->codec;
1467 
1468  if (!dec->channel_layout) {
1469  char layout_name[256];
1470 
1471  if (dec->channels > ist->guess_layout_max)
1472  return 0;
1473  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1474  if (!dec->channel_layout)
1475  return 0;
1476  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1477  dec->channels, dec->channel_layout);
1478  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1479  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1480  }
1481  return 1;
1482 }
1483 
1484 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1485 {
1486  AVFrame *decoded_frame, *f;
1487  AVCodecContext *avctx = ist->st->codec;
1488  int i, ret, err = 0, resample_changed;
1489  AVRational decoded_frame_tb;
1490 
1491  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1492  return AVERROR(ENOMEM);
1493  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1494  return AVERROR(ENOMEM);
1495  decoded_frame = ist->decoded_frame;
1496 
1497  update_benchmark(NULL);
1498  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1499  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1500 
1501  if (ret >= 0 && avctx->sample_rate <= 0) {
1502  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1503  ret = AVERROR_INVALIDDATA;
1504  }
1505 
1506  if (*got_output || ret<0 || pkt->size)
1507  decode_error_stat[ret<0] ++;
1508 
1509  if (!*got_output || ret < 0) {
1510  if (!pkt->size) {
1511  for (i = 0; i < ist->nb_filters; i++)
1512 #if 1
1513  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1514 #else
1516 #endif
1517  }
1518  return ret;
1519  }
1520 
1521 #if 1
1522  /* increment next_dts to use for the case where the input stream does not
1523  have timestamps or there are multiple frames in the packet */
1524  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1525  avctx->sample_rate;
1526  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1527  avctx->sample_rate;
1528 #endif
1529 
1530  rate_emu_sleep(ist);
1531 
1532  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1533  ist->resample_channels != avctx->channels ||
1534  ist->resample_channel_layout != decoded_frame->channel_layout ||
1535  ist->resample_sample_rate != decoded_frame->sample_rate;
1536  if (resample_changed) {
1537  char layout1[64], layout2[64];
1538 
1539  if (!guess_input_channel_layout(ist)) {
1540  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1541  "layout for Input Stream #%d.%d\n", ist->file_index,
1542  ist->st->index);
1543  exit(1);
1544  }
1545  decoded_frame->channel_layout = avctx->channel_layout;
1546 
1547  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1549  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1550  decoded_frame->channel_layout);
1551 
1553  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1554  ist->file_index, ist->st->index,
1556  ist->resample_channels, layout1,
1557  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1558  avctx->channels, layout2);
1559 
1560  ist->resample_sample_fmt = decoded_frame->format;
1561  ist->resample_sample_rate = decoded_frame->sample_rate;
1562  ist->resample_channel_layout = decoded_frame->channel_layout;
1563  ist->resample_channels = avctx->channels;
1564 
1565  for (i = 0; i < nb_filtergraphs; i++)
1566  if (ist_in_filtergraph(filtergraphs[i], ist)) {
1567  FilterGraph *fg = filtergraphs[i];
1568  int j;
1569  if (configure_filtergraph(fg) < 0) {
1570  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1571  exit(1);
1572  }
1573  for (j = 0; j < fg->nb_outputs; j++) {
1574  OutputStream *ost = fg->outputs[j]->ost;
1575  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1576  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1577  av_buffersink_set_frame_size(ost->filter->filter,
1578  ost->st->codec->frame_size);
1579  }
1580  }
1581  }
1582 
1583  /* if the decoder provides a pts, use it instead of the last packet pts.
1584  the decoder could be delaying output by a packet or more. */
1585  if (decoded_frame->pts != AV_NOPTS_VALUE) {
1586  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1587  decoded_frame_tb = avctx->time_base;
1588  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1589  decoded_frame->pts = decoded_frame->pkt_pts;
1590  pkt->pts = AV_NOPTS_VALUE;
1591  decoded_frame_tb = ist->st->time_base;
1592  } else if (pkt->pts != AV_NOPTS_VALUE) {
1593  decoded_frame->pts = pkt->pts;
1594  pkt->pts = AV_NOPTS_VALUE;
1595  decoded_frame_tb = ist->st->time_base;
1596  }else {
1597  decoded_frame->pts = ist->dts;
1598  decoded_frame_tb = AV_TIME_BASE_Q;
1599  }
1600  if (decoded_frame->pts != AV_NOPTS_VALUE)
1601  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1602  (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1603  (AVRational){1, ist->st->codec->sample_rate});
1604  for (i = 0; i < ist->nb_filters; i++) {
1605  if (i < ist->nb_filters - 1) {
1606  f = ist->filter_frame;
1607  err = av_frame_ref(f, decoded_frame);
1608  if (err < 0)
1609  break;
1610  } else
1611  f = decoded_frame;
1612  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
1613  AV_BUFFERSRC_FLAG_PUSH);
1614  if (err < 0)
1615  break;
1616  }
1617  decoded_frame->pts = AV_NOPTS_VALUE;
1618 
1619  av_frame_unref(ist->filter_frame);
1620  av_frame_unref(decoded_frame);
1621  return err < 0 ? err : ret;
1622 }
1623 
1624 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1625 {
1626  AVFrame *decoded_frame, *f;
1627  void *buffer_to_free = NULL;
1628  int i, ret = 0, err = 0, resample_changed;
1629  int64_t best_effort_timestamp;
1630  AVRational *frame_sample_aspect;
1631 
1632  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1633  return AVERROR(ENOMEM);
1634  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1635  return AVERROR(ENOMEM);
1636  decoded_frame = ist->decoded_frame;
1637  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
1638 
1639  update_benchmark(NULL);
1640  ret = avcodec_decode_video2(ist->st->codec,
1641  decoded_frame, got_output, pkt);
1642  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
1643 
1644  if (*got_output || ret<0 || pkt->size)
1645  decode_error_stat[ret<0] ++;
1646 
1647  if (!*got_output || ret < 0) {
1648  if (!pkt->size) {
1649  for (i = 0; i < ist->nb_filters; i++)
1650 #if 1
1651  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1652 #else
1654 #endif
1655  }
1656  return ret;
1657  }
1658 
1659  if(ist->top_field_first>=0)
1660  decoded_frame->top_field_first = ist->top_field_first;
1661 
1662  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
1663  if(best_effort_timestamp != AV_NOPTS_VALUE)
1664  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
1665 
1666  if (debug_ts) {
1667  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
1668  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
1669  ist->st->index, av_ts2str(decoded_frame->pts),
1670  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
1671  best_effort_timestamp,
1672  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
1673  decoded_frame->key_frame, decoded_frame->pict_type);
1674  }
1675 
1676  pkt->size = 0;
1677 
1678  rate_emu_sleep(ist);
1679 
1680  if (ist->st->sample_aspect_ratio.num)
1681  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1682 
1683  resample_changed = ist->resample_width != decoded_frame->width ||
1684  ist->resample_height != decoded_frame->height ||
1685  ist->resample_pix_fmt != decoded_frame->format;
1686  if (resample_changed) {
1687  av_log(NULL, AV_LOG_INFO,
1688  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1689  ist->file_index, ist->st->index,
1690  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1691  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1692 
1693  ist->resample_width = decoded_frame->width;
1694  ist->resample_height = decoded_frame->height;
1695  ist->resample_pix_fmt = decoded_frame->format;
1696 
1697  for (i = 0; i < nb_filtergraphs; i++) {
1698  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
1699  configure_filtergraph(filtergraphs[i]) < 0) {
1700  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1701  exit(1);
1702  }
1703  }
1704  }
1705 
1706  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
1707  for (i = 0; i < ist->nb_filters; i++) {
1708  if (!frame_sample_aspect->num)
1709  *frame_sample_aspect = ist->st->sample_aspect_ratio;
1710 
1711  if (i < ist->nb_filters - 1) {
1712  f = ist->filter_frame;
1713  err = av_frame_ref(f, decoded_frame);
1714  if (err < 0)
1715  break;
1716  } else
1717  f = decoded_frame;
1718  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
1719  if (ret < 0) {
1720  av_log(NULL, AV_LOG_FATAL,
1721  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
1722  exit(1);
1723  }
1724  }
1725 
1726  av_frame_unref(ist->filter_frame);
1727  av_frame_unref(decoded_frame);
1728  av_free(buffer_to_free);
1729  return err < 0 ? err : ret;
1730 }
1731 
1732 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1733 {
1734  AVSubtitle subtitle;
1735  int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1736  &subtitle, got_output, pkt);
1737 
1738  if (*got_output || ret<0 || pkt->size)
1739  decode_error_stat[ret<0] ++;
1740 
1741  if (ret < 0 || !*got_output) {
1742  if (!pkt->size)
1743  sub2video_flush(ist);
1744  return ret;
1745  }
1746 
1747  if (ist->fix_sub_duration) {
1748  if (ist->prev_sub.got_output) {
1749  int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1750  1000, AV_TIME_BASE);
1751  if (end < ist->prev_sub.subtitle.end_display_time) {
1752  av_log(ist->st->codec, AV_LOG_DEBUG,
1753  "Subtitle duration reduced from %d to %d\n",
1754  ist->prev_sub.subtitle.end_display_time, end);
1755  ist->prev_sub.subtitle.end_display_time = end;
1756  }
1757  }
1758  FFSWAP(int, *got_output, ist->prev_sub.got_output);
1759  FFSWAP(int, ret, ist->prev_sub.ret);
1760  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
1761  }
1762 
1763  sub2video_update(ist, &subtitle);
1764 
1765  if (!*got_output || !subtitle.num_rects)
1766  return ret;
1767 
1768  rate_emu_sleep(ist);
1769 
1770  for (i = 0; i < nb_output_streams; i++) {
1771  OutputStream *ost = output_streams[i];
1772 
1773  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1774  continue;
1775 
1776  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
1777  }
1778 
1779  avsubtitle_free(&subtitle);
1780  return ret;
1781 }
1782 
1783 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1784 static int output_packet(InputStream *ist, const AVPacket *pkt)
1785 {
1786  int ret = 0, i;
1787  int got_output = 0;
1788 
1789  AVPacket avpkt;
1790  if (!ist->saw_first_ts) {
1791  ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1792  ist->pts = 0;
1793  if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
1794  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
1795  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
1796  }
1797  ist->saw_first_ts = 1;
1798  }
1799 
1800  if (ist->next_dts == AV_NOPTS_VALUE)
1801  ist->next_dts = ist->dts;
1802  if (ist->next_pts == AV_NOPTS_VALUE)
1803  ist->next_pts = ist->pts;
1804 
1805  if (pkt == NULL) {
1806  /* EOF handling */
1807  av_init_packet(&avpkt);
1808  avpkt.data = NULL;
1809  avpkt.size = 0;
1810  goto handle_eof;
1811  } else {
1812  avpkt = *pkt;
1813  }
1814 
1815  if (pkt->dts != AV_NOPTS_VALUE) {
1816  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1817  if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
1818  ist->next_pts = ist->pts = ist->dts;
1819  }
1820 
1821  // while we have more to decode or while the decoder did output something on EOF
1822  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1823  int duration;
1824  handle_eof:
1825 
1826  ist->pts = ist->next_pts;
1827  ist->dts = ist->next_dts;
1828 
1829  if (avpkt.size && avpkt.size != pkt->size) {
1830  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1831  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1832  ist->showed_multi_packet_warning = 1;
1832  ist->showed_multi_packet_warning = 1;
1833  }
1834 
1835  switch (ist->st->codec->codec_type) {
1836  case AVMEDIA_TYPE_AUDIO:
1837  ret = decode_audio (ist, &avpkt, &got_output);
1838  break;
1839  case AVMEDIA_TYPE_VIDEO:
1840  ret = decode_video (ist, &avpkt, &got_output);
1841  if (avpkt.duration) {
1842  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1843  } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
1844  int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1845  duration = ((int64_t)AV_TIME_BASE *
1846  ist->st->codec->time_base.num * ticks) /
1847  ist->st->codec->time_base.den;
1848  } else
1849  duration = 0;
1850 
1851  if(ist->dts != AV_NOPTS_VALUE && duration) {
1852  ist->next_dts += duration;
1853  }else
1854  ist->next_dts = AV_NOPTS_VALUE;
1855 
1856  if (got_output)
1857  ist->next_pts += duration; //FIXME the duration is not correct in some cases
1858  break;
1859  case AVMEDIA_TYPE_SUBTITLE:
1860  ret = transcode_subtitles(ist, &avpkt, &got_output);
1861  break;
1862  default:
1863  return -1;
1864  }
1865 
1866  if (ret < 0)
1867  return ret;
1868 
1869  avpkt.dts=
1870  avpkt.pts= AV_NOPTS_VALUE;
1871 
1872  // touch data and size only if not EOF
1873  if (pkt) {
1874  if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1875  ret = avpkt.size;
1876  avpkt.data += ret;
1877  avpkt.size -= ret;
1878  }
1879  if (!got_output) {
1880  continue;
1881  }
1882  }
1883 
1884  /* handle stream copy */
1885  if (!ist->decoding_needed) {
1886  rate_emu_sleep(ist);
1887  ist->dts = ist->next_dts;
1888  switch (ist->st->codec->codec_type) {
1889  case AVMEDIA_TYPE_AUDIO:
1890  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1891  ist->st->codec->sample_rate;
1892  break;
1893  case AVMEDIA_TYPE_VIDEO:
1894  if (ist->framerate.num) {
1895  // TODO: Remove work-around for c99-to-c89 issue 7
1896  AVRational time_base_q = AV_TIME_BASE_Q;
1897  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
1898  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
1899  } else if (pkt->duration) {
1900  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1901  } else if(ist->st->codec->time_base.num != 0) {
1902  int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1903  ist->next_dts += ((int64_t)AV_TIME_BASE *
1904  ist->st->codec->time_base.num * ticks) /
1905  ist->st->codec->time_base.den;
1906  }
1907  break;
1908  }
1909  ist->pts = ist->dts;
1910  ist->next_pts = ist->next_dts;
1911  }
1912  for (i = 0; pkt && i < nb_output_streams; i++) {
1913  OutputStream *ost = output_streams[i];
1914 
1915  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1916  continue;
1917 
1918  do_streamcopy(ist, ost, pkt);
1919  }
1920 
1921  return 0;
1922 }
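/*
 * For illustration: output_packet() relies on the flush-at-EOF idiom of the
 * decode API used here. A NULL input packet is turned into a zero-sized
 * AVPacket and the decode loop keeps running while got_output is set, so the
 * decoder can drain its buffered frames. A minimal sketch of the same idiom,
 * with placeholder names (dec_ctx, frame) standing in for real objects:
 *
 *     AVPacket flush_pkt;
 *     int got_frame;
 *     av_init_packet(&flush_pkt);
 *     flush_pkt.data = NULL;
 *     flush_pkt.size = 0;
 *     do {
 *         got_frame = 0;
 *         avcodec_decode_video2(dec_ctx, frame, &got_frame, &flush_pkt);
 *         // a returned frame would be processed here
 *     } while (got_frame);
 */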
1923 
1924 static void print_sdp(void)
1925 {
1926  char sdp[16384];
1927  int i;
1928  AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1929 
1930  if (!avc)
1931  exit(1);
1932  for (i = 0; i < nb_output_files; i++)
1933  avc[i] = output_files[i]->ctx;
1934 
1935  av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1936  printf("SDP:\n%s\n", sdp);
1937  fflush(stdout);
1938  av_freep(&avc);
1939 }
1940 
1941 static int init_input_stream(int ist_index, char *error, int error_len)
1942 {
1943  int ret;
1944  InputStream *ist = input_streams[ist_index];
1945 
1946  if (ist->decoding_needed) {
1947  AVCodec *codec = ist->dec;
1948  if (!codec) {
1949  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
1950  avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
1951  return AVERROR(EINVAL);
1952  }
1953 
1954  av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
1955 
1956  if (!av_dict_get(ist->opts, "threads", NULL, 0))
1957  av_dict_set(&ist->opts, "threads", "auto", 0);
1958  if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
1959  if (ret == AVERROR_EXPERIMENTAL)
1960  abort_codec_experimental(codec, 0);
1961  snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1962  ist->file_index, ist->st->index);
1963  return ret;
1964  }
1965  assert_avoptions(ist->opts);
1966  }
1967 
1968  ist->next_pts = AV_NOPTS_VALUE;
1969  ist->next_dts = AV_NOPTS_VALUE;
1970  ist->is_start = 1;
1971 
1972  return 0;
1973 }
1974 
1975 static InputStream *get_input_stream(OutputStream *ost)
1976 {
1977  if (ost->source_index >= 0)
1978  return input_streams[ost->source_index];
1979  return NULL;
1980 }
1981 
1982 static int compare_int64(const void *a, const void *b)
1983 {
1984  int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
1985  return va < vb ? -1 : va > vb ? +1 : 0;
1986 }
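/*
 * Note: compare_int64() uses a three-way comparison rather than returning
 * (int)(va - vb); the subtraction could overflow for distant values (e.g.
 * INT64_MIN vs. a positive pts) and the truncation to int could flip the
 * sign, giving qsort() an inconsistent ordering.
 */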
1987 
1988 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1989  AVCodecContext *avctx)
1990 {
1991  char *p;
1992  int n = 1, i, size, index = 0;
1993  int64_t t, *pts;
1994 
1995  for (p = kf; *p; p++)
1996  if (*p == ',')
1997  n++;
1998  size = n;
1999  pts = av_malloc(sizeof(*pts) * size);
2000  if (!pts) {
2001  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2002  exit(1);
2003  }
2004 
2005  p = kf;
2006  for (i = 0; i < n; i++) {
2007  char *next = strchr(p, ',');
2008 
2009  if (next)
2010  *next++ = 0;
2011 
2012  if (!memcmp(p, "chapters", 8)) {
2013 
2014  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2015  int j;
2016 
2017  if (avf->nb_chapters > INT_MAX - size ||
2018  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2019  sizeof(*pts)))) {
2020  av_log(NULL, AV_LOG_FATAL,
2021  "Could not allocate forced key frames array.\n");
2022  exit(1);
2023  }
2024  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2025  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2026 
2027  for (j = 0; j < avf->nb_chapters; j++) {
2028  AVChapter *c = avf->chapters[j];
2029  av_assert1(index < size);
2030  pts[index++] = av_rescale_q(c->start, c->time_base,
2031  avctx->time_base) + t;
2032  }
2033 
2034  } else {
2035 
2036  t = parse_time_or_die("force_key_frames", p, 1);
2037  av_assert1(index < size);
2038  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2039 
2040  }
2041 
2042  p = next;
2043  }
2044 
2045  av_assert0(index == size);
2046  qsort(pts, size, sizeof(*pts), compare_int64);
2047  ost->forced_kf_count = size;
2048  ost->forced_kf_pts = pts;
2049 }
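/*
 * For illustration: parse_forced_key_frames() consumes the comma-separated
 * value of -force_key_frames, where each entry is either a timestamp or the
 * word "chapters" with an optional offset added to every chapter start.
 * Hypothetical command-line examples (times chosen arbitrarily):
 *
 *     -force_key_frames 0:00:05,0:00:10,0:00:30
 *     -force_key_frames chapters-0.1
 *
 * The "expr:" form is handled separately in transcode_init() through
 * av_expr_parse() and the constants in forced_keyframes_const_names.
 */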
2050 
2051 static void report_new_stream(int input_index, AVPacket *pkt)
2052 {
2053  InputFile *file = input_files[input_index];
2054  AVStream *st = file->ctx->streams[pkt->stream_index];
2055 
2056  if (pkt->stream_index < file->nb_streams_warn)
2057  return;
2058  av_log(file->ctx, AV_LOG_WARNING,
2059  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2060  av_get_media_type_string(st->codec->codec_type),
2061  input_index, pkt->stream_index,
2062  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2063  file->nb_streams_warn = pkt->stream_index + 1;
2064 }
2065 
2066 static int transcode_init(void)
2067 {
2068  int ret = 0, i, j, k;
2069  AVFormatContext *oc;
2070  AVCodecContext *codec;
2071  OutputStream *ost;
2072  InputStream *ist;
2073  char error[1024];
2074  int want_sdp = 1;
2075 
2076  /* init framerate emulation */
2077  for (i = 0; i < nb_input_files; i++) {
2078  InputFile *ifile = input_files[i];
2079  if (ifile->rate_emu)
2080  for (j = 0; j < ifile->nb_streams; j++)
2081  input_streams[j + ifile->ist_index]->start = av_gettime();
2082  }
2083 
2084  /* output stream init */
2085  for (i = 0; i < nb_output_files; i++) {
2086  oc = output_files[i]->ctx;
2087  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2088  av_dump_format(oc, i, oc->filename, 1);
2089  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2090  return AVERROR(EINVAL);
2091  }
2092  }
2093 
2094  /* init complex filtergraphs */
2095  for (i = 0; i < nb_filtergraphs; i++)
2096  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2097  return ret;
2098 
2099  /* for each output stream, we compute the right encoding parameters */
2100  for (i = 0; i < nb_output_streams; i++) {
2101  AVCodecContext *icodec = NULL;
2102  ost = output_streams[i];
2103  oc = output_files[ost->file_index]->ctx;
2104  ist = get_input_stream(ost);
2105 
2106  if (ost->attachment_filename)
2107  continue;
2108 
2109  codec = ost->st->codec;
2110 
2111  if (ist) {
2112  icodec = ist->st->codec;
2113 
2114  ost->st->disposition = ist->st->disposition;
2115  codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2117  }
2118 
2119  if (ost->stream_copy) {
2120  uint64_t extra_size;
2121 
2122  av_assert0(ist && !ost->filter);
2123 
2124  extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2125 
2126  if (extra_size > INT_MAX) {
2127  return AVERROR(EINVAL);
2128  }
2129 
2130  /* if stream_copy is selected, no need to decode or encode */
2131  codec->codec_id = icodec->codec_id;
2132  codec->codec_type = icodec->codec_type;
2133 
2134  if (!codec->codec_tag) {
2135  unsigned int codec_tag;
2136  if (!oc->oformat->codec_tag ||
2137  av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2138  !av_codec_get_tag2(oc->oformat->codec_tag, icodec->codec_id, &codec_tag))
2139  codec->codec_tag = icodec->codec_tag;
2140  }
2141 
2142  codec->bit_rate = icodec->bit_rate;
2143  codec->rc_max_rate = icodec->rc_max_rate;
2144  codec->rc_buffer_size = icodec->rc_buffer_size;
2145  codec->field_order = icodec->field_order;
2146  codec->extradata = av_mallocz(extra_size);
2147  if (!codec->extradata) {
2148  return AVERROR(ENOMEM);
2149  }
2150  memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2151  codec->extradata_size= icodec->extradata_size;
2153 
2154  codec->time_base = ist->st->time_base;
2155  /*
2156  * AVI is a special case here because it supports variable fps, but
2157  * having the fps and timebase differ significantly adds quite some
2158  * overhead.
2159  */
2160  if(!strcmp(oc->oformat->name, "avi")) {
2161  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2162  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2163  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
2164  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
2165  || copy_tb==2){
2166  codec->time_base.num = ist->st->r_frame_rate.den;
2167  codec->time_base.den = 2*ist->st->r_frame_rate.num;
2168  codec->ticks_per_frame = 2;
2169  } else if ( copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2170  && av_q2d(ist->st->time_base) < 1.0/500
2171  || copy_tb==0){
2172  codec->time_base = icodec->time_base;
2173  codec->time_base.num *= icodec->ticks_per_frame;
2174  codec->time_base.den *= 2;
2175  codec->ticks_per_frame = 2;
2176  }
2177  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2178  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2179  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2180  && strcmp(oc->oformat->name, "f4v")
2181  ) {
2182  if( copy_tb<0 && icodec->time_base.den
2183  && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
2184  && av_q2d(ist->st->time_base) < 1.0/500
2185  || copy_tb==0){
2186  codec->time_base = icodec->time_base;
2187  codec->time_base.num *= icodec->ticks_per_frame;
2188  }
2189  }
2190  if ( codec->codec_tag == AV_RL32("tmcd")
2191  && icodec->time_base.num < icodec->time_base.den
2192  && icodec->time_base.num > 0
2193  && 121LL*icodec->time_base.num > icodec->time_base.den) {
2194  codec->time_base = icodec->time_base;
2195  }
2196 
2197  if (ist && !ost->frame_rate.num)
2198  ost->frame_rate = ist->framerate;
2199  if(ost->frame_rate.num)
2200  codec->time_base = av_inv_q(ost->frame_rate);
2201 
2202  av_reduce(&codec->time_base.num, &codec->time_base.den,
2203  codec->time_base.num, codec->time_base.den, INT_MAX);
2204 
2205  switch (codec->codec_type) {
2206  case AVMEDIA_TYPE_AUDIO:
2207  if (audio_volume != 256) {
2208  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2209  exit(1);
2210  }
2211  codec->channel_layout = icodec->channel_layout;
2212  codec->sample_rate = icodec->sample_rate;
2213  codec->channels = icodec->channels;
2214  codec->frame_size = icodec->frame_size;
2215  codec->audio_service_type = icodec->audio_service_type;
2216  codec->block_align = icodec->block_align;
2217  if((codec->block_align == 1 || codec->block_align == 1152 || codec->block_align == 576) && codec->codec_id == AV_CODEC_ID_MP3)
2218  codec->block_align= 0;
2219  if(codec->codec_id == AV_CODEC_ID_AC3)
2220  codec->block_align= 0;
2221  break;
2222  case AVMEDIA_TYPE_VIDEO:
2223  codec->pix_fmt = icodec->pix_fmt;
2224  codec->width = icodec->width;
2225  codec->height = icodec->height;
2226  codec->has_b_frames = icodec->has_b_frames;
2227  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2228  codec->sample_aspect_ratio =
2229  ost->st->sample_aspect_ratio =
2230  av_mul_q(ost->frame_aspect_ratio,
2231  (AVRational){ codec->height, codec->width });
2232  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2233  "with stream copy may produce invalid files\n");
2234  } else if (!codec->sample_aspect_ratio.num) {
2235  codec->sample_aspect_ratio =
2236  ost->st->sample_aspect_ratio =
2237  ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2238  ist->st->codec->sample_aspect_ratio.num ?
2239  ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2240  }
2241  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2242  break;
2243  case AVMEDIA_TYPE_SUBTITLE:
2244  codec->width = icodec->width;
2245  codec->height = icodec->height;
2246  break;
2247  case AVMEDIA_TYPE_DATA:
2249  break;
2250  default:
2251  abort();
2252  }
2253  } else {
2254  if (!ost->enc)
2255  ost->enc = avcodec_find_encoder(codec->codec_id);
2256  if (!ost->enc) {
2257  /* should only happen when a default codec is not present. */
2258  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2259  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2260  ret = AVERROR(EINVAL);
2261  goto dump_format;
2262  }
2263 
2264  if (ist)
2265  ist->decoding_needed++;
2266  ost->encoding_needed = 1;
2267 
2268  if (!ost->filter &&
2269  (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2270  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2271  FilterGraph *fg;
2272  fg = init_simple_filtergraph(ist, ost);
2273  if (configure_filtergraph(fg)) {
2274  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2275  exit(1);
2276  }
2277  }
2278 
2279  if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2280  if (ost->filter && !ost->frame_rate.num)
2281  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2282  if (ist && !ost->frame_rate.num)
2283  ost->frame_rate = ist->framerate;
2284  if (ist && !ost->frame_rate.num)
2285  ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2286 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2287  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2288  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2289  ost->frame_rate = ost->enc->supported_framerates[idx];
2290  }
2291  }
2292 
2293  switch (codec->codec_type) {
2294  case AVMEDIA_TYPE_AUDIO:
2295  codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2296  codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2297  codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2299  codec->time_base = (AVRational){ 1, codec->sample_rate };
2300  break;
2301  case AVMEDIA_TYPE_VIDEO:
2302  codec->time_base = av_inv_q(ost->frame_rate);
2303  if (ost->filter && !(codec->time_base.num && codec->time_base.den))
2304  codec->time_base = ost->filter->filter->inputs[0]->time_base;
2305  if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2307  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2308  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2309  }
2310  for (j = 0; j < ost->forced_kf_count; j++)
2311  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2312  AV_TIME_BASE_Q,
2313  codec->time_base);
2314 
2315  codec->width = ost->filter->filter->inputs[0]->w;
2316  codec->height = ost->filter->filter->inputs[0]->h;
2317  codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2318  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2319  av_mul_q(ost->frame_aspect_ratio, (AVRational){ codec->height, codec->width }) :
2321  if (!strncmp(ost->enc->name, "libx264", 7) &&
2322  codec->pix_fmt == AV_PIX_FMT_NONE &&
2325  "No pixel format specified, %s for H.264 encoding chosen.\n"
2326  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2328  codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2329 
2330  if (!icodec ||
2331  codec->width != icodec->width ||
2332  codec->height != icodec->height ||
2333  codec->pix_fmt != icodec->pix_fmt) {
2335  }
2336 
2337  if (ost->forced_keyframes) {
2338  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
2341  if (ret < 0) {
2343  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
2344  return ret;
2345  }
2350  } else {
2352  }
2353  }
2354  break;
2355  case AVMEDIA_TYPE_SUBTITLE:
2356  codec->time_base = (AVRational){1, 1000};
2357  if (!codec->width) {
2358  codec->width = input_streams[ost->source_index]->st->codec->width;
2359  codec->height = input_streams[ost->source_index]->st->codec->height;
2360  }
2361  break;
2362  default:
2363  abort();
2364  break;
2365  }
2366  /* two pass mode */
2367  if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
2368  char logfilename[1024];
2369  FILE *f;
2370 
2371  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2372  ost->logfile_prefix ? ost->logfile_prefix :
2373  DEFAULT_PASS_LOGFILENAME_PREFIX,
2374  i);
2375  if (!strcmp(ost->enc->name, "libx264")) {
2376  av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2377  } else {
2378  if (codec->flags & CODEC_FLAG_PASS2) {
2379  char *logbuffer;
2380  size_t logbuffer_size;
2381  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2382  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2383  logfilename);
2384  exit(1);
2385  }
2386  codec->stats_in = logbuffer;
2387  }
2388  if (codec->flags & CODEC_FLAG_PASS1) {
2389  f = fopen(logfilename, "wb");
2390  if (!f) {
2391  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2392  logfilename, strerror(errno));
2393  exit(1);
2394  }
2395  ost->logfile = f;
2396  }
2397  }
2398  }
2399  }
2400  }
2401 
2402  /* open each encoder */
2403  for (i = 0; i < nb_output_streams; i++) {
2404  ost = output_streams[i];
2405  if (ost->encoding_needed) {
2406  AVCodec *codec = ost->enc;
2407  AVCodecContext *dec = NULL;
2408 
2409  if ((ist = get_input_stream(ost)))
2410  dec = ist->st->codec;
2411  if (dec && dec->subtitle_header) {
2412  /* ASS code assumes this buffer is null terminated so add extra byte. */
2413  ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2414  if (!ost->st->codec->subtitle_header) {
2415  ret = AVERROR(ENOMEM);
2416  goto dump_format;
2417  }
2418  memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2419  ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2420  }
2421  if (!av_dict_get(ost->opts, "threads", NULL, 0))
2422  av_dict_set(&ost->opts, "threads", "auto", 0);
2423  if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
2424  if (ret == AVERROR_EXPERIMENTAL)
2425  abort_codec_experimental(codec, 1);
2426  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2427  ost->file_index, ost->index);
2428  goto dump_format;
2429  }
2430  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2433  ost->st->codec->frame_size);
2434  assert_avoptions(ost->opts);
2435  if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2436  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2437  " It takes bits/s as argument, not kbits/s\n");
2438  extra_size += ost->st->codec->extradata_size;
2439 
2440  if (ost->st->codec->me_threshold)
2441  input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2442  } else {
2443  av_opt_set_dict(ost->st->codec, &ost->opts);
2444  }
2445  }
2446 
2447  /* init input streams */
2448  for (i = 0; i < nb_input_streams; i++)
2449  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
2450  for (i = 0; i < nb_output_streams; i++) {
2451  ost = output_streams[i];
2452  avcodec_close(ost->st->codec);
2453  }
2454  goto dump_format;
2455  }
2456 
2457  /* discard unused programs */
2458  for (i = 0; i < nb_input_files; i++) {
2459  InputFile *ifile = input_files[i];
2460  for (j = 0; j < ifile->ctx->nb_programs; j++) {
2461  AVProgram *p = ifile->ctx->programs[j];
2462  int discard = AVDISCARD_ALL;
2463 
2464  for (k = 0; k < p->nb_stream_indexes; k++)
2465  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2466  discard = AVDISCARD_DEFAULT;
2467  break;
2468  }
2469  p->discard = discard;
2470  }
2471  }
2472 
2473  /* open files and write file headers */
2474  for (i = 0; i < nb_output_files; i++) {
2475  oc = output_files[i]->ctx;
2476  oc->interrupt_callback = int_cb;
2477  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2478  char errbuf[128];
2479  const char *errbuf_ptr = errbuf;
2480  if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
2481  errbuf_ptr = strerror(AVUNERROR(ret));
2482  snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
2483  ret = AVERROR(EINVAL);
2484  goto dump_format;
2485  }
2486 // assert_avoptions(output_files[i]->opts);
2487  if (strcmp(oc->oformat->name, "rtp")) {
2488  want_sdp = 0;
2489  }
2490  }
2491 
2492  dump_format:
2493  /* dump the file output parameters - this cannot be done earlier in
2494  case of stream copy */
2495  for (i = 0; i < nb_output_files; i++) {
2496  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2497  }
2498 
2499  /* dump the stream mapping */
2500  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2501  for (i = 0; i < nb_input_streams; i++) {
2502  ist = input_streams[i];
2503 
2504  for (j = 0; j < ist->nb_filters; j++) {
2505  if (ist->filters[j]->graph->graph_desc) {
2506  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2507  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2508  ist->filters[j]->name);
2509  if (nb_filtergraphs > 1)
2510  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2511  av_log(NULL, AV_LOG_INFO, "\n");
2512  }
2513  }
2514  }
2515 
2516  for (i = 0; i < nb_output_streams; i++) {
2517  ost = output_streams[i];
2518 
2519  if (ost->attachment_filename) {
2520  /* an attached file */
2521  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2522  ost->attachment_filename, ost->file_index, ost->index);
2523  continue;
2524  }
2525 
2526  if (ost->filter && ost->filter->graph->graph_desc) {
2527  /* output from a complex graph */
2528  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2529  if (nb_filtergraphs > 1)
2530  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2531 
2532  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2533  ost->index, ost->enc ? ost->enc->name : "?");
2534  continue;
2535  }
2536 
2537  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2538  input_streams[ost->source_index]->file_index,
2539  input_streams[ost->source_index]->st->index,
2540  ost->file_index,
2541  ost->index);
2542  if (ost->sync_ist != input_streams[ost->source_index])
2543  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2544  ost->sync_ist->file_index,
2545  ost->sync_ist->st->index);
2546  if (ost->stream_copy)
2547  av_log(NULL, AV_LOG_INFO, " (copy)");
2548  else
2549  av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2550  input_streams[ost->source_index]->dec->name : "?",
2551  ost->enc ? ost->enc->name : "?");
2552  av_log(NULL, AV_LOG_INFO, "\n");
2553  }
2554 
2555  if (ret) {
2556  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2557  return ret;
2558  }
2559 
2560  if (want_sdp) {
2561  print_sdp();
2562  }
2563 
2564  return 0;
2565 }
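/*
 * Summary note: transcode_init() is where the stream-copy and encoding paths
 * diverge. For stream copy the output codec context is filled by copying
 * fields (codec id, extradata, time base, geometry, audio parameters) from
 * the input; for encoding a simple filtergraph is set up per stream, the
 * encoder parameters are taken from the filter's output link, and every
 * encoder is opened with avcodec_open2() before the muxers write headers.
 */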
2566 
2567 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2568 static int need_output(void)
2569 {
2570  int i;
2571 
2572  for (i = 0; i < nb_output_streams; i++) {
2573  OutputStream *ost = output_streams[i];
2574  OutputFile *of = output_files[ost->file_index];
2575  AVFormatContext *os = output_files[ost->file_index]->ctx;
2576 
2577  if (ost->finished ||
2578  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2579  continue;
2580  if (ost->frame_number >= ost->max_frames) {
2581  int j;
2582  for (j = 0; j < of->ctx->nb_streams; j++)
2583  close_output_stream(output_streams[of->ost_index + j]);
2584  continue;
2585  }
2586 
2587  return 1;
2588  }
2589 
2590  return 0;
2591 }
2592 
2593 /**
2594  * Select the output stream to process.
2595  *
2596  * @return selected output stream, or NULL if none available
2597  */
2598 static OutputStream *choose_output(void)
2599 {
2600  int i;
2601  int64_t opts_min = INT64_MAX;
2602  OutputStream *ost_min = NULL;
2603 
2604  for (i = 0; i < nb_output_streams; i++) {
2605  OutputStream *ost = output_streams[i];
2606  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
2607  AV_TIME_BASE_Q);
2608  if (!ost->unavailable && !ost->finished && opts < opts_min) {
2609  opts_min = opts;
2610  ost_min = ost;
2611  }
2612  }
2613  return ost_min;
2614 }
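/*
 * For illustration: choose_output() rescales each output stream's current DTS
 * into AV_TIME_BASE (microsecond) units and picks the stream that is furthest
 * behind, which keeps all output files progressing roughly in lockstep. The
 * conversion is the usual time-base change:
 *
 *     int64_t opts = av_rescale_q(st->cur_dts, st->time_base, AV_TIME_BASE_Q);
 *     // i.e. cur_dts * time_base.num / time_base.den seconds, in microseconds
 */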
2615 
2616 static int check_keyboard_interaction(int64_t cur_time)
2617 {
2618  int i, ret, key;
2619  static int64_t last_time;
2620  if (received_nb_signals)
2621  return AVERROR_EXIT;
2622  /* read_key() returns 0 on EOF */
2623  if(cur_time - last_time >= 100000 && !run_as_daemon){
2624  key = read_key();
2625  last_time = cur_time;
2626  }else
2627  key = -1;
2628  if (key == 'q')
2629  return AVERROR_EXIT;
2630  if (key == '+') av_log_set_level(av_log_get_level()+10);
2631  if (key == '-') av_log_set_level(av_log_get_level()-10);
2632  if (key == 's') qp_hist ^= 1;
2633  if (key == 'h'){
2634  if (do_hex_dump){
2635  do_hex_dump = do_pkt_dump = 0;
2636  } else if(do_pkt_dump){
2637  do_hex_dump = 1;
2638  } else
2639  do_pkt_dump = 1;
2641  }
2642  if (key == 'c' || key == 'C'){
2643  char buf[4096], target[64], command[256], arg[256] = {0};
2644  double time;
2645  int k, n = 0;
2646  fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
2647  i = 0;
2648  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
2649  if (k > 0)
2650  buf[i++] = k;
2651  buf[i] = 0;
2652  if (k > 0 &&
2653  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
2654  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
2655  target, time, command, arg);
2656  for (i = 0; i < nb_filtergraphs; i++) {
2657  FilterGraph *fg = filtergraphs[i];
2658  if (fg->graph) {
2659  if (time < 0) {
2660  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
2661  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
2662  fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
2663  } else {
2664  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
2665  }
2666  }
2667  }
2668  } else {
2669  av_log(NULL, AV_LOG_ERROR,
2670  "Parse error, at least 3 arguments were expected, "
2671  "only %d given in string '%s'\n", n, buf);
2672  }
2673  }
2674  if (key == 'd' || key == 'D'){
2675  int debug=0;
2676  if(key == 'D') {
2677  debug = input_streams[0]->st->codec->debug<<1;
2678  if(!debug) debug = 1;
2679  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
2680  debug += debug;
2681  }else
2682  if(scanf("%d", &debug)!=1)
2683  fprintf(stderr,"error parsing debug value\n");
2684  for(i=0;i<nb_input_streams;i++) {
2685  input_streams[i]->st->codec->debug = debug;
2686  }
2687  for(i=0;i<nb_output_streams;i++) {
2688  OutputStream *ost = output_streams[i];
2689  ost->st->codec->debug = debug;
2690  }
2691  if(debug) av_log_set_level(AV_LOG_DEBUG);
2692  fprintf(stderr,"debug=%d\n", debug);
2693  }
2694  if (key == '?'){
2695  fprintf(stderr, "key function\n"
2696  "? show this help\n"
2697  "+ increase verbosity\n"
2698  "- decrease verbosity\n"
2699  "c Send command to filtergraph\n"
2700  "D cycle through available debug modes\n"
2701  "h dump packets/hex press to cycle through the 3 states\n"
2702  "q quit\n"
2703  "s Show QP histogram\n"
2704  );
2705  }
2706  return 0;
2707 }
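/*
 * For illustration: the 'c'/'C' key reads one line of the form
 * "<target> <time> <command>[ <argument>]" and forwards it to the configured
 * filtergraphs; a negative time sends it at once through
 * avfilter_graph_send_command(), otherwise it is scheduled with
 * avfilter_graph_queue_command(). A purely hypothetical input line (whether a
 * given filter accepts a command depends on the graph in use):
 *
 *     all -1 volume 0.5
 */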
2708 
2709 #if HAVE_PTHREADS
2710 static void *input_thread(void *arg)
2711 {
2712  InputFile *f = arg;
2713  int ret = 0;
2714 
2715  while (!transcoding_finished && ret >= 0) {
2716  AVPacket pkt;
2717  ret = av_read_frame(f->ctx, &pkt);
2718 
2719  if (ret == AVERROR(EAGAIN)) {
2720  av_usleep(10000);
2721  ret = 0;
2722  continue;
2723  } else if (ret < 0)
2724  break;
2725 
2727  while (!av_fifo_space(f->fifo))
2729 
2730  av_dup_packet(&pkt);
2731  av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2732 
2734  }
2735 
2736  f->finished = 1;
2737  return NULL;
2738 }
2739 
2740 static void free_input_threads(void)
2741 {
2742  int i;
2743 
2744  if (nb_input_files == 1)
2745  return;
2746 
2748 
2749  for (i = 0; i < nb_input_files; i++) {
2750  InputFile *f = input_files[i];
2751  AVPacket pkt;
2752 
2753  if (!f->fifo || f->joined)
2754  continue;
2755 
2757  while (av_fifo_size(f->fifo)) {
2758  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2759  av_free_packet(&pkt);
2760  }
2763 
2764  pthread_join(f->thread, NULL);
2765  f->joined = 1;
2766 
2767  while (av_fifo_size(f->fifo)) {
2768  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2769  av_free_packet(&pkt);
2770  }
2771  av_fifo_free(f->fifo);
2772  }
2773 }
2774 
2775 static int init_input_threads(void)
2776 {
2777  int i, ret;
2778 
2779  if (nb_input_files == 1)
2780  return 0;
2781 
2782  for (i = 0; i < nb_input_files; i++) {
2783  InputFile *f = input_files[i];
2784 
2785  if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2786  return AVERROR(ENOMEM);
2787 
2790 
2791  if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2792  return AVERROR(ret);
2793  }
2794  return 0;
2795 }
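/*
 * Note on the threaded demuxing above: when more than one input file is used,
 * each file gets its own input_thread() that reads packets with
 * av_read_frame() and stores duplicated copies in a FIFO sized for 8 AVPacket
 * entries, while get_input_packet_mt() below pops them on the main thread.
 * The FIFO accesses are guarded by a mutex and condition variable on the
 * InputFile, so the reader blocks while the FIFO is full.
 */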
2796 
2797 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2798 {
2799  int ret = 0;
2800 
2802 
2803  if (av_fifo_size(f->fifo)) {
2804  av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2806  } else {
2807  if (f->finished)
2808  ret = AVERROR_EOF;
2809  else
2810  ret = AVERROR(EAGAIN);
2811  }
2812 
2814 
2815  return ret;
2816 }
2817 #endif
2818 
2819 static int get_input_packet(InputFile *f, AVPacket *pkt)
2820 {
2821 #if HAVE_PTHREADS
2822  if (nb_input_files > 1)
2823  return get_input_packet_mt(f, pkt);
2824 #endif
2825  return av_read_frame(f->ctx, pkt);
2826 }
2827 
2828 static int got_eagain(void)
2829 {
2830  int i;
2831  for (i = 0; i < nb_output_streams; i++)
2832  if (output_streams[i]->unavailable)
2833  return 1;
2834  return 0;
2835 }
2836 
2837 static void reset_eagain(void)
2838 {
2839  int i;
2840  for (i = 0; i < nb_input_files; i++)
2841  input_files[i]->eagain = 0;
2842  for (i = 0; i < nb_output_streams; i++)
2843  output_streams[i]->unavailable = 0;
2844 }
2845 
2846 /*
2847  * Return
2848  * - 0 -- one packet was read and processed
2849  * - AVERROR(EAGAIN) -- no packets were available for selected file,
2850  * this function should be called again
2851  * - AVERROR_EOF -- this function should not be called again
2852  */
2853 static int process_input(int file_index)
2854 {
2855  InputFile *ifile = input_files[file_index];
2856  AVFormatContext *is;
2857  InputStream *ist;
2858  AVPacket pkt;
2859  int ret, i, j;
2860 
2861  is = ifile->ctx;
2862  ret = get_input_packet(ifile, &pkt);
2863 
2864  if (ret == AVERROR(EAGAIN)) {
2865  ifile->eagain = 1;
2866  return ret;
2867  }
2868  if (ret < 0) {
2869  if (ret != AVERROR_EOF) {
2870  print_error(is->filename, ret);
2871  if (exit_on_error)
2872  exit(1);
2873  }
2874  ifile->eof_reached = 1;
2875 
2876  for (i = 0; i < ifile->nb_streams; i++) {
2877  ist = input_streams[ifile->ist_index + i];
2878  if (ist->decoding_needed)
2879  output_packet(ist, NULL);
2880 
2881  /* mark all outputs that don't go through lavfi as finished */
2882  for (j = 0; j < nb_output_streams; j++) {
2883  OutputStream *ost = output_streams[j];
2884 
2885  if (ost->source_index == ifile->ist_index + i &&
2886  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2887  close_output_stream(ost);
2888  }
2889  }
2890 
2891  return AVERROR(EAGAIN);
2892  }
2893 
2894  reset_eagain();
2895 
2896  if (do_pkt_dump) {
2897  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2898  is->streams[pkt.stream_index]);
2899  }
2900  /* the following test is needed in case new streams appear
2901  dynamically in the stream: we ignore them */
2902  if (pkt.stream_index >= ifile->nb_streams) {
2903  report_new_stream(file_index, &pkt);
2904  goto discard_packet;
2905  }
2906 
2907  ist = input_streams[ifile->ist_index + pkt.stream_index];
2908  if (ist->discard)
2909  goto discard_packet;
2910 
2911  if (debug_ts) {
2912  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
2913  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
2917  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
2918  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
2919  av_ts2str(input_files[ist->file_index]->ts_offset),
2920  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
2921  }
2922 
2923  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
2924  int64_t stime, stime2;
2925  // Correcting the start time based on the enabled streams
2926  // FIXME: this ideally should be done before the first use of the start time, but we do not know which streams are enabled at that point,
2927  // so we instead do it here as part of discontinuity handling
2928  if ( ist->next_dts == AV_NOPTS_VALUE
2929  && ifile->ts_offset == -is->start_time
2930  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2931  int64_t new_start_time = INT64_MAX;
2932  for (i=0; i<is->nb_streams; i++) {
2933  AVStream *st = is->streams[i];
2934  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
2935  continue;
2936  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
2937  }
2938  if (new_start_time > is->start_time) {
2939  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
2940  ifile->ts_offset = -new_start_time;
2941  }
2942  }
2943 
2944  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
2945  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
2946  ist->wrap_correction_done = 1;
2947 
2948  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2949  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
2950  ist->wrap_correction_done = 0;
2951  }
2952  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2953  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
2954  ist->wrap_correction_done = 0;
2955  }
2956  }
2957 
2958  if (pkt.dts != AV_NOPTS_VALUE)
2959  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2960  if (pkt.pts != AV_NOPTS_VALUE)
2961  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2962 
2963  if (pkt.pts != AV_NOPTS_VALUE)
2964  pkt.pts *= ist->ts_scale;
2965  if (pkt.dts != AV_NOPTS_VALUE)
2966  pkt.dts *= ist->ts_scale;
2967 
2968  if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
2969  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
2970  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2971  int64_t delta = pkt_dts - ifile->last_ts;
2972  if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
2973  (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
2975  ifile->ts_offset -= delta;
2977  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2978  delta, ifile->ts_offset);
2979  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2980  if (pkt.pts != AV_NOPTS_VALUE)
2981  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2982  }
2983  }
2984 
2985  if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2986  !copy_ts) {
2987  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2988  int64_t delta = pkt_dts - ist->next_dts;
2989  if (is->iformat->flags & AVFMT_TS_DISCONT) {
2990  if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
2991  (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
2993  pkt_dts+1<ist->pts){
2994  ifile->ts_offset -= delta;
2996  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2997  delta, ifile->ts_offset);
2998  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2999  if (pkt.pts != AV_NOPTS_VALUE)
3000  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3001  }
3002  } else {
3003  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3005  ) {
3006  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3007  pkt.dts = AV_NOPTS_VALUE;
3008  }
3009  if (pkt.pts != AV_NOPTS_VALUE){
3010  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3011  delta = pkt_pts - ist->next_dts;
3012  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3014  ) {
3015  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3016  pkt.pts = AV_NOPTS_VALUE;
3017  }
3018  }
3019  }
3020  }
3021 
3022  if (pkt.dts != AV_NOPTS_VALUE)
3023  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3024 
3025  if (debug_ts) {
3026  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3028  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3029  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3030  av_ts2str(input_files[ist->file_index]->ts_offset),
3031  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3032  }
3033 
3034  sub2video_heartbeat(ist, pkt.pts);
3035 
3036  ret = output_packet(ist, &pkt);
3037  if (ret < 0) {
3038  char buf[128];
3039  av_strerror(ret, buf, sizeof(buf));
3040  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
3041  ist->file_index, ist->st->index, buf);
3042  if (exit_on_error)
3043  exit(1);
3044  }
3045 
3046 discard_packet:
3047  av_free_packet(&pkt);
3048 
3049  return 0;
3050 }
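/*
 * Note on the timestamp handling in process_input(): the comparisons are done
 * in AV_TIME_BASE (microsecond) units. For containers flagged AVFMT_TS_DISCONT
 * a jump larger than dts_delta_threshold is absorbed into ifile->ts_offset;
 * for other containers, DTS/PTS values beyond dts_error_threshold are
 * discarded (set to AV_NOPTS_VALUE) instead. As a rough worked example, an
 * MPEG-TS 33-bit timestamp wrap in a 90 kHz time base is a jump of
 * 2^33 / 90000, roughly 95444 seconds (about 26.5 hours), far beyond any sane
 * threshold, so it is folded into the offset rather than treated as real
 * elapsed time.
 */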
3051 
3052 /**
3053  * Perform a step of transcoding for the specified filter graph.
3054  *
3055  * @param[in] graph filter graph to consider
3056  * @param[out] best_ist input stream from which a frame would allow processing to continue
3057  * @return 0 for success, <0 for error
3058  */
3059 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3060 {
3061  int i, ret;
3062  int nb_requests, nb_requests_max = 0;
3063  InputFilter *ifilter;
3064  InputStream *ist;
3065 
3066  *best_ist = NULL;
3067  ret = avfilter_graph_request_oldest(graph->graph);
3068  if (ret >= 0)
3069  return reap_filters();
3070 
3071  if (ret == AVERROR_EOF) {
3072  ret = reap_filters();
3073  for (i = 0; i < graph->nb_outputs; i++)
3074  close_output_stream(graph->outputs[i]->ost);
3075  return ret;
3076  }
3077  if (ret != AVERROR(EAGAIN))
3078  return ret;
3079 
3080  for (i = 0; i < graph->nb_inputs; i++) {
3081  ifilter = graph->inputs[i];
3082  ist = ifilter->ist;
3083  if (input_files[ist->file_index]->eagain ||
3084  input_files[ist->file_index]->eof_reached)
3085  continue;
3086  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3087  if (nb_requests > nb_requests_max) {
3088  nb_requests_max = nb_requests;
3089  *best_ist = ist;
3090  }
3091  }
3092 
3093  if (!*best_ist)
3094  for (i = 0; i < graph->nb_outputs; i++)
3095  graph->outputs[i]->ost->unavailable = 1;
3096 
3097  return 0;
3098 }
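/*
 * Note: transcode_from_filter() keys off the three outcomes of
 * avfilter_graph_request_oldest(): a value >= 0 means the graph produced
 * output (drained via reap_filters()), AVERROR_EOF means all sinks are done
 * (their output streams get closed), and AVERROR(EAGAIN) means the graph is
 * starved, in which case the input whose buffer source reported the most
 * failed requests is returned as the best stream to feed next.
 */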
3099 
3100 /**
3101  * Run a single step of transcoding.
3102  *
3103  * @return 0 for success, <0 for error
3104  */
3105 static int transcode_step(void)
3106 {
3107  OutputStream *ost;
3108  InputStream *ist;
3109  int ret;
3110 
3111  ost = choose_output();
3112  if (!ost) {
3113  if (got_eagain()) {
3114  reset_eagain();
3115  av_usleep(10000);
3116  return 0;
3117  }
3118  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3119  return AVERROR_EOF;
3120  }
3121 
3122  if (ost->filter) {
3123  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3124  return ret;
3125  if (!ist)
3126  return 0;
3127  } else {
3128  av_assert0(ost->source_index >= 0);
3129  ist = input_streams[ost->source_index];
3130  }
3131 
3132  ret = process_input(ist->file_index);
3133  if (ret == AVERROR(EAGAIN)) {
3134  if (input_files[ist->file_index]->eagain)
3135  ost->unavailable = 1;
3136  return 0;
3137  }
3138  if (ret < 0)
3139  return ret == AVERROR_EOF ? 0 : ret;
3140 
3141  return reap_filters();
3142 }
3143 
3144 /*
3145  * The following code is the main loop of the file converter
3146  */
3147 static int transcode(void)
3148 {
3149  int ret, i;
3150  AVFormatContext *os;
3151  OutputStream *ost;
3152  InputStream *ist;
3153  int64_t timer_start;
3154 
3155  ret = transcode_init();
3156  if (ret < 0)
3157  goto fail;
3158 
3159  if (stdin_interaction) {
3160  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3161  }
3162 
3163  timer_start = av_gettime();
3164 
3165 #if HAVE_PTHREADS
3166  if ((ret = init_input_threads()) < 0)
3167  goto fail;
3168 #endif
3169 
3170  while (!received_sigterm) {
3171  int64_t cur_time= av_gettime();
3172 
3173  /* if 'q' was pressed, exit */
3174  if (stdin_interaction)
3175  if (check_keyboard_interaction(cur_time) < 0)
3176  break;
3177 
3178  /* check if there's any stream where output is still needed */
3179  if (!need_output()) {
3180  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3181  break;
3182  }
3183 
3184  ret = transcode_step();
3185  if (ret < 0) {
3186  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
3187  continue;
3188 
3189  av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
3190  break;
3191  }
3192 
3193  /* dump report by using the first output video and audio streams */
3194  print_report(0, timer_start, cur_time);
3195  }
3196 #if HAVE_PTHREADS
3197  free_input_threads();
3198 #endif
3199 
3200  /* at the end of stream, we must flush the decoder buffers */
3201  for (i = 0; i < nb_input_streams; i++) {
3202  ist = input_streams[i];
3203  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3204  output_packet(ist, NULL);
3205  }
3206  }
3207  flush_encoders();
3208 
3209  term_exit();
3210 
3211  /* write the trailer if needed and close file */
3212  for (i = 0; i < nb_output_files; i++) {
3213  os = output_files[i]->ctx;
3214  av_write_trailer(os);
3215  }
3216 
3217  /* dump report by using the first video and audio streams */
3218  print_report(1, timer_start, av_gettime());
3219 
3220  /* close each encoder */
3221  for (i = 0; i < nb_output_streams; i++) {
3222  ost = output_streams[i];
3223  if (ost->encoding_needed) {
3224  av_freep(&ost->st->codec->stats_in);
3225  avcodec_close(ost->st->codec);
3226  }
3227  }
3228 
3229  /* close each decoder */
3230  for (i = 0; i < nb_input_streams; i++) {
3231  ist = input_streams[i];
3232  if (ist->decoding_needed) {
3233  avcodec_close(ist->st->codec);
3234  }
3235  }
3236 
3237  /* finished! */
3238  ret = 0;
3239 
3240  fail:
3241 #if HAVE_PTHREADS
3242  free_input_threads();
3243 #endif
3244 
3245  if (output_streams) {
3246  for (i = 0; i < nb_output_streams; i++) {
3247  ost = output_streams[i];
3248  if (ost) {
3249  if (ost->stream_copy)
3250  av_freep(&ost->st->codec->extradata);
3251  if (ost->logfile) {
3252  fclose(ost->logfile);
3253  ost->logfile = NULL;
3254  }
3255  av_freep(&ost->st->codec->subtitle_header);
3256  av_free(ost->forced_kf_pts);
3257  av_dict_free(&ost->opts);
3258  av_dict_free(&ost->swr_opts);
3259  av_dict_free(&ost->resample_opts);
3260  }
3261  }
3262  }
3263  return ret;
3264 }
3265 
3266 
3267 static int64_t getutime(void)
3268 {
3269 #if HAVE_GETRUSAGE
3270  struct rusage rusage;
3271 
3272  getrusage(RUSAGE_SELF, &rusage);
3273  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3274 #elif HAVE_GETPROCESSTIMES
3275  HANDLE proc;
3276  FILETIME c, e, k, u;
3277  proc = GetCurrentProcess();
3278  GetProcessTimes(proc, &c, &e, &k, &u);
3279  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3280 #else
3281  return av_gettime();
3282 #endif
3283 }
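/*
 * Note: getutime() reports user CPU time in microseconds. The getrusage()
 * branch converts seconds plus microseconds directly, while the
 * GetProcessTimes() branch reassembles the two 32-bit FILETIME halves into a
 * 64-bit count of 100-nanosecond ticks and divides by 10.
 */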
3284 
3285 static int64_t getmaxrss(void)
3286 {
3287 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3288  struct rusage rusage;
3289  getrusage(RUSAGE_SELF, &rusage);
3290  return (int64_t)rusage.ru_maxrss * 1024;
3291 #elif HAVE_GETPROCESSMEMORYINFO
3292  HANDLE proc;
3293  PROCESS_MEMORY_COUNTERS memcounters;
3294  proc = GetCurrentProcess();
3295  memcounters.cb = sizeof(memcounters);
3296  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3297  return memcounters.PeakPagefileUsage;
3298 #else
3299  return 0;
3300 #endif
3301 }
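/*
 * Note: getmaxrss() returns a peak memory figure in bytes. On Linux
 * ru_maxrss is reported in kilobytes, hence the multiplication by 1024 (some
 * BSD-derived systems use different units, so treat the value as
 * approximate); the Windows branch uses PeakPagefileUsage, which is already
 * in bytes.
 */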
3302 
3303 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
3304 {
3305 }
3306 
3307 int main(int argc, char **argv)
3308 {
3309  int ret;
3310  int64_t ti;
3311 
3312  atexit(exit_program);
3313 
3314  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
3315 
3316  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3317  parse_loglevel(argc, argv, options);
3318 
3319  if(argc>1 && !strcmp(argv[1], "-d")){
3320  run_as_daemon=1;
3321  av_log_set_callback(log_callback_null);
3322  argc--;
3323  argv++;
3324  }
3325 
3326  avcodec_register_all();
3327 #if CONFIG_AVDEVICE
3328  avdevice_register_all();
3329 #endif
3330  avfilter_register_all();
3331  av_register_all();
3332  avformat_network_init();
3333 
3334  show_banner(argc, argv, options);
3335 
3336  term_init();
3337 
3338  /* parse options and open all input/output files */
3339  ret = ffmpeg_parse_options(argc, argv);
3340  if (ret < 0)
3341  exit(1);
3342 
3343  if (nb_output_files <= 0 && nb_input_files == 0) {
3344  show_usage();
3345  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3346  exit(1);
3347  }
3348 
3349  /* file converter / grab */
3350  if (nb_output_files <= 0) {
3351  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
3352  exit(1);
3353  }
3354 
3355 // if (nb_input_files == 0) {
3356 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
3357 // exit(1);
3358 // }
3359 
3360  current_time = ti = getutime();
3361  if (transcode() < 0)
3362  exit(1);
3363  ti = getutime() - ti;
3364  if (do_benchmark) {
3365  printf("bench: utime=%0.3fs\n", ti / 1000000.0);
3366  }
3367  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
3368  decode_error_stat[0], decode_error_stat[1]);
3369  if (2*decode_error_stat[0] < decode_error_stat[1])
3370  exit(254);
3371 
3372  exit(received_nb_signals ? 255 : 0);
3373  return 0;
3374 }
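/*
 * For orientation, a typical invocation exercising the paths above (the file
 * names are illustrative, the options are standard ffmpeg options):
 *
 *     ffmpeg -i input.mkv -c:v libx264 -c:a copy output.mp4
 *
 * This decodes and re-encodes the video stream (the encoding_needed path,
 * through a simple filtergraph) while stream-copying the audio (the
 * do_streamcopy() path).
 */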
unsigned int nb_chapters
Definition: avformat.h:1089
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:229
const char * name
Definition: avisynth_c.h:675
int got_output
Definition: ffmpeg.h:254
#define FF_DEBUG_DCT_COEFF
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1370
int64_t first_dts
Definition: avformat.h:784
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1464
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int frame_number
Definition: ffmpeg.h:315
const struct AVCodec * codec
Definition: ffmpeg.h:299
const char * s
Definition: avisynth_c.h:668
Bytestream IO Context.
Definition: avio.h:68
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:371
void term_init(void)
Definition: ffmpeg.c:324
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:261
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:93
uint8_t * name
Definition: ffmpeg.h:194
int nb_outputs
Definition: ffmpeg.h:210
#define FF_DEBUG_MV
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:242
AVDictionary * swr_opts
Definition: ffmpeg.h:355
int resample_channels
Definition: ffmpeg.h:249
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Immediately push the frame to the output.
Definition: buffersrc.h:48
void term_exit(void)
Definition: ffmpeg.c:302
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
int stream_copy
Definition: ffmpeg.h:359
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:726
int x
top left corner of pict, undefined when pict is not set
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1125
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:149
#define FF_DEBUG_VIS_QP
AVRational frame_rate
Definition: ffmpeg.h:329
int64_t * forced_kf_pts
Definition: ffmpeg.h:336
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:292
uint64_t error[AV_NUM_DATA_POINTERS]
error
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
static int process_input(int file_index)
Definition: ffmpeg.c:2853
#define CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:383
int exit_on_error
Definition: ffmpeg_opt.c:82
const char * fmt
Definition: avisynth_c.h:669
misc image utilities
void * av_realloc_f(void *ptr, size_t nelem, size_t elsize)
Allocate or reallocate a block of memory.
Definition: mem.c:168
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
int64_t pos
byte position in stream, -1 if unknown
AVStream * st
Definition: ffmpeg.h:313
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:75
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:215
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
static int run_as_daemon
Definition: ffmpeg.c:124
Memory buffer source API.
AVFrame * coded_frame
the picture in the bitstream
if max(w)>1 w=0.9 *w/max(w)
AVRational framerate
Definition: ffmpeg.h:239
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
static int64_t audio_size
Definition: ffmpeg.c:126
static int64_t cur_time
Definition: ffserver.c:325
int decoding_needed
Definition: ffmpeg.h:217
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:709
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1098
#define vsnprintf
Definition: snprintf.h:36
int index
stream index in AVFormatContext
Definition: avformat.h:644
static int64_t getmaxrss(void)
Definition: ffmpeg.c:3285
static int nb_frames_dup
Definition: ffmpeg.c:129
Sinusoidal phase f
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:1975
Various defines for YUV<->RGB conversion.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
int eagain
Definition: ffmpeg.h:279
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:75
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1055
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:323
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:519
AVFrame * filter_frame
Definition: ffmpeg.h:220
static int transcode_init(void)
Definition: ffmpeg.c:2066
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:1982
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:76
enum AVMediaType type
#define FF_ARRAY_ELEMS(a)
static int init_input_threads(void)
Definition: ffmpeg.c:2775
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:56
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:640
int do_hex_dump
Definition: ffmpeg_opt.c:77
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define FF_DEBUG_VIS_MB_TYPE
int nb_input_streams
Definition: ffmpeg.c:146
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:67
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Definition: log.c:279
AVDictionaryEntry * av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:221
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2051
four components are given, that&#39;s all.
#define VSYNC_VFR
Definition: ffmpeg.h:53
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:314
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:363
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Fill in the AVPicture fields, always assume a linesize alignment of 1.
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:83
float dts_error_threshold
Definition: ffmpeg_opt.c:69
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:372
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:193
int index
Definition: ffmpeg.h:201
output residual component w
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:417
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
struct FilterGraph * graph
Definition: ffmpeg.h:186
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVSubtitleRect ** rects
set threshold d
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
int encoding_needed
Definition: ffmpeg.h:314
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:524
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:3303
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:2616
Format I/O context.
Definition: avformat.h:944
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:185
unsigned int nb_stream_indexes
Definition: avformat.h:893
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
int64_t cur_dts
Definition: avformat.h:785
int w
width of pict, undefined when pict is not set
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:204
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:347
static uint8_t * subtitle_out
Definition: ffmpeg.c:136
#define DEFAULT_PASS_LOGFILENAME_PREFIX
Definition: ffmpeg.c:143
int copy_initial_nonkeyframes
Definition: ffmpeg.h:361
static void exit_program(void)
Definition: ffmpeg.c:424
enum AVSampleFormat sample_fmt
audio sample format
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:397
uint8_t
int av_log_get_level(void)
Definition: log.c:264
Opaque data information usually continuous.
Definition: avutil.h:145
static void * input_thread(void *arg)
Definition: ffmpeg.c:2710
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:169
static void rate_emu_sleep(InputStream *ist)
Definition: ffmpeg.c:1454
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:215
AVOptions.
window constants for m
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:475
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int is_start
Definition: ffmpeg.h:235
uint8_t * data[AV_NUM_DATA_POINTERS]
int stdin_interaction
Definition: ffmpeg_opt.c:85
FILE * logfile
Definition: ffmpeg.h:348
static AVPacket pkt
Definition: demuxing.c:56
libavcodec/libavfilter gluing utilities
#define b
Definition: input.c:42
AVDictionary * opts
Definition: ffmpeg.h:354
end end
static int need_output(void)
Definition: ffmpeg.c:2568
#define NAN
Definition: math.h:7
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:219
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
static double psnr(double d)
Definition: ffmpeg.c:972
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define AV_LOG_QUIET
Definition: log.h:130
int do_benchmark
Definition: ffmpeg_opt.c:75
int audio_sync_method
Definition: ffmpeg_opt.c:72
int shortest
Definition: ffmpeg.h:375
AVStream ** streams
Definition: avformat.h:992
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:3267
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:142
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
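A sketch of the most common use: turning a timestamp expressed in a stream time base into floating-point seconds. The helper name is illustrative.

    #include <stdint.h>
    #include "libavutil/rational.h"

    /* Convert a timestamp in time_base units into seconds, e.g. ts=900000 with tb=1/90000 -> 10.0 s. */
    static double ts_to_seconds(int64_t ts, AVRational time_base)
    {
        return ts * av_q2d(time_base);
    }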
int nb_streams
Definition: ffmpeg.h:283
pthread_t thread
Definition: ffmpeg.h:289
uint8_t * data
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:356
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:1988
AVFilterContext * filter
Definition: ffmpeg.h:191
int avformat_network_init(void)
Do global initialization of network components.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
int nb_input_files
Definition: ffmpeg.c:148
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:362
#define lrintf(x)
Definition: libm_mips.h:70
void av_fifo_free(AVFifoBuffer *f)
Free an AVFifoBuffer.
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:703
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:977
int resample_sample_rate
Definition: ffmpeg.h:248
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:127
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:459
external API header
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
for audio filters
int h
height of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:248
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
AVCodec * dec
Definition: ffmpeg.h:218
static int64_t duration
Definition: ffplay.c:294
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:891
char * stats_out
pass1 encoding statistics output buffer
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:173
int top_field_first
Definition: ffmpeg.h:240
int nb_output_streams
Definition: ffmpeg.c:151
int file_index
Definition: ffmpeg.h:214
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
const OptionDef options[]
Definition: ffserver.c:4697
struct AVBitStreamFilterContext * next
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
unsigned int * stream_index
Definition: avformat.h:892
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:245
int resample_height
Definition: ffmpeg.h:243
int wrap_correction_done
Definition: ffmpeg.h:230
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:232
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:263
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
struct AVOutputFormat * oformat
Definition: avformat.h:958
int64_t next_dts
Definition: ffmpeg.h:225
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
frame
Definition: stft.m:14
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:130
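For context, a sketch of the typical timestamp rescaling this function is used for, e.g. moving a pts from a decoder time base into the muxer's stream time base without intermediate overflow; the parameter names are assumptions.

    #include <stdint.h>
    #include "libavutil/mathematics.h"
    #include "libavutil/rational.h"

    /* Rescale pts from dec_tb to mux_tb, e.g. 48000 in 1/48000 -> 90000 in 1/90000. */
    static int64_t rescale_pts(int64_t pts, AVRational dec_tb, AVRational mux_tb)
    {
        return av_rescale_q(pts, dec_tb, mux_tb);
    }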
static const uint8_t frame_size[4]
Definition: g723_1_data.h:58
Discrete Time axis x
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:51
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
libswresample public header
enum AVCodecID id
int rate_emu
Definition: ffmpeg.h:286
int width
width and height of the video frame
Definition: frame.h:122
int has_b_frames
Size of the frame reordering buffer in the decoder.
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:183
static void reset_eagain(void)
Definition: ffmpeg.c:2837
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:541
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1285
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2490
FilterGraph ** filtergraphs
Definition: ffmpeg.c:155
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:422
AVFilterContext * filter
Definition: ffmpeg.h:184
#define CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:354
int64_t start
Definition: ffmpeg.h:222
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int y
top left corner of pict, undefined when pict is not set
int video_sync_method
Definition: ffmpeg_opt.c:73
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:131
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:155
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:72
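Because the macro fills a stack buffer, its result must be consumed directly inside the enclosing expression, typically as an av_log() argument; a minimal sketch (function name hypothetical):

    #include <stdint.h>
    #include "libavutil/timestamp.h"
    #include "libavutil/log.h"

    /* Log a packet's pts both as raw ticks and as seconds in its stream time base. */
    static void log_pts(void *logctx, int64_t pts, AVRational tb)
    {
        av_log(logctx, AV_LOG_DEBUG, "pts:%s pts_time:%s\n",
               av_ts2str(pts), av_ts2timestr(pts, &tb));
    }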
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1732
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: aviobuf.c:821
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Init a print buffer.
Definition: bprint.c:68
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *in_picture)
Definition: ffmpeg.c:775
const char * r
Definition: vf_curves.c:94
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:111
int capabilities
Codec capabilities.
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
unsigned int nb_programs
Definition: avformat.h:1050
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:162
preferred ID for decoding MPEG audio layer 1, 2 or 3
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
const char * arg
int flags
CODEC_FLAG_*.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:394
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:110
AVChapter ** chapters
Definition: avformat.h:1090
int finished
Definition: ffmpeg.h:357
int rc_max_rate
maximum bitrate
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
const char * name
Name of the codec implementation.
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:643
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
void av_log_set_level(int level)
Definition: log.c:269
struct AVPicture AVPicture
four components are given, that's all.
int force_fps
Definition: ffmpeg.h:330
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:716
New fields can be added to the end with minor version bumps.
Definition: avformat.h:888
#define FFMAX(a, b)
Definition: common.h:56
int qp_hist
Definition: ffmpeg_opt.c:84
int size
static char logfilename[1024]
Definition: ffserver.c:271
int flags
A combination of AV_PKT_FLAG values.
uint64_t channel_layout
Audio channel layout.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:135
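A sketch of a typical use, roughly in the spirit of check_recording_time() above: deciding whether a stream's pts has passed a limit given in AV_TIME_BASE units, without lossily converting either value. Names are illustrative.

    #include <stdint.h>
    #include "libavutil/avutil.h"
    #include "libavutil/mathematics.h"

    /* Return 1 once pts (in its own time base tb) has reached limit_us (in AV_TIME_BASE units). */
    static int past_recording_time(int64_t pts, AVRational tb, int64_t limit_us)
    {
        /* av_compare_ts() returns -1, 0 or 1 */
        return av_compare_ts(pts, tb, limit_us, AV_TIME_BASE_Q) >= 0;
    }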
uint32_t end_display_time
int64_t pts
Same as packet pts, in AV_TIME_BASE.
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:662
int rc_buffer_size
decoder bitstream buffer size
OutputFilter * filter
Definition: ffmpeg.h:350
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:331
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
AVRational frame_aspect_ratio
Definition: ffmpeg.h:333
AVDictionary * opts
Definition: ffmpeg.h:238
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
AVFrame * avcodec_alloc_frame(void)
Allocate an AVFrame and set its fields to default values.
unsigned int nb_streams
A list of all streams in the file.
Definition: avformat.h:991
#define AV_LOG_VERBOSE
Definition: log.h:157
struct AVRational AVRational
rational number numerator/denominator
static int nb_frames_drop
Definition: ffmpeg.c:130
A bitmap, pict will be set.
int nb_output_files
Definition: ffmpeg.c:153
Buffer to print data progressively.
Definition: bprint.h:75
int bit_rate
the average bitrate
static int64_t video_size
Definition: ffmpeg.c:125
int void avio_flush(AVIOContext *s)
Force flushing of buffered data to the output s.
Definition: aviobuf.c:193
audio channel layout utility functions
static int transcoding_finished
Definition: ffmpeg.c:140
int av_frame_get_channels(const AVFrame *frame)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
static int transcode(void)
Definition: ffmpeg.c:3147
char filename[1024]
input or output filename
Definition: avformat.h:994
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:118
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:196
#define FFMIN(a, b)
Definition: common.h:58
#define VSYNC_AUTO
Definition: ffmpeg.h:50
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int saw_first_ts
Definition: ffmpeg.h:236
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1484
struct OutputStream * ost
Definition: ffmpeg.h:192
Keep a reference to the frame.
Definition: buffersrc.h:55
ret
Definition: avfilter.c:821
int width
picture width / height.
PVOID HANDLE
AVFifoBuffer * fifo
Definition: ffmpeg.h:294
FFmpeg Automated Testing Environment (FATE): the regression-test suite; the full documentation (running FATE from a source tree, submitting results to the FATE server, makefile targets and variables) lives in fate.txt.
Definition: fate.txt:34
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:341
t
Definition: genspecsines3.m:6
const char * name
Definition: avformat.h:378
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:110
int nb_filtergraphs
Definition: ffmpeg.c:156
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:76
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:83
int64_t last_ts
Definition: ffmpeg.h:282
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:2797
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:185
int do_pkt_dump
Definition: ffmpeg_opt.c:78
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
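A sketch of the decode pattern for this API generation: a packet may contain several audio frames, so the caller advances pkt->data/pkt->size by the number of bytes the decoder reports as consumed. The context and frame are assumed to be allocated already; the function name is hypothetical.

    #include "libavcodec/avcodec.h"

    /* Decode all audio frames contained in one packet. */
    static int decode_audio_packet(AVCodecContext *dec, AVFrame *frame, AVPacket *pkt)
    {
        while (pkt->size > 0) {
            int got_frame = 0;
            int ret = avcodec_decode_audio4(dec, frame, &got_frame, pkt);
            if (ret < 0)
                return ret;       /* decoding error */
            pkt->data += ret;     /* consume the bytes the decoder used */
            pkt->size -= ret;
            if (got_frame) {
                /* frame->nb_samples samples per channel are now valid */
            }
        }
        return 0;
    }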
int64_t max_frames
Definition: ffmpeg.h:325
#define AV_RL32
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
float u
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:821
static int reap_filters(void)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1019
Opaque data information usually sparse.
Definition: avutil.h:147
static int output_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:1784
FIXME Range Coding of cr are level
Definition: snow.txt:367
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:106
static int restore_tty
Definition: ffmpeg.c:162
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
static int got_eagain(void)
Definition: ffmpeg.c:2828
preferred ID for MPEG-1/2 video decoding
int finished
Definition: ffmpeg.h:290
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:228
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:138
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int ret
Definition: ffmpeg.h:255
int audio_volume
Definition: ffmpeg_opt.c:71
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
Stream structure.
Definition: avformat.h:643
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:352
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:134
InputFilter ** filters
Definition: ffmpeg.h:270
int fix_sub_duration
Definition: ffmpeg.h:252
#define VSYNC_DROP
Definition: ffmpeg.h:54
for k
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
int frame_size
Number of samples per channel in an audio frame.
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:62
NULL
Definition: eval.c:55
int av_fifo_space(AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:171
pthread_mutex_t fifo_lock
Definition: ffmpeg.h:292
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:254
int ost_index
Definition: ffmpeg.h:370
struct InputStream * sync_ist
Definition: ffmpeg.h:318
AVS_Value src
Definition: avisynth_c.h:523
enum AVMediaType codec_type
double ts_scale
Definition: ffmpeg.h:234
int unavailable
Definition: ffmpeg.h:358
int me_threshold
Motion estimation threshold below which no motion estimation is performed, but instead the user speci...
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:1941
enum AVCodecID codec_id
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:302
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:202
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1202
static int64_t extra_size
Definition: ffmpeg.c:128
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:186
int sample_rate
samples per second
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1624
AVIOContext * pb
I/O context.
Definition: avformat.h:977
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:152
Close file fclose(fid)
void av_log_set_flags(int arg)
Definition: log.c:274
int ist_index
Definition: ffmpeg.h:280
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:403
const char * graph_desc
Definition: ffmpeg.h:202
static void print_sdp(void)
Definition: ffmpeg.c:1924
int guess_layout_max
Definition: ffmpeg.h:241
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:348
main external API structure.
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:259
static int64_t subtitle_size
Definition: ffmpeg.c:127
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:596
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:154
const char * attachment_filename
Definition: ffmpeg.h:360
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1356
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:510
void * buf
Definition: avisynth_c.h:594
AVFrame * decoded_frame
Definition: ffmpeg.h:219
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:62
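For illustration, a sketch of building an options dictionary with this call; after the dictionary has been handed to an open function, leftover entries indicate unrecognized options (compare assert_avoptions() above). The helper and option values are assumptions.

    #include "libavutil/dict.h"

    /* Build a hypothetical codec options dictionary; the caller frees it with av_dict_free(). */
    static int make_codec_opts(AVDictionary **opts)
    {
        int ret;
        if ((ret = av_dict_set(opts, "threads", "auto", 0)) < 0)
            return ret;
        return av_dict_set(opts, "refcounted_frames", "1", 0);
    }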
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3059
AVStream * st
Definition: ffmpeg.h:215
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:51
int sample_rate
Sample rate of the audio data.
Definition: frame.h:326
int configure_filtergraph(FilterGraph *fg)
OutputStream ** output_streams
Definition: ffmpeg.c:150
int index
Definition: gxfenc.c:89
synthesis window for stochastic i
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:310
static int current_time
Definition: ffmpeg.c:133
int64_t sync_opts
Definition: ffmpeg.h:319
char * vstats_filename
Definition: ffmpeg_opt.c:65
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:135
struct InputStream::@24 prev_sub
discard useless packets like 0 size packets in avi
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int nb_streams_warn
Definition: ffmpeg.h:285
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
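A minimal sketch of opening a decoder for a demuxed stream with an options dictionary, assuming the AVFormatContext has already been opened; entries not consumed by the codec remain in *opts. The function name is illustrative.

    #include "libavformat/avformat.h"
    #include "libavcodec/avcodec.h"
    #include "libavutil/dict.h"

    /* Find and open the decoder for one stream. */
    static int open_stream_decoder(AVStream *st, AVDictionary **opts)
    {
        AVCodec *dec = avcodec_find_decoder(st->codec->codec_id);
        if (!dec)
            return AVERROR_DECODER_NOT_FOUND;
        return avcodec_open2(st->codec, dec, opts);
    }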
int showed_multi_packet_warning
Definition: ffmpeg.h:237
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:86
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
int64_t ts_offset
Definition: ffmpeg.h:281
uint32_t DWORD
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:704
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3105
static void free_input_threads(void)
Definition: ffmpeg.c:2740
libavfilter documentation note: the word "frame" denotes either a video frame or a group of audio samples as stored in an AVFilterBuffer structure; the surrounding text covers format negotiation between links and buffer reference ownership and permissions.
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
misc parsing utilities
#define type
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
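A sketch of the standard demuxing loop built on this call: AVERROR(EAGAIN) means no packet is available right now, AVERROR_EOF ends the loop, and every returned packet is released. The function name is hypothetical and packet processing is left out.

    #include <errno.h>
    #include "libavformat/avformat.h"

    /* Read packets until end of file, dispatching them by stream index. */
    static int demux_all(AVFormatContext *ic)
    {
        AVPacket pkt;
        for (;;) {
            int ret = av_read_frame(ic, &pkt);
            if (ret == AVERROR(EAGAIN))
                continue;          /* a real caller would wait before retrying */
            if (ret == AVERROR_EOF)
                return 0;          /* demuxer is done */
            if (ret < 0)
                return ret;        /* real error */
            /* ... hand pkt to the decoder for ic->streams[pkt.stream_index] ... */
            av_free_packet(&pkt);
        }
    }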
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:95
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:164
AVFrame * filtered_frame
Definition: ffmpeg.h:326
int source_index
Definition: ffmpeg.h:312
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:87
int av_buffersrc_add_ref(AVFilterContext *buffer_src, AVFilterBufferRef *picref, int flags)
Add buffer data in picref to buffer_src.
static volatile int received_nb_signals
Definition: ffmpeg.c:312
int copy_prior_start
Definition: ffmpeg.h:362
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
Read the file with name filename, and put its content in a newly allocated 0-terminated buffer...
Definition: cmdutils.c:1736
int global_quality
Global quality for codecs which cannot change it per frame.
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
int nb_filters
Definition: ffmpeg.h:271
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:122
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by an given frame.
Definition: frame.c:228
int64_t val
Definition: avformat.h:323
int64_t start_time
Decoding: position of the first frame of the component, in AV_TIME_BASE fractional seconds...
Definition: avformat.h:1001
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:340
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:226
int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:95
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:53
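A sketch of turning an AVERROR code into readable text; the av_err2str() macro listed above wraps the same call with a stack buffer. The reporting helper is an assumption, not library API.

    #include <stdio.h>
    #include "libavutil/error.h"
    #include "libavutil/log.h"

    /* Report a libav* error code with a human-readable description. */
    static void report_error(void *logctx, const char *what, int err)
    {
        char buf[AV_ERROR_MAX_STRING_SIZE];
        if (av_strerror(err, buf, sizeof(buf)) < 0)
            snprintf(buf, sizeof(buf), "unknown error %d", err);
        av_log(logctx, AV_LOG_ERROR, "%s: %s\n", what, buf);
    }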
int resample_sample_fmt
Definition: ffmpeg.h:247
int forced_kf_count
Definition: ffmpeg.h:337
int64_t start
Definition: avformat.h:921
char * forced_keyframes
Definition: ffmpeg.h:339
int resample_width
Definition: ffmpeg.h:244
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:228
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:982
int av_fifo_size(AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
struct FilterGraph * graph
Definition: ffmpeg.h:193
uint64_t limit_filesize
Definition: ffmpeg.h:373
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
AVIOContext * progress_avio
Definition: ffmpeg.c:134
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
int main(int argc, char **argv)
Definition: ffmpeg.c:3307
int reinit_filters
Definition: ffmpeg.h:273
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:345
#define VSYNC_CFR
Definition: ffmpeg.h:52
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:689
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:357
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:700
uint32_t start_display_time
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:773
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:111
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:920
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:56
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:698
Same thing on a dB scale
char * key
Definition: dict.h:81
static FILE * vstats_file
Definition: ffmpeg.c:109
int den
denominator
Definition: rational.h:45
function y
Definition: D.m:1
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int copy_ts
Definition: ffmpeg_opt.c:79
struct AVInputFormat * iformat
Can only be iformat or oformat, not both at the same time.
Definition: avformat.h:957
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
Definition: af_aconvert.c:140
AVFormatContext * ctx
Definition: ffmpeg.h:277
static struct termios oldtty
Definition: ffmpeg.c:161
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:352
AVCodec * enc
Definition: ffmpeg.h:324
AVSubtitle subtitle
Definition: ffmpeg.h:256
#define AVUNERROR(e)
Definition: error.h:44
int eof_reached
Definition: ffmpeg.h:278
int forced_kf_index
Definition: ffmpeg.h:338
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:656
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:355
uint8_t * name
Definition: ffmpeg.h:187
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
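A sketch of the FIFO round trip using the generic read/write helpers referenced elsewhere in this listing; passing NULL as the callback makes them behave like a plain memcpy. The function name is illustrative.

    #include <stdint.h>
    #include "libavutil/fifo.h"

    /* Push a block of bytes through an AVFifoBuffer and read it back. */
    static int fifo_roundtrip(const uint8_t *src, uint8_t *dst, int size)
    {
        AVFifoBuffer *f = av_fifo_alloc(size);
        if (!f)
            return AVERROR(ENOMEM);
        av_fifo_generic_write(f, (void *)src, size, NULL);  /* NULL callback == memcpy */
        av_fifo_generic_read(f, dst, size, NULL);
        av_fifo_free(f);
        return 0;
    }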
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:275
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:50
float dts_delta_threshold
Definition: ffmpeg_opt.c:68
struct AVFrac pts
encoding: pts generation when outputting stream
Definition: avformat.h:668
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:691
int channels
number of audio channels
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:104
int top_field_first
Definition: ffmpeg.h:331
printf("static const uint8_t my_array[100] = {\n")
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
OutputFilter ** outputs
Definition: ffmpeg.h:209
struct AVCodecParserContext * parser
Definition: avformat.h:812
The official guide to swscale for confused developers.
Definition: swscale.txt:2
InputFile ** input_files
Definition: ffmpeg.c:147
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:169
AVFormatContext * ctx
Definition: ffmpeg.h:368
pthread_cond_t fifo_cond
Definition: ffmpeg.h:293
void avcodec_free_frame(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
void show_usage(void)
Definition: ffmpeg_opt.c:2440
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:769
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:107
int frame_number
Frame counter, set by libavcodec.
int repeat_pict
This field is used for proper frame duration computation in lavf.
int height
Definition: frame.h:122
#define AV_LOG_INFO
Definition: log.h:156
InputFilter ** inputs
Definition: ffmpeg.h:207
enum AVFieldOrder field_order
Field order.
#define AV_DICT_IGNORE_SUFFIX
Definition: dict.h:68
OutputFile ** output_files
Definition: ffmpeg.c:152
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:97
static void flush_encoders(void)
Definition: ffmpeg.c:1275
int copy_tb
Definition: ffmpeg_opt.c:80
integer integer log2
Definition: avutil.txt:2
static volatile int received_sigterm
Definition: ffmpeg.c:311
#define FFSWAP(type, a, b)
Definition: common.h:61
int discard
Definition: ffmpeg.h:216
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:2819
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:1700
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, AVStream *st)
Send a nice dump of a packet to the log.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:679
enum AVSubtitleType type
int64_t first_pts
Definition: ffmpeg.h:322
int nb_inputs
Definition: ffmpeg.h:208
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:702
int index
Definition: ffmpeg.h:311
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:738
uint64_t resample_channel_layout
Definition: ffmpeg.h:250
This structure stores compressed data.
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:52
libavfilter documentation note: buffered frames must be flushed immediately when new input produces new frames; the filter must not call request_frame() to get more input, but simply process or queue the frame, leaving the task of requesting more frames to its own request_frame() method or to the application.
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
int debug_ts
Definition: ffmpeg_opt.c:81
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:2598
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:127
static void sigterm_handler(int sig)
Definition: ffmpeg.c:315
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
for(j=16;j >0;--j)
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:258
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:190
AVProgram ** programs
Definition: avformat.h:1051
int joined
Definition: ffmpeg.h:291
InputStream ** input_streams
Definition: ffmpeg.c:145
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: ffmpeg.h:303
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:632
uint8_t * subtitle_header
Header containing style information for text subtitles.
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.