/* ffplay.c — recovered from a doxygen source listing; the embedded line
 * numbers and gaps below are extraction artifacts, not part of the code. */
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
47 
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
53 #endif
54 
55 #include <SDL.h>
56 #include <SDL_thread.h>
57 
58 #include "cmdutils.h"
59 
60 #include <assert.h>
61 
/* program identity strings consumed by cmdutils */
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

/* demuxer packet-queue byte cap and low-water frame count */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* external clock speed adjustment constants for realtime sources based on buffer fullness */
#define EXTERNAL_CLOCK_SPEED_MIN  0.900
#define EXTERNAL_CLOCK_SPEED_MAX  1.010
#define EXTERNAL_CLOCK_SPEED_STEP 0.001

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* polls for possible required screen refresh at least this often, should be less than 1/fps */
#define REFRESH_RATE 0.01

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
#define SAMPLE_ARRAY_SIZE (8 * 65536)

/* microseconds of mouse inactivity before the cursor is hidden */
#define CURSOR_HIDE_DELAY 1000000

/* default libswscale scaling algorithm */
static int64_t sws_flags = SWS_BICUBIC;
98 
/* NOTE(review): extraction from the doxygen listing dropped several lines in
 * this region (the embedded numbering jumps): struct members, closing braces
 * and at least one enum value are missing — restore from upstream before
 * compiling.  Comments below describe only what is visible. */
typedef struct MyAVPacketList {
    /* NOTE(review): upstream also carries the AVPacket itself and a next
     * pointer here (both referenced by the queue code below). */
    int serial;              /* queue serial at enqueue time */

typedef struct PacketQueue {
    MyAVPacketList *first_pkt, *last_pkt;   /* singly linked FIFO */
    int size;                /* total bytes queued (packet data + node overhead) */
    int serial;              /* bumped on flush; consumers drop stale packets */
    SDL_mutex *mutex;        /* protects every field of the queue */
    SDL_cond *cond;          /* signalled on enqueue and on abort */
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 4
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;             // presentation timestamp for this picture
    int64_t pos;            // byte position in file
    SDL_Overlay *bmp;       /* SDL YUV overlay holding the frame pixels */
    int width, height;  /* source height & width */
    int serial;             /* packet-queue serial the picture belongs to */

} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
} SubPicture;

typedef struct AudioParams {
    int freq;                   /* sample rate in Hz */
    int channels;               /* channel count */
    int64_t channel_layout;     /* AV channel-layout bitmask */
} AudioParams;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    /* NOTE(review): AV_SYNC_VIDEO_MASTER was lost in extraction but is
     * referenced below — restore it here to keep enum values correct. */
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
147 
/* NOTE(review): many VideoState members were lost in extraction (the
 * embedded numbering jumps repeatedly) — streams, codec contexts, the
 * packet/picture queue arrays, etc.  Only the surviving members are
 * annotated here; restore the full struct from upstream. */
typedef struct VideoState {
    SDL_Thread *read_tid;       /* demuxer thread */
    SDL_Thread *video_tid;      /* video decoder thread */
    int paused;                 /* playback paused flag */
    int seek_req;               /* a seek has been requested */
    int64_t seek_pos;           /* seek target (ts or bytes) */
    int64_t seek_rel;           /* seek distance, for bounding the request */
    int realtime;               /* input is a realtime source */


    double external_clock; ///< external clock base
    double external_clock_drift; ///< external clock base - time (av_gettime) at which we updated external_clock
    int64_t external_clock_time; ///< last reference time
    double external_clock_speed; ///< speed of the external clock

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    unsigned int audio_buf_size; /* in bytes */
    unsigned int audio_buf1_size;
    int audio_buf_index; /* in bytes */
    struct AudioParams audio_src;   /* format delivered by the decoder */
#if CONFIG_AVFILTER
    struct AudioParams audio_filter_src;
#endif
    struct AudioParams audio_tgt;   /* format wanted by the SDL audio device */

    enum ShowMode {
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;
    int16_t sample_array[SAMPLE_ARRAY_SIZE];    /* ring buffer for visualization */
    int xpos;                   /* current x position of the RDFT display */

    SDL_Thread *subtitle_tid;   /* subtitle decoder thread */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;         /* wall-clock time the current frame started */
    double video_current_pts; // current displayed pts
    double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; // current displayed file pos
    double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    char filename[1024];
    int width, height, xleft, ytop;     /* current display geometry */
    int step;                   /* single-frame stepping mode */

#if CONFIG_AVFILTER
    AVFilterContext *in_video_filter; // the first filter in the video chain
    AVFilterContext *out_video_filter; // the last filter in the video chain
    AVFilterContext *in_audio_filter; // the first filter in the audio chain
    AVFilterContext *out_audio_filter; // the last filter in the audio chain
    AVFilterGraph *agraph; // audio filter graph
#endif

    int last_video_stream, last_audio_stream, last_subtitle_stream;

} VideoState;
270 
/* options specified by the user */
/* NOTE(review): extraction dropped a few lines in this region, including the
 * header of the wanted-stream array initializer (upstream:
 * "static int wanted_stream[AVMEDIA_TYPE_NB] = {") and the declaration of
 * flush_pkt ("static AVPacket flush_pkt;"), which the queue code references
 * — restore from upstream. */
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int default_width  = 640;
static int default_height = 480;
static int screen_width  = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int subtitle_disable;
    [AVMEDIA_TYPE_AUDIO]    = -1,
    [AVMEDIA_TYPE_VIDEO]    = -1,
    [AVMEDIA_TYPE_SUBTITLE] = -1,
};
static int seek_by_bytes = -1;          /* -1 = auto-detect per format */
static int display_disable;
static int show_status = 1;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static int error_concealment = 3;
static int decoder_reorder_pts = -1;    /* -1 = let the decoder decide */
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = -1;
static int infinite_buffer = -1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;
double rdftspeed = 0.02;                /* seconds between RDFT columns */
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
static char *afilters = NULL;
#endif

/* current context */
static int is_full_screen;
static int64_t audio_callback_time;     /* av_gettime() at last audio callback */


#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

331 static inline
332 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
333  enum AVSampleFormat fmt2, int64_t channel_count2)
334 {
335  /* If channel count == 1, planar and non-planar formats are the same */
336  if (channel_count1 == 1 && channel_count2 == 1)
338  else
339  return channel_count1 != channel_count2 || fmt1 != fmt2;
340 }
341 
/* Return channel_layout if it is consistent with the given channel count,
 * 0 (unknown layout) otherwise. */
static inline
int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
{
    if (!channel_layout)
        return 0;
    /* only trust a layout whose channel count matches */
    return av_get_channel_layout_nb_channels(channel_layout) == channels
               ? channel_layout : 0;
}
350 
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* NOTE(review): the signature line was lost in extraction; from the call
 * sites this is: static int packet_queue_put_private(PacketQueue *q,
 * AVPacket *pkt).  Appends one packet node to the queue; must be called
 * with q->mutex held.  Returns 0 on success, -1 on abort or alloc failure. */
{
    MyAVPacketList *pkt1;

    if (q->abort_request)
        return -1;

    pkt1 = av_malloc(sizeof(MyAVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    /* a flush packet starts a new serial "generation" */
    if (pkt == &flush_pkt)
        q->serial++;
    pkt1->serial = q->serial;

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);
    return 0;
}
380 
/* NOTE(review): signature lost in extraction; upstream:
 * static int packet_queue_put(PacketQueue *q, AVPacket *pkt).
 * Thread-safe enqueue: makes the packet's data refcounted/owned, then
 * queues it under the mutex.  Returns packet_queue_put_private()'s result. */
{
    int ret;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    SDL_LockMutex(q->mutex);
    ret = packet_queue_put_private(q, pkt);
    SDL_UnlockMutex(q->mutex);

    /* on failure the caller's packet must not leak */
    if (pkt != &flush_pkt && ret < 0)
        av_free_packet(pkt);

    return ret;
}
398 
/* packet queue handling */
/* NOTE(review): signature lost in extraction; upstream:
 * static void packet_queue_init(PacketQueue *q).
 * Queues start in the aborted state; packet_queue_start() enables them. */
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    q->abort_request = 1;
}
407 
/* NOTE(review): signature lost in extraction; upstream:
 * static void packet_queue_flush(PacketQueue *q).
 * Drops and frees every queued packet; resets counters to empty. */
{
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}
424 
/* NOTE(review): the signature (upstream: static void
 * packet_queue_destroy(PacketQueue *q)) and the first statement
 * (packet_queue_flush(q);) were lost in extraction — without the flush,
 * queued packets leak.  Restore from upstream. */
{
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
431 
/* NOTE(review): signature lost in extraction; upstream:
 * static void packet_queue_abort(PacketQueue *q).
 * Sets the abort flag and wakes any reader blocked in packet_queue_get(). */
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
442 
/* NOTE(review): signature lost in extraction; upstream:
 * static void packet_queue_start(PacketQueue *q).
 * Re-enables the queue and pushes a flush packet to begin a new serial. */
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 0;
    packet_queue_put_private(q, &flush_pkt);
    SDL_UnlockMutex(q->mutex);
}
450 
451 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
452 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
453 {
454  MyAVPacketList *pkt1;
455  int ret;
456 
457  SDL_LockMutex(q->mutex);
458 
459  for (;;) {
460  if (q->abort_request) {
461  ret = -1;
462  break;
463  }
464 
465  pkt1 = q->first_pkt;
466  if (pkt1) {
467  q->first_pkt = pkt1->next;
468  if (!q->first_pkt)
469  q->last_pkt = NULL;
470  q->nb_packets--;
471  q->size -= pkt1->pkt.size + sizeof(*pkt1);
472  *pkt = pkt1->pkt;
473  if (serial)
474  *serial = pkt1->serial;
475  av_free(pkt1);
476  ret = 1;
477  break;
478  } else if (!block) {
479  ret = 0;
480  break;
481  } else {
482  SDL_CondWait(q->cond, q->mutex);
483  }
484  }
485  SDL_UnlockMutex(q->mutex);
486  return ret;
487 }
488 
489 static inline void fill_rectangle(SDL_Surface *screen,
490  int x, int y, int w, int h, int color, int update)
491 {
492  SDL_Rect rect;
493  rect.x = x;
494  rect.y = y;
495  rect.w = w;
496  rect.h = h;
497  SDL_FillRect(screen, &rect, color);
498  if (update && w > 0 && h > 0)
499  SDL_UpdateRect(screen, x, y, w, h);
500 }
501 
/* draw only the border of a rectangle */
/* NOTE(review): the four "fill_rectangle(screen," call headers (one per
 * border strip: left, right, top, bottom) were lost in extraction — only
 * their argument lines remain below.  Restore from upstream. */
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;                 /* left margin */
    if (w1 < 0)
        w1 = 0;
    w2 = width - (x + w);   /* right margin */
    if (w2 < 0)
        w2 = 0;
    h1 = y;                 /* top margin */
    if (h1 < 0)
        h1 = 0;
    h2 = height - (y + h);  /* bottom margin */
    if (h2 < 0)
        h2 = 0;
    /* left strip (call header lost) */
        xleft, ytop,
        w1, height,
        color, update);
    /* right strip (call header lost) */
        xleft + width - w2, ytop,
        w2, height,
        color, update);
    /* top strip (call header lost) */
        xleft + w1, ytop,
        width - w1 - w2, h1,
        color, update);
    /* bottom strip (call header lost) */
        xleft + w1, ytop + height - h2,
        width - w1 - w2, h2,
        color, update);
}
537 
/* Blend newp over oldp with alpha a; s is an extra fractional shift used
 * when oldp carries the same scale as summed (pre-averaged) samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at s into its four 8-bit components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the index byte at s and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components back into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per subtitle-bitmap pixel (palettized: one index byte) */
#define BPP 1
566 
567 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
568 {
569  int wrap, wrap3, width2, skip2;
570  int y, u, v, a, u1, v1, a1, w, h;
571  uint8_t *lum, *cb, *cr;
572  const uint8_t *p;
573  const uint32_t *pal;
574  int dstx, dsty, dstw, dsth;
575 
576  dstw = av_clip(rect->w, 0, imgw);
577  dsth = av_clip(rect->h, 0, imgh);
578  dstx = av_clip(rect->x, 0, imgw - dstw);
579  dsty = av_clip(rect->y, 0, imgh - dsth);
580  lum = dst->data[0] + dsty * dst->linesize[0];
581  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
582  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
583 
584  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
585  skip2 = dstx >> 1;
586  wrap = dst->linesize[0];
587  wrap3 = rect->pict.linesize[0];
588  p = rect->pict.data[0];
589  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
590 
591  if (dsty & 1) {
592  lum += dstx;
593  cb += skip2;
594  cr += skip2;
595 
596  if (dstx & 1) {
597  YUVA_IN(y, u, v, a, p, pal);
598  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601  cb++;
602  cr++;
603  lum++;
604  p += BPP;
605  }
606  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
607  YUVA_IN(y, u, v, a, p, pal);
608  u1 = u;
609  v1 = v;
610  a1 = a;
611  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 
613  YUVA_IN(y, u, v, a, p + BPP, pal);
614  u1 += u;
615  v1 += v;
616  a1 += a;
617  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620  cb++;
621  cr++;
622  p += 2 * BPP;
623  lum += 2;
624  }
625  if (w) {
626  YUVA_IN(y, u, v, a, p, pal);
627  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630  p++;
631  lum++;
632  }
633  p += wrap3 - dstw * BPP;
634  lum += wrap - dstw - dstx;
635  cb += dst->linesize[1] - width2 - skip2;
636  cr += dst->linesize[2] - width2 - skip2;
637  }
638  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
639  lum += dstx;
640  cb += skip2;
641  cr += skip2;
642 
643  if (dstx & 1) {
644  YUVA_IN(y, u, v, a, p, pal);
645  u1 = u;
646  v1 = v;
647  a1 = a;
648  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
649  p += wrap3;
650  lum += wrap;
651  YUVA_IN(y, u, v, a, p, pal);
652  u1 += u;
653  v1 += v;
654  a1 += a;
655  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
657  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
658  cb++;
659  cr++;
660  p += -wrap3 + BPP;
661  lum += -wrap + 1;
662  }
663  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
664  YUVA_IN(y, u, v, a, p, pal);
665  u1 = u;
666  v1 = v;
667  a1 = a;
668  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669 
670  YUVA_IN(y, u, v, a, p + BPP, pal);
671  u1 += u;
672  v1 += v;
673  a1 += a;
674  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
675  p += wrap3;
676  lum += wrap;
677 
678  YUVA_IN(y, u, v, a, p, pal);
679  u1 += u;
680  v1 += v;
681  a1 += a;
682  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
683 
684  YUVA_IN(y, u, v, a, p + BPP, pal);
685  u1 += u;
686  v1 += v;
687  a1 += a;
688  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
689 
690  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
691  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
692 
693  cb++;
694  cr++;
695  p += -wrap3 + 2 * BPP;
696  lum += -wrap + 2;
697  }
698  if (w) {
699  YUVA_IN(y, u, v, a, p, pal);
700  u1 = u;
701  v1 = v;
702  a1 = a;
703  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
704  p += wrap3;
705  lum += wrap;
706  YUVA_IN(y, u, v, a, p, pal);
707  u1 += u;
708  v1 += v;
709  a1 += a;
710  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
711  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
712  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
713  cb++;
714  cr++;
715  p += -wrap3 + BPP;
716  lum += -wrap + 1;
717  }
718  p += wrap3 + (wrap3 - dstw * BPP);
719  lum += wrap + (wrap - dstw - dstx);
720  cb += dst->linesize[1] - width2 - skip2;
721  cr += dst->linesize[2] - width2 - skip2;
722  }
723  /* handle odd height */
724  if (h) {
725  lum += dstx;
726  cb += skip2;
727  cr += skip2;
728 
729  if (dstx & 1) {
730  YUVA_IN(y, u, v, a, p, pal);
731  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
732  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
733  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
734  cb++;
735  cr++;
736  lum++;
737  p += BPP;
738  }
739  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
740  YUVA_IN(y, u, v, a, p, pal);
741  u1 = u;
742  v1 = v;
743  a1 = a;
744  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
745 
746  YUVA_IN(y, u, v, a, p + BPP, pal);
747  u1 += u;
748  v1 += v;
749  a1 += a;
750  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
751  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
752  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
753  cb++;
754  cr++;
755  p += 2 * BPP;
756  lum += 2;
757  }
758  if (w) {
759  YUVA_IN(y, u, v, a, p, pal);
760  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
761  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
762  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
763  }
764  }
765 }
766 
/* NOTE(review): signature lost in extraction; upstream:
 * static void free_subpicture(SubPicture *sp) — releases the decoded
 * subtitle data held by the queue slot. */
{
    avsubtitle_free(&sp->sub);
}
771 
772 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
773 {
774  float aspect_ratio;
775  int width, height, x, y;
776 
777  if (vp->sar.num == 0)
778  aspect_ratio = 0;
779  else
780  aspect_ratio = av_q2d(vp->sar);
781 
782  if (aspect_ratio <= 0.0)
783  aspect_ratio = 1.0;
784  aspect_ratio *= (float)vp->width / (float)vp->height;
785 
786  /* XXX: we suppose the screen has a 1.0 pixel ratio */
787  height = scr_height;
788  width = ((int)rint(height * aspect_ratio)) & ~1;
789  if (width > scr_width) {
790  width = scr_width;
791  height = ((int)rint(width / aspect_ratio)) & ~1;
792  }
793  x = (scr_width - width) / 2;
794  y = (scr_height - height) / 2;
795  rect->x = scr_xleft + x;
796  rect->y = scr_ytop + y;
797  rect->w = FFMAX(width, 1);
798  rect->h = FFMAX(height, 1);
799 }
800 
/* NOTE(review): signature lost in extraction; upstream:
 * static void video_image_display(VideoState *is).
 * Blits the picture at the queue read index — with any subtitle whose
 * display time has arrived blended in — to the screen. */
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend the subtitle once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL overlay plane order differs: planes 1 and 2 swapped */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);

        SDL_DisplayYUVOverlay(vp->bmp, &rect);

        /* repaint the black border when the rect moved or a refresh was forced */
        if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
            int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
            fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
            is->last_display_rect = rect;
        }
    }
}
846 
/* Wrap a into [0, b] for positive b.  Note the historical quirk: for a
 * negative exact multiple of b the result is b, not 0 (the correction
 * term is added whenever a < 0, not whenever the remainder is negative). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return a < 0 ? r + b : r;
}
851 
/* NOTE(review): the signature (upstream: static void
 * video_audio_display(VideoState *s)) and four "fill_rectangle(screen,"
 * call headers were lost in extraction — only their argument lines remain
 * (marked below).  Draws either the waveform (SHOW_MODE_WAVES) or an RDFT
 * spectrogram of recently played audio. */
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two giving at least 2*height frequency samples */
    for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
        ;
    nb_freq = 1 << (rdft_bits - 1);

    /* compute display index : center on currently output samples */
    channels = s->audio_tgt.channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = s->audio_write_buf_size;
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_tgt.freq) / 1000000;
        }

        delay += 2 * data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if (s->show_mode == SHOW_MODE_WAVES) {
            /* search a nearby zero crossing to stabilize the waveform */
            h = INT_MIN;
            for (i = 0; i < 1000; i += channels) {
                int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a = s->sample_array[idx];
                int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
                int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
                int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
                int score = a - d;
                if (h < score && (b ^ c) < 0) {
                    h = score;
                    i_start = idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if (s->show_mode == SHOW_MODE_WAVES) {
        /* NOTE(review): "fill_rectangle(screen," call header lost here */
        s->xleft, s->ytop, s->width, s->height,
        bgcolor, 0);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for (ch = 0; ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for (x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                /* NOTE(review): "fill_rectangle(screen," call header lost here */
                s->xleft + x, ys, 1, y,
                fgcolor, 0);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* blue separator line between channels */
        for (ch = 1; ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            /* NOTE(review): "fill_rectangle(screen," call header lost here */
            s->xleft, y, s->width, 1,
            fgcolor, 0);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    } else {
        /* RDFT spectrogram: at most two channels are shown */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if (rdft_bits != s->rdft_bits) {
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits = rdft_bits;
            s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for (ch = 0; ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2 * nb_freq * ch;
                i = i_start + ch;
                /* apply a (1 - w^2) window before the transform */
                for (x = 0; x < 2 * nb_freq; x++) {
                    double w = (x-nb_freq) * (1.0 / nb_freq);
                    data[ch][x] = s->sample_array[i] * (1.0 - w * w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            // least efficient way to do this, we should of course directly access it but its more than fast enough
            for (y = 0; y < s->height; y++) {
                double w = 1 / sqrt(nb_freq);
                int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
                int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
                       + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
                a = FFMIN(a, 255);
                b = FFMIN(b, 255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);

                /* NOTE(review): "fill_rectangle(screen," call header lost here */
                s->xpos, s->height-y, 1, 1,
                fgcolor, 0);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        if (!s->paused)
            s->xpos++;
        if (s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
991 
/* NOTE(review): the signature (upstream: static void
 * stream_close(VideoState *is)) and the lines destroying the packet queues
 * (between the thread join and the picture loop) were lost in extraction.
 * Stops the demuxer thread and frees all per-stream resources. */
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->read_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    SDL_DestroyCond(is->continue_read_thread);
#if !CONFIG_AVFILTER
    sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1021 
/* Tear down the player and the process; never returns.
 * NOTE(review): two cleanup statements were lost in extraction (one just
 * before uninit_opts(), one just before the show_status test) — confirm
 * against upstream. */
static void do_exit(VideoState *is)
{
    if (is) {
        stream_close(is);
    }
    uninit_opts();
#if CONFIG_AVFILTER
    av_freep(&vfilters);
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* silence any further logging before exit */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}
1039 
/* Signal handler: terminate immediately with a distinctive exit status.
 * NOTE(review): exit() is not async-signal-safe; kept as-is to preserve
 * behavior (atexit handlers still run). */
static void sigterm_handler(int sig)
{
    exit(123);
}
1044 
/* (Re)open the SDL video surface sized for the given picture.
 * NOTE(review): two lines were lost in extraction: the branch header
 * "if (is_full_screen && fs_screen_width) {" and the default window-title
 * assignment ("window_title = input_filename;") — restore from upstream. */
static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
{
    int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
    int w,h;
    SDL_Rect rect;

    if (is_full_screen) flags |= SDL_FULLSCREEN;
    else                flags |= SDL_RESIZABLE;

    /* derive the default window size from the picture's display rect */
    if (vp && vp->width) {
        calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
        default_width  = rect.w;
        default_height = rect.h;
    }

    /* NOTE(review): fullscreen branch header lost here */
        w = fs_screen_width;
        h = fs_screen_height;
    } else if (!is_full_screen && screen_width) {
        w = screen_width;
        h = screen_height;
    } else {
        w = default_width;
        h = default_height;
    }
    /* nothing to do when the existing surface already matches */
    if (screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h && !force_set_video_mode)
        return 0;
    screen = SDL_SetVideoMode(w, h, 0, flags);
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        do_exit(is);
    }
    if (!window_title)
    /* NOTE(review): default-title assignment lost here */
    SDL_WM_SetCaption(window_title, window_title);

    is->width  = screen->w;
    is->height = screen->h;

    return 0;
}
1087 
/* display the current picture, if any */
/* NOTE(review): signature lost in extraction; upstream:
 * static void video_display(VideoState *is).  Opens the screen lazily,
 * then dispatches to the audio visualization or the video blitter. */
{
    if (!screen)
        video_open(is, 0, NULL);
    if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}
1098 
1099 /* get the current audio clock value */
1101 {
1102  if (is->audio_clock_serial != is->audioq.serial)
1103  return NAN;
1104  if (is->paused) {
1105  return is->audio_current_pts;
1106  } else {
1107  return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1108  }
1109 }
1110 
1111 /* get the current video clock value */
1113 {
1114  if (is->video_clock_serial != is->videoq.serial)
1115  return NAN;
1116  if (is->paused) {
1117  return is->video_current_pts;
1118  } else {
1119  return is->video_current_pts_drift + av_gettime() / 1000000.0;
1120  }
1121 }
1122 
1123 /* get the current external clock value */
1125 {
1126  if (is->paused) {
1127  return is->external_clock;
1128  } else {
1129  double time = av_gettime() / 1000000.0;
1130  return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1131  }
1132 }
1133 
/* NOTE(review): the opening line "static int get_master_sync_type(VideoState *is) {"
 * was lost in extraction.  Chooses the effective sync master, falling back
 * when the preferred stream is absent. */
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            return AV_SYNC_VIDEO_MASTER;
        else
            return AV_SYNC_AUDIO_MASTER;
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            return AV_SYNC_AUDIO_MASTER;
        else
            return AV_SYNC_EXTERNAL_CLOCK;
    } else {
        return AV_SYNC_EXTERNAL_CLOCK;
    }
}
1149 
/* get the current master clock value */
/* NOTE(review): signature lost in extraction; upstream:
 * static double get_master_clock(VideoState *is). */
{
    double val;

    switch (get_master_sync_type(is)) {
    case AV_SYNC_VIDEO_MASTER:
        val = get_video_clock(is);
        break;
    case AV_SYNC_AUDIO_MASTER:
        val = get_audio_clock(is);
        break;
    default:
        val = get_external_clock(is);
        break;
    }
    return val;
}
1168 
/* Rebase the external clock to the given pts.
 * NOTE(review): the statement refreshing is->external_clock_time (to
 * av_gettime()) was lost in extraction — restore it; the drift computation
 * below reads that field. */
static void update_external_clock_pts(VideoState *is, double pts)
{
   is->external_clock = pts;
   is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
}
1175 
1176 static void check_external_clock_sync(VideoState *is, double pts) {
1177  double ext_clock = get_external_clock(is);
1178  if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
1179  update_external_clock_pts(is, pts);
1180  }
1181 }
1182 
/* Change the external clock's playback speed.
 * NOTE(review): upstream first rebases the clock with
 * update_external_clock_pts(is, get_external_clock(is)); that line was
 * lost in extraction — without it, speed changes retroactively skew time. */
static void update_external_clock_speed(VideoState *is, double speed) {
    is->external_clock_speed = speed;
}
1187 
/* NOTE(review): the signature (upstream: static void
 * check_external_clock_speed(VideoState *is) {) and the two
 * update_external_clock_speed(...) calls in the first two branches were
 * lost in extraction.  Nudges the external clock speed according to
 * packet-queue fullness for realtime sources. */
   if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
       is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
       /* queue nearly empty: slow down (call lost in extraction) */
   } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
              (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
       /* queue well filled: speed up (call lost in extraction) */
   } else {
       /* otherwise decay the speed back towards nominal 1.0 */
       double speed = is->external_clock_speed;
       if (speed != 1.0)
           update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
   }
}
1201 
/* seek in the stream */
/* NOTE(review): the statement "is->seek_flags |= AVSEEK_FLAG_BYTE;" (the
 * body of the seek_by_bytes test) was lost in extraction — as written the
 * if now wrongly governs the seek_req assignment.  Restore from upstream. */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    /* only one pending seek request at a time; the read thread consumes it */
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
        is->seek_req = 1;
        SDL_CondSignal(is->continue_read_thread);
    }
}
1215 
/* pause or resume the video */
/* NOTE(review): the signature (upstream: static void
 * stream_toggle_pause(VideoState *is)) and a call rebasing the external
 * clock just before the toggle were lost in extraction. */
{
    if (is->paused) {
        /* on unpause: credit the paused duration to the frame timer and
           re-anchor the running video pts */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1229 
/* NOTE(review): signature lost in extraction; upstream:
 * static void toggle_pause(VideoState *is).
 * User-level pause toggle; also cancels single-frame stepping. */
{
    stream_toggle_pause(is);
    is->step = 0;
}
1235 
/* NOTE(review): signature lost in extraction; upstream:
 * static void step_to_next_frame(VideoState *is). */
{
    /* if the stream is paused unpause it, then step */
    if (is->paused)
        stream_toggle_pause(is);
    is->step = 1;
}
1243 
/* Adjust the nominal inter-frame delay so video follows the master clock.
 * NOTE(review): the branch header
 * "if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {" was lost in
 * extraction (its closing brace survives below); note also that diff is
 * read by av_dlog even when that branch is not taken — upstream quirk. */
static double compute_target_delay(double delay, VideoState *is)
{
    double sync_threshold, diff;

    /* update delay to follow master synchronisation source */
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
            delay, -diff);

    return delay;
}
1271 
/* Advance the picture-queue read index and wake any writer waiting for
 * space. NOTE(review): the signature (original line 1272, presumably
 * `static void pictq_next_picture(VideoState *is)`) and the wrap test on
 * line 1274 (`if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)`) are
 * missing from this listing. */
1273  /* update queue size and signal for next picture */
1275  is->pictq_rindex = 0;
1276 
1277  SDL_LockMutex(is->pictq_mutex);
1278  is->pictq_size--;
1279  SDL_CondSignal(is->pictq_cond);
1280  SDL_UnlockMutex(is->pictq_mutex);
1281 }
1282 
/* Step the read index back one slot so the previously displayed picture can
 * be shown again (used by force_refresh). Returns 1 if a picture was
 * re-queued, 0 otherwise.
 * NOTE(review): the signature (original line 1283, presumably
 * `static int pictq_prev_picture(VideoState *is)`), the prevvp assignment
 * (line 1287) and the rindex wrap-around (line 1292) are missing from this
 * listing. */
1284  VideoPicture *prevvp;
1285  int ret = 0;
1286  /* update queue size and signal for the previous picture */
1288  if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1289  SDL_LockMutex(is->pictq_mutex);
1290  if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1291  if (--is->pictq_rindex == -1)
1293  is->pictq_size++;
1294  ret = 1;
1295  }
1296  SDL_CondSignal(is->pictq_cond);
1297  SDL_UnlockMutex(is->pictq_mutex);
1298  }
1299  return ret;
1300 }
1301 
/* Publish a new video clock value (pts, byte position, queue serial) and
 * record the drift against wall time. Callers hold pictq_mutex.
 * NOTE(review): original line 1311 — the body of the serial-match branch —
 * is missing; presumably it syncs the external clock to the video clock. */
1302 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1303  double time = av_gettime() / 1000000.0;
1304  /* update current video pts */
1305  is->video_current_pts = pts;
1306  is->video_current_pts_drift = is->video_current_pts - time;
1307  is->video_current_pos = pos;
1308  is->frame_last_pts = pts;
1309  is->video_clock_serial = serial;
1310  if (is->videoq.serial == serial)
1312 }
1313 
1314 /* called to display each frame */
/* Main display pacing routine, driven from the event loop. Decides whether
 * to show the next queued picture now, sleep (via *remaining_time), drop a
 * late frame, or redisplay the current one; also drives the audio
 * visualisation and the subtitle queue, and prints periodic status.
 * NOTE(review): the doxygen extraction dropped several original lines
 * (gaps visible in the embedded numbering: 1324, 1342-1344, 1475,
 * 1479-1480); this listing is not compilable as-is — confirm any edit
 * against the full ffplay.c. */
1315 static void video_refresh(void *opaque, double *remaining_time)
1316 {
1317  VideoState *is = opaque;
1318  VideoPicture *vp;
1319  double time;
1320 
1321  SubPicture *sp, *sp2;
1322 
  /* realtime + external clock: body on original line 1324 is missing;
  presumably check_external_clock_speed(is); */
1323  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1325 
  /* audio visualisation (waves/RDFT) refresh, throttled by rdftspeed */
1326  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1327  time = av_gettime() / 1000000.0;
1328  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1329  video_display(is);
1330  is->last_vis_time = time;
1331  }
1332  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1333  }
1334 
1335  if (is->video_st) {
1336  int redisplay = 0;
1337  if (is->force_refresh)
1338  redisplay = pictq_prev_picture(is);
1339 retry:
  /* empty queue: lines 1342-1344 are missing here — presumably the
  late-frame-drop bookkeeping done under pictq_mutex */
1340  if (is->pictq_size == 0) {
1341  SDL_LockMutex(is->pictq_mutex);
1345  }
1346  SDL_UnlockMutex(is->pictq_mutex);
1347  // nothing to do, no picture to display in the queue
1348  } else {
1349  double last_duration, duration, delay;
1350  /* dequeue the picture */
1351  vp = &is->pictq[is->pictq_rindex];
1352 
  /* stale picture from before a flush/seek: discard and retry */
1353  if (vp->serial != is->videoq.serial) {
1354  pictq_next_picture(is);
1355  redisplay = 0;
1356  goto retry;
1357  }
1358 
1359  if (is->paused)
1360  goto display;
1361 
1362  /* compute nominal last_duration */
1363  last_duration = vp->pts - is->frame_last_pts;
1364  if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
1365  /* if duration of the last frame was sane, update last_duration in video state */
1366  is->frame_last_duration = last_duration;
1367  }
1368  delay = compute_target_delay(is->frame_last_duration, is);
1369 
1370  time= av_gettime()/1000000.0;
  /* not yet time for this frame: ask the caller to sleep */
1371  if (time < is->frame_timer + delay) {
1372  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1373  return;
1374  }
1375 
1376  if (delay > 0)
1377  is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1378 
1379  SDL_LockMutex(is->pictq_mutex);
1380  if (!isnan(vp->pts))
1381  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1382  SDL_UnlockMutex(is->pictq_mutex);
1383 
  /* drop this frame if the next one is already due (framedrop policy) */
1384  if (is->pictq_size > 1) {
1385  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1386  duration = nextvp->pts - vp->pts;
1387  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1388  if (!redisplay)
1389  is->frame_drops_late++;
1390  pictq_next_picture(is);
1391  redisplay = 0;
1392  goto retry;
1393  }
1394  }
1395 
1396  if (is->subtitle_st) {
  /* on subtitle stream switch, drain the whole subpicture queue */
1397  if (is->subtitle_stream_changed) {
1398  SDL_LockMutex(is->subpq_mutex);
1399 
1400  while (is->subpq_size) {
1401  free_subpicture(&is->subpq[is->subpq_rindex]);
1402 
1403  /* update queue size and signal for next picture */
1404  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1405  is->subpq_rindex = 0;
1406 
1407  is->subpq_size--;
1408  }
1409  is->subtitle_stream_changed = 0;
1410 
1411  SDL_CondSignal(is->subpq_cond);
1412  SDL_UnlockMutex(is->subpq_mutex);
1413  } else {
1414  if (is->subpq_size > 0) {
1415  sp = &is->subpq[is->subpq_rindex];
1416 
1417  if (is->subpq_size > 1)
1418  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1419  else
1420  sp2 = NULL;
1421 
  /* free the subpicture once it has expired or its successor is due */
1422  if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1423  || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1424  {
1425  free_subpicture(sp);
1426 
1427  /* update queue size and signal for next picture */
1428  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1429  is->subpq_rindex = 0;
1430 
1431  SDL_LockMutex(is->subpq_mutex);
1432  is->subpq_size--;
1433  SDL_CondSignal(is->subpq_cond);
1434  SDL_UnlockMutex(is->subpq_mutex);
1435  }
1436  }
1437  }
1438  }
1439 
1440 display:
1441  /* display picture */
1442  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1443  video_display(is);
1444 
1445  pictq_next_picture(is);
1446 
  /* single-step mode: pause again after showing one frame */
1447  if (is->step && !is->paused)
1448  stream_toggle_pause(is);
1449  }
1450  }
1451  is->force_refresh = 0;
  /* periodic (~every 30ms) status line on stdout */
1452  if (show_status) {
1453  static int64_t last_time;
1454  int64_t cur_time;
1455  int aqsize, vqsize, sqsize;
1456  double av_diff;
1457 
1458  cur_time = av_gettime();
1459  if (!last_time || (cur_time - last_time) >= 30000) {
1460  aqsize = 0;
1461  vqsize = 0;
1462  sqsize = 0;
1463  if (is->audio_st)
1464  aqsize = is->audioq.size;
1465  if (is->video_st)
1466  vqsize = is->videoq.size;
1467  if (is->subtitle_st)
1468  sqsize = is->subtitleq.size;
1469  av_diff = 0;
1470  if (is->audio_st && is->video_st)
1471  av_diff = get_audio_clock(is) - get_video_clock(is);
  /* NOTE(review): printf arguments on original lines 1475 and
  1479-1480 (frame drop counters etc.) are missing from this listing */
1472  printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1473  get_master_clock(is),
1474  av_diff,
1476  aqsize / 1024,
1477  vqsize / 1024,
1478  sqsize,
1481  fflush(stdout);
1482  last_time = cur_time;
1483  }
1484  }
1485 }
1486 
1487 /* allocate a picture (needs to do that in main thread to avoid
1488  potential locking problems */
/* (Re)creates the SDL YUV overlay for the current write slot and marks it
 * allocated, waking queue_picture() which waits on pictq_cond.
 * NOTE(review): the signature (original line 1489, presumably
 * `static void alloc_picture(VideoState *is)`) is missing from this
 * listing. */
1490 {
1491  VideoPicture *vp;
1492 
1493  vp = &is->pictq[is->pictq_windex];
1494 
1495  if (vp->bmp)
1496  SDL_FreeYUVOverlay(vp->bmp);
1497 
1498  video_open(is, 0, vp);
1499 
1500  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1501  SDL_YV12_OVERLAY,
1502  screen);
1503  if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1504  /* SDL allocates a buffer smaller than requested if the video
1505  * overlay hardware is unable to support the requested size. */
1506  fprintf(stderr, "Error: the video system does not support an image\n"
1507  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1508  "to reduce the image size.\n", vp->width, vp->height );
1509  do_exit(is);
1510  }
1511 
1512  SDL_LockMutex(is->pictq_mutex);
1513  vp->allocated = 1;
1514  SDL_CondSignal(is->pictq_cond);
1515  SDL_UnlockMutex(is->pictq_mutex);
1516 }
1517 
1518 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1519  int i, width, height;
1520  Uint8 *p, *maxp;
1521  for (i = 0; i < 3; i++) {
1522  width = bmp->w;
1523  height = bmp->h;
1524  if (i > 0) {
1525  width >>= 1;
1526  height >>= 1;
1527  }
1528  if (bmp->pitches[i] > width) {
1529  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1530  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1531  *(p+1) = *p;
1532  }
1533  }
1534 }
1535 
/* Copy a decoded frame into the next free slot of the picture queue,
 * allocating/resizing the SDL overlay in the main thread if needed.
 * Returns 0 on success, -1 if the queue was aborted.
 * NOTE(review): the extraction dropped original lines 1619 (sws dst
 * format/flags argument continuation), 1628 (presumably the
 * duplicate_right_border_pixels() call) and 1637 (the windex wrap test);
 * this listing is not compilable as-is. */
1536 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1537 {
1538  VideoPicture *vp;
1539 
1540 #if defined(DEBUG_SYNC) && 0
1541  printf("frame_type=%c pts=%0.3f\n",
1542  av_get_picture_type_char(src_frame->pict_type), pts);
1543 #endif
1544 
1545  /* wait until we have space to put a new picture */
1546  SDL_LockMutex(is->pictq_mutex);
1547 
1548  /* keep the last already displayed picture in the queue */
1549  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1550  !is->videoq.abort_request) {
1551  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1552  }
1553  SDL_UnlockMutex(is->pictq_mutex);
1554 
1555  if (is->videoq.abort_request)
1556  return -1;
1557 
1558  vp = &is->pictq[is->pictq_windex];
1559 
1560  vp->sar = src_frame->sample_aspect_ratio;
1561 
1562  /* alloc or resize hardware picture buffer */
1563  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1564  vp->width != src_frame->width ||
1565  vp->height != src_frame->height) {
1566  SDL_Event event;
1567 
1568  vp->allocated = 0;
1569  vp->reallocate = 0;
1570  vp->width = src_frame->width;
1571  vp->height = src_frame->height;
1572 
1573  /* the allocation must be done in the main thread to avoid
1574  locking problems. */
1575  event.type = FF_ALLOC_EVENT;
1576  event.user.data1 = is;
1577  SDL_PushEvent(&event);
1578 
1579  /* wait until the picture is allocated */
1580  SDL_LockMutex(is->pictq_mutex);
1581  while (!vp->allocated && !is->videoq.abort_request) {
1582  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1583  }
1584  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1585  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1586  while (!vp->allocated) {
1587  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1588  }
1589  }
1590  SDL_UnlockMutex(is->pictq_mutex);
1591 
1592  if (is->videoq.abort_request)
1593  return -1;
1594  }
1595 
1596  /* if the frame is not skipped, then display it */
1597  if (vp->bmp) {
1598  AVPicture pict = { { 0 } };
1599 
1600  /* get a pointer on the bitmap */
1601  SDL_LockYUVOverlay (vp->bmp);
1602 
  /* YV12 overlays store V before U, hence planes 1 and 2 are swapped */
1603  pict.data[0] = vp->bmp->pixels[0];
1604  pict.data[1] = vp->bmp->pixels[2];
1605  pict.data[2] = vp->bmp->pixels[1];
1606 
1607  pict.linesize[0] = vp->bmp->pitches[0];
1608  pict.linesize[1] = vp->bmp->pitches[2];
1609  pict.linesize[2] = vp->bmp->pitches[1];
1610 
1611 #if CONFIG_AVFILTER
1612  // FIXME use direct rendering
1613  av_picture_copy(&pict, (AVPicture *)src_frame,
1614  src_frame->format, vp->width, vp->height);
1615 #else
1616  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1617  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1618  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1620  if (is->img_convert_ctx == NULL) {
1621  fprintf(stderr, "Cannot initialize the conversion context\n");
1622  exit(1);
1623  }
1624  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1625  0, vp->height, pict.data, pict.linesize);
1626 #endif
1627  /* workaround SDL PITCH_WORKAROUND */
1629  /* update the bitmap content */
1630  SDL_UnlockYUVOverlay(vp->bmp);
1631 
1632  vp->pts = pts;
1633  vp->pos = pos;
1634  vp->serial = serial;
1635 
1636  /* now we can update the picture count */
1638  is->pictq_windex = 0;
1639  SDL_LockMutex(is->pictq_mutex);
1640  is->pictq_size++;
1641  SDL_UnlockMutex(is->pictq_mutex);
1642  }
1643  return 0;
1644 }
1645 
/* Pull one packet from the video queue and decode it. Returns 1 when a
 * frame was produced, 0 when no frame (or the frame was dropped early for
 * A/V sync), -1 on abort.
 * NOTE(review): the signature (original line 1646, presumably
 * `static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt,
 * int *serial)`) and lines 1654, 1662, 1665, 1690, 1701 are missing from
 * this listing; this code is not compilable as-is. */
1647 {
1648  int got_picture;
1649 
1650  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1651  return -1;
1652 
  /* flush packet: reset decoder/clock state after a seek */
1653  if (pkt->data == flush_pkt.data) {
1655 
1656  SDL_LockMutex(is->pictq_mutex);
1657  // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1658  while (is->pictq_size && !is->videoq.abort_request) {
1659  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1660  }
1661  is->video_current_pos = -1;
1663  is->frame_last_duration = 0;
1664  is->frame_timer = (double)av_gettime() / 1000000.0;
1666  SDL_UnlockMutex(is->pictq_mutex);
1667  return 0;
1668  }
1669 
1670  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1671  return 0;
1672 
1673  if (got_picture) {
1674  int ret = 1;
1675  double dpts = NAN;
1676 
  /* pts source selection: best-effort (default), pkt_pts, or pkt_dts */
1677  if (decoder_reorder_pts == -1) {
1678  frame->pts = av_frame_get_best_effort_timestamp(frame);
1679  } else if (decoder_reorder_pts) {
1680  frame->pts = frame->pkt_pts;
1681  } else {
1682  frame->pts = frame->pkt_dts;
1683  }
1684 
1685  if (frame->pts != AV_NOPTS_VALUE)
1686  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1687 
1688  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1689 
  /* early framedrop: discard frames that would be displayed late anyway */
1691  SDL_LockMutex(is->pictq_mutex);
1692  if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
1693  double clockdiff = get_video_clock(is) - get_master_clock(is);
1694  double ptsdiff = dpts - is->frame_last_pts;
1695  if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1696  !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1697  clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
1698  is->videoq.nb_packets) {
1699  is->frame_last_dropped_pos = pkt->pos;
1700  is->frame_last_dropped_pts = dpts;
1702  is->frame_drops_early++;
1703  av_frame_unref(frame);
1704  ret = 0;
1705  }
1706  }
1707  SDL_UnlockMutex(is->pictq_mutex);
1708  }
1709 
1710  return ret;
1711  }
1712  return 0;
1713 }
1714 
1715 #if CONFIG_AVFILTER
/* Parse an optional user filtergraph string between source and sink, or
 * link them directly when no string is given, then configure the graph.
 * Returns 0 or a negative AVERROR.
 * NOTE(review): original line 1720 is missing — presumably
 * `AVFilterInOut *outputs = NULL, *inputs = NULL;`. */
1716 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1717  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1718 {
1719  int ret;
1721 
1722  if (filtergraph) {
1723  outputs = avfilter_inout_alloc();
1724  inputs = avfilter_inout_alloc();
1725  if (!outputs || !inputs) {
1726  ret = AVERROR(ENOMEM);
1727  goto fail;
1728  }
1729 
  /* "in" is what the user graph consumes (our source's output pad) */
1730  outputs->name = av_strdup("in");
1731  outputs->filter_ctx = source_ctx;
1732  outputs->pad_idx = 0;
1733  outputs->next = NULL;
1734 
  /* "out" is what the user graph feeds (our sink's input pad) */
1735  inputs->name = av_strdup("out");
1736  inputs->filter_ctx = sink_ctx;
1737  inputs->pad_idx = 0;
1738  inputs->next = NULL;
1739 
1740  if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1741  goto fail;
1742  } else {
1743  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1744  goto fail;
1745  }
1746 
1747  ret = avfilter_graph_config(graph, NULL);
1748 fail:
1749  avfilter_inout_free(&outputs);
1750  avfilter_inout_free(&inputs);
1751  return ret;
1752 }
1753 
/* Build the video filter chain: buffer source -> (user filters) -> crop to
 * even dimensions -> buffersink constrained to YUV420P for SDL.
 * NOTE(review): the signature (original line 1754, presumably
 * `static int configure_video_filters(AVFilterGraph *graph, VideoState *is,
 * const char *vfilters, AVFrame *frame)`) and line 1771 (the time_base
 * arguments of the buffersrc args snprintf) are missing from this listing. */
1755 {
1756  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1757  char sws_flags_str[128];
1758  char buffersrc_args[256];
1759  int ret;
1760  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1761  AVCodecContext *codec = is->video_st->codec;
1762  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1763 
1764  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1765  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1766  graph->scale_sws_opts = av_strdup(sws_flags_str);
1767 
1768  snprintf(buffersrc_args, sizeof(buffersrc_args),
1769  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1770  frame->width, frame->height, frame->format,
1772  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1773  if (fr.num && fr.den)
1774  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1775 
1776  if ((ret = avfilter_graph_create_filter(&filt_src,
1777  avfilter_get_by_name("buffer"),
1778  "ffplay_buffer", buffersrc_args, NULL,
1779  graph)) < 0)
1780  goto fail;
1781 
1782  ret = avfilter_graph_create_filter(&filt_out,
1783  avfilter_get_by_name("buffersink"),
1784  "ffplay_buffersink", NULL, NULL, graph);
1785  if (ret < 0)
1786  goto fail;
1787 
1788  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1789  goto fail;
1790 
1791  /* SDL YUV code is not handling odd width/height for some driver
1792  * combinations, therefore we crop the picture to an even width/height. */
1793  if ((ret = avfilter_graph_create_filter(&filt_crop,
1794  avfilter_get_by_name("crop"),
1795  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1796  goto fail;
1797  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1798  goto fail;
1799 
1800  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1801  goto fail;
1802 
1803  is->in_video_filter = filt_src;
1804  is->out_video_filter = filt_out;
1805 
1806 fail:
1807  return ret;
1808 }
1809 
/* Build the audio filter chain: abuffer source -> (user filters) ->
 * abuffersink, optionally forcing the sink to the device output format.
 * Returns 0 or a negative AVERROR; frees the graph on failure.
 * NOTE(review): several original lines were dropped by the extraction
 * (1812 sample_fmts declaration, 1820 graph free/reset, 1826-1829 abuffer
 * argument continuation, 1874 cleanup in the error path); this listing is
 * not compilable as-is. */
1810 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1811 {
1813  int sample_rates[2] = { 0, -1 };
1814  int64_t channel_layouts[2] = { 0, -1 };
1815  int channels[2] = { 0, -1 };
1816  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1817  char asrc_args[256];
1818  int ret;
1819 
1821  if (!(is->agraph = avfilter_graph_alloc()))
1822  return AVERROR(ENOMEM);
1823 
1824  ret = snprintf(asrc_args, sizeof(asrc_args),
1825  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1828  1, is->audio_filter_src.freq);
  /* append the channel layout only when one is known */
1830  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1831  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1832 
1833  ret = avfilter_graph_create_filter(&filt_asrc,
1834  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1835  asrc_args, NULL, is->agraph);
1836  if (ret < 0)
1837  goto end;
1838 
1839 
1840  ret = avfilter_graph_create_filter(&filt_asink,
1841  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1842  NULL, NULL, is->agraph);
1843  if (ret < 0)
1844  goto end;
1845 
1846  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1847  goto end;
1848  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1849  goto end;
1850 
  /* pin the sink to the SDL device format so no later conversion is needed */
1851  if (force_output_format) {
1852  channel_layouts[0] = is->audio_tgt.channel_layout;
1853  channels [0] = is->audio_tgt.channels;
1854  sample_rates [0] = is->audio_tgt.freq;
1855  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1856  goto end;
1857  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1858  goto end;
1859  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1860  goto end;
1861  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1862  goto end;
1863  }
1864 
1865 
1866  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1867  goto end;
1868 
1869  is->in_audio_filter = filt_asrc;
1870  is->out_audio_filter = filt_asink;
1871 
1872 end:
1873  if (ret < 0)
1875  return ret;
1876 }
1877 #endif /* CONFIG_AVFILTER */
1878 
/* Video decoding thread: pulls packets, decodes frames, pushes them through
 * the (optional) avfilter graph, and queues the results for display.
 * Rebuilds the filtergraph whenever the frame geometry/format or the queue
 * serial changes.
 * NOTE(review): several lines are missing from this extraction (1883 frame
 * allocation, 1889 graph allocation, 1901, 1915 log head, 1943, 1955 filter
 * delay measurement, 1973); not compilable as-is. */
1879 static int video_thread(void *arg)
1880 {
1881  AVPacket pkt = { 0 };
1882  VideoState *is = arg;
1884  double pts;
1885  int ret;
1886  int serial = 0;
1887 
1888 #if CONFIG_AVFILTER
1890  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1891  int last_w = 0;
1892  int last_h = 0;
1893  enum AVPixelFormat last_format = -2;
1894  int last_serial = -1;
1895 #endif
1896 
1897  for (;;) {
1898  while (is->paused && !is->videoq.abort_request)
1899  SDL_Delay(10);
1900 
1902  av_free_packet(&pkt);
1903 
1904  ret = get_video_frame(is, frame, &pkt, &serial);
1905  if (ret < 0)
1906  goto the_end;
1907  if (!ret)
1908  continue;
1909 
1910 #if CONFIG_AVFILTER
  /* input properties changed: tear down and rebuild the filtergraph */
1911  if ( last_w != frame->width
1912  || last_h != frame->height
1913  || last_format != frame->format
1914  || last_serial != serial) {
1916  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1917  last_w, last_h,
1918  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1919  frame->width, frame->height,
1920  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1921  avfilter_graph_free(&graph);
1922  graph = avfilter_graph_alloc();
1923  if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1924  SDL_Event event;
1925  event.type = FF_QUIT_EVENT;
1926  event.user.data1 = is;
1927  SDL_PushEvent(&event);
1928  av_free_packet(&pkt);
1929  goto the_end;
1930  }
1931  filt_in = is->in_video_filter;
1932  filt_out = is->out_video_filter;
1933  last_w = frame->width;
1934  last_h = frame->height;
1935  last_format = frame->format;
1936  last_serial = serial;
1937  }
1938 
1939  ret = av_buffersrc_add_frame(filt_in, frame);
1940  if (ret < 0)
1941  goto the_end;
1942  av_frame_unref(frame);
1944  av_free_packet(&pkt);
1945 
  /* drain every frame the filtergraph produced for this input */
1946  while (ret >= 0) {
1947  is->frame_last_returned_time = av_gettime() / 1000000.0;
1948 
1949  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1950  if (ret < 0) {
1951  ret = 0;
1952  break;
1953  }
1954 
  /* discard implausible filter-delay measurements */
1956  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1957  is->frame_last_filter_delay = 0;
1958 
1959  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1960  ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1961  av_frame_unref(frame);
1962  }
1963 #else
1964  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
1965  ret = queue_picture(is, frame, pts, pkt.pos, serial);
1966  av_frame_unref(frame);
1967 #endif
1968 
1969  if (ret < 0)
1970  goto the_end;
1971  }
1972  the_end:
1974 #if CONFIG_AVFILTER
1975  avfilter_graph_free(&graph);
1976 #endif
1977  av_free_packet(&pkt);
1978  av_frame_free(&frame);
1979  return 0;
1980 }
1981 
/* Subtitle decoding thread: pulls subtitle packets, decodes them, converts
 * bitmap-subtitle palettes from RGBA to CCIR YUVA, and queues the result.
 * NOTE(review): original lines 2000 (flush handling body, presumably
 * avcodec_flush_buffers) and 2021 (the avcodec_decode_subtitle2 call head)
 * are missing from this extraction. */
1982 static int subtitle_thread(void *arg)
1983 {
1984  VideoState *is = arg;
1985  SubPicture *sp;
1986  AVPacket pkt1, *pkt = &pkt1;
1987  int got_subtitle;
1988  double pts;
1989  int i, j;
1990  int r, g, b, y, u, v, a;
1991 
1992  for (;;) {
1993  while (is->paused && !is->subtitleq.abort_request) {
1994  SDL_Delay(10);
1995  }
1996  if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1997  break;
1998 
1999  if (pkt->data == flush_pkt.data) {
2001  continue;
2002  }
  /* wait for a free slot in the subpicture queue */
2003  SDL_LockMutex(is->subpq_mutex);
2004  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2005  !is->subtitleq.abort_request) {
2006  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2007  }
2008  SDL_UnlockMutex(is->subpq_mutex);
2009 
2010  if (is->subtitleq.abort_request)
2011  return 0;
2012 
2013  sp = &is->subpq[is->subpq_windex];
2014 
2015  /* NOTE: ipts is the PTS of the _first_ picture beginning in
2016  this packet, if any */
2017  pts = 0;
2018  if (pkt->pts != AV_NOPTS_VALUE)
2019  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2020 
2022  &got_subtitle, pkt);
  /* format == 0 means bitmap subtitles: convert the palette to YUVA */
2023  if (got_subtitle && sp->sub.format == 0) {
2024  if (sp->sub.pts != AV_NOPTS_VALUE)
2025  pts = sp->sub.pts / (double)AV_TIME_BASE;
2026  sp->pts = pts;
2027 
2028  for (i = 0; i < sp->sub.num_rects; i++)
2029  {
2030  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2031  {
2032  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2033  y = RGB_TO_Y_CCIR(r, g, b);
2034  u = RGB_TO_U_CCIR(r, g, b, 0);
2035  v = RGB_TO_V_CCIR(r, g, b, 0);
2036  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2037  }
2038  }
2039 
2040  /* now we can update the picture count */
2041  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2042  is->subpq_windex = 0;
2043  SDL_LockMutex(is->subpq_mutex);
2044  is->subpq_size++;
2045  SDL_UnlockMutex(is->subpq_mutex);
2046  }
2047  av_free_packet(pkt);
2048  }
2049  return 0;
2050 }
2051 
2052 /* copy samples for viewing in editor window */
/* Appends decoded samples into the circular sample_array used by the
 * waveform/RDFT visualisation.
 * NOTE(review): original lines 2059 (the chunk-length computation,
 * presumably `len = SAMPLE_ARRAY_SIZE - is->sample_array_index;`) and 2065
 * (the wrap-around test) are missing from this extraction. */
2053 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2054 {
2055  int size, len;
2056 
2057  size = samples_size / sizeof(short);
2058  while (size > 0) {
2060  if (len > size)
2061  len = size;
2062  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2063  samples += len;
2064  is->sample_array_index += len;
2066  is->sample_array_index = 0;
2067  size -= len;
2068  }
2069 }
2070 
2071 /* return the wanted number of samples to get better sync if sync_type is video
2072  * or external master clock */
/* Uses an exponentially weighted average of the audio/master clock
 * difference to nudge the sample count (bounded by
 * SAMPLE_CORRECTION_PERCENT_MAX) when audio is not the master clock.
 * NOTE(review): original lines 2078 (the not-audio-master guard), 2086
 * (the avg_count threshold test) and 2101 (a printf argument line) are
 * missing from this extraction. */
2073 static int synchronize_audio(VideoState *is, int nb_samples)
2074 {
2075  int wanted_nb_samples = nb_samples;
2076 
2077  /* if not master, then we try to remove or add samples to correct the clock */
2079  double diff, avg_diff;
2080  int min_nb_samples, max_nb_samples;
2081 
2082  diff = get_audio_clock(is) - get_master_clock(is);
2083 
2084  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2085  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2087  /* not enough measures to have a correct estimate */
2088  is->audio_diff_avg_count++;
2089  } else {
2090  /* estimate the A-V difference */
2091  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2092 
2093  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2094  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2095  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2096  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2097  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2098  }
2099  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2100  diff, avg_diff, wanted_nb_samples - nb_samples,
2102  }
2103  } else {
2104  /* too big difference : may be initial PTS errors, so
2105  reset A-V filter */
2106  is->audio_diff_avg_count = 0;
2107  is->audio_diff_cum = 0;
2108  }
2109  }
2110 
2111  return wanted_nb_samples;
2112 }
2113 
2114 /**
2115  * Decode one audio frame and return its uncompressed size.
2116  *
2117  * The processed audio frame is decoded, converted if required, and
2118  * stored in is->audio_buf, with size in bytes given by the return
2119  * value.
2120  */
/* NOTE(review): the signature (original line 2121, presumably
 * `static int audio_decode_frame(VideoState *is)`) and a number of interior
 * lines (2145, 2187, 2191, 2197-2207, 2229, 2234-2235, 2243-2244,
 * 2249-2250, 2254, 2295) were dropped by the doxygen extraction; this
 * listing is not compilable as-is. */
2122 {
2123  AVPacket *pkt_temp = &is->audio_pkt_temp;
2124  AVPacket *pkt = &is->audio_pkt;
2125  AVCodecContext *dec = is->audio_st->codec;
2126  int len1, data_size, resampled_data_size;
2127  int64_t dec_channel_layout;
2128  int got_frame;
2129  av_unused double audio_clock0;
2130  int new_packet = 0;
2131  int flush_complete = 0;
2132  int wanted_nb_samples;
2133  AVRational tb;
2134  int ret;
2135  int reconfigure;
2136 
2137  for (;;) {
2138  /* NOTE: the audio packet can contain several frames */
2139  while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
2140  if (!is->frame) {
2141  if (!(is->frame = avcodec_alloc_frame()))
2142  return AVERROR(ENOMEM);
2143  } else {
2144  av_frame_unref(is->frame);
2146  }
2147 
  /* a seek flushed the queue since this packet was taken: drop it */
2148  if (is->audioq.serial != is->audio_pkt_temp_serial)
2149  break;
2150 
2151  if (is->paused)
2152  return -1;
2153 
2154  if (!is->audio_buf_frames_pending) {
2155  if (flush_complete)
2156  break;
2157  new_packet = 0;
2158  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2159  if (len1 < 0) {
2160  /* if error, we skip the frame */
2161  pkt_temp->size = 0;
2162  break;
2163  }
2164 
2165  pkt_temp->data += len1;
2166  pkt_temp->size -= len1;
2167 
2168  if (!got_frame) {
2169  /* stop sending empty packets if the decoder is finished */
2170  if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2171  flush_complete = 1;
2172  continue;
2173  }
2174 
  /* normalise the frame pts to the 1/sample_rate time base */
2175  tb = (AVRational){1, is->frame->sample_rate};
2176  if (is->frame->pts != AV_NOPTS_VALUE)
2177  is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2178  if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2179  is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
2180  if (pkt_temp->pts != AV_NOPTS_VALUE)
2181  pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2182 
2183 #if CONFIG_AVFILTER
2184  dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2185 
  /* rebuild the audio filtergraph when the source format changed */
2186  reconfigure =
2188  is->frame->format, av_frame_get_channels(is->frame)) ||
2189  is->audio_filter_src.channel_layout != dec_channel_layout ||
2190  is->audio_filter_src.freq != is->frame->sample_rate ||
2192 
2193  if (reconfigure) {
2194  char buf1[1024], buf2[1024];
2195  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2196  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2198  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2201 
2202  is->audio_filter_src.fmt = is->frame->format;
2204  is->audio_filter_src.channel_layout = dec_channel_layout;
2207 
2208  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2209  return ret;
2210  }
2211 
2212  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2213  return ret;
2214  av_frame_unref(is->frame);
2215 #endif
2216  }
2217 #if CONFIG_AVFILTER
2218  if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2219  if (ret == AVERROR(EAGAIN)) {
2220  is->audio_buf_frames_pending = 0;
2221  continue;
2222  }
2223  return ret;
2224  }
2225  is->audio_buf_frames_pending = 1;
2226  tb = is->out_audio_filter->inputs[0]->time_base;
2227 #endif
2228 
2230  is->frame->nb_samples,
2231  is->frame->format, 1);
2232 
2233  dec_channel_layout =
2236  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2237 
  /* (re)create the resampler when the source format changed or when
  sample-count compensation is requested without one */
2238  if (is->frame->format != is->audio_src.fmt ||
2239  dec_channel_layout != is->audio_src.channel_layout ||
2240  is->frame->sample_rate != is->audio_src.freq ||
2241  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2242  swr_free(&is->swr_ctx);
2245  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2246  0, NULL);
2247  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2248  fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2251  break;
2252  }
2253  is->audio_src.channel_layout = dec_channel_layout;
2255  is->audio_src.freq = is->frame->sample_rate;
2256  is->audio_src.fmt = is->frame->format;
2257  }
2258 
2259  if (is->swr_ctx) {
2260  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2261  uint8_t **out = &is->audio_buf1;
  /* +256 gives swr headroom for buffered/compensated samples */
2262  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2263  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2264  int len2;
2265  if (wanted_nb_samples != is->frame->nb_samples) {
2266  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2267  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2268  fprintf(stderr, "swr_set_compensation() failed\n");
2269  break;
2270  }
2271  }
2272  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2273  if (!is->audio_buf1)
2274  return AVERROR(ENOMEM);
2275  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2276  if (len2 < 0) {
2277  fprintf(stderr, "swr_convert() failed\n");
2278  break;
2279  }
2280  if (len2 == out_count) {
2281  fprintf(stderr, "warning: audio buffer is probably too small\n");
2282  swr_init(is->swr_ctx);
2283  }
2284  is->audio_buf = is->audio_buf1;
2285  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2286  } else {
2287  is->audio_buf = is->frame->data[0];
2288  resampled_data_size = data_size;
2289  }
2290 
2291  audio_clock0 = is->audio_clock;
2292  /* update the audio clock with the pts */
2293  if (is->frame->pts != AV_NOPTS_VALUE) {
2294  is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2296  }
2297 #ifdef DEBUG
2298  {
2299  static double last_clock;
2300  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2301  is->audio_clock - last_clock,
2302  is->audio_clock, audio_clock0);
2303  last_clock = is->audio_clock;
2304  }
2305 #endif
2306  return resampled_data_size;
2307  }
2308 
2309  /* free the current packet */
2310  if (pkt->data)
2311  av_free_packet(pkt);
2312  memset(pkt_temp, 0, sizeof(*pkt_temp));
2313 
2314  if (is->audioq.abort_request) {
2315  return -1;
2316  }
2317 
  /* queue drained: wake the read thread so it refills it */
2318  if (is->audioq.nb_packets == 0)
2319  SDL_CondSignal(is->continue_read_thread);
2320 
2321  /* read next packet */
2322  if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2323  return -1;
2324 
2325  if (pkt->data == flush_pkt.data) {
2326  avcodec_flush_buffers(dec);
2327  flush_complete = 0;
2328  is->audio_buf_frames_pending = 0;
2329  }
2330 
2331  *pkt_temp = *pkt;
2332  }
2333 }
2334 
2335 /* prepare a new audio buffer */
/* SDL audio callback: fill `stream` with exactly `len` bytes of decoded
 * (already resampled) audio, refilling is->audio_buf from
 * audio_decode_frame() whenever the current buffer is exhausted.
 * On decode error it outputs silence so playback timing keeps advancing.
 * NOTE(review): this extract dropped several original lines (the
 * declaration of `frame_size`, the update of is->audio_write_buf_size,
 * and the body of the trailing serial check) — confirm against upstream. */
2336 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2337 {
2338  VideoState *is = opaque;
2339  int audio_size, len1;
2340  int bytes_per_sec;
2342 
2344 
2345  while (len > 0) {
     /* refill the intermediate buffer when it has been fully consumed */
2346  if (is->audio_buf_index >= is->audio_buf_size) {
2347  audio_size = audio_decode_frame(is);
2348  if (audio_size < 0) {
2349  /* if error, just output silence */
2350  is->audio_buf = is->silence_buf;
     /* round the silence length down to a whole number of frames
      * (frame_size is declared on a line lost in this extract) */
2351  is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2352  } else {
2353  if (is->show_mode != SHOW_MODE_VIDEO)
2354  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2355  is->audio_buf_size = audio_size;
2356  }
2357  is->audio_buf_index = 0;
2358  }
     /* copy as much as fits into the SDL-provided buffer */
2359  len1 = is->audio_buf_size - is->audio_buf_index;
2360  if (len1 > len)
2361  len1 = len;
2362  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2363  len -= len1;
2364  stream += len1;
2365  is->audio_buf_index += len1;
2366  }
2367  bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2369  /* Let's assume the audio driver that is used by SDL has two periods. */
2370  is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2372  if (is->audioq.serial == is->audio_clock_serial)
2374 }
2375 
2376 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2377 {
2378  SDL_AudioSpec wanted_spec, spec;
2379  const char *env;
2380  const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2381 
2382  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2383  if (env) {
2384  wanted_nb_channels = atoi(env);
2385  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2386  }
2387  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2388  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2389  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2390  }
2391  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2392  wanted_spec.freq = wanted_sample_rate;
2393  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2394  fprintf(stderr, "Invalid sample rate or channel count!\n");
2395  return -1;
2396  }
2397  wanted_spec.format = AUDIO_S16SYS;
2398  wanted_spec.silence = 0;
2399  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2400  wanted_spec.callback = sdl_audio_callback;
2401  wanted_spec.userdata = opaque;
2402  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2403  fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2404  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2405  if (!wanted_spec.channels) {
2406  fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2407  return -1;
2408  }
2409  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2410  }
2411  if (spec.format != AUDIO_S16SYS) {
2412  fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2413  return -1;
2414  }
2415  if (spec.channels != wanted_spec.channels) {
2416  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2417  if (!wanted_channel_layout) {
2418  fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2419  return -1;
2420  }
2421  }
2422 
2423  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2424  audio_hw_params->freq = spec.freq;
2425  audio_hw_params->channel_layout = wanted_channel_layout;
2426  audio_hw_params->channels = spec.channels;
2427  return spec.size;
2428 }
2429 
2430 /* open a given stream. Return 0 if OK */
/* Open the decoder for stream `stream_index` of is->ic and start the
 * matching consumer (audio callback, video thread or subtitle thread).
 * Returns 0 on success, a negative value on failure.
 * NOTE(review): this extract dropped some original lines (e.g. the
 * declaration of `t` used below, an avcodec_get_context_defaults3 /
 * workaround-bug setup line, and parts of the avfilter channel-layout
 * setup) — confirm against upstream ffplay.c. */
2431 static int stream_component_open(VideoState *is, int stream_index)
2432 {
2433  AVFormatContext *ic = is->ic;
2434  AVCodecContext *avctx;
2435  AVCodec *codec;
2436  const char *forced_codec_name = NULL;
2437  AVDictionary *opts;
2439  int sample_rate, nb_channels;
2440  int64_t channel_layout;
2441  int ret;
2442 
2443  if (stream_index < 0 || stream_index >= ic->nb_streams)
2444  return -1;
2445  avctx = ic->streams[stream_index]->codec;
2446 
2447  codec = avcodec_find_decoder(avctx->codec_id);
2448 
     /* remember the last stream of each type and pick up any user-forced
      * decoder name (-acodec/-vcodec/-scodec) */
2449  switch(avctx->codec_type){
2450  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2451  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2452  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2453  }
2454  if (forced_codec_name)
2455  codec = avcodec_find_decoder_by_name(forced_codec_name);
2456  if (!codec) {
2457  if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2458  else fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2459  return -1;
2460  }
2461 
2462  avctx->codec_id = codec->id;
     /* clamp the user-requested lowres to what the decoder supports */
2464  avctx->lowres = lowres;
2465  if(avctx->lowres > codec->max_lowres){
2466  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2467  codec->max_lowres);
2468  avctx->lowres= codec->max_lowres;
2469  }
2470  avctx->idct_algo = idct;
2472 
2473  if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2474  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2475  if(codec->capabilities & CODEC_CAP_DR1)
2476  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2477 
     /* apply per-stream codec options from the command line */
2478  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2479  if (!av_dict_get(opts, "threads", NULL, 0))
2480  av_dict_set(&opts, "threads", "auto", 0);
2481  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2482  av_dict_set(&opts, "refcounted_frames", "1", 0);
2483  if (avcodec_open2(avctx, codec, &opts) < 0)
2484  return -1;
     /* any option left over in the dict was not consumed => unknown option
      * (NOTE(review): `t`'s declaration was lost in this extract) */
2485  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2486  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2487  return AVERROR_OPTION_NOT_FOUND;
2488  }
2489 
2490  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2491  switch (avctx->codec_type) {
2492  case AVMEDIA_TYPE_AUDIO:
2493 #if CONFIG_AVFILTER
2494  {
2495  AVFilterLink *link;
2496 
     /* the audio filtergraph's sink decides the format fed to SDL */
2497  is->audio_filter_src.freq = avctx->sample_rate;
2498  is->audio_filter_src.channels = avctx->channels;
2500  is->audio_filter_src.fmt = avctx->sample_fmt;
2501  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2502  return ret;
2503  link = is->out_audio_filter->inputs[0];
2504  sample_rate = link->sample_rate;
2505  nb_channels = link->channels;
2506  channel_layout = link->channel_layout;
2507  }
2508 #else
2509  sample_rate = avctx->sample_rate;
2510  nb_channels = avctx->channels;
2511  channel_layout = avctx->channel_layout;
2512 #endif
2513 
2514  /* prepare audio output */
2515  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2516  return ret;
2517  is->audio_hw_buf_size = ret;
2518  is->audio_src = is->audio_tgt;
2519  is->audio_buf_size = 0;
2520  is->audio_buf_index = 0;
2521 
2522  /* init averaging filter */
2523  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2524  is->audio_diff_avg_count = 0;
2525  /* since we do not have a precise anough audio fifo fullness,
2526  we correct audio sync only if larger than this threshold */
2528 
2529  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2530  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2531 
2532  is->audio_stream = stream_index;
2533  is->audio_st = ic->streams[stream_index];
2534 
     /* start feeding the SDL callback */
2535  packet_queue_start(&is->audioq);
2536  SDL_PauseAudio(0);
2537  break;
2538  case AVMEDIA_TYPE_VIDEO:
2539  is->video_stream = stream_index;
2540  is->video_st = ic->streams[stream_index];
2541 
2542  packet_queue_start(&is->videoq);
2543  is->video_tid = SDL_CreateThread(video_thread, is);
2544  is->queue_attachments_req = 1;
2545  break;
2546  case AVMEDIA_TYPE_SUBTITLE:
2547  is->subtitle_stream = stream_index;
2548  is->subtitle_st = ic->streams[stream_index];
2550 
2551  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2552  break;
2553  default:
2554  break;
2555  }
2556  return 0;
2557 }
2558 
/* Tear down the decoder and consumer for stream `stream_index`:
 * abort/flush its packet queue, stop the associated thread or the SDL
 * audio device, free per-stream buffers, close the codec and reset the
 * VideoState bookkeeping for that media type.
 * NOTE(review): this extract dropped a few original lines (subtitle
 * queue abort/flush and the CONFIG_AVFILTER cleanup) — confirm
 * against upstream ffplay.c. */
2559 static void stream_component_close(VideoState *is, int stream_index)
2560 {
2561  AVFormatContext *ic = is->ic;
2562  AVCodecContext *avctx;
2563 
2564  if (stream_index < 0 || stream_index >= ic->nb_streams)
2565  return;
2566  avctx = ic->streams[stream_index]->codec;
2567 
2568  switch (avctx->codec_type) {
2569  case AVMEDIA_TYPE_AUDIO:
     /* unblock any reader of the queue before closing the device */
2570  packet_queue_abort(&is->audioq);
2571 
2572  SDL_CloseAudio();
2573 
2574  packet_queue_flush(&is->audioq);
2575  av_free_packet(&is->audio_pkt);
2576  swr_free(&is->swr_ctx);
2577  av_freep(&is->audio_buf1);
2578  is->audio_buf1_size = 0;
2579  is->audio_buf = NULL;
2580  av_frame_free(&is->frame);
2581 
     /* release RDFT state used by the spectrum display, if any */
2582  if (is->rdft) {
2583  av_rdft_end(is->rdft);
2584  av_freep(&is->rdft_data);
2585  is->rdft = NULL;
2586  is->rdft_bits = 0;
2587  }
2588 #if CONFIG_AVFILTER
2590 #endif
2591  break;
2592  case AVMEDIA_TYPE_VIDEO:
2593  packet_queue_abort(&is->videoq);
2594 
2595  /* note: we also signal this mutex to make sure we deblock the
2596  video thread in all cases */
2597  SDL_LockMutex(is->pictq_mutex);
2598  SDL_CondSignal(is->pictq_cond);
2599  SDL_UnlockMutex(is->pictq_mutex);
2600 
2601  SDL_WaitThread(is->video_tid, NULL);
2602 
2603  packet_queue_flush(&is->videoq);
2604  break;
2605  case AVMEDIA_TYPE_SUBTITLE:
2607 
2608  /* note: we also signal this mutex to make sure we deblock the
2609  video thread in all cases */
2610  SDL_LockMutex(is->subpq_mutex);
2611  is->subtitle_stream_changed = 1;
2612 
2613  SDL_CondSignal(is->subpq_cond);
2614  SDL_UnlockMutex(is->subpq_mutex);
2615 
2616  SDL_WaitThread(is->subtitle_tid, NULL);
2617 
2619  break;
2620  default:
2621  break;
2622  }
2623 
     /* stop demuxing packets for this stream and close the codec */
2624  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2625  avcodec_close(avctx);
2626  switch (avctx->codec_type) {
2627  case AVMEDIA_TYPE_AUDIO:
2628  is->audio_st = NULL;
2629  is->audio_stream = -1;
2630  break;
2631  case AVMEDIA_TYPE_VIDEO:
2632  is->video_st = NULL;
2633  is->video_stream = -1;
2634  break;
2635  case AVMEDIA_TYPE_SUBTITLE:
2636  is->subtitle_st = NULL;
2637  is->subtitle_stream = -1;
2638  break;
2639  default:
2640  break;
2641  }
2642 }
2643 
2644 static int decode_interrupt_cb(void *ctx)
2645 {
2646  VideoState *is = ctx;
2647  return is->abort_request;
2648 }
2649 
/* NOTE(review): the signature line of this function was lost in this
 * extract; from the call site below it is presumably
 * `static int is_realtime(AVFormatContext *s)` — confirm upstream.
 * Returns 1 if the input looks like a realtime source (rtp/rtsp/sdp
 * demuxer, or an rtp:/udp: URL), 0 otherwise. */
2651 {
2652  if( !strcmp(s->iformat->name, "rtp")
2653  || !strcmp(s->iformat->name, "rtsp")
2654  || !strcmp(s->iformat->name, "sdp")
2655  )
2656  return 1;
2657 
     /* also treat raw rtp:/udp: URLs with an open I/O context as realtime */
2658  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2659  || !strncmp(s->filename, "udp:", 4)
2660  )
2661  )
2662  return 1;
2663  return 0;
2664 }
2665 
2666 /* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best streams,
 * then loops reading packets and dispatching them to the audio/video/
 * subtitle packet queues. Also services seek requests, pause/resume for
 * network protocols, queue back-pressure, looping and EOF handling.
 * Runs until is->abort_request is set; posts FF_QUIT_EVENT on error.
 * NOTE(review): this extract dropped numerous original lines (the
 * AVDictionaryEntry *t declaration, interrupt-callback setup, the
 * scan_all_pmts / av_dict handling around avformat_open_input, the
 * av_find_best_stream calls, subtitle-disable check, clock updates
 * after byte seeks, attached_pic condition, audio flush_complete
 * handling and the stream_component_close calls in the fail path) —
 * confirm against upstream ffplay.c before relying on details. */
2667 static int read_thread(void *arg)
2668 {
2669  VideoState *is = arg;
2670  AVFormatContext *ic = NULL;
2671  int err, i, ret;
2672  int st_index[AVMEDIA_TYPE_NB];
2673  AVPacket pkt1, *pkt = &pkt1;
2674  int eof = 0;
2675  int pkt_in_play_range = 0;
2677  AVDictionary **opts;
2678  int orig_nb_streams;
2679  SDL_mutex *wait_mutex = SDL_CreateMutex();
2680 
2681  memset(st_index, -1, sizeof(st_index));
2682  is->last_video_stream = is->video_stream = -1;
2683  is->last_audio_stream = is->audio_stream = -1;
2684  is->last_subtitle_stream = is->subtitle_stream = -1;
2685 
     /* ---- open the input and probe stream info ---- */
2686  ic = avformat_alloc_context();
2689  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2690  if (err < 0) {
2691  print_error(is->filename, err);
2692  ret = -1;
2693  goto fail;
2694  }
2696  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2698  goto fail;
2699  }
2700  is->ic = ic;
2701 
2702  if (genpts)
2703  ic->flags |= AVFMT_FLAG_GENPTS;
2704 
2706  orig_nb_streams = ic->nb_streams;
2707 
2708  err = avformat_find_stream_info(ic, opts);
2709  if (err < 0) {
2710  fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2711  ret = -1;
2712  goto fail;
2713  }
2714  for (i = 0; i < orig_nb_streams; i++)
2715  av_dict_free(&opts[i]);
2716  av_freep(&opts);
2717 
2718  if (ic->pb)
2719  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2720 
2721  if (seek_by_bytes < 0)
2722  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2723 
2724  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2725 
2726  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2727  window_title = av_asprintf("%s - %s", t->value, input_filename);
2728 
2729  /* if seeking requested, we execute it */
2730  if (start_time != AV_NOPTS_VALUE) {
2731  int64_t timestamp;
2732 
2733  timestamp = start_time;
2734  /* add the stream start time */
2735  if (ic->start_time != AV_NOPTS_VALUE)
2736  timestamp += ic->start_time;
2737  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2738  if (ret < 0) {
2739  fprintf(stderr, "%s: could not seek to position %0.3f\n",
2740  is->filename, (double)timestamp / AV_TIME_BASE);
2741  }
2742  }
2743 
2744  is->realtime = is_realtime(ic);
2745 
     /* ---- choose the streams to play ---- */
2746  for (i = 0; i < ic->nb_streams; i++)
2747  ic->streams[i]->discard = AVDISCARD_ALL;
2748  if (!video_disable)
2749  st_index[AVMEDIA_TYPE_VIDEO] =
2752  if (!audio_disable)
2753  st_index[AVMEDIA_TYPE_AUDIO] =
2756  st_index[AVMEDIA_TYPE_VIDEO],
2757  NULL, 0);
2759  st_index[AVMEDIA_TYPE_SUBTITLE] =
2762  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2763  st_index[AVMEDIA_TYPE_AUDIO] :
2764  st_index[AVMEDIA_TYPE_VIDEO]),
2765  NULL, 0);
2766  if (show_status) {
2767  av_dump_format(ic, 0, is->filename, 0);
2768  }
2769 
2770  is->show_mode = show_mode;
2771 
2772  /* open the streams */
2773  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2774  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2775  }
2776 
2777  ret = -1;
2778  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2779  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2780  }
2781  if (is->show_mode == SHOW_MODE_NONE)
2782  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2783 
2784  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2785  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2786  }
2787 
2788  if (is->video_stream < 0 && is->audio_stream < 0) {
2789  fprintf(stderr, "%s: could not open codecs\n", is->filename);
2790  ret = -1;
2791  goto fail;
2792  }
2793 
2794  if (infinite_buffer < 0 && is->realtime)
2795  infinite_buffer = 1;
2796 
     /* ---- main demux loop ---- */
2797  for (;;) {
2798  if (is->abort_request)
2799  break;
2800  if (is->paused != is->last_paused) {
2801  is->last_paused = is->paused;
2802  if (is->paused)
2803  is->read_pause_return = av_read_pause(ic);
2804  else
2805  av_read_play(ic);
2806  }
2807 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2808  if (is->paused &&
2809  (!strcmp(ic->iformat->name, "rtsp") ||
2810  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2811  /* wait 10 ms to avoid trying to get another packet */
2812  /* XXX: horrible */
2813  SDL_Delay(10);
2814  continue;
2815  }
2816 #endif
     /* service a pending seek request: seek, then flush all queues */
2817  if (is->seek_req) {
2818  int64_t seek_target = is->seek_pos;
2819  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2820  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2821 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2822 // of the seek_pos/seek_rel variables
2823 
2824  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2825  if (ret < 0) {
2826  fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2827  } else {
2828  if (is->audio_stream >= 0) {
2829  packet_queue_flush(&is->audioq);
2830  packet_queue_put(&is->audioq, &flush_pkt);
2831  }
2832  if (is->subtitle_stream >= 0) {
2834  packet_queue_put(&is->subtitleq, &flush_pkt);
2835  }
2836  if (is->video_stream >= 0) {
2837  packet_queue_flush(&is->videoq);
2838  packet_queue_put(&is->videoq, &flush_pkt);
2839  }
2840  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2842  } else {
2843  update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
2844  }
2845  }
2846  is->seek_req = 0;
2847  is->queue_attachments_req = 1;
2848  eof = 0;
2849  if (is->paused)
2850  step_to_next_frame(is);
2851  }
2852  if (is->queue_attachments_req) {
2854  AVPacket copy;
2855  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2856  goto fail;
2857  packet_queue_put(&is->videoq, &copy);
2858  }
2859  is->queue_attachments_req = 0;
2860  }
2861 
2862  /* if the queue are full, no need to read more */
2863  if (infinite_buffer<1 &&
2864  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2865  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2866  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2868  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2869  /* wait 10 ms */
2870  SDL_LockMutex(wait_mutex);
2871  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2872  SDL_UnlockMutex(wait_mutex);
2873  continue;
2874  }
     /* at EOF: push null packets to flush decoders, handle -loop/-autoexit */
2875  if (eof) {
2876  if (is->video_stream >= 0) {
2877  av_init_packet(pkt);
2878  pkt->data = NULL;
2879  pkt->size = 0;
2880  pkt->stream_index = is->video_stream;
2881  packet_queue_put(&is->videoq, pkt);
2882  }
2883  if (is->audio_stream >= 0 &&
2885  av_init_packet(pkt);
2886  pkt->data = NULL;
2887  pkt->size = 0;
2888  pkt->stream_index = is->audio_stream;
2889  packet_queue_put(&is->audioq, pkt);
2890  }
2891  SDL_Delay(10);
2892  if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2893  if (loop != 1 && (!loop || --loop)) {
2894  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2895  } else if (autoexit) {
2896  ret = AVERROR_EOF;
2897  goto fail;
2898  }
2899  }
2900  eof=0;
2901  continue;
2902  }
2903  ret = av_read_frame(ic, pkt);
2904  if (ret < 0) {
2905  if (ret == AVERROR_EOF || url_feof(ic->pb))
2906  eof = 1;
2907  if (ic->pb && ic->pb->error)
2908  break;
2909  SDL_LockMutex(wait_mutex);
2910  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2911  SDL_UnlockMutex(wait_mutex);
2912  continue;
2913  }
2914  /* check if packet is in play range specified by user, then queue, otherwise discard */
2915  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2916  (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2917  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2918  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2919  <= ((double)duration / 1000000);
2920  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2921  packet_queue_put(&is->audioq, pkt);
2922  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2924  packet_queue_put(&is->videoq, pkt);
2925  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2926  packet_queue_put(&is->subtitleq, pkt);
2927  } else {
2928  av_free_packet(pkt);
2929  }
2930  }
2931  /* wait until the end */
2932  while (!is->abort_request) {
2933  SDL_Delay(100);
2934  }
2935 
2936  ret = 0;
2937  fail:
2938  /* close each stream */
2939  if (is->audio_stream >= 0)
2941  if (is->video_stream >= 0)
2943  if (is->subtitle_stream >= 0)
2945  if (is->ic) {
2946  avformat_close_input(&is->ic);
2947  }
2948 
2949  if (ret != 0) {
2950  SDL_Event event;
2951 
2952  event.type = FF_QUIT_EVENT;
2953  event.user.data1 = is;
2954  SDL_PushEvent(&event);
2955  }
2956  SDL_DestroyMutex(wait_mutex);
2957  return 0;
2958 }
2959 
/* Allocate and initialize a VideoState for `filename`, create the
 * synchronization primitives and packet queues, initialize the clocks,
 * and spawn the demuxer thread (read_thread). Returns the new state,
 * or NULL on allocation/thread-creation failure.
 * NOTE(review): this extract dropped a few lines (subtitleq init and
 * two clock-initialization lines) — confirm against upstream. */
2960 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2961 {
2962  VideoState *is;
2963 
2964  is = av_mallocz(sizeof(VideoState));
2965  if (!is)
2966  return NULL;
2967  av_strlcpy(is->filename, filename, sizeof(is->filename));
2968  is->iformat = iformat;
2969  is->ytop = 0;
2970  is->xleft = 0;
2971 
2972  /* start video display */
2973  is->pictq_mutex = SDL_CreateMutex();
2974  is->pictq_cond = SDL_CreateCond();
2975 
2976  is->subpq_mutex = SDL_CreateMutex();
2977  is->subpq_cond = SDL_CreateCond();
2978 
2979  packet_queue_init(&is->videoq);
2980  packet_queue_init(&is->audioq);
2982 
2983  is->continue_read_thread = SDL_CreateCond();
2984 
     /* clock/serial bookkeeping starts in the "invalid" state (-1) */
2986  update_external_clock_speed(is, 1.0);
2987  is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2989  is->audio_clock_serial = -1;
2990  is->video_clock_serial = -1;
2991  is->audio_last_serial = -1;
2992  is->av_sync_type = av_sync_type;
2993  is->read_tid = SDL_CreateThread(read_thread, is);
2994  if (!is->read_tid) {
2995  av_free(is);
2996  return NULL;
2997  }
2998  return is;
2999 }
3000 
/* NOTE(review): the signature line was lost in this extract; this is
 * presumably `static void stream_cycle_channel(VideoState *is, int
 * codec_type)` — confirm upstream.
 * Cycle to the next playable stream of the given media type, wrapping
 * around the stream list; subtitles may additionally cycle to "off"
 * (stream_index -1). Closes the old stream and opens the new one. */
3002 {
3003  AVFormatContext *ic = is->ic;
3004  int start_index, stream_index;
3005  int old_index;
3006  AVStream *st;
3007 
3008  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3009  start_index = is->last_video_stream;
3010  old_index = is->video_stream;
3011  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3012  start_index = is->last_audio_stream;
3013  old_index = is->audio_stream;
3014  } else {
3015  start_index = is->last_subtitle_stream;
3016  old_index = is->subtitle_stream;
3017  }
3018  stream_index = start_index;
3019  for (;;) {
3020  if (++stream_index >= is->ic->nb_streams)
3021  {
3022  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3023  {
     /* subtitles can be cycled to "disabled" */
3024  stream_index = -1;
3025  is->last_subtitle_stream = -1;
3026  goto the_end;
3027  }
3028  if (start_index == -1)
3029  return;
3030  stream_index = 0;
3031  }
     /* full wrap-around without finding another candidate: keep current */
3032  if (stream_index == start_index)
3033  return;
3034  st = ic->streams[stream_index];
3035  if (st->codec->codec_type == codec_type) {
3036  /* check that parameters are OK */
3037  switch (codec_type) {
3038  case AVMEDIA_TYPE_AUDIO:
3039  if (st->codec->sample_rate != 0 &&
3040  st->codec->channels != 0)
3041  goto the_end;
3042  break;
3043  case AVMEDIA_TYPE_VIDEO:
3044  case AVMEDIA_TYPE_SUBTITLE:
3045  goto the_end;
3046  default:
3047  break;
3048  }
3049  }
3050  }
3051  the_end:
3052  stream_component_close(is, old_index);
3053  stream_component_open(is, stream_index);
3054 }
3055 
3056 
/* NOTE(review): the signature line was lost in this extract; presumably
 * `static void toggle_full_screen(VideoState *is)` — confirm upstream.
 * Reopens the video window with the fullscreen flag toggled (the toggle
 * of is_full_screen itself is on a line lost in this extract). */
3058 {
3059 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3060  /* OS X needs to reallocate the SDL overlays */
3061  int i;
3062  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3063  is->pictq[i].reallocate = 1;
3064 #endif
3066  video_open(is, 1, NULL);
3067 }
3068 
/* NOTE(review): the signature line was lost in this extract; presumably
 * `static void toggle_audio_display(VideoState *is)` — confirm upstream.
 * Advances show_mode to the next mode valid for the available streams
 * (video mode needs a video stream, wave/RDFT modes need audio),
 * clears the window and forces a refresh when the mode changes. */
3070 {
3071  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3072  int next = is->show_mode;
3073  do {
3074  next = (next + 1) % SHOW_MODE_NB;
3075  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3076  if (is->show_mode != next) {
     /* clear the window (the fill_rectangle call head was lost here) */
3078  is->xleft, is->ytop, is->width, is->height,
3079  bgcolor, 1);
3080  is->force_refresh = 1;
3081  is->show_mode = next;
3082  }
3083 }
3084 
/* Pump SDL events and, while none are pending, keep refreshing the
 * display: sleep for the remaining frame time, then call video_refresh
 * unless paused (a forced refresh overrides pause). Also auto-hides the
 * mouse cursor after inactivity (the timeout condition line was lost in
 * this extract). Returns when an event is available in *event. */
3085 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3086  double remaining_time = 0.0;
3087  SDL_PumpEvents();
3088  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3090  SDL_ShowCursor(0);
3091  cursor_hidden = 1;
3092  }
3093  if (remaining_time > 0.0)
3094  av_usleep((int64_t)(remaining_time * 1000000.0));
3095  remaining_time = REFRESH_RATE;
3096  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3097  video_refresh(is, &remaining_time);
3098  SDL_PumpEvents();
3099  }
3100 }
3101 
3102 /* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard shortcuts (quit, pause,
 * fullscreen, stream cycling, seeking), mouse seeking (click position
 * mapped to a fraction of the file), window resize/expose, and the
 * internal FF_QUIT/FF_ALLOC events. Never returns except via do_exit.
 * NOTE(review): this extract dropped several lines (the
 * stream_cycle_channel calls for the a/v/t keys and a null-check before
 * the mouse-seek block) — confirm against upstream ffplay.c. */
3103 static void event_loop(VideoState *cur_stream)
3104 {
3105  SDL_Event event;
3106  double incr, pos, frac;
3107 
3108  for (;;) {
3109  double x;
3110  refresh_loop_wait_event(cur_stream, &event);
3111  switch (event.type) {
3112  case SDL_KEYDOWN:
3113  if (exit_on_keydown) {
3114  do_exit(cur_stream);
3115  break;
3116  }
3117  switch (event.key.keysym.sym) {
3118  case SDLK_ESCAPE:
3119  case SDLK_q:
3120  do_exit(cur_stream);
3121  break;
3122  case SDLK_f:
3123  toggle_full_screen(cur_stream);
3124  cur_stream->force_refresh = 1;
3125  break;
3126  case SDLK_p:
3127  case SDLK_SPACE:
3128  toggle_pause(cur_stream);
3129  break;
3130  case SDLK_s: // S: Step to next frame
3131  step_to_next_frame(cur_stream);
3132  break;
     /* a/v/t cycle audio/video/subtitle streams; the
      * stream_cycle_channel calls were lost in this extract */
3133  case SDLK_a:
3135  break;
3136  case SDLK_v:
3138  break;
3139  case SDLK_t:
3141  break;
3142  case SDLK_w:
3143  toggle_audio_display(cur_stream);
3144  break;
3145  case SDLK_PAGEUP:
3146  incr = 600.0;
3147  goto do_seek;
3148  case SDLK_PAGEDOWN:
3149  incr = -600.0;
3150  goto do_seek;
3151  case SDLK_LEFT:
3152  incr = -10.0;
3153  goto do_seek;
3154  case SDLK_RIGHT:
3155  incr = 10.0;
3156  goto do_seek;
3157  case SDLK_UP:
3158  incr = 60.0;
3159  goto do_seek;
3160  case SDLK_DOWN:
3161  incr = -60.0;
3162  do_seek:
     /* seek either by bytes (estimated from bitrate) or by time */
3163  if (seek_by_bytes) {
3164  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3165  pos = cur_stream->video_current_pos;
3166  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3167  pos = cur_stream->audio_pkt.pos;
3168  } else
3169  pos = avio_tell(cur_stream->ic->pb);
3170  if (cur_stream->ic->bit_rate)
3171  incr *= cur_stream->ic->bit_rate / 8.0;
3172  else
3173  incr *= 180000.0;
3174  pos += incr;
3175  stream_seek(cur_stream, pos, incr, 1);
3176  } else {
3177  pos = get_master_clock(cur_stream);
3178  if (isnan(pos))
3179  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3180  pos += incr;
3181  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3182  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3183  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3184  }
3185  break;
3186  default:
3187  break;
3188  }
3189  break;
3190  case SDL_VIDEOEXPOSE:
3191  cur_stream->force_refresh = 1;
3192  break;
3193  case SDL_MOUSEBUTTONDOWN:
3194  if (exit_on_mousedown) {
3195  do_exit(cur_stream);
3196  break;
3197  }
     /* fall through: a click also seeks, like a drag */
3198  case SDL_MOUSEMOTION:
3199  if (cursor_hidden) {
3200  SDL_ShowCursor(1);
3201  cursor_hidden = 0;
3202  }
3204  if (event.type == SDL_MOUSEBUTTONDOWN) {
3205  x = event.button.x;
3206  } else {
3207  if (event.motion.state != SDL_PRESSED)
3208  break;
3209  x = event.motion.x;
3210  }
3211  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3212  uint64_t size = avio_size(cur_stream->ic->pb);
3213  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3214  } else {
3215  int64_t ts;
3216  int ns, hh, mm, ss;
3217  int tns, thh, tmm, tss;
3218  tns = cur_stream->ic->duration / 1000000LL;
3219  thh = tns / 3600;
3220  tmm = (tns % 3600) / 60;
3221  tss = (tns % 60);
     /* map the x coordinate to a fraction of the total duration */
3222  frac = x / cur_stream->width;
3223  ns = frac * tns;
3224  hh = ns / 3600;
3225  mm = (ns % 3600) / 60;
3226  ss = (ns % 60);
3227  fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3228  hh, mm, ss, thh, tmm, tss);
3229  ts = frac * cur_stream->ic->duration;
3230  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3231  ts += cur_stream->ic->start_time;
3232  stream_seek(cur_stream, ts, 0, 0);
3233  }
3234  break;
3235  case SDL_VIDEORESIZE:
3236  screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3237  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3238  screen_width = cur_stream->width = event.resize.w;
3239  screen_height = cur_stream->height = event.resize.h;
3240  cur_stream->force_refresh = 1;
3241  break;
3242  case SDL_QUIT:
3243  case FF_QUIT_EVENT:
3244  do_exit(cur_stream);
3245  break;
3246  case FF_ALLOC_EVENT:
3247  alloc_picture(event.user.data1);
3248  break;
3249  default:
3250  break;
3251  }
3252  }
3253 }
3254 
3255 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3256 {
3257  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3258  return opt_default(NULL, "video_size", arg);
3259 }
3260 
3261 static int opt_width(void *optctx, const char *opt, const char *arg)
3262 {
3263  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3264  return 0;
3265 }
3266 
3267 static int opt_height(void *optctx, const char *opt, const char *arg)
3268 {
3269  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3270  return 0;
3271 }
3272 
3273 static int opt_format(void *optctx, const char *opt, const char *arg)
3274 {
3275  file_iformat = av_find_input_format(arg);
3276  if (!file_iformat) {
3277  fprintf(stderr, "Unknown input format: %s\n", arg);
3278  return AVERROR(EINVAL);
3279  }
3280  return 0;
3281 }
3282 
3283 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3284 {
3285  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3286  return opt_default(NULL, "pixel_format", arg);
3287 }
3288 
/* Handler for "-sync": select the master clock ("audio", "video" or
 * "ext"); any other value is fatal.
 * NOTE(review): the assignment lines for each branch (presumably
 * av_sync_type = AV_SYNC_..._MASTER) were lost in this extract —
 * confirm against upstream ffplay.c. */
3289 static int opt_sync(void *optctx, const char *opt, const char *arg)
3290 {
3291  if (!strcmp(arg, "audio"))
3293  else if (!strcmp(arg, "video"))
3295  else if (!strcmp(arg, "ext"))
3297  else {
3298  fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3299  exit(1);
3300  }
3301  return 0;
3302 }
3303 
3304 static int opt_seek(void *optctx, const char *opt, const char *arg)
3305 {
3306  start_time = parse_time_or_die(opt, arg, 1);
3307  return 0;
3308 }
3309 
3310 static int opt_duration(void *optctx, const char *opt, const char *arg)
3311 {
3312  duration = parse_time_or_die(opt, arg, 1);
3313  return 0;
3314 }
3315 
3316 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3317 {
3318  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3319  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3320  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3321  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3322  return 0;
3323 }
3324 
3325 static void opt_input_file(void *optctx, const char *filename)
3326 {
3327  if (input_filename) {
3328  fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3329  filename, input_filename);
3330  exit(1);
3331  }
3332  if (!strcmp(filename, "-"))
3333  filename = "pipe:";
3334  input_filename = filename;
3335 }
3336 
3337 static int opt_codec(void *optctx, const char *opt, const char *arg)
3338 {
3339  const char *spec = strchr(opt, ':');
3340  if (!spec) {
3341  fprintf(stderr, "No media specifier was specified in '%s' in option '%s'\n",
3342  arg, opt);
3343  return AVERROR(EINVAL);
3344  }
3345  spec++;
3346  switch (spec[0]) {
3347  case 'a' : audio_codec_name = arg; break;
3348  case 's' : subtitle_codec_name = arg; break;
3349  case 'v' : video_codec_name = arg; break;
3350  default:
3351  fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3352  return AVERROR(EINVAL);
3353  }
3354  return 0;
3355 }
3356 
/* Sink for the no-op "-i" option below (accepted for ffmpeg command-line
 * compatibility; the value is ignored). */
static int dummy;

/* Command-line option table consumed by parse_options()/show_help_options().
 * Each entry: name, flags (HAS_ARG, OPT_* type/classification), destination
 * (variable pointer or .func_arg callback), help text, argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { &video_disable }, "disable video" },
    { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
    { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
    { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
    { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
    { NULL, },
};
3407 
/* Print a short usage synopsis via the log system (used by help and on
 * missing-input errors in main()). */
static void show_usage(void)
{
    av_log(NULL, AV_LOG_INFO, "Simple media player\n");
    av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
    av_log(NULL, AV_LOG_INFO, "\n");
}
3414 
3415 void show_help_default(const char *opt, const char *arg)
3416 {
3418  show_usage();
3419  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3420  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3421  printf("\n");
3424 #if !CONFIG_AVFILTER
3426 #else
3428 #endif
3429  printf("\nWhile playing:\n"
3430  "q, ESC quit\n"
3431  "f toggle full screen\n"
3432  "p, SPC pause\n"
3433  "a cycle audio channel\n"
3434  "v cycle video channel\n"
3435  "t cycle subtitle channel\n"
3436  "w show audio waves\n"
3437  "s activate frame-step mode\n"
3438  "left/right seek backward/forward 10 seconds\n"
3439  "down/up seek backward/forward 1 minute\n"
3440  "page down/page up seek backward/forward 10 minutes\n"
3441  "mouse click seek to percentage in file corresponding to fraction of width\n"
3442  );
3443 }
3444 
3445 static int lockmgr(void **mtx, enum AVLockOp op)
3446 {
3447  switch(op) {
3448  case AV_LOCK_CREATE:
3449  *mtx = SDL_CreateMutex();
3450  if(!*mtx)
3451  return 1;
3452  return 0;
3453  case AV_LOCK_OBTAIN:
3454  return !!SDL_LockMutex(*mtx);
3455  case AV_LOCK_RELEASE:
3456  return !!SDL_UnlockMutex(*mtx);
3457  case AV_LOCK_DESTROY:
3458  SDL_DestroyMutex(*mtx);
3459  return 0;
3460  }
3461  return 1;
3462 }
3463 
3464 /* Called from the main */
3465 int main(int argc, char **argv)
3466 {
3467  int flags;
3468  VideoState *is;
3469  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3470 
3472  parse_loglevel(argc, argv, options);
3473 
3474  /* register all codecs, demux and protocols */
3476 #if CONFIG_AVDEVICE
3478 #endif
3479 #if CONFIG_AVFILTER
3481 #endif
3482  av_register_all();
3484 
3485  init_opts();
3486 
3487  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3488  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3489 
3490  show_banner(argc, argv, options);
3491 
3492  parse_options(NULL, argc, argv, options, opt_input_file);
3493 
3494  if (!input_filename) {
3495  show_usage();
3496  fprintf(stderr, "An input file must be specified\n");
3497  fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3498  exit(1);
3499  }
3500 
3501  if (display_disable) {
3502  video_disable = 1;
3503  }
3504  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3505  if (audio_disable)
3506  flags &= ~SDL_INIT_AUDIO;
3507  if (display_disable)
3508  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3509 #if !defined(__MINGW32__) && !defined(__APPLE__)
3510  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3511 #endif
3512  if (SDL_Init (flags)) {
3513  fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3514  fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3515  exit(1);
3516  }
3517 
3518  if (!display_disable) {
3519  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3520  fs_screen_width = vi->current_w;
3521  fs_screen_height = vi->current_h;
3522  }
3523 
3524  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3525  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3526  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3527 
3529  fprintf(stderr, "Could not initialize lock manager!\n");
3530  do_exit(NULL);
3531  }
3532 
3533  av_init_packet(&flush_pkt);
3534  flush_pkt.data = (char *)(intptr_t)"FLUSH";
3535 
3536  is = stream_open(input_filename, file_iformat);
3537  if (!is) {
3538  fprintf(stderr, "Failed to initialize VideoState!\n");
3539  do_exit(NULL);
3540  }
3541 
3542  event_loop(is);
3543 
3544  /* never returns */
3545 
3546  return 0;
3547 }
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:83
SDL_Overlay * bmp
Definition: ffplay.c:121
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:432
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3316
output sine component ys
static void video_image_display(VideoState *is)
Definition: ffplay.c:801
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
const struct AVCodec * codec
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:408
float v
const char * s
Definition: avisynth_c.h:668
int width
Definition: ffplay.c:255
static int error_concealment
Definition: ffplay.c:300
#define OPT_EXPERT
Definition: cmdutils.h:149
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:261
enum AVSampleFormat fmt
Definition: ffplay.c:139
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3267
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:63
struct PacketQueue PacketQueue
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
struct AudioParams AudioParams
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2376
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:242
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:81
AVStream * subtitle_st
Definition: ffplay.c:222
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
int swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:735
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:115
static double rint(double x)
Definition: libm.h:141
int x
top left corner of pict, undefined when pict is not set
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:125
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1125
double rdftspeed
Definition: ffplay.c:312
double frame_timer
Definition: ffplay.c:229
int audio_last_serial
Definition: ffplay.c:194
static AVInputFormat * file_iformat
Definition: ffplay.c:272
#define OPT_VIDEO
Definition: cmdutils.h:151
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]
Definition: ffplay.c:245
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3325
const char * fmt
Definition: avisynth_c.h:669
int av_lockmgr_register(int(*cb)(void **mutex, enum AVLockOp op))
Register a user provided lock manager supporting the operations specified by AVLockOp.
struct AudioParams audio_filter_src
Definition: ffplay.c:197
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3273
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:53
Unlock the mutex.
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1134
external API header
int64_t pos
byte position in stream, -1 if unknown
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
AVRational sar
Definition: ffplay.c:127
static int default_height
Definition: ffplay.c:278
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:234
enum VideoState::ShowMode show_mode
AVFilterGraph * agraph
Definition: ffplay.c:263
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:175
int seek_flags
Definition: ffplay.c:159
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1134
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:671
int serial
Definition: ffplay.c:110
static int64_t audio_size
Definition: ffmpeg.c:126
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
static int64_t cur_time
Definition: ffserver.c:325
#define OPT_AUDIO
Definition: cmdutils.h:152
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1754
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
Definition: ffplay.c:772
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3001
int num
numerator
Definition: rational.h:44
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3283
int nb_colors
number of colors in pict, undefined when pict is not set
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1217
y1
Definition: lab5.m:33
MyAVPacketList * first_pkt
Definition: ffplay.c:106
int av_copy_packet(AVPacket *dst, AVPacket *src)
Copy packet, including contents.
Definition: avpacket.c:236
#define SWS_BICUBIC
Definition: swscale.h:60
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1203
static int seek_by_bytes
Definition: ffplay.c:289
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
double audio_diff_cum
Definition: ffplay.c:176
static void packet_queue_init(PacketQueue *q)
Definition: ffplay.c:400
Various defines for YUV<->RGB conversion.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:75
#define REFRESH_RATE
Definition: ffplay.c:89
AVInputFormat * iformat
Definition: ffplay.c:151
enum AVMediaType codec_type
Definition: rtp.c:36
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1055
int paused
Definition: ffplay.c:155
int64_t frame_last_dropped_pos
Definition: ffplay.c:235
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3337
double video_current_pts_drift
Definition: ffplay.c:241
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1716
int abort_request
Definition: ffplay.c:109
SDL_cond * subpq_cond
Definition: ffplay.c:227
#define a1
Definition: regdef.h:47
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1230
SDL_Rect last_display_rect
Definition: ffplay.c:252
#define wrap(func)
Definition: w64xmmtest.h:70
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
double audio_diff_threshold
Definition: ffplay.c:178
int pictq_rindex
Definition: ffplay.c:246
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:56
int64_t channel_layout
Definition: ffplay.c:138
struct VideoState VideoState
static int audio_disable
Definition: ffplay.c:281
AVStream * audio_st
Definition: ffplay.c:180
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:67
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Definition: log.c:279
static const char * audio_codec_name
Definition: ffplay.c:309
AVDictionaryEntry * av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
signed 16 bits
Definition: samplefmt.h:52
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:221
four components are given, that's all.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1836
AVLockOp
Lock operation used by lockmgr.
output residual component w
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:1002
AVStream * video_st
Definition: ffplay.c:238
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:2960
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
void * opaque
Definition: avio.h:53
AVSubtitleRect ** rects
set threshold d
initialize output if(nPeaks >3)%at least 3 peaks in spectrum for trying to find f0 nf0peaks
void av_picture_copy(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
Copy image src to dst.
Definition: avpicture.c:72
Format I/O context.
Definition: avformat.h:944
static int64_t sws_flags
Definition: ffplay.c:97
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3069
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
int av_sync_type
Definition: ffplay.c:168
int w
width of pict, undefined when pict is not set
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:177
static int workaround_bugs
Definition: ffplay.c:295
static int read_thread(void *arg)
Definition: ffplay.c:2667
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:114
int rdft_bits
Definition: ffplay.c:214
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation.
Definition: swresample.c:880
int size
Definition: ffplay.c:108
static int64_t start_time
Definition: ffplay.c:293
int subtitle_stream_changed
Definition: ffplay.c:221
enum AVSampleFormat sample_fmt
audio sample format
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:95
Lock the mutex.
uint8_t
double pts
Definition: ffplay.c:131
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:483
static int default_width
Definition: ffplay.c:277
int last_video_stream
Definition: ffplay.c:266
int last_subtitle_stream
Definition: ffplay.c:266
double external_clock
external clock base
Definition: ffplay.c:169
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:475
#define HAS_ARG
Definition: cmdutils.h:147
int audio_hw_buf_size
Definition: ffplay.c:182
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:2559
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:77
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
uint8_t * data[AV_NUM_DATA_POINTERS]
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2644
struct SwrContext * swr_ctx
Definition: ffplay.c:200
libavcodec/libavfilter gluing utilities
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3103
#define b
Definition: input.c:42
end end
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:798
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:332
#define NAN
Definition: math.h:7
int serial
Definition: ffplay.c:125
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:219
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:425
double external_clock_drift
external clock base - time (av_gettime) at which we updated external_clock
Definition: ffplay.c:170
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1022
static int framedrop
Definition: ffplay.c:306
static void alloc_picture(VideoState *is)
Definition: ffplay.c:1489
#define AV_LOG_QUIET
Definition: log.h:130
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:74
AVStream ** streams
Definition: avformat.h:992
AVPacket pkt
Definition: ffplay.c:100
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
static int64_t audio_callback_time
Definition: ffplay.c:322
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
double audio_current_pts
Definition: ffplay.c:201
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:343
int audio_buf_frames_pending
Definition: ffplay.c:190
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:452
static void sigterm_handler(int sig)
Definition: ffplay.c:1040
uint8_t * data
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:345
int freq
Definition: ffplay.c:136
static void update_external_clock_speed(VideoState *is, double speed)
Definition: ffplay.c:1183
int avformat_network_init(void)
Do global initialization of network components.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:1810
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
integer sqrt
Definition: avutil.txt:2
int width
Definition: ffplay.c:122
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
struct SubPicture SubPicture
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:79
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:459
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:489
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:75
int h
height of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:248
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:147
static int64_t duration
Definition: ffplay.c:294
int subpq_windex
Definition: ffplay.c:225
static void duplicate_right_border_pixels(SDL_Overlay *bmp)
Definition: ffplay.c:1518
int(* callback)(void *)
Definition: avio.h:52
int audio_pkt_temp_serial
Definition: ffplay.c:193
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:281
PacketQueue videoq
Definition: ffplay.c:239
int subpq_rindex
Definition: ffplay.c:225
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1892
AVDictionary * format_opts
Definition: cmdutils.c:68
frame
Definition: stft.m:14
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:250
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:93
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:130
static const uint8_t frame_size[4]
Definition: g723_1_data.h:58
Discrete Time axis x
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
libswresample public header
enum AVCodecID id
int audio_diff_avg_count
Definition: ffplay.c:179
const AVS_VideoInfo * vi
Definition: avisynth_c.h:695
int ytop
Definition: ffplay.c:255
int width
width and height of the video frame
Definition: frame.h:122
AVDictionary * metadata
Definition: avformat.h:1092
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:183
int seek_req
Definition: ffplay.c:158
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Create a mutex.
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1315
int read_pause_return
Definition: ffplay.c:162
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:354
static double get_video_clock(VideoState *is)
Definition: ffplay.c:1112
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:287
static int wanted_stream[AVMEDIA_TYPE_NB]
Definition: ffplay.c:284
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int y
top left corner of pict, undefined when pict is not set
#define MIN_FRAMES
Definition: ffplay.c:66
RDFTContext * rdft
Definition: ffplay.c:213
int error_concealment
error concealment flags
Spectrum Plot time data
const char * r
Definition: vf_curves.c:94
int capabilities
Codec capabilities.
#define RGBA_IN(r, g, b, a, s)
Definition: ffplay.c:541
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:95
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1244
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:162
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
const char * arg
struct VideoPicture VideoPicture
int flags
CODEC_FLAG_*.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:394
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:353
uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE]
Definition: ffplay.c:183
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:992
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
int frame_last_dropped_serial
Definition: ffplay.c:236
int video_stream
Definition: ffplay.c:237
int xpos
Definition: ffplay.c:216
int channels
Definition: ffplay.c:137
int subpq_size
Definition: ffplay.c:225
static enum ShowMode show_mode
Definition: ffplay.c:308
#define FFMAX(a, b)
Definition: common.h:56
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:443
static const OptionDef options[]
Definition: ffplay.c:3359
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3357
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:82
#define BPP
Definition: ffplay.c:565
static void update_external_clock_pts(VideoState *is, double pts)
Definition: ffplay.c:1169
double audio_clock
Definition: ffplay.c:174
int force_refresh
Definition: ffplay.c:154
int size
uint64_t channel_layout
Audio channel layout.
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3289
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2053
void av_rdft_calc(RDFTContext *s, FFTSample *data)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
uint32_t end_display_time
int64_t pts
Same as packet pts, in AV_TIME_BASE.
static int genpts
Definition: ffplay.c:297
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:662
static AVPacket flush_pkt
Definition: ffplay.c:324
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:331
double frame_last_returned_time
Definition: ffplay.c:233
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:112
static const char * subtitle_codec_name
Definition: ffplay.c:310
static int subtitle_disable
Definition: ffplay.c:283
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
Definition: ffplay.c:1536
AVFrame * avcodec_alloc_frame(void)
Allocate an AVFrame and set its fields to default values.
unsigned int nb_streams
A list of all streams in the file.
Definition: avformat.h:991
int step
Definition: ffplay.c:256
double frame_last_pts
Definition: ffplay.c:230
static SDL_Surface * screen
Definition: ffplay.c:329
struct AVRational AVRational
rational number numerator/denominator
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:62
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
SDL_mutex * mutex
Definition: ffplay.c:111
int audio_write_buf_size
Definition: ffplay.c:189
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
FFT buffer for g
Definition: stft_peak.m:17
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:116
int av_frame_get_channels(const AVFrame *frame)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
struct MyAVPacketList * next
Definition: ffplay.c:101
#define AV_CH_LAYOUT_STEREO_DOWNMIX
char filename[1024]
input or output filename
Definition: avformat.h:994
AVPicture pict
data+linesize for the bitmap of this subtitle.
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:196
#define FFMIN(a, b)
Definition: common.h:58
int reallocate
Definition: ffplay.c:124
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
SDL_Thread * subtitle_tid
Definition: ffplay.c:219
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:538
static int cursor_hidden
Definition: ffplay.c:314
ret
Definition: avfilter.c:821
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3445
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
int main(int argc, char **argv)
Definition: ffplay.c:3465
static void show_usage(void)
Definition: ffplay.c:3408
int nb_packets
Definition: ffplay.c:107
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3261
double frame_last_dropped_pts
Definition: ffplay.c:232
int frame_drops_late
Definition: ffplay.c:204
struct AudioParams audio_src
Definition: ffplay.c:195
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3085
t
Definition: genspecsines3.m:6
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:186
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:73
static int fast
Definition: ffplay.c:296
int last_i_start
Definition: ffplay.c:212
char filename[1024]
Definition: ffplay.c:254
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
Definition: ffplay.c:567
#define OPT_INT64
Definition: cmdutils.h:156
MyAVPacketList * last_pkt
Definition: ffplay.c:106
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1236
float u
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2121
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:321
SDL_mutex * subpq_mutex
Definition: ffplay.c:226
#define diff(a, as, b, bs)
Definition: vf_phase.c:80
AVSubtitle sub
Definition: ffplay.c:132
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:503
int64_t external_clock_time
last reference time
Definition: ffplay.c:171
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
int url_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:280
static int decoder_reorder_pts
Definition: ffplay.c:301
SDL_Thread * video_tid
Definition: ffplay.c:150
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:86
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:210
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1128
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
static const char * input_filename
Definition: ffplay.c:273
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:627
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3415
static double get_audio_clock(VideoState *is)
Definition: ffplay.c:1100
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:104
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
int last_audio_stream
Definition: ffplay.c:266
Stream structure.
Definition: avformat.h:643
void avcodec_flush_buffers(AVCodecContext *avctx)
Flush buffers, should be called when seeking or when switching to a different stream.
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:994
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1123
int64_t video_current_pos
Definition: ffplay.c:242
static int fs_screen_width
Definition: ffplay.c:275
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:78
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:134
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
Definition: opt.c:687
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
#define FF_IDCT_AUTO
static int screen_height
Definition: ffplay.c:280
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3310
1i.*Xphase exp()
NULL
Definition: eval.c:55
static int width
Definition: tests/utils.c:158
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:141
static int autoexit
Definition: ffplay.c:302
enum AVMediaType codec_type
AVFrame * frame
Definition: ffplay.c:205
double pts
Definition: ffplay.c:119
enum AVCodecID codec_id
static void do_exit(VideoState *is)
Definition: ffplay.c:1022
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:220
int sample_rate
samples per second
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
AVIOContext * pb
I/O context.
Definition: avformat.h:977
void av_log_set_flags(int arg)
Definition: log.c:274
static int loop
Definition: ffplay.c:305
int last_paused
Definition: ffplay.c:156
static int exit_on_keydown
Definition: ffplay.c:303
FIXME Range Coding of cb
Definition: snow.txt:367
FFT functions.
AVFilterContext * in_video_filter
Definition: ffplay.c:259
main external API structure.
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
int pictq_size
Definition: ffplay.c:246
SDL_mutex * pictq_mutex
Definition: ffplay.c:247
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:220
double max_frame_duration
Definition: ffplay.c:243
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:154
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
#define AV_SYNC_THRESHOLD
Definition: ffplay.c:73
int x
Definition: f_ebur128.c:90
SubPicture subpq[SUBPICTURE_QUEUE_SIZE]
Definition: ffplay.c:224
static const char * window_title
Definition: ffplay.c:274
static char * afilters
Definition: ffplay.c:317
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:62
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
Definition: ffplay.c:1045
static int av_sync_type
Definition: ffplay.c:292
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
int sample_rate
Sample rate of the audio data.
Definition: frame.h:326
Definition: f_ebur128.c:90
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1131
PacketQueue audioq
Definition: ffplay.c:181
synthesis window for stochastic i
int64_t seek_pos
Definition: ffplay.c:160
rational number numerator/denominator
Definition: rational.h:43
static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
Definition: ffplay.c:1646
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:282
double audio_current_pts_drift
Definition: ffplay.c:202
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:135
#define OPT_STRING
Definition: cmdutils.h:150
static void video_audio_display(VideoState *s)
Definition: ffplay.c:852
SDL_cond * cond
Definition: ffplay.c:112
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:82
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:1744
struct SwsContext * sws_opts
Definition: cmdutils.c:66
discard useless packets like 0 size packets in avi
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:100
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2650
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1188
AVFilterContext * out_video_filter
Definition: ffplay.c:260
int queue_attachments_req
Definition: ffplay.c:157
double external_clock_speed
speed of the external clock
Definition: ffplay.c:172
AVFilterContext * in_audio_filter
Definition: ffplay.c:261
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
#define snprintf
Definition: snprintf.h:34
int error
contains the error code or 0 if no error happened
Definition: avio.h:102
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
misc parsing utilities
#define FF_ALLOC_EVENT
Definition: ffplay.c:326
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
int audio_stream
Definition: ffplay.c:166
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:95
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:164
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:114
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2431
char * name
unique name for this input/output in the list
Definition: avfilter.h:1125
static int64_t cursor_last_shown
Definition: ffplay.c:313
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:87
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3255
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:103
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
double frame_last_duration
Definition: ffplay.c:231
static int flags
Definition: cpu.c:23
int64_t start_time
Decoding: position of the first frame of the component, in AV_TIME_BASE fractional seconds...
Definition: avformat.h:1001
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
int frame_drops_early
Definition: ffplay.c:203
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:83
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2073
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:107
int sample_array_index
Definition: ffplay.c:211
SDL_cond * continue_read_thread
Definition: ffplay.c:268
static char * vfilters
Definition: ffplay.c:316
uint8_t max_lowres
maximum value for lowres supported by the decoder
#define OPT_BOOL
Definition: cmdutils.h:148
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:171
static int exit_on_mousedown
Definition: ffplay.c:304
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
#define SDL_AUDIO_BUFFER_SIZE
Definition: ffplay.c:70
#define CODEC_FLAG_EMU_EDGE
Don&#39;t draw edges.
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:982
int pictq_windex
Definition: ffplay.c:246
int64_t pos
Definition: ffplay.c:120
static void check_external_clock_sync(VideoState *is, double pts)
Definition: ffplay.c:1176
static double get_external_clock(VideoState *is)
Definition: ffplay.c:1124
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
#define YUVA_OUT(d, y, u, v, a)
Definition: ffplay.c:559
static int video_thread(void *arg)
Definition: ffplay.c:1879
#define OPT_INT
Definition: cmdutils.h:153
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:170
AVDictionary * codec_opts
Definition: cmdutils.c:68
struct AudioParams audio_tgt
Definition: ffplay.c:199
#define ALPHA_BLEND(a, oldp, newp, s)
Definition: ffplay.c:538
int height
Definition: ffplay.c:122
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
Free mutex resources.
uint8_t * audio_buf
Definition: ffplay.c:184
static int display_disable
Definition: ffplay.c:290
static int video_disable
Definition: ffplay.c:282
int allocated
Definition: ffplay.c:123
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:689
int audio_buf_index
Definition: ffplay.c:188
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
uint8_t * audio_buf1
Definition: ffplay.c:185
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3304
static double c[64]
static int screen_width
Definition: ffplay.c:279
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:700
uint32_t start_display_time
FFTSample * rdft_data
Definition: ffplay.c:215
AVSampleFormat
Audio Sample Formats.
Definition: samplefmt.h:49
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1302
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
int audio_clock_serial
Definition: ffplay.c:175
struct MyAVPacketList MyAVPacketList
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:56
char * key
Definition: dict.h:81
int den
denominator
Definition: rational.h:45
function y
Definition: D.m:1
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
PacketQueue subtitleq
Definition: ffplay.c:223
struct AVInputFormat * iformat
Can only be iformat or oformat, not both at the same time.
Definition: avformat.h:957
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
static int idct
Definition: ffplay.c:299
static int lowres
Definition: ffplay.c:298
AVPacket audio_pkt
Definition: ffplay.c:192
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:99
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static int infinite_buffer
Definition: ffplay.c:307
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:82
int eof_reached
true if eof reached
Definition: avio.h:96
int len
int channels
number of audio channels
unsigned int audio_buf1_size
Definition: ffplay.c:187
SDL_Thread * read_tid
Definition: ffplay.c:149
printf("static const uint8_t my_array[100] = {\n")
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
static int pictq_prev_picture(VideoState *is)
Definition: ffplay.c:1283
The official guide to swscale for confused that is
Definition: swscale.txt:2
int abort_request
Definition: ffplay.c:153
AVFilterContext * out_audio_filter
Definition: ffplay.c:262
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:381
int height
Definition: ffplay.c:255
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:176
int flags2
CODEC_FLAG2_*.
An instance of a filter.
Definition: avfilter.h:524
AVPacket audio_pkt_temp
Definition: ffplay.c:191
int bit_rate
Decoding: total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1016
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:700
int64_t duration
Decoding: duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1009
int height
Definition: frame.h:122
#define AV_LOG_INFO
Definition: log.h:156
static const char * video_codec_name
Definition: ffplay.c:311
Filter the word “frame” indicates either a video frame or a group of audio samples
#define MAX_QUEUE_SIZE
Definition: ffplay.c:65
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:461
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
#define AV_DICT_IGNORE_SUFFIX
Definition: dict.h:68
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
static int subtitle_thread(void *arg)
Definition: ffplay.c:1982
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1151
#define FF_QUIT_EVENT
Definition: ffplay.c:327
int xleft
Definition: ffplay.c:255
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:1700
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:679
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:85
int video_clock_serial
Definition: ffplay.c:244
AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:391
SDL_cond * pictq_cond
Definition: ffplay.c:248
int subtitle_stream
Definition: ffplay.c:220
unsigned int audio_buf_size
Definition: ffplay.c:186
int64_t seek_rel
Definition: ffplay.c:161
int realtime
Definition: ffplay.c:164
static void free_subpicture(SubPicture *sp)
Definition: ffplay.c:767
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:117
#define YUVA_IN(y, u, v, a, s, pal)
Definition: ffplay.c:550
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:702
static void video_display(VideoState *is)
Definition: ffplay.c:1089
static int show_status
Definition: ffplay.c:291
static int compute_mod(int a, int b)
Definition: ffplay.c:847
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:52
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2336
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:127
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3057
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int fs_screen_height
Definition: ffplay.c:276
double last_vis_time
Definition: ffplay.c:217
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
Definition: avformat.h:725
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:190
static void pictq_next_picture(VideoState *is)
Definition: ffplay.c:1272
#define av_unused
Definition: attributes.h:114
#define tb
Definition: regdef.h:68
double video_current_pts
Definition: ffplay.c:240
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:242
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
AVFormatContext * ic
Definition: ffplay.c:163
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:447
int no_background
Definition: ffplay.c:152
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.