comparison sv/videoio/SDL_ffmpeg.cpp @ 125:66af7c1b10d9 (against parent 124:e795e4065870)

author    ivand_qmul
date      Mon, 22 Oct 2007 13:59:27 +0000
parents   (none)
children  587ad94d6ac2
/*******************************************************************************
* *
* SDL_ffmpeg is a library for basic multimedia functionality. *
* SDL_ffmpeg is based on ffmpeg. *
* *
* Copyright (C) 2007 Arjan Houben *
* *
* SDL_ffmpeg is free software: you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License as published *
* by the Free Software Foundation, either version 3 of the License, or any *
* later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
*******************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef __cplusplus
extern "C" {
#endif

// SDL_ffmpeg.h is needed on every platform, so include it outside the
// platform-specific blocks
#include "SDL_ffmpeg.h"

#ifdef WIN32
#include <SDL.h>
#include <SDL_thread.h>
#endif

#ifdef __unix__
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#endif
#ifdef __cplusplus
}
#endif
#include "../../sv/main/MainWindow.h"

//const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
//const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;

int FFMPEG_init_was_called = 0;

SDL_ffmpegFile* SDL_ffmpegCreateFile() {

    // create SDL_ffmpegFile pointer
    SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
    if(!file) return 0;

    file->_ffmpeg = av_alloc_format_context(); //(AVFormatContext*)malloc(sizeof(AVFormatContext));

    // create a semaphore for every file
    file->decode = SDL_CreateSemaphore(1);

    // allocate room for VStreams
    file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->vs) {
        free( file );
        return 0;
    }

    // allocate room for AStreams
    file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->as) {
        // also free the video stream array allocated above
        free( file->vs );
        free( file );
        return 0;
    }

    // initialize variables with standard values
    file->audioStream = -1;
    file->videoStream = -1;

    file->offset = 0;
    file->videoOffset = 0;
    file->startTime = 0;

    file->threadID = 0;

    return file;
}

void SDL_ffmpegFree(SDL_ffmpegFile* file) {

    SDL_ffmpegStopDecoding(file);

    SDL_ffmpegFlush(file);

    free(file);
}

SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {

    // register all codecs
    if(!FFMPEG_init_was_called) {
        FFMPEG_init_was_called = 1;
        av_register_all();
    }

    // open new ffmpegFile
    SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
    if(!file) return 0;

    // information about format is stored in file->_ffmpeg

    // open the file
    if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
        fprintf(stderr, "could not open \"%s\"\n", filename);
        free(file);
        return 0;
    }

    // retrieve format information
    if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
        fprintf(stderr, "could not retrieve stream info\n");
        free(file);
        return 0;
    }

    // dump info to logfile
    // dump_format(file->_ffmpeg, 0, filename, 0);

    // find the streams in the file
    file->VStreams = 0;
    file->AStreams = 0;
    file->threadActive = 0;

    // iterate through all the streams and store audio/video streams
    size_t i;
    for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {

        if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {

            // if this is a stream of the correct type, we create a new SDL_ffmpegStream for it
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // save width, height and pixFmt of our outputframes
                stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
                stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
                stream->pixFmt = PIX_FMT_RGB24;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);

                if(!codec) {
                    free(stream);
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free(stream);
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    stream->audio = 0;
                    stream->size = 0;
                    stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );

                    file->vs[file->VStreams] = stream;
                    file->VStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {

            // if this is a stream of the correct type, we create a new SDL_ffmpegStream for it
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                stream->width = 0;
                stream->height = 0;
                stream->pixFmt = PIX_FMT_RGB24;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);

                if(!codec) {
                    free( stream );
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free( stream );
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
                    stream->size = 0;
                    stream->imageBuffer = 0;

                    file->as[file->AStreams] = stream;
                    file->AStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        }
    }

    return file;
}
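
/* Added illustrative sketch (not part of the original library): a minimal
   example of how a caller might use SDL_ffmpegOpen together with the stream
   selection and decode-thread calls defined below. The function name and the
   error-handling policy are assumptions; it is kept inside #if 0 so it does
   not affect the build. */
#if 0
static int exampleOpenAndStart(const char *path) {

    SDL_ffmpegFile *file = SDL_ffmpegOpen(path);
    if(!file) return -1;

    // select the first audio and video streams found by SDL_ffmpegOpen, if any
    SDL_ffmpegSelectVideoStream(file, 0);
    SDL_ffmpegSelectAudioStream(file, 0);

    // start the background thread that fills the audio and video buffers
    SDL_ffmpegStartDecoding(file);

    // ... consume data with SDL_ffmpegGetVideo / SDL_ffmpegGetAudio ...

    SDL_ffmpegStopDecoding(file);
    SDL_ffmpegFree(file);
    return 0;
}
#endif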

SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {

    MainWindow * MWinsA = MainWindow::instance();
    if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;

    SDL_SemWait(file->vs[file->videoStream]->sem);

    bufferImage *option = 0;
    int i;

    for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {

        // if this entry does not exist, continue
        if(!file->vs[file->videoStream]->imageBuffer[i]) continue;

        int64_t pos = MWinsA->Get_CurAudioTime();
        // do we have an image that should have been shown?
        if(file->vs[file->videoStream]->imageBuffer[i]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000) {

            // if this is the first option we find, we simply save it
            if(!option) {

                option = file->vs[file->videoStream]->imageBuffer[i];

                // set to 0 so we know this position in the buffer is available again
                file->vs[file->videoStream]->imageBuffer[i] = 0;

            } else {

                // we found a newer possible timestamp, so we delete the older one
                if( option->timestamp < file->vs[file->videoStream]->imageBuffer[i]->timestamp) {

                    // this image is too old, we discard it
                    SDL_FreeSurface( option->img );

                    // free old option
                    free( option );

                    // new pointer to position in container
                    option = file->vs[file->videoStream]->imageBuffer[i];

                    // set to 0 so we know this position in the buffer is available again
                    file->vs[file->videoStream]->imageBuffer[i] = 0;
                }
            }
        }
    }

    // if we did not find an option, we exit
    if(!option) {
        // release the lock
        SDL_SemPost(file->vs[file->videoStream]->sem);
        return 0;
    }

    // we found an option, so we return the image data; note that the video
    // semaphore stays locked until SDL_ffmpegReleaseVideo is called
    SDL_Surface *img = option->img;

    // the surface is handed to the caller, so the wrapper struct is no longer needed
    free( option );

    return img;
}

int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {

    // if there was no valid video stream, we should not release
    if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;

    // free surface
    SDL_FreeSurface(bmp);

    // release semaphore if needed
    if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}
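
/* Added illustrative sketch (not part of the original library): the
   Get/Release pairing above is easy to get wrong, so this shows one render
   pass. SDL_ffmpegGetVideo returns a surface while the video semaphore is
   still held; SDL_ffmpegReleaseVideo frees the surface and posts the
   semaphore. The `screen` surface is an assumption of the caller's setup;
   kept inside #if 0 so it does not affect the build. */
#if 0
static void examplePresentFrame(SDL_ffmpegFile *file, SDL_Surface *screen) {

    SDL_Surface *frame = SDL_ffmpegGetVideo(file);

    if(frame) {
        // we own the frame (and the semaphore) until it is released
        SDL_BlitSurface(frame, 0, screen, 0);
        SDL_Flip(screen);

        // hand the surface back; this also posts the video semaphore
        SDL_ffmpegReleaseVideo(file, frame);
    }
}
#endif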

SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {

    // check if we have any audiostreams
    if(!file->AStreams) return 0;

    // check if the requested id is possible
    if(audioID >= file->AStreams) return 0;

    // return audiostream linked to audioID
    return file->as[audioID];
}

int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {

    // check if we have any audiostreams
    if(!file->AStreams) return -1;

    // check if the requested id is possible
    if(audioID >= file->AStreams) return -1;

    // set current audiostream to stream linked to audioID
    file->audioStream = audioID;

    return 0;
}

SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {

    // check if we have any videostreams
    if(!file->VStreams) return 0;

    // check if the requested id is possible
    if(videoID >= file->VStreams) return 0;

    // return videostream linked to videoID
    return file->vs[videoID];
}

int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {

    // check if we have any videostreams
    if(!file->VStreams) return -1;

    // check if the requested id is possible
    if(videoID >= file->VStreams) return -1;

    // set current videostream to stream linked to videoID
    file->videoStream = videoID;

    return 0;
}

int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {

    // start a thread that continues to fill audio/video buffers
    if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);

    return 0;
}

int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {

    // stop decode thread
    file->threadActive = 0;
    if(file->threadID) SDL_WaitThread(file->threadID, 0);

    // set threadID to zero, so we can check for concurrent threads
    file->threadID = 0;

    return -1;
}

int SDL_ffmpegDecodeThread(void* data) {

    static struct SwsContext *img_convert_ctx;

    // unpack the void pointer
    SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;

    // flag this thread as active, used for stopping
    file->threadActive = 1;

    // create a packet for our data
    AVPacket pack;

    // reserve some pointers for use in loop
    AVFrame *inFrame, *inFrameRGB;

    // allocate a frame
    inFrame = avcodec_alloc_frame();

    // allocate another frame for unknown->RGB conversion
    inFrameRGB = avcodec_alloc_frame();

    if(SDL_ffmpegValidVideo(file)) {
        // allocate buffer
        uint8_t *inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
                                                                      file->vs[file->videoStream]->width,
                                                                      file->vs[file->videoStream]->height) );

        // put buffer into our reserved frame
        avpicture_fill( (AVPicture*)inFrameRGB,
                        inVideoBuffer,
                        file->vs[file->videoStream]->pixFmt,
                        file->vs[file->videoStream]->width,
                        file->vs[file->videoStream]->height);
    }

    // allocate temporary audiobuffer
    int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );

    // reserve integer for use in loop
    int got_frame;

    while(file->threadActive) {

        // read a packet from the file
        if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
            // thread is idle
            SDL_Delay(10);
            continue;
        }

        // if audio is skipped, drop audio packets right away
        // (only look at the selected audio stream if there actually is one)
        if (file->skipAudio && SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id){
            av_free_packet(&pack);
            SDL_Delay(1);
            continue;
        }

        // we got a packet, let's handle it

        // we start by entering the decode semaphore
        SDL_SemWait(file->decode);

        // If it's an audio packet from our stream...
        if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {

            uint8_t *data = pack.data;
            int size = pack.size;
            int len;

            while(size > 0 && file->threadActive) {

                // Decode the packet; len is the number of input bytes consumed,
                // got_frame receives the number of output bytes written to samples
                len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);

                // if error, we skip the frame
                if(len < 0 || !got_frame) {
                    size = 0;
                    break;
                }

                // advance by the number of input bytes that were consumed
                data += len;
                size -= len;

                // if the audiobuffer is full, the thread waits
                while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
                       file->threadActive) {
                    SDL_Delay(5);
                }

                // calculate the pts of this audio data
                int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;

                // if the audiobuffer is empty
                if(!file->as[file->audioStream]->size) {

                    // we set a new pts
                    file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;

                    // we set totalBytes to zero, as this represents the number
                    // of bytes that were played since our last 'hardPts'
                    file->as[file->audioStream]->totalBytes = 0;
                }

                // no need to store old samples
                if(audiopts >= SDL_ffmpegGetPosition(file)) {

                    // enter audio semaphore
                    SDL_SemWait(file->as[file->audioStream]->sem);

                    // copy data from temporary buffer to streambuffer
                    memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);

                    // set the new size of the audiobuffer
                    file->as[file->audioStream]->size += got_frame;

                    // we leave the audio semaphore
                    SDL_SemPost(file->as[file->audioStream]->sem);
                }
            }
        }

        // If it's a video packet from our video stream...
        if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {

            got_frame = 0;

            // Decode the packet
            avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);

            if(got_frame) {

                // create imagebuffer
                bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );

                // write timestamp into the buffer
                buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;

                // useful when dealing with B-frames
                if(pack.dts == AV_NOPTS_VALUE) {
                    // if we did not get a valid timestamp, we make one up based on the last
                    // valid timestamp + the duration of a frame
                    buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
                }

                // if new timestamp is from future, we proceed
                // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
                // {
                if (img_convert_ctx == NULL) {
                    img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                                                     file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     file->vs[file->videoStream]->pixFmt,
                                                     sws_flags, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL) {
                        fprintf(stderr, "Cannot initialize the conversion context\n");
                        exit(1);
                    }
                }

                // we convert whatever type of data we got to RGB24
                sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
                          0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);

                // previously this conversion was done with img_convert:
                /* img_convert((AVPicture*)inFrameRGB,
                               file->vs[file->videoStream]->pixFmt,
                               (AVPicture*)inFrame,
                               ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                               file->vs[file->videoStream]->width,
                               file->vs[file->videoStream]->height);
                */

                // allocate image room
                buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
                                                file->vs[file->videoStream]->width,
                                                file->vs[file->videoStream]->height,
                                                24, 0x0000FF, 0x00FF00, 0xFF0000, 0);

                // copy image data to image room
                memcpy(buf->img->pixels, inFrameRGB->data[0],
                       file->vs[file->videoStream]->width * file->vs[file->videoStream]->height * 3);

                // we write the lastTimestamp we got
                file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;

                int i;
                int again = 1;

                // keep trying to fit in buffer, until the data was actually placed in the buffer
                while(again && file->threadActive) {

                    // we enter the video semaphore
                    SDL_SemWait(file->vs[file->videoStream]->sem);

                    // loop through all positions in buffer until an empty
                    // space was found
                    for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
                        // if this place in the buffer is empty we write our new frame
                        if(file->vs[file->videoStream]->imageBuffer[i] == 0) {
                            file->vs[file->videoStream]->imageBuffer[i] = buf;
                            // we placed our image in the buffer, moving on
                            again = 0;
                            break;
                        }
                    }

                    // we leave the video semaphore
                    SDL_SemPost(file->vs[file->videoStream]->sem);

                    // frames aren't being released every ms, so we can take some
                    // time before we try and fit our new image again
                    if(again) SDL_Delay(5);
                }
                // }
                //else {
                //    // if our decoded frame was too old, we don't bother putting
                //    // it in our buffer
                //    free( buf );
                // }
            }
        }

        // we leave the decode semaphore
        SDL_SemPost(file->decode);

        // the packet data was either copied into our own buffers or skipped,
        // so we can release it before reading the next one
        av_free_packet(&pack);

        if ((file->skipAudio)&&(file->delay))
            SDL_Delay(3);
    }

    // if we stop this thread, we can release the packet we reserved
    av_free_packet(&pack);

    return 0;
}

int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {

    // if the seek position is out of bounds, return
    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;

    // start by flushing the buffers
    SDL_ffmpegFlush(file);

    // we enter the decode semaphore so the decode thread cannot be working on
    // data we are trying to flush
    SDL_SemWait(file->decode);

    // if the stream has an offset, add it to the start time
    int64_t startOffset = 0;
    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
        // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
    }
    //if (file->skipAudio) startOffset=0;

    // calculate the final timestamp for the seek action; this is in AV_TIME_BASE fractional seconds
    startOffset += (timestamp * AV_TIME_BASE) / 1000;
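    // worked example (added note): AV_TIME_BASE is 1000000, so a requested
    // timestamp of 2500 ms becomes 2500 * 1000000 / 1000 = 2500000 units
    // (microseconds), which is what av_seek_frame expects with stream_index -1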

    // do the actual seeking; AVSEEK_FLAG_BACKWARD means we jump to the point
    // closest to the point we want, resulting in an earlier position if the jump
    // could not go to the exact point we wanted
    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
        SDL_Delay(5);
        // set some values in our file so we know where to start playing
        file->offset = timestamp;
        file->startTime = av_gettime()/1000; //SDL_GetTicks();

        // if we have a valid video, we probably have some data we want to flush
        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

            // flushing happens inside the semaphore so as not to interfere with the
            // decoding thread
            SDL_SemWait(file->vs[file->videoStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
            SDL_SemPost(file->vs[file->videoStream]->sem);
        }

        // same goes for audio: if there is data, we flush it
        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {

            // make sure this is done thread-safe, so inside the appropriate
            // semaphore
            SDL_SemWait(file->as[file->audioStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
            SDL_SemPost(file->as[file->audioStream]->sem);
        }

        // then there is our flush call
        SDL_ffmpegFlush(file);

        // and we are done, let's release the decode semaphore so the decode
        // thread can move on, filling buffers from our new position
        SDL_SemPost(file->decode);

        return 0;
    }

    // if, for some reason, we could not seek, we should still flush our buffers
    SDL_ffmpegFlush(file);

    // and release our lock on the decode thread
    SDL_SemPost(file->decode);

    return -1;
}

int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {

    // same thing as a normal seek, just take the current position into account
    return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
}

int SDL_ffmpegFlush(SDL_ffmpegFile *file) {

    // if we have a valid audio stream, we flush it
    if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {

        // flush the audiobuffer from inside the semaphore, be thread-safe!
        SDL_SemWait(file->as[file->audioStream]->sem);

        file->as[file->audioStream]->size = 0;

        SDL_SemPost(file->as[file->audioStream]->sem);
    }

    // if we have a valid video stream, we flush some more
    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

        // flush videobuffer
        int i;

        // again, be thread-safe!
        SDL_SemWait(file->vs[file->videoStream]->sem);

        // make sure we delete all frames from the buffer
        for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {

            // if this entry does not exist, continue
            if(!file->vs[file->videoStream]->imageBuffer[i]) continue;

            // free the actual image data
            SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );

            // and free the struct containing it
            free( file->vs[file->videoStream]->imageBuffer[i] );

            // set position in buffer to 0, so we know it is empty
            file->vs[file->videoStream]->imageBuffer[i] = 0;
        }

        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}

int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // no valid audio means no audio to get
    if( !SDL_ffmpegValidAudio(file) || file->pause || file->skipAudio ) return 0;

    // working on the audiobuffer should always be done from inside the semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

    // if the caller asks for more audio data than we can give, we send what we
    // can actually give, writing the number of bytes into len
    if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

    // decrease the size of our audiobuffer by len
    file->as[file->audioStream]->size -= *len;

    // len represents the number of bytes we send, so we increase the total
    file->as[file->audioStream]->totalBytes += *len;

    // the videoOffset makes sure we are always in sync with the audio;
    // it is the difference between the position where we are in the
    // stream (GetPosition) and where we should be (pts)
    // we use the same offset when selecting the current videoframe
    file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

    // we calculate the new pts for our audio data based on the hardPts
    // (the one we got from ffmpeg) and then calculating how far we
    // have come since
    file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
    // since we use 16 bits per sample, we divide totalBytes by 2 before dividing by the samplerate
    file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);
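    // worked example (added note): for 48000 Hz stereo 16-bit audio,
    // totalBytes / (2 * 2) is the number of sample frames played, and dividing
    // that by 48 (sample frames per millisecond) yields elapsed milliseconds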

    // we return the audiobuffer; notice we are still inside the audio semaphore!
    // we only leave it by calling SDL_ffmpegReleaseAudio
    return file->as[file->audioStream]->audio;
}

int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {

    // no audio means no releasing
    if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;

    // this call should be paired with SDL_ffmpegGetAudio, as it provides us
    // with the correct length so we move the correct amount of data
    memmove( file->as[file->audioStream]->audio,
             file->as[file->audioStream]->audio+len,
             file->as[file->audioStream]->size );

    // work on the audio data is done, so we release the semaphore
    SDL_SemPost(file->as[file->audioStream]->sem);

    return 0;
}

int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
    //MainWindow * MWinsA=MainWindow::instance();

    // return the current play position of our file, in milliseconds
    if (file->skipAudio){
        return (av_gettime()/1000 + file->offset - file->startTime);
        //int64_t pos=MWinsA->Get_CurAudioTime();
        //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
    }
    else
        return (av_gettime()/1000 + file->offset - file->startTime);//SDL_GetTicks();
}

SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {

    // create audio spec
    SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );

    if(spec) {
        spec->format = AUDIO_S16SYS;
        spec->samples = samples;
        spec->userdata = file;
        spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
        spec->freq = 48000;
        spec->channels = 2;

        // if we have a valid audio file, we can use its data to create a
        // more appropriate audio spec
        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
            spec->freq = file->as[file->audioStream]->sampleRate;
            spec->channels = file->as[file->audioStream]->channels;
        }
    }

    return spec;
}
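
/* Added illustrative sketch (not part of the original library): one way to
   wire the spec returned by SDL_ffmpegGetAudioSpec into SDL audio. The
   callback pairs SDL_ffmpegGetAudio with SDL_ffmpegReleaseAudio, which keeps
   the audio semaphore balanced. Function names and the silence-fill policy
   are assumptions; kept inside #if 0 so it does not affect the build. */
#if 0
static void exampleAudioCallback(void *userdata, Uint8 *stream, int len) {

    // SDL_ffmpegGetAudioSpec stores the file pointer in spec->userdata
    SDL_ffmpegFile *file = (SDL_ffmpegFile*)userdata;
    int asked = len;

    // GetAudio shrinks len to the number of bytes actually buffered
    int8_t *buffer = SDL_ffmpegGetAudio(file, &len);

    if(buffer) {
        memcpy(stream, buffer, len);
        // this also releases the audio semaphore taken by GetAudio
        SDL_ffmpegReleaseAudio(file, len);
    } else {
        len = 0;
    }

    // fill whatever we could not provide with silence
    if(len < asked) memset(stream + len, 0, asked - len);
}

static int exampleOpenAudio(SDL_ffmpegFile *file) {

    SDL_AudioSpec *spec = SDL_ffmpegGetAudioSpec(file, 512, (void*)exampleAudioCallback);
    if(!spec) return -1;

    int result = SDL_OpenAudio(spec, 0);
    free(spec);

    if(result < 0) return -1;

    // unpause the audio device; SDL will now call exampleAudioCallback
    SDL_PauseAudio(0);
    return 0;
}
#endif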

int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {

    // returns the duration of the entire file in milliseconds; please note that
    // ffmpeg doesn't always get this value right! so don't bet your life on it...
    return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
}

int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {

    if(!w || !h) return -1;

    // if we have a valid video file selected, we use it;
    // if not, we send default values and return -1,
    // so by checking the return value you can tell if you got a valid size
    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
        *w = file->vs[file->videoStream]->width;
        *h = file->vs[file->videoStream]->height;
        return 0;
    }

    *w = 320;
    *h = 240;
    return -1;
}

int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {

    // this function is used to check if we selected a valid audio stream
    if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;

    return 1;
}

int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {

    // this function is used to check if we selected a valid video stream
    if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;

    return 1;
}

int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {

    // by putting 0 into state, we play the file;
    // this behaviour is analogous to SDL audio
    file->pause = state;

    if(!file->pause) {
        file->startTime = av_gettime()/1000; //SDL_GetTicks();
    }

    return 0;
}

int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
    return file->pause;
}