changeset 125:66af7c1b10d9

(none)
author ivand_qmul
date Mon, 22 Oct 2007 13:59:27 +0000
parents e795e4065870
children c26c73ca6d37
files data/fileio/VideoFileReader.cpp data/fileio/VideoFileReader.h layer/IntervalLayer.cpp sv/audioio/AudioCallbackPlaySource.cpp sv/main/main.cpp sv/videoio/SDL_ffmpeg.cpp sv/videoio/SDL_ffmpeg.h
diffstat 7 files changed, 1415 insertions(+), 5 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/data/fileio/VideoFileReader.cpp	Mon Oct 22 13:59:27 2007 +0000
@@ -0,0 +1,282 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*   
+	Sound Access
+		EASAIER client application.
+		Queen Mary 2007. Ivan Damnjanovic.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License as
+	published by the Free Software Foundation; either version 2 of the
+	License, or (at your option) any later version.  See the file
+	COPYING included with this distribution for more information.
+*/
+
+#ifdef HAVE_FFMPEG
+
+
+//#include "system/System.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <iostream>
+
+#include <QApplication>
+#include <QFileInfo>
+#include <QProgressDialog>
+
+#include "VideoFileReader.h"
+
+
+VideoFileReader::VideoFileReader(QString path, bool showProgress, CacheMode mode) :
+    CodedAudioFileReader(mode),
+    m_path(path)
+{
+    m_frameCount = 0;
+    m_channelCount = 0;
+    m_sampleRate = 0;
+    m_fileSize = 0;
+    m_bitrateNum = 0;
+    m_bitrateDenom = 0;
+    m_cancelled = false;
+    m_progress = 0;   // stays null unless showProgress is set
+
+    struct stat stat;
+    if (::stat(path.toLocal8Bit().data(), &stat) == -1 || stat.st_size == 0) {
+	m_error = QString("File %1 does not exist.").arg(path);
+	return;
+    }
+	
+	// standard SDL initialization stuff
+    if(SDL_Init(SDL_INIT_VIDEO|SDL_INIT_AUDIO) < 0) {
+        m_error = QString("Failed to initialise SDL: %1").arg(SDL_GetError());
+        return;
+    }
+    m_fileSize = stat.st_size;
+
+    
+    // open the file at the given path
+    film = SDL_ffmpegOpen(path.toLocal8Bit().data());
+    if (!film) {
+        m_error = QString("Failed to open file %1 for reading.").arg(path);
+        return;
+    }
+    if (path.endsWith("mpg") || path.endsWith("divx")) film->delay = 1;
+    // walk the detected streams, then select the first video and audio stream
+    film->skipAudio = 0;
+    film->skipVideo = 0;
+    SDL_ffmpegStream *str = 0;
+    for (int s = 0; s < film->VStreams; s++)
+        str = SDL_ffmpegGetVideoStream(film, s);
+    SDL_ffmpegSelectVideoStream(film, 0);
+    film->skipVideo = 1;
+    for (int s = 0; s < film->AStreams; s++)
+        str = SDL_ffmpegGetAudioStream(film, s);
+    SDL_ffmpegSelectAudioStream(film, 0);
+
+
+    if (showProgress) {
+		m_progress = new QProgressDialog
+	    (QObject::tr("Decoding %1...").arg(QFileInfo(path).fileName()),
+	    QObject::tr("Stop"), 0, 100);
+		m_progress->hide();
+    }
+
+    m_sampleRate = str->sampleRate;
+    m_channelCount = str->channels;
+    SDL_ffmpegPause(film, 0);
+    SDL_ffmpegStartDecoding(film);
+
+    if (!decodeAudio(film)) {
+        m_error = QString("Failed to decode audio from file %1 for reading.").arg(path);
+        return;
+    }
+    film->skipAudio = 1;
+    //SDL_ffmpegSeek(film, 0);
+    //SDL_ffmpegStopDecoding(film);
+    SDL_Delay(5);
+    film->skipVideo = 0;
+
+    film->videoThread = SDL_CreateThread(videoPlayCall, this);
+
+    if (showProgress) {
+	delete m_progress;
+	m_progress = 0;
+    }
+
+    //delete[] filebuffer;
+}
+
+
+VideoFileReader::~VideoFileReader()
+{
+	film->videoThreadActive = 0;
+	if(film->videoThread) SDL_WaitThread(film->videoThread, 0);
+}
+bool 
+VideoFileReader::decodeAudio(SDL_ffmpegFile* file)
+{
+	
+	
+	int64_t duration=((AVFormatContext *)file->_ffmpeg)->duration;
+    double elapsed = 0;
+	m_cancelled=false;
+	int audio_ends=0;
+	while((elapsed < duration)&&!m_cancelled ) {
+          
+        elapsed = double(m_frameCount)*1000000 /(m_channelCount*m_sampleRate);
+        double percent = (elapsed * 100) / duration;
+        int progress = int(percent);
+        if (progress < 1) progress = 1;
+        if (progress > 99) progress = 99;
+        if (m_progress && progress > m_progress->value()) {
+            m_progress->setValue(progress);
+            m_progress->show();
+            m_progress->raise();
+            qApp->processEvents();
+            if (m_progress->wasCanceled()) {
+                m_cancelled = true;
+            }
+        }
+		
+        // we tell SDL_ffmpegGetAudio how many bytes we need; the function then
+        // overwrites this value with the number of bytes it could actually give
+        int gotLength = 100000;
+        if (!isDecodeCacheInitialised()) {
+            initialiseDecodeCache();
+        }
+        // we try to get some data from our file
+        // important! this call is paired with SDL_ffmpegReleaseAudio
+        int16_t* audio = (int16_t *)SDL_ffmpegGetAudio(file, &gotLength);
+        for (int i = 0; i < gotLength/2; i++) {
+            float sample = float(*audio++) / float(2*32768);
+            addSampleToDecodeCache(sample);
+        }
+        // (in a standalone player these bytes would be copied to the audio card)
+        // if(audio) memcpy(stream, audio, gotLength);
+
+        // we release our audio data, so the decode thread can fill it again;
+        // we also inform this function of the number of bytes we used, so it can
+        // move the buffer accordingly
+        // important! this call is paired with SDL_ffmpegGetAudio
+        SDL_ffmpegReleaseAudio(file, gotLength);
+        //decode_audio(film, 1000000);
+        m_frameCount += gotLength/2;
+        if ((progress > 97) && (gotLength <= 0)) audio_ends++;
+        if (audio_ends >= 2000) m_cancelled = true;
+    }
+    m_frameCount /= m_channelCount;
+    if (isDecodeCacheInitialised()) finishDecodeCache();
+    return true;
+}
+bool 
+VideoFileReader::videoInit(SDL_ffmpegFile* file)
+{
+	int w,h;
+    // we get the size from our active video stream; if no active video stream
+    // exists, width and height are set to default values (320x240)
+    SDL_ffmpegGetVideoSize(file, &w, &h);
+	
+    // Open the Video device
+    screen = SDL_SetVideoMode(w, h, 0, SDL_DOUBLEBUF|SDL_HWSURFACE);
+	SDL_WM_SetCaption("EASAIER Video Player", "EASAIER Video Player");
+    if(!screen) {
+        printf("Couldn't open video: %s\n", SDL_GetError());
+        return false;
+    }
+	return true;
+}
+
+int VideoFileReader::videoPlayCall(void *t)
+{
+        return ((VideoFileReader *)t)->videoPlay();
+} 
+
+int
+VideoFileReader::videoPlay()
+{
+
+
+	film->videoThreadActive = 1;
+	MainWindow * MWins=MainWindow::instance();
+	
+    if (!videoInit(film)) {
+        m_error = QString("Failed to initialise video file for reading.");
+        return 0;
+    }
+	//const SDL_VideoInfo * vid=SDL_GetVideoInfo();
+	film->audioTime =0;
+	int w,h;
+	SDL_ffmpegGetVideoSize(film, &w, &h);
+	//SDL_ffmpegStartDecoding(film);
+    SDL_Delay(1000);
+	
+	while( film->videoThreadActive ) {
+
+		
+        if (MWins->isAudioPlaying())
+        {
+            // re-seek the video if it is paused or has drifted more than
+            // a second away from the current audio position
+            int64_t drift = film->audioTime - (int64_t)(MWins->Get_CurAudioTime());
+            if (SDL_ffmpegGetState(film) || labs((long)drift) >= 1000)
+            {
+                //SDL_Delay(1000);
+                film->audioTime = MWins->Get_CurAudioTime();
+                SDL_ffmpegSeek(film, film->audioTime);
+                SDL_ffmpegPause(film, 0);
+            }
+            else
+                film->audioTime = MWins->Get_CurAudioTime();
+        }
+        else
+        {
+            SDL_ffmpegPause(film, 1);
+        }
+
+        // we retrieve the current image from the file
+        // we get 0 if no frame could be retrieved
+        // important! please note this call should be paired with SDL_ffmpegReleaseVideo
+        SDL_Surface* bmp = SDL_ffmpegGetVideo(film);
+		
+        if(bmp) {
+
+			
+            // we got a frame, so we better show this one
+            SDL_BlitSurface(bmp, 0, screen, 0);
+
+            // we flip the double buffered screen so we might actually see something
+            SDL_Flip(screen);
+
+            // after releasing bmp you can no longer use it;
+            // you should call this function every time you get a frame!
+            SDL_ffmpegReleaseVideo((SDL_ffmpegFile *)film, bmp);
+        }
+
+        // we wish not to kill our poor cpu, so we give it some time off
+        SDL_Delay(10);
+    }
+	// after all is said and done, we should call this
+    SDL_ffmpegFree(film);
+	return 0;
+}
+
+void
+VideoFileReader::getSupportedExtensions(std::set<QString> &extensions)
+{
+    extensions.insert("mpg");
+    extensions.insert("avi");
+    extensions.insert("divx");
+    extensions.insert("mov");
+}
+
+#endif
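
The decodeAudio() loop above drives the paired SDL_ffmpegGetAudio / SDL_ffmpegReleaseAudio calls described in its comments: the caller asks for a number of bytes, receives a buffer pointer plus the number of bytes actually available, and must release exactly that many so the decode thread can refill the buffer. A minimal sketch of that pull pattern in isolation (the consumeSample() sink is a placeholder, not part of this changeset):

    #include "SDL_ffmpeg.h"

    // Sketch: drain one chunk of decoded audio from an SDL_ffmpegFile that is
    // already open and decoding (SDL_ffmpegOpen + SDL_ffmpegStartDecoding).
    void drainAudioOnce(SDL_ffmpegFile *file)
    {
        int gotLength = 100000;   // bytes requested, as in decodeAudio() above
        int16_t *audio = (int16_t *)SDL_ffmpegGetAudio(file, &gotLength);
        if (!audio) return;       // no audio available (paused, skipped or no stream)

        // gotLength now holds the number of bytes actually provided
        for (int i = 0; i < gotLength / 2; ++i) {
            float sample = float(audio[i]) / 32768.0f;  // 16-bit PCM to [-1, 1]
            consumeSample(sample);                      // placeholder sink
        }

        // must be paired with SDL_ffmpegGetAudio so the decode thread can refill
        SDL_ffmpegReleaseAudio(file, gotLength);
    }
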
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/data/fileio/VideoFileReader.h	Mon Oct 22 13:59:27 2007 +0000
@@ -0,0 +1,68 @@
+/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
+
+/*   
+	Sound Access
+		EASAIER client application.
+		Queen Mary 2007. Ivan Damnjanovic.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License as
+	published by the Free Software Foundation; either version 2 of the
+	License, or (at your option) any later version.  See the file
+	COPYING included with this distribution for more information.
+*/
+
+#ifndef _VIDEO_FILE_READER_H_
+#define _VIDEO_FILE_READER_H_
+
+#ifdef HAVE_FFMPEG
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include "SDL_ffmpeg.h"
+
+#ifdef __cplusplus
+}
+#endif
+#include "../../sv/main/MainWindow.h"
+#include "CodedAudioFileReader.h"
+//#include "base/AudioCallbackPlaySource.h"
+
+
+#include <set>
+//class AudioCallbackPlaySource;
+class QProgressDialog;
+
+class VideoFileReader : public CodedAudioFileReader
+{
+public:
+    VideoFileReader(QString path, bool showProgress, CacheMode cacheMode);
+    virtual ~VideoFileReader();
+
+    virtual QString getError() const { return m_error; }
+
+    static void getSupportedExtensions(std::set<QString> &extensions);
+    
+protected:
+    QString m_path;
+    QString m_error;
+    size_t m_fileSize;
+    double m_bitrateNum;
+    size_t m_bitrateDenom;
+    SDL_Surface *screen;
+    SDL_ffmpegFile *film;
+    QProgressDialog *m_progress;
+    bool m_cancelled;
+
+    bool decodeAudio(SDL_ffmpegFile* file);
+    bool videoInit(SDL_ffmpegFile* file);
+    int videoPlay();
+    static int videoPlayCall(void*);
+};
+
+#endif
+
+#endif
\ No newline at end of file
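
VideoFileReader::getSupportedExtensions() above is a static member, so callers can query the handled extensions ("mpg", "avi", "divx", "mov") without constructing a reader. A small illustrative helper (isVideoFile() is hypothetical, not part of this changeset):

    #include <set>
    #include <QFileInfo>
    #include <QString>
    #include "VideoFileReader.h"

    // Hypothetical helper: true if the file's suffix is one of the extensions
    // registered by VideoFileReader::getSupportedExtensions().
    bool isVideoFile(const QString &path)
    {
        std::set<QString> extensions;
        VideoFileReader::getSupportedExtensions(extensions);
        QString suffix = QFileInfo(path).suffix().toLower();
        return extensions.find(suffix) != extensions.end();
    }
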
--- a/layer/IntervalLayer.cpp	Mon Oct 22 13:55:21 2007 +0000
+++ b/layer/IntervalLayer.cpp	Mon Oct 22 13:59:27 2007 +0000
@@ -237,7 +237,7 @@
 				xE = x1;	
 			draw = drawText = drawStart = true;
 		}
-		else if (ti->end() > frame0 && ti->end <= frame1)
+		else if (ti->end() > frame0 && ti->end() <= frame1)
 		{
 			xS = v->getXForFrame(ti->start());
 			if (xS < x0)
@@ -245,7 +245,7 @@
 			xE = v->getXForFrame(ti->end());
 			draw = drawEnd = true;
 		}
-		else if (ti->start <= frame0 && ti->end >= frame1)
+		else if (ti->start() <= frame0 && ti->end() >= frame1)
 		{
 			xS = x0;
 			xE = x1;
--- a/sv/audioio/AudioCallbackPlaySource.cpp	Mon Oct 22 13:55:21 2007 +0000
+++ b/sv/audioio/AudioCallbackPlaySource.cpp	Mon Oct 22 13:59:27 2007 +0000
@@ -5,7 +5,7 @@
     An audio file viewer and annotation editor.
     Centre for Digital Music, Queen Mary, University of London.
     This file copyright 2006 Chris Cannam and QMUL.
-    
+    +
     This program is free software; you can redistribute it and/or
     modify it under the terms of the GNU General Public License as
     published by the Free Software Foundation; either version 2 of the
--- a/sv/main/main.cpp	Mon Oct 22 13:55:21 2007 +0000
+++ b/sv/main/main.cpp	Mon Oct 22 13:59:27 2007 +0000
@@ -168,7 +168,8 @@
     cleanupMutex.lock();
     std::cerr << "signalHandler: cleaning up and exiting" << std::endl;
     TempDirectory::getInstance()->cleanup();
-    exit(0); // without releasing mutex
+    
+	exit(0); // without releasing mutex
 }
 
 class SVApplication : public QApplication
@@ -342,7 +343,7 @@
 */
     int rv = application.exec();
 //    std::cerr << "application.exec() returned " << rv << std::endl;
-
+	
     cleanupMutex.lock();
     TempDirectory::getInstance()->cleanup();
     application.releaseMainWindow();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/sv/videoio/SDL_ffmpeg.cpp	Mon Oct 22 13:59:27 2007 +0000
@@ -0,0 +1,887 @@
+/*******************************************************************************
+*                                                                              *
+*   SDL_ffmpeg is a library for basic multimedia functionality.                *
+*   SDL_ffmpeg is based on ffmpeg.                                             *
+*                                                                              *
+*   Copyright (C) 2007  Arjan Houben                                           *
+*                                                                              *
+*   SDL_ffmpeg is free software: you can redistribute it and/or modify         *
+*   it under the terms of the GNU Lesser General Public License as published   *
+*	by the Free Software Foundation, either version 3 of the License, or any   *
+*   later version.                                                             *
+*                                                                              *
+*   This program is distributed in the hope that it will be useful,            *
+*   but WITHOUT ANY WARRANTY; without even the implied warranty of             *
+*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
+*   GNU Lesser General Public License for more details.                        *
+*                                                                              *
+*   You should have received a copy of the GNU Lesser General Public License   *
+*   along with this program.  If not, see <http://www.gnu.org/licenses/>.      *
+*                                                                              *
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef WIN32
+#include "SDL_ffmpeg.h"
+#include <SDL.h>
+#include <SDL_thread.h>
+#endif
+
+#ifdef __unix__
+#include <SDL/SDL.h>
+#include <SDL/SDL_thread.h>
+#endif
+#ifdef __cplusplus
+}
+#endif
+#include "../../sv/main/MainWindow.h"
+
+//const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
+//const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
+
+int FFMPEG_init_was_called = 0;
+
+SDL_ffmpegFile* SDL_ffmpegCreateFile() {
+
+    // create SDL_ffmpegFile pointer
+    SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
+    if(!file) return 0;
+	file->_ffmpeg=av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));
+    // create a semaphore for every file
+    file->decode = SDL_CreateSemaphore(1);
+
+    // allocate room for VStreams
+    file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
+    if(!file->vs) {
+        free( file );
+        return 0;
+    }
+
+    // allocate room for AStreams
+    file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
+    if(!file->as) {
+        free( file );
+        return 0;
+    }
+
+    // initialize variables with standard values
+    file->audioStream = -1;
+    file->videoStream = -1;
+
+    file->offset = 0;
+    file->videoOffset = 0;
+    file->startTime = 0;
+
+    file->threadID = 0;
+
+    return file;
+}
+
+void SDL_ffmpegFree(SDL_ffmpegFile* file) {
+
+    SDL_ffmpegStopDecoding(file);
+
+    SDL_ffmpegFlush(file);
+
+    free(file);
+}
+
+SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {
+
+    
+	// register all codecs
+    if(!FFMPEG_init_was_called) {
+        FFMPEG_init_was_called = 1;
+        av_register_all();
+    }
+	
+    // open new ffmpegFile
+    SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
+    if(!file) return 0;
+
+    // information about format is stored in file->_ffmpeg
+
+    // open the file
+    if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
+        fprintf(stderr, "could not open \"%s\"\n", filename);
+        free(file);
+        return 0;
+    }
+
+    // retrieve format information
+    if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
+        fprintf(stderr, "could not retrieve stream info\n");
+        free(file);
+        return 0;
+    }
+
+    // dump info to logfile
+    // dump_format(file->_ffmpeg, 0, filename, 0);
+
+    // find the streams in the file
+    file->VStreams = 0;
+    file->AStreams = 0;
+    file->threadActive = 0;
+
+    // iterate through all the streams and store audio/video streams
+    size_t i;
+    for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {
+
+        if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
+
+            // if this is a packet of the correct type we create a new stream
+            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
+
+            if(stream) {
+                // we set our stream to zero
+                memset(stream, 0, sizeof(SDL_ffmpegStream));
+
+                // save unique streamid
+                stream->id = i;
+
+                // the timeBase is what we use to calculate from/to pts
+                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
+
+                // save width, height and pixFmt of our outputframes
+                stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
+                stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
+                stream->pixFmt = PIX_FMT_RGB24;
+
+                // _ffmpeg holds data about streamcodec
+                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
+
+                // get the correct decoder for this stream
+                AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);
+
+                if(!codec) {
+                    free(stream);
+                    fprintf(stderr, "could not find codec\n");
+                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
+                    free(stream);
+                    fprintf(stderr, "could not open decoder\n");
+                } else {
+
+                    // copy metadata from AVStream into our stream
+                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
+                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
+                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
+                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
+                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
+                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
+
+                    stream->audio = 0;
+                    stream->size = 0;
+                    stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
+
+                    file->vs[file->VStreams] = stream;
+                    file->VStreams++;
+
+                    // create semaphore for thread-safe use
+                    stream->sem = SDL_CreateSemaphore(1);
+                }
+            }
+        } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
+
+            // if this is a packet of the correct type we create a new stream
+            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
+
+            if(stream) {
+                // we set our stream to zero
+                memset(stream, 0, sizeof(SDL_ffmpegStream));
+
+                // save unique streamid
+                stream->id = i;
+
+                // the timeBase is what we use to calculate from/to pts
+                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
+
+                // _ffmpeg holds data about streamcodec
+                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
+
+                stream->width = 0;
+                stream->height = 0;
+                stream->pixFmt = PIX_FMT_RGB24;
+
+                // get the correct decoder for this stream
+                AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);
+
+                if(!codec) {
+                    free( stream );
+                    fprintf(stderr, "could not find codec\n");
+                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
+                    free( stream );
+                    fprintf(stderr, "could not open decoder\n");
+                } else {
+
+                    // copy metadata from AVStream into our stream
+                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
+                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
+                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
+                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
+                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
+                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
+
+                    stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
+                    stream->size = 0;
+                    stream->imageBuffer = 0;
+
+                    file->as[file->AStreams] = stream;
+                    file->AStreams++;
+
+                    // create semaphore for thread-safe use
+                    stream->sem = SDL_CreateSemaphore(1);
+                }
+            }
+        }
+    }
+
+    return file;
+}
+
+SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {
+
+	MainWindow * MWinsA=MainWindow::instance();
+    if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;
+
+    SDL_SemWait(file->vs[file->videoStream]->sem);
+
+    bufferImage *option = 0;
+    int i;
+
+    for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
+
+        // if this entry does not exist, continue
+        if(!file->vs[file->videoStream]->imageBuffer[i]) continue;
+		
+
+		int64_t pos=MWinsA->Get_CurAudioTime();
+        // do we have an image that should have been shown?
+        if(file->vs[file->videoStream]->imageBuffer[i]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000) {
+
+            // if this is the first option we find, we simply save it
+            if(!option) {
+
+                option = file->vs[file->videoStream]->imageBuffer[i];
+
+                // set to 0 so we know this position in the buffer is available again
+                file->vs[file->videoStream]->imageBuffer[i] = 0;
+
+            } else {
+
+                // we found a newer possible timestamp, we delete the older one
+                if( option->timestamp < file->vs[file->videoStream]->imageBuffer[i]->timestamp) {
+
+                    // this image is too old, we discard it
+                    SDL_FreeSurface( option->img );
+
+                    // free old option
+                    free( option );
+
+                    // new pointer to position in container
+                    option = file->vs[file->videoStream]->imageBuffer[i];
+
+                    // set to 0 so we know this position in the buffer is available again
+                    file->vs[file->videoStream]->imageBuffer[i] = 0;
+                }
+            }
+        }
+    }
+
+    // if we did not find an option, we exit
+    if(!option) {
+        // release the lock
+        SDL_SemPost(file->vs[file->videoStream]->sem);
+        return 0;
+    }
+
+    // we did find an option: keep the image data and free its container
+    // (the stream semaphore stays locked until SDL_ffmpegReleaseVideo is called)
+    SDL_Surface *img = option->img;
+    free( option );
+    return img;
+}
+
+int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {
+
+    // if there was no valid video stream, we should not release
+    if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;
+
+    // free surface
+    SDL_FreeSurface(bmp);
+
+    // release semaphore if needed
+    if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
+        SDL_SemPost(file->vs[file->videoStream]->sem);
+    }
+
+    return 0;
+}
+
+SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {
+
+    // check if we have any audiostreams
+    if(!file->AStreams) return 0;
+
+    // check if the requested id is possible
+    if(audioID >= file->AStreams) return 0;
+
+    // return audio stream linked to audioID
+    return file->as[audioID];
+}
+
+int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {
+
+    // check if we have any audiostreams
+    if(!file->AStreams) return -1;
+
+    // check if the requested id is possible
+    if(audioID >= file->AStreams) return -1;
+
+    // set current audiostream to stream linked to audioID
+    file->audioStream = audioID;
+
+    return 0;
+}
+
+SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {
+
+    // check if we have any videostreams
+    if(!file->VStreams) return 0;
+
+    // check if the requested id is possible
+    if(videoID >= file->VStreams) return 0;
+
+    // return video stream linked to videoID
+    return file->vs[videoID];
+}
+
+int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {
+
+    // check if we have any videostreams
+    if(!file->VStreams) return -1;
+
+    // check if the requested id is possible
+    if(videoID >= file->VStreams) return -1;
+
+    // set current videostream to stream linked to videoID
+    file->videoStream = videoID;
+
+    return 0;
+}
+
+int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {
+
+    // start a thread that continues to fill audio/video buffers
+    if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);
+
+    return 0;
+}
+
+int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {
+
+    // stop decode thread
+    file->threadActive = 0;
+    if(file->threadID) SDL_WaitThread(file->threadID, 0);
+
+    // set threadID to zero, so we can check for concurrent threads
+    file->threadID = 0;
+
+    return -1;
+}
+
+int SDL_ffmpegDecodeThread(void* data) {
+	static struct SwsContext *img_convert_ctx;
+    // unpack the void pointer
+    SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;
+
+    // flag this thread as active, used for stopping
+    file->threadActive = 1;
+
+    // create a packet for our data
+    AVPacket pack;
+
+    // reserve some pointers for use in loop
+    AVFrame *inFrame, *inFrameRGB;
+
+    // allocate a frame
+    inFrame = avcodec_alloc_frame();
+
+    // allocate another frame for unknown->RGB conversion
+    inFrameRGB = avcodec_alloc_frame();
+
+    if(SDL_ffmpegValidVideo(file)) {
+        // allocate buffer
+        uint8_t *inVideoBuffer = (uint8_t*)malloc(  avpicture_get_size(file->vs[file->videoStream]->pixFmt,
+                                                        file->vs[file->videoStream]->width,
+                                                        file->vs[file->videoStream]->height) );
+
+        // put buffer into our reserved frame
+        avpicture_fill( (AVPicture*)inFrameRGB,
+                        inVideoBuffer,
+                        file->vs[file->videoStream]->pixFmt,
+                        file->vs[file->videoStream]->width,
+                        file->vs[file->videoStream]->height);
+    }
+
+    // allocate temporary audiobuffer
+    int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );
+
+    // reserve integer for use in loop
+    int got_frame;
+
+    while(file->threadActive) {
+
+        // read a packet from the file
+        if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
+            // thread is idle
+            SDL_Delay(10);
+            continue;
+        }
+		if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
+			SDL_Delay(1);
+			continue;
+		}
+
+        // we got a packet, lets handle it
+
+        // let's start by entering the decode semaphore
+        SDL_SemWait(file->decode);
+
+        // If it's an audio packet from our stream...
+        if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {
+
+            uint8_t *data = pack.data;
+            int size = pack.size;
+            int len;
+
+            while(size > 0 && file->threadActive) {
+
+                // Decode the packet
+                len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);
+
+                // if error, we skip the frame
+                if(len < 0 || !got_frame) {
+                    size = 0;
+                    break;
+                }
+
+                // advance by the number of input bytes consumed
+                data += len;
+                size -= len;
+
+                // if the audiobuffer is full, the thread waits
+                while(  file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
+                        file->threadActive) {
+                    SDL_Delay(5);
+                }
+
+                // write an audiopts
+                int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;
+
+                // if the audio buffer is empty
+                if(!file->as[file->audioStream]->size) {
+
+                    // we set a new pts
+                    file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;
+
+                    // we set totalbytes to zero, as this represents the amount
+                    // of bytes that were played since our last 'hardPts'
+                    file->as[file->audioStream]->totalBytes = 0;
+                }
+
+                // no need to store old samples
+                if(audiopts >= SDL_ffmpegGetPosition(file)) {
+
+                    // enter audio semaphore
+                    SDL_SemWait(file->as[file->audioStream]->sem);
+
+                        // copy data from temporary buffer to streambuffer
+                        memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);
+
+                        // set the new size of the audiobuffer
+                        file->as[file->audioStream]->size += got_frame;
+
+                    // we leave the audio semaphore
+                    SDL_SemPost(file->as[file->audioStream]->sem);
+                }
+            }
+        }
+
+        // If it's a video packet from our video stream...
+        if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {
+
+            got_frame = 0;
+
+            // Decode the packet
+            avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);
+
+            if(got_frame) {
+
+                // create imagebuffer
+                bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );
+
+                // write timestamp into the buffer
+                buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;
+
+                // useful when dealing with B frames
+                if(pack.dts == AV_NOPTS_VALUE) {
+                    // if we did not get a valid timestamp, we make one up based on the last
+                    // valid timestamp + the duration of a frame
+                    buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
+                }
+
+                // if new timestamp is from future, we proceed
+			//	if(buf->timestamp >= SDL_ffmpegGetPosition(file)) 
+			//	{
+					if (img_convert_ctx == NULL) {
+							img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
+                                                 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
+                                                 file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
+                                                 file->vs[file->videoStream]->pixFmt,
+                                                 sws_flags, NULL, NULL, NULL);
+							if (img_convert_ctx == NULL) {
+								fprintf(stderr, "Cannot initialize the conversion context\n");
+								exit(1);
+							}
+					}
+				
+				sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
+                      0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);
+				
+                    // we convert whatever type of data we got to RGB24
+                  /* img_convert((AVPicture*)inFrameRGB,
+                        file->vs[file->videoStream]->pixFmt,
+                        (AVPicture*)inFrame,
+                        ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
+                        file->vs[file->videoStream]->width,
+                        file->vs[file->videoStream]->height);
+*/
+                    // allocate image room
+                    buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
+                        file->vs[file->videoStream]->width,
+                        file->vs[file->videoStream]->height,
+                        24, 0x0000FF, 0x00FF00, 0xFF0000, 0);
+
+                    // copy image data to image room
+                    memcpy(buf->img->pixels, inFrameRGB->data[0],
+                        file->vs[file->videoStream]->width * file->vs[file->videoStream]->height * 3);
+
+                    // we write the lastTimestamp we got
+                    file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;
+
+                    int i;
+                    int again = 1;
+
+                    // keep trying to fit in buffer, until the data was actually placed in the buffer
+                    while(again && file->threadActive) {
+
+                        // we enter the video semaphore
+                        SDL_SemWait(file->vs[file->videoStream]->sem);
+
+                            // loop through all positions in buffer until an empty
+                            // space was found
+                            for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
+                                // if this place in the buffer is empty we write our new frame
+                                if(file->vs[file->videoStream]->imageBuffer[i] == 0) {
+                                    file->vs[file->videoStream]->imageBuffer[i] = buf;
+                                    // we placed our image in the buffer, moving on
+                                    again = 0;
+                                    break;
+                                }
+                            }
+
+                        // we leave the video semaphore
+                        SDL_SemPost(file->vs[file->videoStream]->sem);
+
+                        // frames aren't being released every ms, so we can take some
+                        // time before we try and fit our new image again
+                        if(again) SDL_Delay(5);
+                    }
+    //            } 
+				//else {
+    //                // if our decoded frame was too old, we don't bother putting
+    //                // it in our buffer
+    //                free( buf );
+    //            }
+            }
+        }
+        // we leave the decode semaphore
+        SDL_SemPost(file->decode);
+		if ((file->skipAudio)&&(file->delay)) 
+			SDL_Delay(3);
+    }
+    // if we stop this thread, we can release the packet we reserved
+    av_free_packet(&pack);
+
+    return 0;
+}
+
+int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {
+
+    // if the seekposition is out of bounds, return
+    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;
+
+    // start by flushing the buffers
+    SDL_ffmpegFlush(file);
+
+    // we enter the decode semaphore so the decode thread cannot be working on
+    // data we are trying to flush
+    SDL_SemWait(file->decode);
+
+    // if the stream has an offset, add it to the start time
+    int64_t startOffset = 0;
+    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
+        // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
+        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
+    }
+//if (file->skipAudio) startOffset=0;
+    // calculate the final timestamp for the seek action this is in AV_TIME_BASE fractional seconds
+    startOffset += (timestamp * AV_TIME_BASE) / 1000;
+
+    // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
+    // closest to the point we want, resulting in an earlier position if the jump
+    // could not go to the exact point we wanted
+    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
+        SDL_Delay(5);
+        // set some values in our file so we know where to start playing
+        file->offset = timestamp;
+        file->startTime = av_gettime()/1000;//SDL_GetTicks();
+
+        // if we have a valid video, we probably have some data we want to flush
+        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
+
+            // flushing happens inside the semaphore as not to interfere with the
+            // decoding thread
+            SDL_SemWait(file->vs[file->videoStream]->sem);
+                avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
+            SDL_SemPost(file->vs[file->videoStream]->sem);
+        }
+
+        // same goes for audio, if there is data, we flush it
+        if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
+
+            // make sure this is done thread-save, so inside the appropriate
+            // semaphore
+            SDL_SemWait(file->as[file->audioStream]->sem);
+                avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
+            SDL_SemPost(file->as[file->audioStream]->sem);
+        }
+
+        // then there is our flush call
+        SDL_ffmpegFlush(file);
+
+        // and we are done, lets release the decode semaphore so the decode
+        // thread can move on, filling buffer from our new position
+        SDL_SemPost(file->decode);
+
+        return 0;
+    }
+
+    // if, for some reason, we could not seek, we still should flush our buffers
+    SDL_ffmpegFlush(file);
+
+    // and release our lock on the decodethread
+    SDL_SemPost(file->decode);
+
+    return -1;
+}
+
+int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {
+
+    // same thing as normal seek, just take into account the current position
+    return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
+}
+
+int SDL_ffmpegFlush(SDL_ffmpegFile *file) {
+
+    // if we have a valid audio stream, we flush it
+    if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
+
+        // flush audiobuffer from semaphore, be thread-safe!
+        SDL_SemWait(file->as[file->audioStream]->sem);
+
+            file->as[file->audioStream]->size = 0;
+
+        SDL_SemPost(file->as[file->audioStream]->sem);
+    }
+
+    // if we have a valid video stream, we flush some more
+    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
+
+        // flush videobuffer
+        int i;
+
+        // again, be thread safe!
+        SDL_SemWait(file->vs[file->videoStream]->sem);
+
+            // make sure we delete all frames from buffer
+            for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
+
+                // if this entry does not exist, continue
+                if(!file->vs[file->videoStream]->imageBuffer[i]) continue;
+
+                // free the actual image data
+                SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );
+
+                // and free the struct containing it
+                free( file->vs[file->videoStream]->imageBuffer[i] );
+
+                // set position in buffer to 0, so we know it is empty
+                file->vs[file->videoStream]->imageBuffer[i] = 0;
+            }
+
+        SDL_SemPost(file->vs[file->videoStream]->sem);
+    }
+
+    return 0;
+}
+
+int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {
+
+    // no valid audio, means no audio to get
+    if( !SDL_ffmpegValidAudio(file) || file->pause||file->skipAudio ) return 0;
+
+    // working on audiobuffer should always be done from semaphore
+    SDL_SemWait(file->as[file->audioStream]->sem);
+
+        // if we ask for more audio data than we can give, we send what we can
+        // actually give, writing the number of bytes into len
+        if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;
+
+        // decrease the size of our audiobuffer by len
+        file->as[file->audioStream]->size -= *len;
+
+        // len represents the number of bytes we sent, so we increase the total
+        file->as[file->audioStream]->totalBytes += *len;
+
+        // the videoOffset makes sure we are always in sync with the audio;
+        // it is actually the difference between the position where we are in the
+        // stream (GetPosition) and where we should be (pts)
+        // we use the same offset when selecting the current video frame
+        file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;
+
+        // we calculate the new pts for our audio data based on the hardPts
+        // (that is the one we got from ffmpeg) and then calculating how far we
+        // have come since
+        file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
+        // since we use 16 bits per sample, we divide totalBytes by 2 before dividing by the sample rate
+        file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);
+
+        // we return the audiobuffer, notice we are still in the audiosemaphore!
+        // we only leave this by calling SDL_ffmpegReleaseAudio
+        return file->as[file->audioStream]->audio;
+}
+
+int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {
+
+        // no audio, means no releasing
+        if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;
+
+        // this call should be paired with SDL_ffmpegGetAudio, as it provides us
+        // with the correct length so we move the correct amount of data
+        memmove( file->as[file->audioStream]->audio,
+            file->as[file->audioStream]->audio+len,
+            file->as[file->audioStream]->size );
+
+    // work on audiodata is done, so we release the semaphore
+    SDL_SemPost(file->as[file->audioStream]->sem);
+
+    return 0;
+}
+
+int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
+
+    // return the current play position of our file, in milliseconds
+    //MainWindow * MWinsA=MainWindow::instance();
+    if (file->skipAudio) {
+        return (av_gettime()/1000 + file->offset - file->startTime);
+        //int64_t pos=MWinsA->Get_CurAudioTime();
+        //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
+    }
+    else
+        return (av_gettime()/1000 + file->offset - file->startTime);//SDL_GetTicks();
+}
+
+SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {
+
+    // create audio spec
+    SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );
+
+    if(spec) {
+        spec->format = AUDIO_S16SYS;
+        spec->samples = samples;
+        spec->userdata = file;
+        spec->callback = (void (*)(void *, Uint8 *, int))(callback);
+        spec->freq = 48000;
+        spec->channels = 2;
+
+        // if we have a valid audiofile, we can use its data to create a
+        // more appropriate audio spec
+        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
+            spec->freq = file->as[file->audioStream]->sampleRate;
+            spec->channels = file->as[file->audioStream]->channels;
+        }
+    }
+
+    return spec;
+}
+
+int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {
+
+    // returns the duration of the entire file, please note that ffmpeg doesn't
+    // always get this value right! so don't bet your life on it...
+    return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
+}
+
+int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {
+
+    if(!w || !h) return -1;
+
+    // if we have a valid video file selected, we use it
+    // if not, we send default values and return.
+    // by checking the return value you can check if you got a valid size
+    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
+        *w = file->vs[file->videoStream]->width;
+        *h = file->vs[file->videoStream]->height;
+        return 0;
+    }
+
+    *w = 320;
+    *h = 240;
+    return -1;
+}
+
+int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {
+
+    // this function is used to check if we selected a valid audio stream
+    if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;
+
+    return 1;
+}
+
+int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {
+
+    // this function is used to check if we selected a valid video stream
+    if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;
+
+    return 1;
+}
+
+int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {
+
+    // by putting 0 into state, we play the file
+    // this behaviour is analogous to SDL audio
+    file->pause = state;
+
+    if(!file->pause) {
+        file->startTime = av_gettime()/1000;//SDL_GetTicks();
+    }
+
+    return 0;
+}
+
+int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
+    return file->pause;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/sv/videoio/SDL_ffmpeg.h	Mon Oct 22 13:59:27 2007 +0000
@@ -0,0 +1,172 @@
+/*******************************************************************************
+*                                                                              *
+*   SDL_ffmpeg is a library for basic multimedia functionality.                *
+*   SDL_ffmpeg is based on ffmpeg.                                             *
+*                                                                              *
+*   Copyright (C) 2007  Arjan Houben                                           *
+*                                                                              *
+*   SDL_ffmpeg is free software: you can redistribute it and/or modify         *
+*   it under the terms of the GNU Lesser General Public License as published   *
+*	by the Free Software Foundation, either version 3 of the License, or any   *
+*   later version.                                                             *
+*                                                                              *
+*   This program is distributed in the hope that it will be useful,            *
+*   but WITHOUT ANY WARRANTY; without even the implied warranty of             *
+*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
+*   GNU Lesser General Public License for more details.                        *
+*                                                                              *
+*   You should have received a copy of the GNU Lesser General Public License   *
+*   along with this program.  If not, see <http://www.gnu.org/licenses/>.      *
+*                                                                              *
+*******************************************************************************/
+
+#ifndef SDL_FFMPEG_INCLUDED
+#define SDL_FFMPEG_INCLUDED
+#ifdef __cplusplus
+extern "C" {
+#endif
+#ifdef WIN32
+    #ifdef SDL_FFMPEG_LIBRARY
+        #define __STDC_LIMIT_MACROS
+        #define __STDC_CONSTANT_MACROS
+        #include "avformat.h"
+        #include "swscale.h"
+    #endif
+    #include "SDL_thread.h"
+    #include "SDL.h"
+#endif
+
+#ifdef __unix__
+    #include "SDL/SDL_thread.h"
+    #include "SDL/SDL.h"
+    #ifdef SDL_FFMPEG_LIBRARY
+        #include "ffmpeg/avformat.h"
+    #endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#define SWS_BICUBIC           4
+const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 60;
+const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512*512;
+static int sws_flags = SWS_BICUBIC;
+// we pack our decoded images into bufferImage structs
+typedef struct bufferImage {
+    // pointer to image data
+    SDL_Surface *img;
+    // timestamp of current image
+    int64_t timestamp;
+} bufferImage;
+
+// this is the basic stream for SDL_ffmpeg
+typedef struct SDL_ffmpegStream {
+
+    // pointer to ffmpeg data, internal use only!
+    // points to AVCodecContext
+    void *_ffmpeg;
+
+    // pixel format used for the output frames
+    int pixFmt;
+
+    // semaphore for current stream
+    SDL_sem *sem;
+
+    // audio/video buffers
+    bufferImage **imageBuffer;
+    int8_t *audio;
+
+    // userinfo
+    double frameRate[2];
+    char language[4];
+    int sampleRate;
+    int channels;
+    char codecName[32];
+    double timeBase;
+    uint16_t width;
+    uint16_t height;
+
+    // extra data for audio
+    int32_t size;
+    int id;
+    int64_t lastTimeStamp;
+    int64_t pts, hardPts;
+    int64_t totalBytes;
+
+} SDL_ffmpegStream;
+
+typedef struct SDL_ffmpegFile {
+
+    // pointer to ffmpeg data, internal use only!
+    // points to AVFormatContext
+    void *_ffmpeg;
+
+    // our streams
+    SDL_ffmpegStream **vs;
+    SDL_ffmpegStream **as;
+
+    // data used for syncing/searching
+    int64_t offset, videoOffset, startTime;
+    int pause;
+
+    // streams and data about threads
+    int VStreams, AStreams, videoStream, audioStream, threadActive, videoThreadActive;
+    SDL_Thread *threadID, *videoThread;
+    SDL_sem *decode;
+    int skipAudio;
+    int skipVideo;
+    int delay;
+    int64_t audioTime;
+} SDL_ffmpegFile;
+
+
+int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file);
+
+int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file);
+
+SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file);
+
+int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp);
+
+SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID);
+
+int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID);
+
+SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID);
+
+int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID);
+
+SDL_ffmpegFile* SDL_ffmpegCreateFile();
+
+void SDL_ffmpegFree(SDL_ffmpegFile* file);
+
+SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename);
+
+int SDL_ffmpegDecodeThread(void* data);
+
+int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp);
+
+int SDL_ffmpegSeekRelative(SDL_ffmpegFile* file, int64_t timestamp);
+
+int SDL_ffmpegFlush(SDL_ffmpegFile *file);
+
+int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len);
+
+int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len);
+
+int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file);
+
+SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback);
+
+int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h);
+
+int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file);
+
+int SDL_ffmpegValidAudio(SDL_ffmpegFile *file);
+
+int SDL_ffmpegValidVideo(SDL_ffmpegFile *file);
+
+int SDL_ffmpegPause(SDL_ffmpegFile *file, int state);
+
+int SDL_ffmpegGetState(SDL_ffmpegFile *file);
+
+#endif // SDL_FFMPEG_INCLUDED
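
SDL_ffmpegGetAudioSpec() fills an SDL_AudioSpec whose userdata points back at the SDL_ffmpegFile, so a standard SDL audio callback can pull samples with the GetAudio/ReleaseAudio pair (VideoFileReader itself does not use this path; it decodes into its own cache instead). A hedged sketch of such a callback:

    #include <string.h>
    #include "SDL_ffmpeg.h"

    // Illustrative SDL audio callback fed by SDL_ffmpegGetAudio().
    static void audioCallback(void *userdata, Uint8 *stream, int len)
    {
        SDL_ffmpegFile *file = (SDL_ffmpegFile *)userdata;
        int got = len;
        int8_t *audio = SDL_ffmpegGetAudio(file, &got);    // got <= len afterwards
        if (!audio) {
            memset(stream, 0, len);                        // nothing to play: silence
            return;
        }
        memcpy(stream, audio, got);
        SDL_ffmpegReleaseAudio(file, got);                 // paired call
        if (got < len) memset(stream + got, 0, len - got); // pad with silence
    }

    // Typical wiring:
    //   SDL_AudioSpec *spec = SDL_ffmpegGetAudioSpec(file, 512, (void *)audioCallback);
    //   SDL_OpenAudio(spec, 0);
    //   SDL_PauseAudio(0);
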