view sv/videoio/SDL_ffmpeg.cpp @ 167:2ac52ea3c1c4

Video mouse events are now handled in SDL. The code is much improved, but there is a display bug when zooming in and out.
author benoitrigolleau
date Fri, 16 Nov 2007 15:18:44 +0000
parents c946c19e6329
children 269562a2b89c
line wrap: on
line source
/*******************************************************************************
*                                                                              *
*   SDL_ffmpeg is a library for basic multimedia functionality.                *
*   SDL_ffmpeg is based on ffmpeg.                                             *
*                                                                              *
*   Copyright (C) 2007  Arjan Houben                                           *
*                                                                              *
*   SDL_ffmpeg is free software: you can redistribute it and/or modify         *
*   it under the terms of the GNU Lesser General Public License as published   *
*	by the Free Software Foundation, either version 3 of the License, or any   *
*   later version.                                                             *
*                                                                              *
*   This program is distributed in the hope that it will be useful,            *
*   but WITHOUT ANY WARRANTY; without even the implied warranty of             *
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               *
*   GNU Lesser General Public License for more details.                        *
*                                                                              *
*   You should have received a copy of the GNU Lesser General Public License   *
*   along with this program.  If not, see <http://www.gnu.org/licenses/>.      *
*                                                                              *
*******************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef WIN32
#include "SDL_ffmpeg.h"
#include <SDL.h>
#include <SDL_thread.h>
#include <stdio.h>
#include <Windows.h>
#endif

#ifdef __unix__
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#endif
#undef main
#ifdef __cplusplus
}
#endif
#include "../../sv/main/MainWindow.h"
#include <time.h>

//const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
//const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
// zoom factor defined elsewhere in the application (GUI side); drives the
// output frame size computed in the decode thread
extern float zoomFivan;
// one-shot guard so av_register_all() runs only once per process
int FFMPEG_init_was_called = 0;
// debug/trace log files, opened in SDL_ffmpegCreateFile and written to
// from SDL_ffmpegGetVideo -- NOTE(review): never closed anywhere visible here
FILE *pFile, *tFile;
// timing scratch variables used for profiling the decode path
int64_t Time,Time1;
// accumulated real time between frame deliveries (see SDL_ffmpegGetVideo)
int64_t realt=0;
  
SDL_ffmpegFile* SDL_ffmpegCreateFile() {

    // Allocate and initialize a new SDL_ffmpegFile handle.
    // Returns 0 on allocation failure. The caller owns the handle and
    // must release it with SDL_ffmpegFree().

    // create SDL_ffmpegFile pointer
    SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
    if(!file) return 0;

    // NOTE(review): SDL_ffmpegOpen later passes &file->_ffmpeg to
    // av_open_input_file, which allocates its own context -- this
    // preallocated one looks leaked in that path; confirm before removing.
    file->_ffmpeg = av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));

    // create a semaphore for every file, used to gate the decode thread
    file->decode = SDL_CreateSemaphore(1);

    Time = 0;
    Time1 = 0;

    // open the debug/trace log files (written from SDL_ffmpegGetVideo)
    fopen_s (&pFile,"myfile.txt","w");
    fopen_s (&tFile,"Timestampfile.txt","w");

    // allocate room for VStreams
    file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->vs) {
        // FIX: also destroy the semaphore so a failed create does not leak it
        SDL_DestroySemaphore( file->decode );
        free( file );
        return 0;
    }

    // allocate room for AStreams
    file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->as) {
        // FIX: the original leaked file->vs and the semaphore on this path
        SDL_DestroySemaphore( file->decode );
        free( file->vs );
        free( file );
        return 0;
    }

    // initialize variables with standard values; -1 means "no stream selected"
    file->audioStream = -1;
    file->videoStream = -1;

    file->offset = 0;
    file->videoOffset = 0;
    file->startTime = 0;

    // no decode thread running yet
    file->threadID = 0;

    return file;
}

void SDL_ffmpegFree(SDL_ffmpegFile* file) {

    // Stop decoding, drop buffered data, and release the file handle.
    // FIX: the original freed only the top-level struct, leaking the stream
    // arrays, per-stream buffers, semaphores and the decode semaphore.
    int i;

    // tolerate a NULL handle, like free() does
    if(!file) return;

    SDL_ffmpegStopDecoding(file);

    SDL_ffmpegFlush(file);

    // release every video stream created in SDL_ffmpegOpen
    for(i=0; i<file->VStreams; i++) {
        SDL_ffmpegStream *s = file->vs[i];
        if(!s) continue;
        SDL_DestroySemaphore( s->sem );
        // imageBuffer entries were already freed by SDL_ffmpegFlush
        free( s->imageBuffer );
        // s->_ffmpeg points into the format context; not owned here
        free( s );
    }

    // release every audio stream
    for(i=0; i<file->AStreams; i++) {
        SDL_ffmpegStream *s = file->as[i];
        if(!s) continue;
        SDL_DestroySemaphore( s->sem );
        free( s->audio );
        free( s );
    }

    free( file->vs );
    free( file->as );

    SDL_DestroySemaphore( file->decode );

    // NOTE(review): the AVFormatContext in file->_ffmpeg is not closed here;
    // av_close_input_file would be wrong for a handle that was never opened,
    // so ownership needs to be confirmed before adding it.
    free(file);
}

SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {

    // Open a media file and prepare all contained audio/video streams for
    // decoding. Returns a new SDL_ffmpegFile on success, 0 on failure.
    // The caller owns the returned handle (release with SDL_ffmpegFree).

    // register all codecs, exactly once per process
    if(!FFMPEG_init_was_called) {
        FFMPEG_init_was_called = 1;
        av_register_all();
    }

    // open new ffmpegFile
    SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
    if(!file) return 0;

    // information about format is stored in file->_ffmpeg

    // open the file
    if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
        fprintf(stderr, "could not open \"%s\"\n", filename);
        // FIX: release everything SDL_ffmpegCreateFile allocated; the
        // original leaked file->vs, file->as and the decode semaphore here
        SDL_DestroySemaphore( file->decode );
        free( file->vs );
        free( file->as );
        free(file);
        return 0;
    }

    // retrieve format information
    if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
        fprintf(stderr, "could not retrieve video stream info");
        // FIX: close the demuxer and free all handle resources (was leaked)
        av_close_input_file( (AVFormatContext *)(file->_ffmpeg) );
        SDL_DestroySemaphore( file->decode );
        free( file->vs );
        free( file->as );
        free(file);
        return 0;
    }

    // dump info to logfile
    // dump_format(file->_ffmpeg, 0, filename, 0);

    // find the streams in the file
    file->VStreams = 0;
    file->AStreams = 0;
    file->threadActive = 0;

    // iterate through all the streams and store audio/video streams
    size_t i;
    for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {

        if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {

            // if this is a packet of the correct type we create a new stream
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts,
                // converted to milliseconds
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // save width, height and pixFmt of our outputframes
                stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
                stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
                stream->pixFmt = PIX_FMT_RGB24;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);

                if(!codec) {
                    free(stream);
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free(stream);
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    // video streams carry no audio buffer, only a frame
                    // ring-buffer indexed by writeImage/readImage
                    stream->audio = 0;
                    stream->size = 0;
                    stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
                    stream->writeImage = 0;
                    stream->readImage = 0;
                    file->vs[file->VStreams] = stream;
                    file->VStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {

            // if this is a packet of the correct type we create a new stream
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts,
                // converted to milliseconds
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                stream->width = 0;
                stream->height = 0;
                stream->pixFmt = PIX_FMT_RGB24;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);

                if(!codec) {
                    free( stream );
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free( stream );
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    // audio streams carry a sample buffer, no image buffer
                    stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
                    stream->size = 0;
                    stream->imageBuffer = 0;

                    file->as[file->AStreams] = stream;
                    file->AStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        }
    }

    return file;
}

SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {

	// Return the buffered video frame whose timestamp is due at the current
	// audio position, or 0 when no frame is ready. The returned surface is
	// owned by the caller (release via SDL_ffmpegReleaseVideo).
	// FIX: all fprintf calls below used "%u" for int64_t arguments, which is
	// undefined behavior; specifiers corrected to "%lld" with explicit casts.
	MainWindow * MWinsA=MainWindow::instance();
    if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;

    // lock the video stream while we walk its frame ring-buffer
    SDL_SemWait(file->vs[file->videoStream]->sem);

    bufferImage *option = 0;
    //int i;
	float ratio;
	int64_t pos,pos1, pos2, timestamp;
    //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
		// current playback position, taken from the audio clock
		pos=MWinsA->Get_CurAudioTime();

		fprintf (pFile, "p: \t %lld\t", (long long)pos);
		//if (MWinsA->Get_HardwareBufferTime()==0)
		//	pos1=0;
		//else {
		//	pos1=MWinsA->Get_HardwareBufferTime();
		//	//fprintf (tFile, "%u\t", pos1);
		//	int64_t timeTemp;
		//	QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
		//	
		//	pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
		//	fprintf (pFile, "%u\t", pos1);
		//}
		//pos2=pos+pos1;
		fprintf (pFile, "%lld\n", (long long)pos);
		
        // consume every frame whose timestamp is already due; the newest
        // such frame wins, older ones are discarded
        while(((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage)>0)&&(file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000))//&& (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp >= pos - file->timebase+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000)) 
		{
			//pos=MWinsA->Get_CurAudioTime();
			//timestamp=file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp;
			//fprintf (tFile, "try: %d	%d\n", (pos+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000), timestamp);
			// do we have an image that should have been shown?
			//if(file->vs[file->videoStream]->imageBuffer[mod(file->vs[file->videoStream]->readImage,SDL_FFMPEG_MAX_BUFFERED_FRAMES)]->timestamp <= pos + (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000) {
				
				// if this is the first option we find, we simply save it
				if(!option) {

					option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];

					// set to 0 so we know this position in the buffer is available again
					file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
					file->vs[file->videoStream]->readImage++;

				} else {

					// we found a newer possible timestamp, we delete the older one
					if( option->timestamp < file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp) {

						// this image is too old, we discard it
						SDL_FreeSurface( option->img );

						// free old option
						free( option );

						// new pointer to position in container
						option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];

						// set to 0 so we know this position in the buffer is available again
						file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
						file->vs[file->videoStream]->readImage++;
					}
					else {
						// NOTE(review): this branch drops the buffer entry
						// without freeing its surface -- looks like a leak;
						// confirm against the decode thread's allocation
						file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]=0;
						file->vs[file->videoStream]->readImage++;
					}
				}

				
				pos=MWinsA->Get_CurAudioTime();	
				fprintf (pFile, "e:\t%lld\t", (long long)pos);
				//if (MWinsA->Get_HardwareBufferTime()==0)
				//	pos1=0;
				//else {
				//	pos1=MWinsA->Get_HardwareBufferTime();
				//	//fprintf (tFile, "%u\t", pos1);
				//	int64_t timeTemp;
				//	QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
			
				//	pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
				//	fprintf (pFile, "%u\t", pos1);
				//}
				//fprintf (pFile, "%u\n", pos2);
				//pos2=pos+pos1;
				//if (pos<pos2) pos=pos2;
		}
		//}
	//}
	// number of frames still waiting in the ring-buffer (for the trace log)
	int x=file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage;
    // if we did not found an option, we exit
    if(!option) {
        // release the lock
		/*timestamp=0;
		int64_t tt=av_gettime()/1000-file->timer;
		file->timer=av_gettime()/1000;
		realt+=tt;
		fprintf (tFile, "%u\t", realt);
		fprintf (tFile, "%u\t", tt);
		fprintf (tFile, "%u\t", pos);
		fprintf (tFile, "%u\n", timestamp);*/
        SDL_SemPost(file->vs[file->videoStream]->sem);
        return 0;
    }
	// measure wall-clock time since the previous delivered frame
	int64_t tt;
	QueryPerformanceCounter((LARGE_INTEGER *)(&tt));
	tt=tt/(file->countFreq)-file->timer;
	
	QueryPerformanceCounter((LARGE_INTEGER *)(&file->timer));
	file->timer=file->timer/(file->countFreq);
	realt+=tt;
	fprintf (tFile, "%d\t", x);
	fprintf (tFile, "%lld\t", (long long)realt);
	fprintf (tFile, "%lld\t", (long long)tt);
	// lag between the audio clock and the chosen frame, playback-speed scaled
	timestamp=(pos-option->timestamp+((AVFormatContext*)file->_ffmpeg)->start_time/1000)/MWinsA->getPlaySpeedVal();	
	fprintf (tFile, "%lld\t", (long long)pos);//+ (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000);
	fprintf (tFile, "%lld\n", (long long)timestamp);
    // we did found an option, so we return the imagedata
    // NOTE(review): the semaphore stays held here; SDL_ffmpegReleaseVideo is
    // expected to post it -- confirm all callers pair the two calls
    return option->img;
}

int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {

    // Release a surface previously handed out by SDL_ffmpegGetVideo and
    // unlock the video stream. Returns 0 on success, -1 when there is no
    // usable video stream.
    if( !SDL_ffmpegValidVideo(file) || file->skipVideo ) return -1;

    // hand the pixel data back to SDL
    SDL_FreeSurface(bmp);

    // unlock the stream semaphore, but only if it is currently held
    if( SDL_SemValue(file->vs[file->videoStream]->sem) == 0 ) {
        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}

SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {

    // Look up the audio stream registered under audioID.
    // Returns 0 when no audio streams exist or the id is out of range.
    if( file->AStreams && audioID < file->AStreams ) {
        return file->as[audioID];
    }

    return 0;
}

int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {

    // Make audioID the active audio stream.
    // Returns 0 on success, -1 when the id does not name an existing stream.
    if( !file->AStreams || audioID >= file->AStreams ) return -1;

    file->audioStream = audioID;

    return 0;
}

SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {

    // Look up the video stream registered under videoID.
    // Returns 0 when no video streams exist or the id is out of range.
    if( file->VStreams && videoID < file->VStreams ) {
        return file->vs[videoID];
    }

    return 0;
}

int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {

    // Make videoID the active video stream.
    // Returns 0 on success, -1 when the id does not name an existing stream.
    if( !file->VStreams || videoID >= file->VStreams ) return -1;

    file->videoStream = videoID;

    return 0;
}

int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {

    // Spawn the background thread that keeps the audio/video buffers
    // filled. A second call while the thread is alive is a no-op.
    if( file->threadID ) return 0;

    file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);

    return 0;
}

int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {

    // Ask the decode thread to exit its loop...
    file->threadActive = 0;

    // ...then join it, if one was ever started
    if( file->threadID ) {
        SDL_WaitThread(file->threadID, 0);
    }

    // clear the id so SDL_ffmpegStartDecoding can launch a fresh thread
    file->threadID = 0;

    // NOTE(review): returns -1 unconditionally; visible callers ignore the
    // value, so the original return is preserved as-is
    return -1;
}

// Background thread body: reads packets from the demuxer and fills the
// audio sample buffer and the video frame ring-buffer until threadActive
// is cleared. Runs under file->decode except while sleeping.
// NOTE(review): inFrame, inFrameRGB, inVideoBuffer and samples are never
// freed on thread exit -- looks like a leak; confirm before fixing.
int SDL_ffmpegDecodeThread(void* data) {
	// scaler context is rebuilt for every decoded frame (freed below);
	// static so a leftover pointer survives across thread restarts
	static struct SwsContext *img_convert_ctx;
    // unpack the void pointer
    SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;

    // flag this thread as active, used for stopping
    file->threadActive = 1;

    // create a packet for our data
    AVPacket pack;

    // reserve some pointers for use in loop
    AVFrame *inFrame, *inFrameRGB;

    // allocate a frame
    inFrame = avcodec_alloc_frame();

    // allocate another frame for unknown->RGB conversion
    inFrameRGB = avcodec_alloc_frame();

    if(SDL_ffmpegValidVideo(file)) {
        // allocate buffer sized for a full frame in the output pixel format
        uint8_t *inVideoBuffer = (uint8_t*)malloc(  avpicture_get_size(file->vs[file->videoStream]->pixFmt,
                                                        file->vs[file->videoStream]->width,
                                                        file->vs[file->videoStream]->height) );

        // put buffer into our reserved frame
        avpicture_fill( (AVPicture*)inFrameRGB,
                        inVideoBuffer,
                        file->vs[file->videoStream]->pixFmt,
                        file->vs[file->videoStream]->width,
                        file->vs[file->videoStream]->height);
    }

    // allocate temporary audiobuffer
    int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );

    // reserve integer for use in loop
    int got_frame;

    while(file->threadActive) {

        // read a packet from the file
        if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
            // thread is idle (end of file or read error); retry after a pause
            SDL_Delay(10);
            continue;
        }
		// when audio is skipped, drop its packets without decoding
		if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
			SDL_Delay(1);
			continue;
		}

        // we got a packet, lets handle it

        // let's start by entering the video semaphore
        SDL_SemWait(file->decode);

        // If it's a audio packet from our stream...
        if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {

            uint8_t *data = pack.data;
            int size = pack.size;
            int len;

            // a packet may contain several audio frames; decode them all
            while(size > 0 && file->threadActive) {

                // Decode the packet
                len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);

                // if error, we skip the frame
                if(len < 0 || !got_frame) {
                    size = 0;
                    break;
                }

                // change pointers
                // NOTE(review): advances by got_frame (decoded output bytes)
                // rather than len (consumed input bytes) -- confirm intent
                data += got_frame;
                size -= got_frame;

                // if the audiobuffer is full, the thread waits
                while(  file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
                        file->threadActive) {
                    SDL_Delay(5);
                }

                // write an audiopts (packet pts scaled to milliseconds)
                int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;

                // is the audioBuffer is empty
                if(!file->as[file->audioStream]->size) {

                    // we set a new pts
                    file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;

                    // we set totalbytes to zero, as this represents the amount
                    // of bytes that were played since our last 'hardPts'
                    file->as[file->audioStream]->totalBytes = 0;
                }

                // no need to store old samples
                if(audiopts >= SDL_ffmpegGetPosition(file)) {

                    // enter audio semaphore
                    SDL_SemWait(file->as[file->audioStream]->sem);

                        // copy data from temporary buffer to streambuffer
                        memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);

                        // set the new size of the audiobuffer
                        file->as[file->audioStream]->size += got_frame;

                    // we leave the audio semaphore
                    SDL_SemPost(file->as[file->audioStream]->sem);
                }
            }
        }

        // If it's a video packet from our video stream...
        if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {

            got_frame = 0;
//Time1=av_gettime();
            // Decode the packet
            avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);

            if(got_frame) {

                // create imagebuffer
                bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );

                // write timestamp into the buffer
                buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;

                // usefull when dealing with B frames
                if(pack.dts == AV_NOPTS_VALUE) {
                    // if we did not get a valid timestamp, we make one up based on the last
                    // valid timestamp + the duration of a frame
                    buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
                }

                // if new timestamp is from future, we proceed
			//	if(buf->timestamp >= SDL_ffmpegGetPosition(file)) 
			//	{
				// output size follows the GUI zoom factor, clamped to the
				// native frame size (see zoomFivan)
				int w=(int)(zoomFivan*320+0.5);
				int h=(int)(zoomFivan*240+0.5);
				
				if ((w>file->vs[file->videoStream]->width)||(h>file->vs[file->videoStream]->height)){
					w=file->vs[file->videoStream]->width;
					h=file->vs[file->videoStream]->height;
				}
				// build a scaler converting native pix_fmt -> RGB24 at w x h;
				// freed right after use so zoom changes take effect next frame
				if (img_convert_ctx == NULL) {
							img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                                                 w,h,
                                                 file->vs[file->videoStream]->pixFmt,
                                                 sws_flags, NULL, NULL, NULL);
							if (img_convert_ctx == NULL) {
								fprintf(stderr, "Cannot initialize the conversion context\n");
								exit(1);
							}
					}
				
					// 3 bytes per pixel (RGB24)
					((AVPicture*)inFrameRGB)->linesize[0]=(int)w*3;
				sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
                      0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);
				 sws_freeContext(img_convert_ctx);
				 img_convert_ctx=NULL;
                    // we convert whatever type of data we got to RGB24
                  /* img_convert((AVPicture*)inFrameRGB,
                        file->vs[file->videoStream]->pixFmt,
                        (AVPicture*)inFrame,
                        ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                        file->vs[file->videoStream]->width,
                        file->vs[file->videoStream]->height);
*/
                    // allocate image room
                    buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
                        w,
                        h,
                        24, 0x0000FF, 0x00FF00, 0xFF0000, 0);

                    // copy image data to image room
                    memcpy(buf->img->pixels, inFrameRGB->data[0],
                        w*h* 3);
					// remember the distance between consecutive timestamps
					file->timebase=buf->timestamp-file->vs[file->videoStream]->lastTimeStamp;
                    // we write the lastTimestamp we got
                    file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;

                    //int i;
                    int again = 1;
//Time=av_gettime()-Time1;

//fprintf (pFile, "%d	\n",Time);
                    // keep trying to fit in buffer, until the data was actually placed in the buffer
                    while(again && file->threadActive) {

                        // we enter the video semaphore
                        SDL_SemWait(file->vs[file->videoStream]->sem);

                            // loop through all positions in buffer until an empty
                            // space was found
                            //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
                                // if this place in the buffer is empty we write our new frame
                                if((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) < SDL_FFMPEG_MAX_BUFFERED_FRAMES) {
                                    file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->writeImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = buf;
									file->vs[file->videoStream]->writeImage++;
                                    // we placed our image in the buffer, moving on
                                    again = 0;
                                    
                                }
                            //}

                        // we leave the video semaphore
                        SDL_SemPost(file->vs[file->videoStream]->sem);
						
                        // frames aren't being release every ms, so we can take some
                        // time before we try and fit our new image again
                        // (decode lock is dropped so the reader can drain)
                        if(again) 
						{
							SDL_SemPost(file->decode);
							SDL_Delay(3);
							SDL_SemWait(file->decode);
						}
                    }
    //            } 
				//else {
    //                // if our decoded frame was too old, we don't bother putting
    //                // it in our buffer
    //                free( buf );
    //            }
            }
        }
        // we leave the decode semaphore
        SDL_SemPost(file->decode);
		if ((file->skipAudio)&&(file->delay)) 
			SDL_Delay(3);
    }
    // if we stop this thread, we can release the packet we reserved
    av_free_packet(&pack);

    return 0;
}

// Seek to an absolute position (milliseconds) in the file. Flushes all
// buffered audio/video, performs the demuxer seek, then flushes the codec
// state. Returns 0 on success, -1 when the position is out of bounds or
// the demuxer seek fails. Must not be called from the decode thread (it
// takes file->decode).
int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {

    // if the seekposition is out of bounds, return
    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;

    // start by flushing the buffers
    SDL_ffmpegFlush(file);

    // we enter the decode semaphore so the decode thread cannot be working on
    // data we are trying to flush
    SDL_SemWait(file->decode);

    // if the stream has an offset, add it to the start time
    int64_t startOffset = 0;
    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
        // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
    }
//if (file->skipAudio) startOffset=0;
    // calculate the final timestamp for the seek action this is in AV_TIME_BASE fractional seconds
    startOffset += (timestamp * AV_TIME_BASE) / 1000;

    // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
    // closest to the point we want, resulting in an earlier position if the jump
    // could not go the the exaxt point we wanted
    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
// give the demuxer a moment to settle before we reset our clocks
SDL_Delay(5);
        // set some values in our file so we now were to start playing
        file->offset = timestamp;
        file->startTime = av_gettime()/1000;//SDL_GetTicks();

        // if we have a valid video, we probably have some data we want to flush
        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

            // flushing happens inside the semaphore as not to interfere with the
            // decoding thread
            SDL_SemWait(file->vs[file->videoStream]->sem);
                avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
            SDL_SemPost(file->vs[file->videoStream]->sem);
        }

        // same goes for audio, if there is data, we flush is
        if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {

            // make sure this is done thread-save, so inside the appropriate
            // semaphore
            SDL_SemWait(file->as[file->audioStream]->sem);
                avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
            SDL_SemPost(file->as[file->audioStream]->sem);
        }

        // then there is our flush call
        SDL_ffmpegFlush(file);

        // and we are done, lets release the decode semaphore so the decode
        // thread can move on, filling buffer from our new position
        SDL_SemPost(file->decode);

        return 0;
    }

    // if, for some reason, we could not seek, we still should flush our buffers
    SDL_ffmpegFlush(file);

    // and release our lock on the decodethread
    SDL_SemPost(file->decode);

    return -1;
}

int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {

    // A relative seek is just an absolute seek measured from the
    // current playback position.
    int64_t target = SDL_ffmpegGetPosition(file) + timestamp;

    return SDL_ffmpegSeek(file, target);
}

int SDL_ffmpegFlush(SDL_ffmpegFile *file) {

    // drop any buffered audio, provided an audio stream is in use
    if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {

        // buffer manipulation is only safe inside the stream semaphore
        SDL_SemWait(file->as[file->audioStream]->sem);

            // emptying the audio buffer just means declaring its size zero
            file->as[file->audioStream]->size = 0;

        SDL_SemPost(file->as[file->audioStream]->sem);
    }

    // drop any buffered video frames, provided a video stream is in use
    if( SDL_ffmpegValidVideo(file) && !file->skipVideo ) {

        int idx;

        // frame buffer access is guarded by the video semaphore as well
        SDL_SemWait(file->vs[file->videoStream]->sem);

            // walk the whole image buffer, releasing every frame we find
            for(idx = 0; idx < SDL_FFMPEG_MAX_BUFFERED_FRAMES; idx++) {

                if( file->vs[file->videoStream]->imageBuffer[idx] ) {

                    // release the surface carrying the pixel data
                    SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[idx]->img );

                    // release the wrapper struct around it
                    free( file->vs[file->videoStream]->imageBuffer[idx] );

                    // a null entry marks the slot as vacant
                    file->vs[file->videoStream]->imageBuffer[idx] = 0;
                }
            }

            // restart the read/write positions now that the buffer is empty
            file->vs[file->videoStream]->writeImage = 0;
            file->vs[file->videoStream]->readImage = 0;

        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}

int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // Hands the caller up to *len bytes of decoded audio. On entry *len is
    // the amount requested; on return it holds the amount actually provided.
    // NOTE: on success this function RETURNS WHILE HOLDING the audio
    // semaphore — the caller must call SDL_ffmpegReleaseAudio to unlock it.

    // no valid audio stream, paused, or audio skipped: nothing to give.
    // report zero bytes so the caller never acts on a stale *len value
    if( !SDL_ffmpegValidAudio(file) || file->pause || file->skipAudio ) {
        *len = 0;
        return 0;
    }

    // working on the audiobuffer should always be done from the semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

        // if more audiodata is requested than is available, clamp the
        // request to what we can actually give, writing the count into len
        if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

        // decrease the size of our audiobuffer by len
        file->as[file->audioStream]->size -= *len;

        // len is the number of bytes handed out, so add it to the total
        file->as[file->audioStream]->totalBytes += *len;

        // the videooffset keeps video in sync with the audio: it is the
        // difference between where we are in the stream (GetPosition) and
        // where we should be (pts); the same offset is used when selecting
        // the current videoframe
        file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

        // recompute the pts of our audiodata from the hardPts (the value
        // received from ffmpeg) plus how far we have advanced since then
        file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
        // samples are 16-bit, so divide totalBytes by 2 (per channel)
        // before dividing by the samplerate to get milliseconds
        file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);

        // return the audiobuffer; note we are still inside the audio
        // semaphore — it is only released by SDL_ffmpegReleaseAudio
        return file->as[file->audioStream]->audio;
}

int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {

    // without a valid, non-skipped audio stream there is nothing to release
    // NOTE(review): this guard does not check file->pause while the matching
    // SDL_ffmpegGetAudio bail-out does — callers should only pair this with
    // a successful SDL_ffmpegGetAudio call; verify against call sites
    if( !SDL_ffmpegValidAudio(file) || file->skipAudio ) return -1;

    // SDL_ffmpegGetAudio left the audio semaphore locked and reported len
    // bytes consumed; shift the remaining bytes to the buffer's front
    memmove( file->as[file->audioStream]->audio,
             file->as[file->audioStream]->audio + len,
             file->as[file->audioStream]->size );

    // buffer work is done, release the semaphore taken in SDL_ffmpegGetAudio
    SDL_SemPost(file->as[file->audioStream]->sem);

    return 0;
}

int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {

    // Current play position of the file in milliseconds: wall-clock time
    // since playback (re)started plus the offset set by the last seek.
    // The old skipAudio branch returned this exact same expression, so the
    // dead conditional and commented-out alternatives have been removed.
    return av_gettime()/1000 + file->offset - file->startTime;
}

SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {

    // Builds an SDL_AudioSpec describing this file's audio; the caller owns
    // the returned struct and must free() it. Returns NULL on allocation
    // failure.
    SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );

    if(spec) {
        // zero the whole struct first so the fields we do not fill here
        // (silence, size, padding) do not carry uninitialized garbage
        memset( spec, 0, sizeof(SDL_AudioSpec) );

        spec->format = AUDIO_S16SYS;
        spec->samples = samples;
        spec->userdata = file;
        // use SDL's portable calling-convention macro instead of the
        // compiler-specific __cdecl, so this also builds with gcc et al.
        spec->callback = (void (SDLCALL *)(void *, Uint8 *, int))(callback);

        // sensible defaults in case no usable audio stream is present
        spec->freq = 48000;
        spec->channels = 2;

        // with a valid, non-skipped audio stream, mirror its real parameters
        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
            spec->freq = file->as[file->audioStream]->sampleRate;
            spec->channels = file->as[file->audioStream]->channels;
        }
    }

    return spec;
}

int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {

    // total duration of the file in milliseconds, taken straight from the
    // ffmpeg container info — note that ffmpeg does not always get this
    // value right, so don't bet your life on it...
    AVFormatContext *ctx = (AVFormatContext*)file->_ffmpeg;
    return ctx->duration / (AV_TIME_BASE / 1000);
}

int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {

    // both output pointers are required
    if(!w || !h) return -1;

    // no usable video stream: hand back fallback dimensions and signal
    // failure so the caller can tell the size is not authoritative
    if( !SDL_ffmpegValidVideo(file) || file->skipVideo ) {
        *w = 320;
        *h = 240;
        return -1;
    }

    // report the dimensions of the selected video stream
    *w = file->vs[file->videoStream]->width;
    *h = file->vs[file->videoStream]->height;
    return 0;
}

int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {

    // nonzero when the selected audio stream index lies within range
    return (file->audioStream >= 0 && file->audioStream < file->AStreams) ? 1 : 0;
}

int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {

    // nonzero when the selected video stream index lies within range
    return (file->videoStream >= 0 && file->videoStream < file->VStreams) ? 1 : 0;
}

int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {

    // a state of 0 means "play", analogous to SDL's audio pause semantics
    file->pause = state;

    // on resume, re-anchor the start time so GetPosition keeps advancing
    // from the right point
    if(file->pause == 0) {
        file->startTime = av_gettime()/1000;
    }

    return 0;
}

// Returns the pause flag set by SDL_ffmpegPause: nonzero when paused,
// zero when playing.
int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
    return file->pause;
}