ivand_qmul@125: /*******************************************************************************
ivand_qmul@125: * *
ivand_qmul@125: * SDL_ffmpeg is a library for basic multimedia functionality. *
ivand_qmul@125: * SDL_ffmpeg is based on ffmpeg. *
ivand_qmul@125: * *
ivand_qmul@125: * Copyright (C) 2007 Arjan Houben *
ivand_qmul@125: * *
ivand_qmul@125: * SDL_ffmpeg is free software: you can redistribute it and/or modify *
ivand_qmul@125: * it under the terms of the GNU Lesser General Public License as published *
ivand_qmul@125: * by the Free Software Foundation, either version 3 of the License, or any *
ivand_qmul@125: * later version. *
ivand_qmul@125: * *
ivand_qmul@125: * This program is distributed in the hope that it will be useful, *
ivand_qmul@125: * but WITHOUT ANY WARRANTY; without even the implied warranty of *
ivand_qmul@125: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
ivand_qmul@125: * GNU Lesser General Public License for more details. *
ivand_qmul@125: * *
ivand_qmul@125: * You should have received a copy of the GNU Lesser General Public License *
ivand_qmul@125: * along with this program. If not, see . *
ivand_qmul@125: * *
ivand_qmul@125: *******************************************************************************/
ivand_qmul@125:
#include <stdio.h>   /* NOTE(review): header names were lost in extraction; reconstructed from usage (fprintf, malloc, memcpy) — verify */
#include <stdlib.h>
ivand_qmul@125: #ifdef __cplusplus
ivand_qmul@125: extern "C" {
ivand_qmul@125: #endif
ivand_qmul@125: #ifdef WIN32
ivand_qmul@125: #include "SDL_ffmpeg.h"
#include <windows.h>   /* QueryPerformanceCounter / LARGE_INTEGER */
#include <avformat.h>  /* NOTE(review): header names lost in extraction; reconstructed from usage — verify */
#include <avcodec.h>
#include <swscale.h>
ivand_qmul@125: #endif
ivand_qmul@125:
ivand_qmul@125: #ifdef __unix__
ivand_qmul@125: #include
ivand_qmul@125: #include
ivand_qmul@125: #endif
benoitrigolleau@130: #undef main
ivand_qmul@125: #ifdef __cplusplus
ivand_qmul@125: }
ivand_qmul@125: #endif
ivand_qmul@125: #include "../../sv/main/MainWindow.h"
ivand_qmul@129: #include
ivand_qmul@125:
ivand_qmul@125: //const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
ivand_qmul@125: //const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
benoitrigolleau@256: extern int zoomWivan;
benoitrigolleau@256: extern int zoomHivan;
ivand_qmul@125: int FFMPEG_init_was_called = 0;
benoitrigolleau@256: //FILE *pFile, *tFile;
ivand_qmul@129: int64_t Time,Time1;
ivand_qmul@129: int64_t realt=0;
ivand_qmul@129:
ivand_qmul@125: SDL_ffmpegFile* SDL_ffmpegCreateFile() {
ivand_qmul@125:
ivand_qmul@125: // create SDL_ffmpegFile pointer
ivand_qmul@125: SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
ivand_qmul@125: if(!file) return 0;
ivand_qmul@125: file->_ffmpeg=av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));
ivand_qmul@125: // create a semaphore for every file
ivand_qmul@125: file->decode = SDL_CreateSemaphore(1);
ivand_qmul@125:
ivand_qmul@129: Time=0;
ivand_qmul@129: Time1=0;
benoitrigolleau@256: //fopen_s (&pFile,"myfile.txt","w");
benoitrigolleau@256: // fopen_s (&tFile,"Timestampfile.txt","w");
ivand_qmul@125: // allocate room for VStreams
ivand_qmul@125: file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
ivand_qmul@125: if(!file->vs) {
ivand_qmul@125: free( file );
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // allocate room for AStreams
ivand_qmul@125: file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
ivand_qmul@125: if(!file->as) {
ivand_qmul@125: free( file );
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // initialize variables with standard values
ivand_qmul@125: file->audioStream = -1;
ivand_qmul@125: file->videoStream = -1;
ivand_qmul@125:
ivand_qmul@125: file->offset = 0;
ivand_qmul@125: file->videoOffset = 0;
ivand_qmul@125: file->startTime = 0;
ivand_qmul@125:
ivand_qmul@125: file->threadID = 0;
ivand_qmul@125:
ivand_qmul@125: return file;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: void SDL_ffmpegFree(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: SDL_ffmpegStopDecoding(file);
ivand_qmul@125:
ivand_qmul@125: SDL_ffmpegFlush(file);
ivand_qmul@125:
ivand_qmul@125: free(file);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {
ivand_qmul@125:
ivand_qmul@125:
ivand_qmul@125: // register all codecs
ivand_qmul@125: if(!FFMPEG_init_was_called) {
ivand_qmul@125: FFMPEG_init_was_called = 1;
ivand_qmul@125: av_register_all();
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // open new ffmpegFile
ivand_qmul@125: SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
ivand_qmul@125: if(!file) return 0;
ivand_qmul@125:
ivand_qmul@125: // information about format is stored in file->_ffmpeg
ivand_qmul@125:
ivand_qmul@125: // open the file
ivand_qmul@125: if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
ivand_qmul@125: fprintf(stderr, "could not open \"%s\"\n", filename);
ivand_qmul@125: free(file);
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // retrieve format information
ivand_qmul@125: if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
ivand_qmul@125: fprintf(stderr, "could not retrieve video stream info");
ivand_qmul@125: free(file);
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // dump info to logfile
ivand_qmul@125: // dump_format(file->_ffmpeg, 0, filename, 0);
ivand_qmul@125:
ivand_qmul@125: // find the streams in the file
ivand_qmul@125: file->VStreams = 0;
ivand_qmul@125: file->AStreams = 0;
ivand_qmul@125: file->threadActive = 0;
ivand_qmul@125:
ivand_qmul@125: // iterate through all the streams and store audio/video streams
ivand_qmul@125: size_t i;
ivand_qmul@125: for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {
ivand_qmul@125:
ivand_qmul@125: if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
ivand_qmul@125:
ivand_qmul@125: // if this is a packet of the correct type we create a new stream
ivand_qmul@125: SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
ivand_qmul@125:
ivand_qmul@125: if(stream) {
ivand_qmul@125: // we set our stream to zero
ivand_qmul@125: memset(stream, 0, sizeof(SDL_ffmpegStream));
ivand_qmul@125:
ivand_qmul@125: // save unique streamid
ivand_qmul@125: stream->id = i;
ivand_qmul@125:
ivand_qmul@125: // the timeBase is what we use to calculate from/to pts
ivand_qmul@125: stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
ivand_qmul@125:
ivand_qmul@125: // save width, height and pixFmt of our outputframes
ivand_qmul@125: stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
ivand_qmul@125: stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
ivand_qmul@125: stream->pixFmt = PIX_FMT_RGB24;
ivand_qmul@125:
ivand_qmul@125: // _ffmpeg holds data about streamcodec
ivand_qmul@125: stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
ivand_qmul@125:
ivand_qmul@125: // get the correct decoder for this stream
ivand_qmul@125: AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);
ivand_qmul@125:
ivand_qmul@125: if(!codec) {
ivand_qmul@125: free(stream);
ivand_qmul@125: fprintf(stderr, "could not find codec\n");
ivand_qmul@125: } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
ivand_qmul@125: free(stream);
ivand_qmul@125: fprintf(stderr, "could not open decoder\n");
ivand_qmul@125: } else {
ivand_qmul@125:
ivand_qmul@125: // copy metadata from AVStream into our stream
ivand_qmul@125: stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
ivand_qmul@125: stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
ivand_qmul@125: memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
ivand_qmul@125: stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
ivand_qmul@125: stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
ivand_qmul@125: memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
ivand_qmul@125:
ivand_qmul@125: stream->audio = 0;
ivand_qmul@125: stream->size = 0;
ivand_qmul@125: stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
ivand_qmul@129: stream->writeImage = 0;
ivand_qmul@129: stream->readImage = 0;
ivand_qmul@125: file->vs[file->VStreams] = stream;
ivand_qmul@125: file->VStreams++;
ivand_qmul@125:
ivand_qmul@125: // create semaphore for thread-safe use
ivand_qmul@125: stream->sem = SDL_CreateSemaphore(1);
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125: } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
ivand_qmul@125:
ivand_qmul@125: // if this is a packet of the correct type we create a new stream
ivand_qmul@125: SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
ivand_qmul@125:
ivand_qmul@125: if(stream) {
ivand_qmul@125: // we set our stream to zero
ivand_qmul@125: memset(stream, 0, sizeof(SDL_ffmpegStream));
ivand_qmul@125:
ivand_qmul@125: // save unique streamid
ivand_qmul@125: stream->id = i;
ivand_qmul@125:
ivand_qmul@125: // the timeBase is what we use to calculate from/to pts
ivand_qmul@125: stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
ivand_qmul@125:
ivand_qmul@125: // _ffmpeg holds data about streamcodec
ivand_qmul@125: stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
ivand_qmul@125:
ivand_qmul@125: stream->width = 0;
ivand_qmul@125: stream->height = 0;
ivand_qmul@125: stream->pixFmt = PIX_FMT_RGB24;
ivand_qmul@125:
ivand_qmul@125: // get the correct decoder for this stream
ivand_qmul@125: AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);
ivand_qmul@125:
ivand_qmul@125: if(!codec) {
ivand_qmul@125: free( stream );
ivand_qmul@125: fprintf(stderr, "could not find codec\n");
ivand_qmul@125: } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
ivand_qmul@125: free( stream );
ivand_qmul@125: fprintf(stderr, "could not open decoder\n");
ivand_qmul@125: } else {
ivand_qmul@125:
ivand_qmul@125: // copy metadata from AVStream into our stream
ivand_qmul@125: stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
ivand_qmul@125: stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
ivand_qmul@125: memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
ivand_qmul@125: stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
ivand_qmul@125: stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
ivand_qmul@125: memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
ivand_qmul@125:
ivand_qmul@125: stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
ivand_qmul@125: stream->size = 0;
ivand_qmul@125: stream->imageBuffer = 0;
ivand_qmul@125:
ivand_qmul@125: file->as[file->AStreams] = stream;
ivand_qmul@125: file->AStreams++;
ivand_qmul@125:
ivand_qmul@125: // create semaphore for thread-safe use
ivand_qmul@125: stream->sem = SDL_CreateSemaphore(1);
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: return file;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: MainWindow * MWinsA=MainWindow::instance();
ivand_qmul@125: if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;
ivand_qmul@125:
ivand_qmul@125: SDL_SemWait(file->vs[file->videoStream]->sem);
ivand_qmul@125:
ivand_qmul@125: bufferImage *option = 0;
ivand_qmul@129: //int i;
ivand_qmul@129: float ratio;
ivand_qmul@129: int64_t pos,pos1, pos2, timestamp;
ivand_qmul@129: //for(i=0; iGet_CurAudioTime();
ivand_qmul@125:
benoitrigolleau@256: /* if (pFile)
lbajardsilogic@178: {
lbajardsilogic@178: fprintf (pFile, "p: \t %u\t", pos);
benoitrigolleau@256: }*/
ivand_qmul@129: //if (MWinsA->Get_HardwareBufferTime()==0)
ivand_qmul@129: // pos1=0;
ivand_qmul@129: //else {
ivand_qmul@129: // pos1=MWinsA->Get_HardwareBufferTime();
ivand_qmul@129: // //fprintf (tFile, "%u\t", pos1);
ivand_qmul@129: // int64_t timeTemp;
ivand_qmul@129: // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
ivand_qmul@129: //
ivand_qmul@129: // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
ivand_qmul@129: // fprintf (pFile, "%u\t", pos1);
ivand_qmul@129: //}
ivand_qmul@129: //pos2=pos+pos1;
benoitrigolleau@256: /*if (pFile)
lbajardsilogic@178: {
lbajardsilogic@178: fprintf (pFile, "%u\n", pos);
benoitrigolleau@256: }*/
lbajardsilogic@178:
ivand_qmul@129: // if this entry does not exist, continue
ivand_qmul@129: while(((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage)>0)&&(file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000))//&& (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp >= pos - file->timebase+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000))
ivand_qmul@129: {
ivand_qmul@129: //pos=MWinsA->Get_CurAudioTime();
ivand_qmul@129: //timestamp=file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp;
ivand_qmul@129: //fprintf (tFile, "try: %d %d\n", (pos+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000), timestamp);
ivand_qmul@129: // do we have an image that should have been shown?
ivand_qmul@129: //if(file->vs[file->videoStream]->imageBuffer[mod(file->vs[file->videoStream]->readImage,SDL_FFMPEG_MAX_BUFFERED_FRAMES)]->timestamp <= pos + (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000) {
ivand_qmul@129:
ivand_qmul@129: // if this is the first option we find, we simply save it
ivand_qmul@129: if(!option) {
ivand_qmul@125:
ivand_qmul@129: option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
ivand_qmul@125:
ivand_qmul@129: // set to 0 so we know this position in the buffer is available again
ivand_qmul@129: file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
ivand_qmul@129: file->vs[file->videoStream]->readImage++;
ivand_qmul@125:
ivand_qmul@129: } else {
ivand_qmul@125:
ivand_qmul@129: // we found a newer possible timestamp, we delete the older one
ivand_qmul@129: if( option->timestamp < file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp) {
ivand_qmul@125:
ivand_qmul@129: // this image is too old, we discard it
ivand_qmul@129: SDL_FreeSurface( option->img );
ivand_qmul@125:
ivand_qmul@129: // free old option
ivand_qmul@129: free( option );
ivand_qmul@125:
ivand_qmul@129: // new pointer to position in container
ivand_qmul@129: option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
ivand_qmul@125:
ivand_qmul@129: // set to 0 so we know this position in the buffer is available again
ivand_qmul@129: file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
ivand_qmul@129: file->vs[file->videoStream]->readImage++;
ivand_qmul@129: }
ivand_qmul@129: else {
ivand_qmul@129: file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]=0;
ivand_qmul@129: file->vs[file->videoStream]->readImage++;
ivand_qmul@129: }
ivand_qmul@129: }
ivand_qmul@125:
ivand_qmul@129:
ivand_qmul@129: pos=MWinsA->Get_CurAudioTime();
benoitrigolleau@256: /* if (pFile)
lbajardsilogic@178: {
lbajardsilogic@178: fprintf (pFile, "e:\t%u\t", pos);
benoitrigolleau@256: }*/
ivand_qmul@129: //if (MWinsA->Get_HardwareBufferTime()==0)
ivand_qmul@129: // pos1=0;
ivand_qmul@129: //else {
ivand_qmul@129: // pos1=MWinsA->Get_HardwareBufferTime();
ivand_qmul@129: // //fprintf (tFile, "%u\t", pos1);
ivand_qmul@129: // int64_t timeTemp;
ivand_qmul@129: // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
ivand_qmul@129:
ivand_qmul@129: // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
ivand_qmul@129: // fprintf (pFile, "%u\t", pos1);
ivand_qmul@129: //}
ivand_qmul@129: //fprintf (pFile, "%u\n", pos2);
ivand_qmul@129: //pos2=pos+pos1;
ivand_qmul@129: //if (posvs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage;
ivand_qmul@125: // if we did not found an option, we exit
ivand_qmul@125: if(!option) {
ivand_qmul@125: // release the lock
ivand_qmul@129: /*timestamp=0;
ivand_qmul@129: int64_t tt=av_gettime()/1000-file->timer;
ivand_qmul@129: file->timer=av_gettime()/1000;
ivand_qmul@129: realt+=tt;
ivand_qmul@129: fprintf (tFile, "%u\t", realt);
ivand_qmul@129: fprintf (tFile, "%u\t", tt);
ivand_qmul@129: fprintf (tFile, "%u\t", pos);
ivand_qmul@129: fprintf (tFile, "%u\n", timestamp);*/
ivand_qmul@125: SDL_SemPost(file->vs[file->videoStream]->sem);
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@129: int64_t tt;
ivand_qmul@129: QueryPerformanceCounter((LARGE_INTEGER *)(&tt));
ivand_qmul@129: tt=tt/(file->countFreq)-file->timer;
ivand_qmul@129:
ivand_qmul@129: QueryPerformanceCounter((LARGE_INTEGER *)(&file->timer));
ivand_qmul@129: file->timer=file->timer/(file->countFreq);
ivand_qmul@129: realt+=tt;
benoitrigolleau@256: /*fprintf (tFile, "%u\t", x);
ivand_qmul@129: fprintf (tFile, "%u\t", realt);
benoitrigolleau@256: fprintf (tFile, "%u\t", tt);*/
ivand_qmul@138: timestamp=(pos-option->timestamp+((AVFormatContext*)file->_ffmpeg)->start_time/1000)/MWinsA->getPlaySpeedVal();
benoitrigolleau@256: /*fprintf (tFile, "%u\t", option->timestamp);//+ (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000);
benoitrigolleau@256: fprintf (tFile, "%d\n", timestamp);*/
ivand_qmul@125: // we did found an option, so we return the imagedata
ivand_qmul@125: return option->img;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {
ivand_qmul@125:
ivand_qmul@125: // if there was no valid video stream, we should not release
ivand_qmul@125: if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;
ivand_qmul@125:
ivand_qmul@125: // free surface
ivand_qmul@125: SDL_FreeSurface(bmp);
ivand_qmul@125:
ivand_qmul@125: // release semaphore if needed
ivand_qmul@125: if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
ivand_qmul@125: SDL_SemPost(file->vs[file->videoStream]->sem);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {
ivand_qmul@125:
ivand_qmul@125: // check if we have any audiostreams
ivand_qmul@125: if(!file->AStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: // check if the requested id is possible
ivand_qmul@125: if(audioID >= file->AStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: // return ausiostream linked to audioID
ivand_qmul@125: return file->as[audioID];
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {
ivand_qmul@125:
ivand_qmul@125: // check if we have any audiostreams
ivand_qmul@125: if(!file->AStreams) return -1;
ivand_qmul@125:
ivand_qmul@125: // check if the requested id is possible
ivand_qmul@125: if(audioID >= file->AStreams) return -1;
ivand_qmul@125:
ivand_qmul@125: // set current audiostream to stream linked to audioID
ivand_qmul@125: file->audioStream = audioID;
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {
ivand_qmul@125:
ivand_qmul@125: // check if we have any videostreams
ivand_qmul@125: if(!file->VStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: // check if the requested id is possible
ivand_qmul@125: if(videoID >= file->VStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: // return ausiostream linked to videoID
ivand_qmul@125: return file->vs[videoID];
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {
ivand_qmul@125:
ivand_qmul@125: // check if we have any videostreams
ivand_qmul@125: if(!file->VStreams) return -1;
ivand_qmul@125:
ivand_qmul@125: // check if the requested id is possible
ivand_qmul@125: if(videoID >= file->VStreams) return -1;
ivand_qmul@125:
ivand_qmul@125: // set current videostream to stream linked to videoID
ivand_qmul@125: file->videoStream = videoID;
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: // start a thread that continues to fill audio/video buffers
ivand_qmul@125: if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: // stop decode thread
ivand_qmul@125: file->threadActive = 0;
ivand_qmul@125: if(file->threadID) SDL_WaitThread(file->threadID, 0);
ivand_qmul@125:
ivand_qmul@125: // set threadID to zero, so we can check for concurrent threads
ivand_qmul@125: file->threadID = 0;
ivand_qmul@125:
ivand_qmul@125: return -1;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegDecodeThread(void* data) {
ivand_qmul@125: static struct SwsContext *img_convert_ctx;
ivand_qmul@125: // unpack the void pointer
ivand_qmul@125: SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;
ivand_qmul@125:
ivand_qmul@125: // flag this thread as active, used for stopping
ivand_qmul@125: file->threadActive = 1;
ivand_qmul@125:
ivand_qmul@125: // create a packet for our data
ivand_qmul@125: AVPacket pack;
ivand_qmul@125:
ivand_qmul@125: // reserve some pointers for use in loop
ivand_qmul@125: AVFrame *inFrame, *inFrameRGB;
lbarthelemy@170: uint8_t *inVideoBuffer = NULL;
ivand_qmul@125:
ivand_qmul@125: // allocate a frame
ivand_qmul@125: inFrame = avcodec_alloc_frame();
ivand_qmul@125:
ivand_qmul@125: // allocate another frame for unknown->RGB conversion
ivand_qmul@125: inFrameRGB = avcodec_alloc_frame();
ivand_qmul@125:
ivand_qmul@125: if(SDL_ffmpegValidVideo(file)) {
ivand_qmul@125: // allocate buffer
lbarthelemy@170: inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
ivand_qmul@125: file->vs[file->videoStream]->width,
ivand_qmul@125: file->vs[file->videoStream]->height) );
ivand_qmul@125:
ivand_qmul@125: // put buffer into our reserved frame
ivand_qmul@125: avpicture_fill( (AVPicture*)inFrameRGB,
ivand_qmul@125: inVideoBuffer,
ivand_qmul@125: file->vs[file->videoStream]->pixFmt,
ivand_qmul@125: file->vs[file->videoStream]->width,
ivand_qmul@125: file->vs[file->videoStream]->height);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // allocate temporary audiobuffer
ivand_qmul@125: int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );
ivand_qmul@125:
ivand_qmul@125: // reserve integer for use in loop
ivand_qmul@125: int got_frame;
ivand_qmul@125:
ivand_qmul@125: while(file->threadActive) {
ivand_qmul@125:
ivand_qmul@125: // read a packet from the file
ivand_qmul@125: if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
ivand_qmul@125: // thread is idle
ivand_qmul@125: SDL_Delay(10);
ivand_qmul@125: continue;
ivand_qmul@125: }
ivand_qmul@125: if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
ivand_qmul@125: SDL_Delay(1);
ivand_qmul@125: continue;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // we got a packet, lets handle it
ivand_qmul@125:
ivand_qmul@125: // let's start by entering the video semaphore
ivand_qmul@125: SDL_SemWait(file->decode);
ivand_qmul@125:
ivand_qmul@125: // If it's a audio packet from our stream...
ivand_qmul@125: if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {
ivand_qmul@125:
ivand_qmul@125: uint8_t *data = pack.data;
ivand_qmul@125: int size = pack.size;
ivand_qmul@125: int len;
ivand_qmul@125:
ivand_qmul@125: while(size > 0 && file->threadActive) {
ivand_qmul@125:
ivand_qmul@125: // Decode the packet
ivand_qmul@125: len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);
ivand_qmul@125:
ivand_qmul@125: // if error, we skip the frame
ivand_qmul@125: if(len < 0 || !got_frame) {
ivand_qmul@125: size = 0;
ivand_qmul@125: break;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // change pointers
ivand_qmul@125: data += got_frame;
ivand_qmul@125: size -= got_frame;
ivand_qmul@125:
ivand_qmul@125: // if the audiobuffer is full, the thread waits
ivand_qmul@125: while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
ivand_qmul@125: file->threadActive) {
ivand_qmul@125: SDL_Delay(5);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // write an audiopts
ivand_qmul@125: int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;
ivand_qmul@125:
ivand_qmul@125: // is the audioBuffer is empty
ivand_qmul@125: if(!file->as[file->audioStream]->size) {
ivand_qmul@125:
ivand_qmul@125: // we set a new pts
ivand_qmul@125: file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;
ivand_qmul@125:
ivand_qmul@125: // we set totalbytes to zero, as this represents the amount
ivand_qmul@125: // of bytes that were played since our last 'hardPts'
ivand_qmul@125: file->as[file->audioStream]->totalBytes = 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // no need to store old samples
ivand_qmul@125: if(audiopts >= SDL_ffmpegGetPosition(file)) {
ivand_qmul@125:
ivand_qmul@125: // enter audio semaphore
ivand_qmul@125: SDL_SemWait(file->as[file->audioStream]->sem);
ivand_qmul@125:
ivand_qmul@125: // copy data from temporary buffer to streambuffer
ivand_qmul@125: memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);
ivand_qmul@125:
ivand_qmul@125: // set the new size of the audiobuffer
ivand_qmul@125: file->as[file->audioStream]->size += got_frame;
ivand_qmul@125:
ivand_qmul@125: // we leave the audio semaphore
ivand_qmul@125: SDL_SemPost(file->as[file->audioStream]->sem);
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // If it's a video packet from our video stream...
ivand_qmul@125: if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {
ivand_qmul@125:
ivand_qmul@125: got_frame = 0;
ivand_qmul@129: //Time1=av_gettime();
ivand_qmul@125: // Decode the packet
ivand_qmul@125: avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);
ivand_qmul@125:
ivand_qmul@125: if(got_frame) {
ivand_qmul@125:
ivand_qmul@125: // create imagebuffer
ivand_qmul@125: bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );
ivand_qmul@125:
ivand_qmul@125: // write timestamp into the buffer
ivand_qmul@125: buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;
ivand_qmul@125:
ivand_qmul@125: // usefull when dealing with B frames
ivand_qmul@125: if(pack.dts == AV_NOPTS_VALUE) {
ivand_qmul@125: // if we did not get a valid timestamp, we make one up based on the last
ivand_qmul@125: // valid timestamp + the duration of a frame
ivand_qmul@125: buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // if new timestamp is from future, we proceed
ivand_qmul@125: // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
ivand_qmul@125: // {
benoitrigolleau@256: int w=zoomWivan;//(int)(zoomFivan*320+0.5);
benoitrigolleau@256: int h=zoomHivan;//(int)(zoomFivan*240+0.5);
lbarthelemy@170: //if ((w>file->vs[file->videoStream]->width)||(h>file->vs[file->videoStream]->height)){
lbarthelemy@170: // w=file->vs[file->videoStream]->width;
lbarthelemy@170: // h=file->vs[file->videoStream]->height;
lbarthelemy@170: //}
lbarthelemy@169: // Be sure we have a multiple of 4
lbarthelemy@169: w &= 0xFFFFFFFC;
lbarthelemy@169: h &= 0xFFFFFFFC;
ivand_qmul@150: if (img_convert_ctx == NULL) {
lbarthelemy@169:
ivand_qmul@125: img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
ivand_qmul@125: ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
ivand_qmul@150: w,h,
ivand_qmul@125: file->vs[file->videoStream]->pixFmt,
lbarthelemy@169: SWS_FAST_BILINEAR, NULL, NULL, NULL);
ivand_qmul@125: if (img_convert_ctx == NULL) {
ivand_qmul@125: fprintf(stderr, "Cannot initialize the conversion context\n");
ivand_qmul@125: exit(1);
ivand_qmul@125: }
ivand_qmul@125: }
lbarthelemy@170:
lbarthelemy@170: // check to see if buffer is at the same size than the screen
lbarthelemy@170: if (inFrameRGB->linesize[0]/3 != w ) {
lbarthelemy@170: av_free(inFrameRGB);
lbarthelemy@170: free(inVideoBuffer);
lbarthelemy@170: //avcodec_default_release_buffer(img_convert_ctx , inFrameRGB);
lbarthelemy@170: inFrameRGB = avcodec_alloc_frame();
lbarthelemy@170: // allocate buffer
lbarthelemy@170: inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
lbarthelemy@170: w,
lbarthelemy@170: h) );
lbarthelemy@170:
lbarthelemy@170: // put buffer into our reserved frame
lbarthelemy@170: avpicture_fill( (AVPicture*)inFrameRGB,
lbarthelemy@170: inVideoBuffer,
lbarthelemy@170: file->vs[file->videoStream]->pixFmt,
lbarthelemy@170: w,
lbarthelemy@170: h);
lbarthelemy@170:
lbarthelemy@170: }
lbarthelemy@170:
ivand_qmul@150: ((AVPicture*)inFrameRGB)->linesize[0]=(int)w*3;
lbarthelemy@170: sws_scale(img_convert_ctx,
lbarthelemy@170: ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize, 0, file->vs[file->videoStream]->height,
lbarthelemy@170: ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);
lbarthelemy@170:
lbarthelemy@170: sws_freeContext(img_convert_ctx);
lbarthelemy@170: img_convert_ctx=NULL;
lbarthelemy@170:
lbarthelemy@170: // we convert whatever type of data we got to RGB24
ivand_qmul@125: /* img_convert((AVPicture*)inFrameRGB,
ivand_qmul@125: file->vs[file->videoStream]->pixFmt,
ivand_qmul@125: (AVPicture*)inFrame,
ivand_qmul@125: ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
ivand_qmul@125: file->vs[file->videoStream]->width,
ivand_qmul@125: file->vs[file->videoStream]->height);
ivand_qmul@125: */
lbarthelemy@170:
ivand_qmul@125: // allocate image room
ivand_qmul@125: buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
ivand_qmul@150: w,
ivand_qmul@150: h,
ivand_qmul@125: 24, 0x0000FF, 0x00FF00, 0xFF0000, 0);
ivand_qmul@125: // copy image data to image room
ivand_qmul@125: memcpy(buf->img->pixels, inFrameRGB->data[0],
ivand_qmul@150: w*h* 3);
ivand_qmul@129: file->timebase=buf->timestamp-file->vs[file->videoStream]->lastTimeStamp;
ivand_qmul@125: // we write the lastTimestamp we got
ivand_qmul@125: file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;
ivand_qmul@125:
ivand_qmul@129: //int i;
ivand_qmul@125: int again = 1;
ivand_qmul@129: //Time=av_gettime()-Time1;
ivand_qmul@125:
ivand_qmul@129: //fprintf (pFile, "%d \n",Time);
ivand_qmul@125: // keep trying to fit in buffer, until the data was actually placed in the buffer
ivand_qmul@125: while(again && file->threadActive) {
ivand_qmul@125:
ivand_qmul@125: // we enter the video semaphore
ivand_qmul@125: SDL_SemWait(file->vs[file->videoStream]->sem);
ivand_qmul@125:
ivand_qmul@125: // loop through all positions in buffer until an empty
ivand_qmul@125: // space was found
ivand_qmul@129: //for(i=0; ivs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) < SDL_FFMPEG_MAX_BUFFERED_FRAMES) {
ivand_qmul@129: file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->writeImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = buf;
ivand_qmul@129: file->vs[file->videoStream]->writeImage++;
ivand_qmul@125: // we placed our image in the buffer, moving on
ivand_qmul@125: again = 0;
ivand_qmul@129:
ivand_qmul@125: }
ivand_qmul@129: //}
ivand_qmul@125:
ivand_qmul@125: // we leave the video semaphore
ivand_qmul@125: SDL_SemPost(file->vs[file->videoStream]->sem);
ivand_qmul@129:
ivand_qmul@125: // frames aren't being release every ms, so we can take some
ivand_qmul@125: // time before we try and fit our new image again
ivand_qmul@129: if(again)
ivand_qmul@129: {
ivand_qmul@129: SDL_SemPost(file->decode);
ivand_qmul@129: SDL_Delay(3);
ivand_qmul@129: SDL_SemWait(file->decode);
ivand_qmul@129: }
ivand_qmul@125: }
ivand_qmul@125: // }
ivand_qmul@125: //else {
ivand_qmul@125: // // if our decoded frame was too old, we don't bother putting
ivand_qmul@125: // // it in our buffer
ivand_qmul@125: // free( buf );
ivand_qmul@125: // }
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125: // we leave the decode semaphore
ivand_qmul@125: SDL_SemPost(file->decode);
ivand_qmul@125: if ((file->skipAudio)&&(file->delay))
ivand_qmul@125: SDL_Delay(3);
ivand_qmul@125: }
lbarthelemy@170:
lbarthelemy@170: if (inVideoBuffer)
lbarthelemy@170: {
lbarthelemy@170: free(inVideoBuffer);
lbarthelemy@170: inVideoBuffer = NULL;
lbarthelemy@170: }
lbarthelemy@170:
ivand_qmul@125: // if we stop this thread, we can release the packet we reserved
ivand_qmul@125: av_free_packet(&pack);
lbarthelemy@170: free(samples);
lbarthelemy@170: av_free(inFrameRGB);
lbarthelemy@170: av_free(inFrame);
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {
ivand_qmul@125:
ivand_qmul@125: // if the seekposition is out of bounds, return
ivand_qmul@125: if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;
ivand_qmul@125:
ivand_qmul@125: // start by flushing the buffers
ivand_qmul@125: SDL_ffmpegFlush(file);
ivand_qmul@125:
ivand_qmul@125: // we enter the decode semaphore so the decode thread cannot be working on
ivand_qmul@125: // data we are trying to flush
ivand_qmul@125: SDL_SemWait(file->decode);
ivand_qmul@125:
ivand_qmul@125: // if the stream has an offset, add it to the start time
ivand_qmul@125: int64_t startOffset = 0;
ivand_qmul@125: if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
ivand_qmul@125: // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
ivand_qmul@125: startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
ivand_qmul@125: }
ivand_qmul@125: //if (file->skipAudio) startOffset=0;
ivand_qmul@125: // calculate the final timestamp for the seek action this is in AV_TIME_BASE fractional seconds
ivand_qmul@125: startOffset += (timestamp * AV_TIME_BASE) / 1000;
ivand_qmul@125:
ivand_qmul@125: // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
ivand_qmul@125: // closest to the point we want, resulting in an earlier position if the jump
ivand_qmul@125: // could not go the the exaxt point we wanted
ivand_qmul@125: if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
ivand_qmul@125: SDL_Delay(5);
ivand_qmul@125: // set some values in our file so we now were to start playing
ivand_qmul@125: file->offset = timestamp;
ivand_qmul@125: file->startTime = av_gettime()/1000;//SDL_GetTicks();
ivand_qmul@125:
ivand_qmul@125: // if we have a valid video, we probably have some data we want to flush
ivand_qmul@125: if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
ivand_qmul@125:
ivand_qmul@125: // flushing happens inside the semaphore as not to interfere with the
ivand_qmul@125: // decoding thread
ivand_qmul@125: SDL_SemWait(file->vs[file->videoStream]->sem);
ivand_qmul@125: avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
ivand_qmul@125: SDL_SemPost(file->vs[file->videoStream]->sem);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // same goes for audio, if there is data, we flush is
ivand_qmul@125: if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
ivand_qmul@125:
ivand_qmul@125: // make sure this is done thread-save, so inside the appropriate
ivand_qmul@125: // semaphore
ivand_qmul@125: SDL_SemWait(file->as[file->audioStream]->sem);
ivand_qmul@125: avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
ivand_qmul@125: SDL_SemPost(file->as[file->audioStream]->sem);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // then there is our flush call
ivand_qmul@125: SDL_ffmpegFlush(file);
ivand_qmul@125:
ivand_qmul@125: // and we are done, lets release the decode semaphore so the decode
ivand_qmul@125: // thread can move on, filling buffer from our new position
ivand_qmul@125: SDL_SemPost(file->decode);
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // if, for some reason, we could not seek, we still should flush our buffers
ivand_qmul@125: SDL_ffmpegFlush(file);
ivand_qmul@125:
ivand_qmul@125: // and release our lock on the decodethread
ivand_qmul@125: SDL_SemPost(file->decode);
ivand_qmul@125:
ivand_qmul@125: return -1;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {
ivand_qmul@125:
ivand_qmul@125: // same thing as normal seek, just take into account the current position
ivand_qmul@125: return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegFlush(SDL_ffmpegFile *file) {
ivand_qmul@125:
ivand_qmul@125: // if we have a valid audio stream, we flush is
ivand_qmul@125: if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
ivand_qmul@125:
ivand_qmul@125: // flush audiobuffer from semaphore, be thread-safe!
ivand_qmul@125: SDL_SemWait(file->as[file->audioStream]->sem);
ivand_qmul@125:
ivand_qmul@125: file->as[file->audioStream]->size = 0;
ivand_qmul@125:
ivand_qmul@125: SDL_SemPost(file->as[file->audioStream]->sem);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: // if we have a valid video stream, we flush some more
ivand_qmul@125: if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
ivand_qmul@125:
ivand_qmul@125: // flush videobuffer
ivand_qmul@125: int i;
ivand_qmul@125:
ivand_qmul@125: // again, be thread safe!
ivand_qmul@125: SDL_SemWait(file->vs[file->videoStream]->sem);
ivand_qmul@125:
ivand_qmul@125: // make sure we delete all frames from buffer
ivand_qmul@125: for(i=0; ivs[file->videoStream]->imageBuffer[i]) continue;
ivand_qmul@125:
ivand_qmul@125: // free the actual image data
ivand_qmul@125: SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );
ivand_qmul@125:
ivand_qmul@125: // and free the struct containing it
ivand_qmul@125: free( file->vs[file->videoStream]->imageBuffer[i] );
ivand_qmul@125:
ivand_qmul@125: // set position in buffer to 0, so we know it is empty
ivand_qmul@125: file->vs[file->videoStream]->imageBuffer[i] = 0;
ivand_qmul@125: }
ivand_qmul@129: file->vs[file->videoStream]->writeImage=0;
ivand_qmul@129: file->vs[file->videoStream]->readImage=0;
ivand_qmul@125: SDL_SemPost(file->vs[file->videoStream]->sem);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {
ivand_qmul@125:
ivand_qmul@125: // no valid audio, means no audio to get
ivand_qmul@125: if( !SDL_ffmpegValidAudio(file) || file->pause||file->skipAudio ) return 0;
ivand_qmul@125:
ivand_qmul@125: // working on audiobuffer should always be done from semaphore
ivand_qmul@125: SDL_SemWait(file->as[file->audioStream]->sem);
ivand_qmul@125:
ivand_qmul@125: // if we ask for more audiodata than we can give, we sent wat we can
ivand_qmul@125: // actually give, writing the amount of bytes into len
ivand_qmul@125: if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;
ivand_qmul@125:
ivand_qmul@125: // decrease the size of our audiobuffer by len
ivand_qmul@125: file->as[file->audioStream]->size -= *len;
ivand_qmul@125:
ivand_qmul@125: // len represents the nr of bytes we sent, so we increase the total
ivand_qmul@125: file->as[file->audioStream]->totalBytes += *len;
ivand_qmul@125:
ivand_qmul@125: // the videooffset makes sure we are always in sync with the audio
ivand_qmul@125: // it is actually the difference between the position were we are in the
ivand_qmul@125: // stream (GetPosition) and were we should be (pts)
ivand_qmul@125: // we use the same offset when selecting the current videoframe
ivand_qmul@125: file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;
ivand_qmul@125:
ivand_qmul@125: // we calculate the new pts for our audiodata based on the hardPts
ivand_qmul@125: // (that is the one we got from ffmpeg) and than calculating how for we
ivand_qmul@125: // have come since
ivand_qmul@125: file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
ivand_qmul@125: // since we use 16bit per sample, we devide totalbytes by 2 before deviding by samplerate
ivand_qmul@125: file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);
ivand_qmul@125:
ivand_qmul@125: // we return the audiobuffer, notice we are still in the audiosemaphore!
ivand_qmul@125: // we only leave this by calling SDL_ffmpegReleaseAudio
ivand_qmul@125: return file->as[file->audioStream]->audio;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {
ivand_qmul@125:
ivand_qmul@125: // no audio, means no releasing
ivand_qmul@125: if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;
ivand_qmul@125:
ivand_qmul@125: // this call should be paired with SDL_ffmpegGetAudio, as it provides us
ivand_qmul@125: // with the correct length so we move the correct amount of data
ivand_qmul@125: memmove( file->as[file->audioStream]->audio,
ivand_qmul@125: file->as[file->audioStream]->audio+len,
ivand_qmul@125: file->as[file->audioStream]->size );
ivand_qmul@125:
ivand_qmul@125: // work on audiodata is done, so we release the semaphore
ivand_qmul@125: SDL_SemPost(file->as[file->audioStream]->sem);
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
ivand_qmul@125: //MainWindow * MWinsA=MainWindow::instance();
ivand_qmul@125:
ivand_qmul@125: if (file->skipAudio){
ivand_qmul@125: return (av_gettime()/1000+ file->offset - file->startTime);
ivand_qmul@125: //int64_t pos=MWinsA->Get_CurAudioTime();
ivand_qmul@125: //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
ivand_qmul@125: }
ivand_qmul@125: else
ivand_qmul@125: return (av_gettime()/1000+ file->offset - file->startTime);//SDL_GetTicks();
ivand_qmul@125: // return the current playposition of our file
ivand_qmul@125:
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {
ivand_qmul@125:
ivand_qmul@125: // create audio spec
ivand_qmul@125: SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );
ivand_qmul@125:
ivand_qmul@125: if(spec) {
ivand_qmul@125: spec->format = AUDIO_S16SYS;
ivand_qmul@125: spec->samples = samples;
ivand_qmul@125: spec->userdata = file;
ivand_qmul@125: spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
ivand_qmul@125: spec->freq = 48000;
ivand_qmul@125: spec->channels = 2;
ivand_qmul@125:
ivand_qmul@125: // if we have a valid audiofile, we can use its data to create a
ivand_qmul@125: // more appropriate audio spec
ivand_qmul@125: if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
ivand_qmul@125: spec->freq = file->as[file->audioStream]->sampleRate;
ivand_qmul@125: spec->channels = file->as[file->audioStream]->channels;
ivand_qmul@125: }
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: return spec;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {
ivand_qmul@125:
ivand_qmul@125: // returns the duration of the entire file, please note that ffmpeg doesn't
ivand_qmul@125: // always get this value right! so don't bet your life on it...
ivand_qmul@125: return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {
ivand_qmul@125:
ivand_qmul@125: if(!w || !h) return -1;
ivand_qmul@125:
ivand_qmul@125: // if we have a valid video file selected, we use it
ivand_qmul@125: // if not, we send default values and return.
ivand_qmul@125: // by checking the return value you can check if you got a valid size
ivand_qmul@125: if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
ivand_qmul@125: *w = file->vs[file->videoStream]->width;
ivand_qmul@125: *h = file->vs[file->videoStream]->height;
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: *w = 320;
ivand_qmul@125: *h = 240;
ivand_qmul@125: return -1;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: // this function is used to check if we selected a valid audio stream
ivand_qmul@125: if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: return 1;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {
ivand_qmul@125:
ivand_qmul@125: // this function is used to check if we selected a valid video stream
ivand_qmul@125: if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;
ivand_qmul@125:
ivand_qmul@125: return 1;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {
ivand_qmul@125:
ivand_qmul@125: // by putting 0 into state, we play the file
ivand_qmul@125: // this behaviour is analogue to SDL audio
ivand_qmul@125: file->pause = state;
ivand_qmul@125:
ivand_qmul@125: if(!file->pause) {
ivand_qmul@125: file->startTime = av_gettime()/1000;//SDL_GetTicks();
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: return 0;
ivand_qmul@125: }
ivand_qmul@125:
ivand_qmul@125: int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
ivand_qmul@125: return file->pause;
ivand_qmul@125: }