ivand_qmul@125
|
1 /*******************************************************************************
|
ivand_qmul@125
|
2 * *
|
ivand_qmul@125
|
3 * SDL_ffmpeg is a library for basic multimedia functionality. *
|
ivand_qmul@125
|
4 * SDL_ffmpeg is based on ffmpeg. *
|
ivand_qmul@125
|
5 * *
|
ivand_qmul@125
|
6 * Copyright (C) 2007 Arjan Houben *
|
ivand_qmul@125
|
7 * *
|
ivand_qmul@125
|
8 * SDL_ffmpeg is free software: you can redistribute it and/or modify *
|
ivand_qmul@125
|
9 * it under the terms of the GNU Lesser General Public License as published *
|
ivand_qmul@125
|
10 * by the Free Software Foundation, either version 3 of the License, or any *
|
ivand_qmul@125
|
11 * later version. *
|
ivand_qmul@125
|
12 * *
|
ivand_qmul@125
|
13 * This program is distributed in the hope that it will be useful, *
|
ivand_qmul@125
|
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
|
ivand_qmul@125
|
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
|
ivand_qmul@125
|
16 * GNU Lesser General Public License for more details. *
|
ivand_qmul@125
|
17 * *
|
ivand_qmul@125
|
18 * You should have received a copy of the GNU Lesser General Public License *
|
ivand_qmul@125
|
19 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
|
ivand_qmul@125
|
20 * *
|
ivand_qmul@125
|
21 *******************************************************************************/
|
ivand_qmul@125
|
22
|
ivand_qmul@125
|
23 #include <stdio.h>
|
ivand_qmul@125
|
24 #include <stdlib.h>
|
ivand_qmul@125
|
25 #ifdef __cplusplus
|
ivand_qmul@125
|
26 extern "C" {
|
ivand_qmul@125
|
27 #endif
|
ivand_qmul@125
|
28 #ifdef WIN32
|
ivand_qmul@125
|
29 #include "SDL_ffmpeg.h"
|
ivand_qmul@125
|
30 #include <SDL.h>
|
ivand_qmul@125
|
31 #include <SDL_thread.h>
|
ivand_qmul@129
|
32 #include <stdio.h>
|
ivand_qmul@129
|
33 #include <Windows.h>
|
ivand_qmul@125
|
34 #endif
|
ivand_qmul@125
|
35
|
ivand_qmul@125
|
36 #ifdef __unix__
|
ivand_qmul@125
|
37 #include <SDL/SDL.h>
|
ivand_qmul@125
|
38 #include <SDL/SDL_thread.h>
|
ivand_qmul@125
|
39 #endif
|
benoitrigolleau@130
|
40 #undef main
|
ivand_qmul@125
|
41 #ifdef __cplusplus
|
ivand_qmul@125
|
42 }
|
ivand_qmul@125
|
43 #endif
|
ivand_qmul@125
|
44 #include "../../sv/main/MainWindow.h"
|
ivand_qmul@129
|
45 #include <time.h>
|
ivand_qmul@125
|
46
|
ivand_qmul@125
|
47 //const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
|
ivand_qmul@125
|
48 //const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
|
ivand_qmul@150
|
49 extern float zoomFivan;
|
ivand_qmul@125
|
50 int FFMPEG_init_was_called = 0;
|
ivand_qmul@129
|
51 FILE *pFile, *tFile;
|
ivand_qmul@129
|
52 int64_t Time,Time1;
|
ivand_qmul@129
|
53 int64_t realt=0;
|
ivand_qmul@129
|
54
|
ivand_qmul@125
|
55 SDL_ffmpegFile* SDL_ffmpegCreateFile() {
|
ivand_qmul@125
|
56
|
ivand_qmul@125
|
57 // create SDL_ffmpegFile pointer
|
ivand_qmul@125
|
58 SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
|
ivand_qmul@125
|
59 if(!file) return 0;
|
ivand_qmul@125
|
60 file->_ffmpeg=av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));
|
ivand_qmul@125
|
61 // create a semaphore for every file
|
ivand_qmul@125
|
62 file->decode = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
63
|
ivand_qmul@129
|
64 Time=0;
|
ivand_qmul@129
|
65 Time1=0;
|
ivand_qmul@129
|
66 fopen_s (&pFile,"myfile.txt","w");
|
ivand_qmul@129
|
67 fopen_s (&tFile,"Timestampfile.txt","w");
|
ivand_qmul@125
|
68 // allocate room for VStreams
|
ivand_qmul@125
|
69 file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
70 if(!file->vs) {
|
ivand_qmul@125
|
71 free( file );
|
ivand_qmul@125
|
72 return 0;
|
ivand_qmul@125
|
73 }
|
ivand_qmul@125
|
74
|
ivand_qmul@125
|
75 // allocate room for AStreams
|
ivand_qmul@125
|
76 file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
77 if(!file->as) {
|
ivand_qmul@125
|
78 free( file );
|
ivand_qmul@125
|
79 return 0;
|
ivand_qmul@125
|
80 }
|
ivand_qmul@125
|
81
|
ivand_qmul@125
|
82 // initialize variables with standard values
|
ivand_qmul@125
|
83 file->audioStream = -1;
|
ivand_qmul@125
|
84 file->videoStream = -1;
|
ivand_qmul@125
|
85
|
ivand_qmul@125
|
86 file->offset = 0;
|
ivand_qmul@125
|
87 file->videoOffset = 0;
|
ivand_qmul@125
|
88 file->startTime = 0;
|
ivand_qmul@125
|
89
|
ivand_qmul@125
|
90 file->threadID = 0;
|
ivand_qmul@125
|
91
|
ivand_qmul@125
|
92 return file;
|
ivand_qmul@125
|
93 }
|
ivand_qmul@125
|
94
|
ivand_qmul@125
|
// Tears down a file created by SDL_ffmpegCreateFile/SDL_ffmpegOpen:
// stops the decode thread, flushes buffered data, then frees the wrapper.
void SDL_ffmpegFree(SDL_ffmpegFile* file) {

    // join the background decode thread before touching shared state
    SDL_ffmpegStopDecoding(file);

    // drop buffered audio/video data
    SDL_ffmpegFlush(file);

    // NOTE(review): only the top-level struct is freed here; file->vs,
    // file->as, the per-stream objects and the semaphores appear to leak
    // unless SDL_ffmpegFlush releases them — verify.
    free(file);
}
|
ivand_qmul@125
|
103
|
ivand_qmul@125
|
104 SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {
|
ivand_qmul@125
|
105
|
ivand_qmul@125
|
106
|
ivand_qmul@125
|
107 // register all codecs
|
ivand_qmul@125
|
108 if(!FFMPEG_init_was_called) {
|
ivand_qmul@125
|
109 FFMPEG_init_was_called = 1;
|
ivand_qmul@125
|
110 av_register_all();
|
ivand_qmul@125
|
111 }
|
ivand_qmul@125
|
112
|
ivand_qmul@125
|
113 // open new ffmpegFile
|
ivand_qmul@125
|
114 SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
|
ivand_qmul@125
|
115 if(!file) return 0;
|
ivand_qmul@125
|
116
|
ivand_qmul@125
|
117 // information about format is stored in file->_ffmpeg
|
ivand_qmul@125
|
118
|
ivand_qmul@125
|
119 // open the file
|
ivand_qmul@125
|
120 if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
|
ivand_qmul@125
|
121 fprintf(stderr, "could not open \"%s\"\n", filename);
|
ivand_qmul@125
|
122 free(file);
|
ivand_qmul@125
|
123 return 0;
|
ivand_qmul@125
|
124 }
|
ivand_qmul@125
|
125
|
ivand_qmul@125
|
126 // retrieve format information
|
ivand_qmul@125
|
127 if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
|
ivand_qmul@125
|
128 fprintf(stderr, "could not retrieve video stream info");
|
ivand_qmul@125
|
129 free(file);
|
ivand_qmul@125
|
130 return 0;
|
ivand_qmul@125
|
131 }
|
ivand_qmul@125
|
132
|
ivand_qmul@125
|
133 // dump info to logfile
|
ivand_qmul@125
|
134 // dump_format(file->_ffmpeg, 0, filename, 0);
|
ivand_qmul@125
|
135
|
ivand_qmul@125
|
136 // find the streams in the file
|
ivand_qmul@125
|
137 file->VStreams = 0;
|
ivand_qmul@125
|
138 file->AStreams = 0;
|
ivand_qmul@125
|
139 file->threadActive = 0;
|
ivand_qmul@125
|
140
|
ivand_qmul@125
|
141 // iterate through all the streams and store audio/video streams
|
ivand_qmul@125
|
142 size_t i;
|
ivand_qmul@125
|
143 for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {
|
ivand_qmul@125
|
144
|
ivand_qmul@125
|
145 if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
|
ivand_qmul@125
|
146
|
ivand_qmul@125
|
147 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
148 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
149
|
ivand_qmul@125
|
150 if(stream) {
|
ivand_qmul@125
|
151 // we set our stream to zero
|
ivand_qmul@125
|
152 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
153
|
ivand_qmul@125
|
154 // save unique streamid
|
ivand_qmul@125
|
155 stream->id = i;
|
ivand_qmul@125
|
156
|
ivand_qmul@125
|
157 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
158 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
159
|
ivand_qmul@125
|
160 // save width, height and pixFmt of our outputframes
|
ivand_qmul@125
|
161 stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
|
ivand_qmul@125
|
162 stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
|
ivand_qmul@125
|
163 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
164
|
ivand_qmul@125
|
165 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
166 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
167
|
ivand_qmul@125
|
168 // get the correct decoder for this stream
|
ivand_qmul@125
|
169 AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);
|
ivand_qmul@125
|
170
|
ivand_qmul@125
|
171 if(!codec) {
|
ivand_qmul@125
|
172 free(stream);
|
ivand_qmul@125
|
173 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
174 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
175 free(stream);
|
ivand_qmul@125
|
176 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
177 } else {
|
ivand_qmul@125
|
178
|
ivand_qmul@125
|
179 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
180 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
181 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
182 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
183 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
184 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
185 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
186
|
ivand_qmul@125
|
187 stream->audio = 0;
|
ivand_qmul@125
|
188 stream->size = 0;
|
ivand_qmul@125
|
189 stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
|
ivand_qmul@129
|
190 stream->writeImage = 0;
|
ivand_qmul@129
|
191 stream->readImage = 0;
|
ivand_qmul@125
|
192 file->vs[file->VStreams] = stream;
|
ivand_qmul@125
|
193 file->VStreams++;
|
ivand_qmul@125
|
194
|
ivand_qmul@125
|
195 // create semaphore for thread-safe use
|
ivand_qmul@125
|
196 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
197 }
|
ivand_qmul@125
|
198 }
|
ivand_qmul@125
|
199 } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
|
ivand_qmul@125
|
200
|
ivand_qmul@125
|
201 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
202 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
203
|
ivand_qmul@125
|
204 if(stream) {
|
ivand_qmul@125
|
205 // we set our stream to zero
|
ivand_qmul@125
|
206 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
207
|
ivand_qmul@125
|
208 // save unique streamid
|
ivand_qmul@125
|
209 stream->id = i;
|
ivand_qmul@125
|
210
|
ivand_qmul@125
|
211 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
212 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
213
|
ivand_qmul@125
|
214 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
215 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
216
|
ivand_qmul@125
|
217 stream->width = 0;
|
ivand_qmul@125
|
218 stream->height = 0;
|
ivand_qmul@125
|
219 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
220
|
ivand_qmul@125
|
221 // get the correct decoder for this stream
|
ivand_qmul@125
|
222 AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);
|
ivand_qmul@125
|
223
|
ivand_qmul@125
|
224 if(!codec) {
|
ivand_qmul@125
|
225 free( stream );
|
ivand_qmul@125
|
226 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
227 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
228 free( stream );
|
ivand_qmul@125
|
229 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
230 } else {
|
ivand_qmul@125
|
231
|
ivand_qmul@125
|
232 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
233 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
234 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
235 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
236 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
237 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
238 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
239
|
ivand_qmul@125
|
240 stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
|
ivand_qmul@125
|
241 stream->size = 0;
|
ivand_qmul@125
|
242 stream->imageBuffer = 0;
|
ivand_qmul@125
|
243
|
ivand_qmul@125
|
244 file->as[file->AStreams] = stream;
|
ivand_qmul@125
|
245 file->AStreams++;
|
ivand_qmul@125
|
246
|
ivand_qmul@125
|
247 // create semaphore for thread-safe use
|
ivand_qmul@125
|
248 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
249 }
|
ivand_qmul@125
|
250 }
|
ivand_qmul@125
|
251 }
|
ivand_qmul@125
|
252 }
|
ivand_qmul@125
|
253
|
ivand_qmul@125
|
254 return file;
|
ivand_qmul@125
|
255 }
|
ivand_qmul@125
|
256
|
ivand_qmul@125
|
257 SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
258
|
ivand_qmul@125
|
259 MainWindow * MWinsA=MainWindow::instance();
|
ivand_qmul@125
|
260 if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;
|
ivand_qmul@125
|
261
|
ivand_qmul@125
|
262 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
263
|
ivand_qmul@125
|
264 bufferImage *option = 0;
|
ivand_qmul@129
|
265 //int i;
|
ivand_qmul@129
|
266 float ratio;
|
ivand_qmul@129
|
267 int64_t pos,pos1, pos2, timestamp;
|
ivand_qmul@129
|
268 //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@129
|
269 pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@125
|
270
|
lbajardsilogic@178
|
271 if (pFile)
|
lbajardsilogic@178
|
272 {
|
lbajardsilogic@178
|
273 fprintf (pFile, "p: \t %u\t", pos);
|
lbajardsilogic@178
|
274 }
|
ivand_qmul@129
|
275 //if (MWinsA->Get_HardwareBufferTime()==0)
|
ivand_qmul@129
|
276 // pos1=0;
|
ivand_qmul@129
|
277 //else {
|
ivand_qmul@129
|
278 // pos1=MWinsA->Get_HardwareBufferTime();
|
ivand_qmul@129
|
279 // //fprintf (tFile, "%u\t", pos1);
|
ivand_qmul@129
|
280 // int64_t timeTemp;
|
ivand_qmul@129
|
281 // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
|
ivand_qmul@129
|
282 //
|
ivand_qmul@129
|
283 // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
|
ivand_qmul@129
|
284 // fprintf (pFile, "%u\t", pos1);
|
ivand_qmul@129
|
285 //}
|
ivand_qmul@129
|
286 //pos2=pos+pos1;
|
lbajardsilogic@178
|
287 if (pFile)
|
lbajardsilogic@178
|
288 {
|
lbajardsilogic@178
|
289 fprintf (pFile, "%u\n", pos);
|
lbajardsilogic@178
|
290 }
|
lbajardsilogic@178
|
291
|
ivand_qmul@129
|
292 // if this entry does not exist, continue
|
ivand_qmul@129
|
293 while(((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage)>0)&&(file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000))//&& (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp >= pos - file->timebase+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000))
|
ivand_qmul@129
|
294 {
|
ivand_qmul@129
|
295 //pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@129
|
296 //timestamp=file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp;
|
ivand_qmul@129
|
297 //fprintf (tFile, "try: %d %d\n", (pos+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000), timestamp);
|
ivand_qmul@129
|
298 // do we have an image that should have been shown?
|
ivand_qmul@129
|
299 //if(file->vs[file->videoStream]->imageBuffer[mod(file->vs[file->videoStream]->readImage,SDL_FFMPEG_MAX_BUFFERED_FRAMES)]->timestamp <= pos + (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000) {
|
ivand_qmul@129
|
300
|
ivand_qmul@129
|
301 // if this is the first option we find, we simply save it
|
ivand_qmul@129
|
302 if(!option) {
|
ivand_qmul@125
|
303
|
ivand_qmul@129
|
304 option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
|
ivand_qmul@125
|
305
|
ivand_qmul@129
|
306 // set to 0 so we know this position in the buffer is available again
|
ivand_qmul@129
|
307 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
|
ivand_qmul@129
|
308 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@125
|
309
|
ivand_qmul@129
|
310 } else {
|
ivand_qmul@125
|
311
|
ivand_qmul@129
|
312 // we found a newer possible timestamp, we delete the older one
|
ivand_qmul@129
|
313 if( option->timestamp < file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp) {
|
ivand_qmul@125
|
314
|
ivand_qmul@129
|
315 // this image is too old, we discard it
|
ivand_qmul@129
|
316 SDL_FreeSurface( option->img );
|
ivand_qmul@125
|
317
|
ivand_qmul@129
|
318 // free old option
|
ivand_qmul@129
|
319 free( option );
|
ivand_qmul@125
|
320
|
ivand_qmul@129
|
321 // new pointer to position in container
|
ivand_qmul@129
|
322 option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
|
ivand_qmul@125
|
323
|
ivand_qmul@129
|
324 // set to 0 so we know this position in the buffer is available again
|
ivand_qmul@129
|
325 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
|
ivand_qmul@129
|
326 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@129
|
327 }
|
ivand_qmul@129
|
328 else {
|
ivand_qmul@129
|
329 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]=0;
|
ivand_qmul@129
|
330 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@129
|
331 }
|
ivand_qmul@129
|
332 }
|
ivand_qmul@125
|
333
|
ivand_qmul@129
|
334
|
ivand_qmul@129
|
335 pos=MWinsA->Get_CurAudioTime();
|
lbajardsilogic@178
|
336 if (pFile)
|
lbajardsilogic@178
|
337 {
|
lbajardsilogic@178
|
338 fprintf (pFile, "e:\t%u\t", pos);
|
lbajardsilogic@178
|
339 }
|
ivand_qmul@129
|
340 //if (MWinsA->Get_HardwareBufferTime()==0)
|
ivand_qmul@129
|
341 // pos1=0;
|
ivand_qmul@129
|
342 //else {
|
ivand_qmul@129
|
343 // pos1=MWinsA->Get_HardwareBufferTime();
|
ivand_qmul@129
|
344 // //fprintf (tFile, "%u\t", pos1);
|
ivand_qmul@129
|
345 // int64_t timeTemp;
|
ivand_qmul@129
|
346 // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
|
ivand_qmul@129
|
347
|
ivand_qmul@129
|
348 // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
|
ivand_qmul@129
|
349 // fprintf (pFile, "%u\t", pos1);
|
ivand_qmul@129
|
350 //}
|
ivand_qmul@129
|
351 //fprintf (pFile, "%u\n", pos2);
|
ivand_qmul@129
|
352 //pos2=pos+pos1;
|
ivand_qmul@129
|
353 //if (pos<pos2) pos=pos2;
|
ivand_qmul@129
|
354 }
|
ivand_qmul@129
|
355 //}
|
ivand_qmul@129
|
356 //}
|
ivand_qmul@129
|
357 int x=file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage;
|
ivand_qmul@125
|
358 // if we did not found an option, we exit
|
ivand_qmul@125
|
359 if(!option) {
|
ivand_qmul@125
|
360 // release the lock
|
ivand_qmul@129
|
361 /*timestamp=0;
|
ivand_qmul@129
|
362 int64_t tt=av_gettime()/1000-file->timer;
|
ivand_qmul@129
|
363 file->timer=av_gettime()/1000;
|
ivand_qmul@129
|
364 realt+=tt;
|
ivand_qmul@129
|
365 fprintf (tFile, "%u\t", realt);
|
ivand_qmul@129
|
366 fprintf (tFile, "%u\t", tt);
|
ivand_qmul@129
|
367 fprintf (tFile, "%u\t", pos);
|
ivand_qmul@129
|
368 fprintf (tFile, "%u\n", timestamp);*/
|
ivand_qmul@125
|
369 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
370 return 0;
|
ivand_qmul@125
|
371 }
|
ivand_qmul@129
|
372 int64_t tt;
|
ivand_qmul@129
|
373 QueryPerformanceCounter((LARGE_INTEGER *)(&tt));
|
ivand_qmul@129
|
374 tt=tt/(file->countFreq)-file->timer;
|
ivand_qmul@129
|
375
|
ivand_qmul@129
|
376 QueryPerformanceCounter((LARGE_INTEGER *)(&file->timer));
|
ivand_qmul@129
|
377 file->timer=file->timer/(file->countFreq);
|
ivand_qmul@129
|
378 realt+=tt;
|
ivand_qmul@129
|
379 fprintf (tFile, "%u\t", x);
|
ivand_qmul@129
|
380 fprintf (tFile, "%u\t", realt);
|
ivand_qmul@129
|
381 fprintf (tFile, "%u\t", tt);
|
ivand_qmul@138
|
382 timestamp=(pos-option->timestamp+((AVFormatContext*)file->_ffmpeg)->start_time/1000)/MWinsA->getPlaySpeedVal();
|
ivand_qmul@129
|
383 fprintf (tFile, "%u\t", pos);//+ (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000);
|
ivand_qmul@129
|
384 fprintf (tFile, "%d\n", timestamp);
|
ivand_qmul@125
|
385 // we did found an option, so we return the imagedata
|
ivand_qmul@125
|
386 return option->img;
|
ivand_qmul@125
|
387 }
|
ivand_qmul@125
|
388
|
ivand_qmul@125
|
389 int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {
|
ivand_qmul@125
|
390
|
ivand_qmul@125
|
391 // if there was no valid video stream, we should not release
|
ivand_qmul@125
|
392 if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;
|
ivand_qmul@125
|
393
|
ivand_qmul@125
|
394 // free surface
|
ivand_qmul@125
|
395 SDL_FreeSurface(bmp);
|
ivand_qmul@125
|
396
|
ivand_qmul@125
|
397 // release semaphore if needed
|
ivand_qmul@125
|
398 if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
|
ivand_qmul@125
|
399 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
400 }
|
ivand_qmul@125
|
401
|
ivand_qmul@125
|
402 return 0;
|
ivand_qmul@125
|
403 }
|
ivand_qmul@125
|
404
|
ivand_qmul@125
|
405 SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {
|
ivand_qmul@125
|
406
|
ivand_qmul@125
|
407 // check if we have any audiostreams
|
ivand_qmul@125
|
408 if(!file->AStreams) return 0;
|
ivand_qmul@125
|
409
|
ivand_qmul@125
|
410 // check if the requested id is possible
|
ivand_qmul@125
|
411 if(audioID >= file->AStreams) return 0;
|
ivand_qmul@125
|
412
|
ivand_qmul@125
|
413 // return ausiostream linked to audioID
|
ivand_qmul@125
|
414 return file->as[audioID];
|
ivand_qmul@125
|
415 }
|
ivand_qmul@125
|
416
|
ivand_qmul@125
|
417 int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {
|
ivand_qmul@125
|
418
|
ivand_qmul@125
|
419 // check if we have any audiostreams
|
ivand_qmul@125
|
420 if(!file->AStreams) return -1;
|
ivand_qmul@125
|
421
|
ivand_qmul@125
|
422 // check if the requested id is possible
|
ivand_qmul@125
|
423 if(audioID >= file->AStreams) return -1;
|
ivand_qmul@125
|
424
|
ivand_qmul@125
|
425 // set current audiostream to stream linked to audioID
|
ivand_qmul@125
|
426 file->audioStream = audioID;
|
ivand_qmul@125
|
427
|
ivand_qmul@125
|
428 return 0;
|
ivand_qmul@125
|
429 }
|
ivand_qmul@125
|
430
|
ivand_qmul@125
|
431 SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {
|
ivand_qmul@125
|
432
|
ivand_qmul@125
|
433 // check if we have any videostreams
|
ivand_qmul@125
|
434 if(!file->VStreams) return 0;
|
ivand_qmul@125
|
435
|
ivand_qmul@125
|
436 // check if the requested id is possible
|
ivand_qmul@125
|
437 if(videoID >= file->VStreams) return 0;
|
ivand_qmul@125
|
438
|
ivand_qmul@125
|
439 // return ausiostream linked to videoID
|
ivand_qmul@125
|
440 return file->vs[videoID];
|
ivand_qmul@125
|
441 }
|
ivand_qmul@125
|
442
|
ivand_qmul@125
|
443 int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {
|
ivand_qmul@125
|
444
|
ivand_qmul@125
|
445 // check if we have any videostreams
|
ivand_qmul@125
|
446 if(!file->VStreams) return -1;
|
ivand_qmul@125
|
447
|
ivand_qmul@125
|
448 // check if the requested id is possible
|
ivand_qmul@125
|
449 if(videoID >= file->VStreams) return -1;
|
ivand_qmul@125
|
450
|
ivand_qmul@125
|
451 // set current videostream to stream linked to videoID
|
ivand_qmul@125
|
452 file->videoStream = videoID;
|
ivand_qmul@125
|
453
|
ivand_qmul@125
|
454 return 0;
|
ivand_qmul@125
|
455 }
|
ivand_qmul@125
|
456
|
ivand_qmul@125
|
457 int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
458
|
ivand_qmul@125
|
459 // start a thread that continues to fill audio/video buffers
|
ivand_qmul@125
|
460 if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);
|
ivand_qmul@125
|
461
|
ivand_qmul@125
|
462 return 0;
|
ivand_qmul@125
|
463 }
|
ivand_qmul@125
|
464
|
ivand_qmul@125
|
465 int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
466
|
ivand_qmul@125
|
467 // stop decode thread
|
ivand_qmul@125
|
468 file->threadActive = 0;
|
ivand_qmul@125
|
469 if(file->threadID) SDL_WaitThread(file->threadID, 0);
|
ivand_qmul@125
|
470
|
ivand_qmul@125
|
471 // set threadID to zero, so we can check for concurrent threads
|
ivand_qmul@125
|
472 file->threadID = 0;
|
ivand_qmul@125
|
473
|
ivand_qmul@125
|
474 return -1;
|
ivand_qmul@125
|
475 }
|
ivand_qmul@125
|
476
|
ivand_qmul@125
|
477 int SDL_ffmpegDecodeThread(void* data) {
|
ivand_qmul@125
|
478 static struct SwsContext *img_convert_ctx;
|
ivand_qmul@125
|
479 // unpack the void pointer
|
ivand_qmul@125
|
480 SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;
|
ivand_qmul@125
|
481
|
ivand_qmul@125
|
482 // flag this thread as active, used for stopping
|
ivand_qmul@125
|
483 file->threadActive = 1;
|
ivand_qmul@125
|
484
|
ivand_qmul@125
|
485 // create a packet for our data
|
ivand_qmul@125
|
486 AVPacket pack;
|
ivand_qmul@125
|
487
|
ivand_qmul@125
|
488 // reserve some pointers for use in loop
|
ivand_qmul@125
|
489 AVFrame *inFrame, *inFrameRGB;
|
lbarthelemy@170
|
490 uint8_t *inVideoBuffer = NULL;
|
ivand_qmul@125
|
491
|
ivand_qmul@125
|
492 // allocate a frame
|
ivand_qmul@125
|
493 inFrame = avcodec_alloc_frame();
|
ivand_qmul@125
|
494
|
ivand_qmul@125
|
495 // allocate another frame for unknown->RGB conversion
|
ivand_qmul@125
|
496 inFrameRGB = avcodec_alloc_frame();
|
ivand_qmul@125
|
497
|
ivand_qmul@125
|
498 if(SDL_ffmpegValidVideo(file)) {
|
ivand_qmul@125
|
499 // allocate buffer
|
lbarthelemy@170
|
500 inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
501 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
502 file->vs[file->videoStream]->height) );
|
ivand_qmul@125
|
503
|
ivand_qmul@125
|
504 // put buffer into our reserved frame
|
ivand_qmul@125
|
505 avpicture_fill( (AVPicture*)inFrameRGB,
|
ivand_qmul@125
|
506 inVideoBuffer,
|
ivand_qmul@125
|
507 file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
508 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
509 file->vs[file->videoStream]->height);
|
ivand_qmul@125
|
510 }
|
ivand_qmul@125
|
511
|
ivand_qmul@125
|
512 // allocate temporary audiobuffer
|
ivand_qmul@125
|
513 int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );
|
ivand_qmul@125
|
514
|
ivand_qmul@125
|
515 // reserve integer for use in loop
|
ivand_qmul@125
|
516 int got_frame;
|
ivand_qmul@125
|
517
|
ivand_qmul@125
|
518 while(file->threadActive) {
|
ivand_qmul@125
|
519
|
ivand_qmul@125
|
520 // read a packet from the file
|
ivand_qmul@125
|
521 if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
|
ivand_qmul@125
|
522 // thread is idle
|
ivand_qmul@125
|
523 SDL_Delay(10);
|
ivand_qmul@125
|
524 continue;
|
ivand_qmul@125
|
525 }
|
ivand_qmul@125
|
526 if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
|
ivand_qmul@125
|
527 SDL_Delay(1);
|
ivand_qmul@125
|
528 continue;
|
ivand_qmul@125
|
529 }
|
ivand_qmul@125
|
530
|
ivand_qmul@125
|
531 // we got a packet, lets handle it
|
ivand_qmul@125
|
532
|
ivand_qmul@125
|
533 // let's start by entering the video semaphore
|
ivand_qmul@125
|
534 SDL_SemWait(file->decode);
|
ivand_qmul@125
|
535
|
ivand_qmul@125
|
536 // If it's a audio packet from our stream...
|
ivand_qmul@125
|
537 if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {
|
ivand_qmul@125
|
538
|
ivand_qmul@125
|
539 uint8_t *data = pack.data;
|
ivand_qmul@125
|
540 int size = pack.size;
|
ivand_qmul@125
|
541 int len;
|
ivand_qmul@125
|
542
|
ivand_qmul@125
|
543 while(size > 0 && file->threadActive) {
|
ivand_qmul@125
|
544
|
ivand_qmul@125
|
545 // Decode the packet
|
ivand_qmul@125
|
546 len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);
|
ivand_qmul@125
|
547
|
ivand_qmul@125
|
548 // if error, we skip the frame
|
ivand_qmul@125
|
549 if(len < 0 || !got_frame) {
|
ivand_qmul@125
|
550 size = 0;
|
ivand_qmul@125
|
551 break;
|
ivand_qmul@125
|
552 }
|
ivand_qmul@125
|
553
|
ivand_qmul@125
|
554 // change pointers
|
ivand_qmul@125
|
555 data += got_frame;
|
ivand_qmul@125
|
556 size -= got_frame;
|
ivand_qmul@125
|
557
|
ivand_qmul@125
|
558 // if the audiobuffer is full, the thread waits
|
ivand_qmul@125
|
559 while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
|
ivand_qmul@125
|
560 file->threadActive) {
|
ivand_qmul@125
|
561 SDL_Delay(5);
|
ivand_qmul@125
|
562 }
|
ivand_qmul@125
|
563
|
ivand_qmul@125
|
564 // write an audiopts
|
ivand_qmul@125
|
565 int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;
|
ivand_qmul@125
|
566
|
ivand_qmul@125
|
567 // is the audioBuffer is empty
|
ivand_qmul@125
|
568 if(!file->as[file->audioStream]->size) {
|
ivand_qmul@125
|
569
|
ivand_qmul@125
|
570 // we set a new pts
|
ivand_qmul@125
|
571 file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;
|
ivand_qmul@125
|
572
|
ivand_qmul@125
|
573 // we set totalbytes to zero, as this represents the amount
|
ivand_qmul@125
|
574 // of bytes that were played since our last 'hardPts'
|
ivand_qmul@125
|
575 file->as[file->audioStream]->totalBytes = 0;
|
ivand_qmul@125
|
576 }
|
ivand_qmul@125
|
577
|
ivand_qmul@125
|
578 // no need to store old samples
|
ivand_qmul@125
|
579 if(audiopts >= SDL_ffmpegGetPosition(file)) {
|
ivand_qmul@125
|
580
|
ivand_qmul@125
|
581 // enter audio semaphore
|
ivand_qmul@125
|
582 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
583
|
ivand_qmul@125
|
584 // copy data from temporary buffer to streambuffer
|
ivand_qmul@125
|
585 memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);
|
ivand_qmul@125
|
586
|
ivand_qmul@125
|
587 // set the new size of the audiobuffer
|
ivand_qmul@125
|
588 file->as[file->audioStream]->size += got_frame;
|
ivand_qmul@125
|
589
|
ivand_qmul@125
|
590 // we leave the audio semaphore
|
ivand_qmul@125
|
591 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
592 }
|
ivand_qmul@125
|
593 }
|
ivand_qmul@125
|
594 }
|
ivand_qmul@125
|
595
|
ivand_qmul@125
|
596 // If it's a video packet from our video stream...
|
ivand_qmul@125
|
597 if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {
|
ivand_qmul@125
|
598
|
ivand_qmul@125
|
599 got_frame = 0;
|
ivand_qmul@129
|
600 //Time1=av_gettime();
|
ivand_qmul@125
|
601 // Decode the packet
|
ivand_qmul@125
|
602 avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);
|
ivand_qmul@125
|
603
|
ivand_qmul@125
|
604 if(got_frame) {
|
ivand_qmul@125
|
605
|
ivand_qmul@125
|
606 // create imagebuffer
|
ivand_qmul@125
|
607 bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );
|
ivand_qmul@125
|
608
|
ivand_qmul@125
|
609 // write timestamp into the buffer
|
ivand_qmul@125
|
610 buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;
|
ivand_qmul@125
|
611
|
ivand_qmul@125
|
612 // usefull when dealing with B frames
|
ivand_qmul@125
|
613 if(pack.dts == AV_NOPTS_VALUE) {
|
ivand_qmul@125
|
614 // if we did not get a valid timestamp, we make one up based on the last
|
ivand_qmul@125
|
615 // valid timestamp + the duration of a frame
|
ivand_qmul@125
|
616 buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
|
ivand_qmul@125
|
617 }
|
ivand_qmul@125
|
618
|
ivand_qmul@125
|
619 // if new timestamp is from future, we proceed
|
ivand_qmul@125
|
620 // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
|
ivand_qmul@125
|
621 // {
|
benoitrigolleau@167
|
622 int w=(int)(zoomFivan*320+0.5);
|
benoitrigolleau@167
|
623 int h=(int)(zoomFivan*240+0.5);
|
lbarthelemy@170
|
624 //if ((w>file->vs[file->videoStream]->width)||(h>file->vs[file->videoStream]->height)){
|
lbarthelemy@170
|
625 // w=file->vs[file->videoStream]->width;
|
lbarthelemy@170
|
626 // h=file->vs[file->videoStream]->height;
|
lbarthelemy@170
|
627 //}
|
lbarthelemy@169
|
628 // Be sure we have a multiple of 4
|
lbarthelemy@169
|
629 w &= 0xFFFFFFFC;
|
lbarthelemy@169
|
630 h &= 0xFFFFFFFC;
|
ivand_qmul@150
|
631 if (img_convert_ctx == NULL) {
|
lbarthelemy@169
|
632
|
ivand_qmul@125
|
633 img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
|
ivand_qmul@125
|
634 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
|
ivand_qmul@150
|
635 w,h,
|
ivand_qmul@125
|
636 file->vs[file->videoStream]->pixFmt,
|
lbarthelemy@169
|
637 SWS_FAST_BILINEAR, NULL, NULL, NULL);
|
ivand_qmul@125
|
638 if (img_convert_ctx == NULL) {
|
ivand_qmul@125
|
639 fprintf(stderr, "Cannot initialize the conversion context\n");
|
ivand_qmul@125
|
640 exit(1);
|
ivand_qmul@125
|
641 }
|
ivand_qmul@125
|
642 }
|
lbarthelemy@170
|
643
|
lbarthelemy@170
|
644 // check to see if buffer is at the same size than the screen
|
lbarthelemy@170
|
645 if (inFrameRGB->linesize[0]/3 != w ) {
|
lbarthelemy@170
|
646 av_free(inFrameRGB);
|
lbarthelemy@170
|
647 free(inVideoBuffer);
|
lbarthelemy@170
|
648 //avcodec_default_release_buffer(img_convert_ctx , inFrameRGB);
|
lbarthelemy@170
|
649 inFrameRGB = avcodec_alloc_frame();
|
lbarthelemy@170
|
650 // allocate buffer
|
lbarthelemy@170
|
651 inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
|
lbarthelemy@170
|
652 w,
|
lbarthelemy@170
|
653 h) );
|
lbarthelemy@170
|
654
|
lbarthelemy@170
|
655 // put buffer into our reserved frame
|
lbarthelemy@170
|
656 avpicture_fill( (AVPicture*)inFrameRGB,
|
lbarthelemy@170
|
657 inVideoBuffer,
|
lbarthelemy@170
|
658 file->vs[file->videoStream]->pixFmt,
|
lbarthelemy@170
|
659 w,
|
lbarthelemy@170
|
660 h);
|
lbarthelemy@170
|
661
|
lbarthelemy@170
|
662 }
|
lbarthelemy@170
|
663
|
ivand_qmul@150
|
664 ((AVPicture*)inFrameRGB)->linesize[0]=(int)w*3;
|
lbarthelemy@170
|
665 sws_scale(img_convert_ctx,
|
lbarthelemy@170
|
666 ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize, 0, file->vs[file->videoStream]->height,
|
lbarthelemy@170
|
667 ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);
|
lbarthelemy@170
|
668
|
lbarthelemy@170
|
669 sws_freeContext(img_convert_ctx);
|
lbarthelemy@170
|
670 img_convert_ctx=NULL;
|
lbarthelemy@170
|
671
|
lbarthelemy@170
|
672 // we convert whatever type of data we got to RGB24
|
ivand_qmul@125
|
673 /* img_convert((AVPicture*)inFrameRGB,
|
ivand_qmul@125
|
674 file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
675 (AVPicture*)inFrame,
|
ivand_qmul@125
|
676 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
|
ivand_qmul@125
|
677 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
678 file->vs[file->videoStream]->height);
|
ivand_qmul@125
|
679 */
|
lbarthelemy@170
|
680
|
ivand_qmul@125
|
681 // allocate image room
|
ivand_qmul@125
|
682 buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
|
ivand_qmul@150
|
683 w,
|
ivand_qmul@150
|
684 h,
|
ivand_qmul@125
|
685 24, 0x0000FF, 0x00FF00, 0xFF0000, 0);
|
ivand_qmul@125
|
686 // copy image data to image room
|
ivand_qmul@125
|
687 memcpy(buf->img->pixels, inFrameRGB->data[0],
|
ivand_qmul@150
|
688 w*h* 3);
|
ivand_qmul@129
|
689 file->timebase=buf->timestamp-file->vs[file->videoStream]->lastTimeStamp;
|
ivand_qmul@125
|
690 // we write the lastTimestamp we got
|
ivand_qmul@125
|
691 file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;
|
ivand_qmul@125
|
692
|
ivand_qmul@129
|
693 //int i;
|
ivand_qmul@125
|
694 int again = 1;
|
ivand_qmul@129
|
695 //Time=av_gettime()-Time1;
|
ivand_qmul@125
|
696
|
ivand_qmul@129
|
697 //fprintf (pFile, "%d \n",Time);
|
ivand_qmul@125
|
698 // keep trying to fit in buffer, until the data was actually placed in the buffer
|
ivand_qmul@125
|
699 while(again && file->threadActive) {
|
ivand_qmul@125
|
700
|
ivand_qmul@125
|
701 // we enter the video semaphore
|
ivand_qmul@125
|
702 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
703
|
ivand_qmul@125
|
704 // loop through all positions in buffer until an empty
|
ivand_qmul@125
|
705 // space was found
|
ivand_qmul@129
|
706 //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@125
|
707 // if this place in the buffer is empty we write our new frame
|
ivand_qmul@129
|
708 if((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) < SDL_FFMPEG_MAX_BUFFERED_FRAMES) {
|
ivand_qmul@129
|
709 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->writeImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = buf;
|
ivand_qmul@129
|
710 file->vs[file->videoStream]->writeImage++;
|
ivand_qmul@125
|
711 // we placed our image in the buffer, moving on
|
ivand_qmul@125
|
712 again = 0;
|
ivand_qmul@129
|
713
|
ivand_qmul@125
|
714 }
|
ivand_qmul@129
|
715 //}
|
ivand_qmul@125
|
716
|
ivand_qmul@125
|
717 // we leave the video semaphore
|
ivand_qmul@125
|
718 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@129
|
719
|
ivand_qmul@125
|
720 // frames aren't being release every ms, so we can take some
|
ivand_qmul@125
|
721 // time before we try and fit our new image again
|
ivand_qmul@129
|
722 if(again)
|
ivand_qmul@129
|
723 {
|
ivand_qmul@129
|
724 SDL_SemPost(file->decode);
|
ivand_qmul@129
|
725 SDL_Delay(3);
|
ivand_qmul@129
|
726 SDL_SemWait(file->decode);
|
ivand_qmul@129
|
727 }
|
ivand_qmul@125
|
728 }
|
ivand_qmul@125
|
729 // }
|
ivand_qmul@125
|
730 //else {
|
ivand_qmul@125
|
731 // // if our decoded frame was too old, we don't bother putting
|
ivand_qmul@125
|
732 // // it in our buffer
|
ivand_qmul@125
|
733 // free( buf );
|
ivand_qmul@125
|
734 // }
|
ivand_qmul@125
|
735 }
|
ivand_qmul@125
|
736 }
|
ivand_qmul@125
|
737 // we leave the decode semaphore
|
ivand_qmul@125
|
738 SDL_SemPost(file->decode);
|
ivand_qmul@125
|
739 if ((file->skipAudio)&&(file->delay))
|
ivand_qmul@125
|
740 SDL_Delay(3);
|
ivand_qmul@125
|
741 }
|
lbarthelemy@170
|
742
|
lbarthelemy@170
|
743 if (inVideoBuffer)
|
lbarthelemy@170
|
744 {
|
lbarthelemy@170
|
745 free(inVideoBuffer);
|
lbarthelemy@170
|
746 inVideoBuffer = NULL;
|
lbarthelemy@170
|
747 }
|
lbarthelemy@170
|
748
|
ivand_qmul@125
|
749 // if we stop this thread, we can release the packet we reserved
|
ivand_qmul@125
|
750 av_free_packet(&pack);
|
lbarthelemy@170
|
751 free(samples);
|
lbarthelemy@170
|
752 av_free(inFrameRGB);
|
lbarthelemy@170
|
753 av_free(inFrame);
|
ivand_qmul@125
|
754
|
ivand_qmul@125
|
755 return 0;
|
ivand_qmul@125
|
756 }
|
ivand_qmul@125
|
757
|
ivand_qmul@125
|
758 int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {
|
ivand_qmul@125
|
759
|
ivand_qmul@125
|
760 // if the seekposition is out of bounds, return
|
ivand_qmul@125
|
761 if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;
|
ivand_qmul@125
|
762
|
ivand_qmul@125
|
763 // start by flushing the buffers
|
ivand_qmul@125
|
764 SDL_ffmpegFlush(file);
|
ivand_qmul@125
|
765
|
ivand_qmul@125
|
766 // we enter the decode semaphore so the decode thread cannot be working on
|
ivand_qmul@125
|
767 // data we are trying to flush
|
ivand_qmul@125
|
768 SDL_SemWait(file->decode);
|
ivand_qmul@125
|
769
|
ivand_qmul@125
|
770 // if the stream has an offset, add it to the start time
|
ivand_qmul@125
|
771 int64_t startOffset = 0;
|
ivand_qmul@125
|
772 if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
|
ivand_qmul@125
|
773 // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
|
ivand_qmul@125
|
774 startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
|
ivand_qmul@125
|
775 }
|
ivand_qmul@125
|
776 //if (file->skipAudio) startOffset=0;
|
ivand_qmul@125
|
777 // calculate the final timestamp for the seek action this is in AV_TIME_BASE fractional seconds
|
ivand_qmul@125
|
778 startOffset += (timestamp * AV_TIME_BASE) / 1000;
|
ivand_qmul@125
|
779
|
ivand_qmul@125
|
780 // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
|
ivand_qmul@125
|
781 // closest to the point we want, resulting in an earlier position if the jump
|
ivand_qmul@125
|
782 // could not go the the exaxt point we wanted
|
ivand_qmul@125
|
783 if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
|
ivand_qmul@125
|
784 SDL_Delay(5);
|
ivand_qmul@125
|
785 // set some values in our file so we now were to start playing
|
ivand_qmul@125
|
786 file->offset = timestamp;
|
ivand_qmul@125
|
787 file->startTime = av_gettime()/1000;//SDL_GetTicks();
|
ivand_qmul@125
|
788
|
ivand_qmul@125
|
789 // if we have a valid video, we probably have some data we want to flush
|
ivand_qmul@125
|
790 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
791
|
ivand_qmul@125
|
792 // flushing happens inside the semaphore as not to interfere with the
|
ivand_qmul@125
|
793 // decoding thread
|
ivand_qmul@125
|
794 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
795 avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
|
ivand_qmul@125
|
796 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
797 }
|
ivand_qmul@125
|
798
|
ivand_qmul@125
|
799 // same goes for audio, if there is data, we flush is
|
ivand_qmul@125
|
800 if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
|
ivand_qmul@125
|
801
|
ivand_qmul@125
|
802 // make sure this is done thread-save, so inside the appropriate
|
ivand_qmul@125
|
803 // semaphore
|
ivand_qmul@125
|
804 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
805 avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
|
ivand_qmul@125
|
806 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
807 }
|
ivand_qmul@125
|
808
|
ivand_qmul@125
|
809 // then there is our flush call
|
ivand_qmul@125
|
810 SDL_ffmpegFlush(file);
|
ivand_qmul@125
|
811
|
ivand_qmul@125
|
812 // and we are done, lets release the decode semaphore so the decode
|
ivand_qmul@125
|
813 // thread can move on, filling buffer from our new position
|
ivand_qmul@125
|
814 SDL_SemPost(file->decode);
|
ivand_qmul@125
|
815
|
ivand_qmul@125
|
816 return 0;
|
ivand_qmul@125
|
817 }
|
ivand_qmul@125
|
818
|
ivand_qmul@125
|
819 // if, for some reason, we could not seek, we still should flush our buffers
|
ivand_qmul@125
|
820 SDL_ffmpegFlush(file);
|
ivand_qmul@125
|
821
|
ivand_qmul@125
|
822 // and release our lock on the decodethread
|
ivand_qmul@125
|
823 SDL_SemPost(file->decode);
|
ivand_qmul@125
|
824
|
ivand_qmul@125
|
825 return -1;
|
ivand_qmul@125
|
826 }
|
ivand_qmul@125
|
827
|
ivand_qmul@125
|
828 int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {
|
ivand_qmul@125
|
829
|
ivand_qmul@125
|
830 // same thing as normal seek, just take into account the current position
|
ivand_qmul@125
|
831 return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
|
ivand_qmul@125
|
832 }
|
ivand_qmul@125
|
833
|
ivand_qmul@125
|
834 int SDL_ffmpegFlush(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
835
|
ivand_qmul@125
|
836 // if we have a valid audio stream, we flush is
|
ivand_qmul@125
|
837 if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
|
ivand_qmul@125
|
838
|
ivand_qmul@125
|
839 // flush audiobuffer from semaphore, be thread-safe!
|
ivand_qmul@125
|
840 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
841
|
ivand_qmul@125
|
842 file->as[file->audioStream]->size = 0;
|
ivand_qmul@125
|
843
|
ivand_qmul@125
|
844 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
845 }
|
ivand_qmul@125
|
846
|
ivand_qmul@125
|
847 // if we have a valid video stream, we flush some more
|
ivand_qmul@125
|
848 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
849
|
ivand_qmul@125
|
850 // flush videobuffer
|
ivand_qmul@125
|
851 int i;
|
ivand_qmul@125
|
852
|
ivand_qmul@125
|
853 // again, be thread safe!
|
ivand_qmul@125
|
854 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
855
|
ivand_qmul@125
|
856 // make sure we delete all frames from buffer
|
ivand_qmul@125
|
857 for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@125
|
858
|
ivand_qmul@125
|
859 // if this entry does not exist, continue
|
ivand_qmul@125
|
860 if(!file->vs[file->videoStream]->imageBuffer[i]) continue;
|
ivand_qmul@125
|
861
|
ivand_qmul@125
|
862 // free the actual image data
|
ivand_qmul@125
|
863 SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );
|
ivand_qmul@125
|
864
|
ivand_qmul@125
|
865 // and free the struct containing it
|
ivand_qmul@125
|
866 free( file->vs[file->videoStream]->imageBuffer[i] );
|
ivand_qmul@125
|
867
|
ivand_qmul@125
|
868 // set position in buffer to 0, so we know it is empty
|
ivand_qmul@125
|
869 file->vs[file->videoStream]->imageBuffer[i] = 0;
|
ivand_qmul@125
|
870 }
|
ivand_qmul@129
|
871 file->vs[file->videoStream]->writeImage=0;
|
ivand_qmul@129
|
872 file->vs[file->videoStream]->readImage=0;
|
ivand_qmul@125
|
873 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
874 }
|
ivand_qmul@125
|
875
|
ivand_qmul@125
|
876 return 0;
|
ivand_qmul@125
|
877 }
|
ivand_qmul@125
|
878
|
ivand_qmul@125
|
// Hand the caller a pointer to the decoded audio buffer, clamping *len to
// the number of bytes actually available.
// NOTE: on a successful return the audio semaphore is STILL HELD; the caller
// must call SDL_ffmpegReleaseAudio() afterwards to release it.
int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // no valid audio stream, paused, or audio skipped: nothing to hand out
    if( !SDL_ffmpegValidAudio(file) || file->pause||file->skipAudio ) return 0;

    // working on the audiobuffer should always be done from the semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

    // if more audiodata is requested than is buffered, clamp *len to what
    // we can actually deliver
    if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

    // decrease the size of our audiobuffer by len
    file->as[file->audioStream]->size -= *len;

    // len is the number of bytes handed out, so add it to the running total
    // (totalBytes counts bytes played since the last hardPts)
    file->as[file->audioStream]->totalBytes += *len;

    // videoOffset keeps video in sync with the audio: it is the difference
    // between where we are in the stream (GetPosition) and where the audio
    // says we should be (pts); the same offset is used when selecting the
    // current videoframe
    file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

    // recompute the audio pts from hardPts (the last pts received from
    // ffmpeg) plus the time represented by the bytes played since then
    file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
    // 16-bit samples: divide totalBytes by 2 (and by channel count) to get
    // sample frames, then by sampleRate/1000 to get milliseconds
    file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);

    // return the audiobuffer; note we are still inside the audio semaphore,
    // which is only left by calling SDL_ffmpegReleaseAudio
    return file->as[file->audioStream]->audio;
}
|
ivand_qmul@125
|
914
|
ivand_qmul@125
|
// Release the audio buffer previously obtained with SDL_ffmpegGetAudio().
// Must be paired with that call: len is the byte count GetAudio wrote into
// *len, and this function releases the audio semaphore GetAudio acquired.
int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {

    // no audio stream selected, or audio skipped: nothing to release
    if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;

    // shift the unconsumed remainder to the front of the buffer;
    // size was already reduced by len inside SDL_ffmpegGetAudio, so it is
    // exactly the number of leftover bytes (regions may overlap -> memmove)
    memmove( file->as[file->audioStream]->audio,
             file->as[file->audioStream]->audio+len,
             file->as[file->audioStream]->size );

    // work on the audiodata is done, so release the semaphore taken by
    // SDL_ffmpegGetAudio
    SDL_SemPost(file->as[file->audioStream]->sem);

    return 0;
}
|
ivand_qmul@125
|
931
|
ivand_qmul@125
|
932 int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
933 //MainWindow * MWinsA=MainWindow::instance();
|
ivand_qmul@125
|
934
|
ivand_qmul@125
|
935 if (file->skipAudio){
|
ivand_qmul@125
|
936 return (av_gettime()/1000+ file->offset - file->startTime);
|
ivand_qmul@125
|
937 //int64_t pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@125
|
938 //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
|
ivand_qmul@125
|
939 }
|
ivand_qmul@125
|
940 else
|
ivand_qmul@125
|
941 return (av_gettime()/1000+ file->offset - file->startTime);//SDL_GetTicks();
|
ivand_qmul@125
|
942 // return the current playposition of our file
|
ivand_qmul@125
|
943
|
ivand_qmul@125
|
944 }
|
ivand_qmul@125
|
945
|
ivand_qmul@125
|
946 SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {
|
ivand_qmul@125
|
947
|
ivand_qmul@125
|
948 // create audio spec
|
ivand_qmul@125
|
949 SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );
|
ivand_qmul@125
|
950
|
ivand_qmul@125
|
951 if(spec) {
|
ivand_qmul@125
|
952 spec->format = AUDIO_S16SYS;
|
ivand_qmul@125
|
953 spec->samples = samples;
|
ivand_qmul@125
|
954 spec->userdata = file;
|
ivand_qmul@125
|
955 spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
|
ivand_qmul@125
|
956 spec->freq = 48000;
|
ivand_qmul@125
|
957 spec->channels = 2;
|
ivand_qmul@125
|
958
|
ivand_qmul@125
|
959 // if we have a valid audiofile, we can use its data to create a
|
ivand_qmul@125
|
960 // more appropriate audio spec
|
ivand_qmul@125
|
961 if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
|
ivand_qmul@125
|
962 spec->freq = file->as[file->audioStream]->sampleRate;
|
ivand_qmul@125
|
963 spec->channels = file->as[file->audioStream]->channels;
|
ivand_qmul@125
|
964 }
|
ivand_qmul@125
|
965 }
|
ivand_qmul@125
|
966
|
ivand_qmul@125
|
967 return spec;
|
ivand_qmul@125
|
968 }
|
ivand_qmul@125
|
969
|
ivand_qmul@125
|
970 int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
971
|
ivand_qmul@125
|
972 // returns the duration of the entire file, please note that ffmpeg doesn't
|
ivand_qmul@125
|
973 // always get this value right! so don't bet your life on it...
|
ivand_qmul@125
|
974 return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
|
ivand_qmul@125
|
975 }
|
ivand_qmul@125
|
976
|
ivand_qmul@125
|
977 int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {
|
ivand_qmul@125
|
978
|
ivand_qmul@125
|
979 if(!w || !h) return -1;
|
ivand_qmul@125
|
980
|
ivand_qmul@125
|
981 // if we have a valid video file selected, we use it
|
ivand_qmul@125
|
982 // if not, we send default values and return.
|
ivand_qmul@125
|
983 // by checking the return value you can check if you got a valid size
|
ivand_qmul@125
|
984 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
985 *w = file->vs[file->videoStream]->width;
|
ivand_qmul@125
|
986 *h = file->vs[file->videoStream]->height;
|
ivand_qmul@125
|
987 return 0;
|
ivand_qmul@125
|
988 }
|
ivand_qmul@125
|
989
|
ivand_qmul@125
|
990 *w = 320;
|
ivand_qmul@125
|
991 *h = 240;
|
ivand_qmul@125
|
992 return -1;
|
ivand_qmul@125
|
993 }
|
ivand_qmul@125
|
994
|
ivand_qmul@125
|
995 int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
996
|
ivand_qmul@125
|
997 // this function is used to check if we selected a valid audio stream
|
ivand_qmul@125
|
998 if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;
|
ivand_qmul@125
|
999
|
ivand_qmul@125
|
1000 return 1;
|
ivand_qmul@125
|
1001 }
|
ivand_qmul@125
|
1002
|
ivand_qmul@125
|
1003 int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
1004
|
ivand_qmul@125
|
1005 // this function is used to check if we selected a valid video stream
|
ivand_qmul@125
|
1006 if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;
|
ivand_qmul@125
|
1007
|
ivand_qmul@125
|
1008 return 1;
|
ivand_qmul@125
|
1009 }
|
ivand_qmul@125
|
1010
|
ivand_qmul@125
|
1011 int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {
|
ivand_qmul@125
|
1012
|
ivand_qmul@125
|
1013 // by putting 0 into state, we play the file
|
ivand_qmul@125
|
1014 // this behaviour is analogue to SDL audio
|
ivand_qmul@125
|
1015 file->pause = state;
|
ivand_qmul@125
|
1016
|
ivand_qmul@125
|
1017 if(!file->pause) {
|
ivand_qmul@125
|
1018 file->startTime = av_gettime()/1000;//SDL_GetTicks();
|
ivand_qmul@125
|
1019 }
|
ivand_qmul@125
|
1020
|
ivand_qmul@125
|
1021 return 0;
|
ivand_qmul@125
|
1022 }
|
ivand_qmul@125
|
1023
|
ivand_qmul@125
|
1024 int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
1025 return file->pause;
|
ivand_qmul@125
|
1026 }
|