ivand_qmul@125
|
1 /*******************************************************************************
|
ivand_qmul@125
|
2 * *
|
ivand_qmul@125
|
3 * SDL_ffmpeg is a library for basic multimedia functionality. *
|
ivand_qmul@125
|
4 * SDL_ffmpeg is based on ffmpeg. *
|
ivand_qmul@125
|
5 * *
|
ivand_qmul@125
|
6 * Copyright (C) 2007 Arjan Houben *
|
ivand_qmul@125
|
7 * *
|
ivand_qmul@125
|
8 * SDL_ffmpeg is free software: you can redistribute it and/or modify *
|
ivand_qmul@125
|
9 * it under the terms of the GNU Lesser General Public License as published *
|
ivand_qmul@125
|
10 * by the Free Software Foundation, either version 3 of the License, or any *
|
ivand_qmul@125
|
11 * later version. *
|
ivand_qmul@125
|
12 * *
|
ivand_qmul@125
|
13 * This program is distributed in the hope that it will be useful, *
|
ivand_qmul@125
|
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
|
ivand_qmul@125
|
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
|
ivand_qmul@125
|
16 * GNU Lesser General Public License for more details. *
|
ivand_qmul@125
|
17 * *
|
ivand_qmul@125
|
18 * You should have received a copy of the GNU Lesser General Public License *
|
ivand_qmul@125
|
19 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
|
ivand_qmul@125
|
20 * *
|
ivand_qmul@125
|
21 *******************************************************************************/
|
ivand_qmul@125
|
22
|
ivand_qmul@125
|
23 #include <stdio.h>
|
ivand_qmul@125
|
24 #include <stdlib.h>
|
ivand_qmul@125
|
25 #ifdef __cplusplus
|
ivand_qmul@125
|
26 extern "C" {
|
ivand_qmul@125
|
27 #endif
|
ivand_qmul@125
|
28 #ifdef WIN32
|
ivand_qmul@125
|
29 #include "SDL_ffmpeg.h"
|
ivand_qmul@125
|
30 #include <SDL.h>
|
ivand_qmul@125
|
31 #include <SDL_thread.h>
|
ivand_qmul@125
|
32 #endif
|
ivand_qmul@125
|
33
|
ivand_qmul@125
|
34 #ifdef __unix__
|
ivand_qmul@125
|
35 #include <SDL/SDL.h>
|
ivand_qmul@125
|
36 #include <SDL/SDL_thread.h>
|
ivand_qmul@125
|
37 #endif
|
ivand_qmul@125
|
38 #ifdef __cplusplus
|
ivand_qmul@125
|
39 }
|
ivand_qmul@125
|
40 #endif
|
ivand_qmul@125
|
41 #include "../../sv/main/MainWindow.h"
|
ivand_qmul@125
|
42
|
ivand_qmul@125
|
43 //const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
|
ivand_qmul@125
|
44 //const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
|
ivand_qmul@125
|
45
|
ivand_qmul@125
|
// one-shot guard: set on the first call to SDL_ffmpegOpen so that
// av_register_all() is only ever executed once per process
int FFMPEG_init_was_called = 0;
|
ivand_qmul@125
|
47
|
ivand_qmul@125
|
48 SDL_ffmpegFile* SDL_ffmpegCreateFile() {
|
ivand_qmul@125
|
49
|
ivand_qmul@125
|
50 // create SDL_ffmpegFile pointer
|
ivand_qmul@125
|
51 SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
|
ivand_qmul@125
|
52 if(!file) return 0;
|
ivand_qmul@125
|
53 file->_ffmpeg=av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));
|
ivand_qmul@125
|
54 // create a semaphore for every file
|
ivand_qmul@125
|
55 file->decode = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
56
|
ivand_qmul@125
|
57 // allocate room for VStreams
|
ivand_qmul@125
|
58 file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
59 if(!file->vs) {
|
ivand_qmul@125
|
60 free( file );
|
ivand_qmul@125
|
61 return 0;
|
ivand_qmul@125
|
62 }
|
ivand_qmul@125
|
63
|
ivand_qmul@125
|
64 // allocate room for AStreams
|
ivand_qmul@125
|
65 file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
66 if(!file->as) {
|
ivand_qmul@125
|
67 free( file );
|
ivand_qmul@125
|
68 return 0;
|
ivand_qmul@125
|
69 }
|
ivand_qmul@125
|
70
|
ivand_qmul@125
|
71 // initialize variables with standard values
|
ivand_qmul@125
|
72 file->audioStream = -1;
|
ivand_qmul@125
|
73 file->videoStream = -1;
|
ivand_qmul@125
|
74
|
ivand_qmul@125
|
75 file->offset = 0;
|
ivand_qmul@125
|
76 file->videoOffset = 0;
|
ivand_qmul@125
|
77 file->startTime = 0;
|
ivand_qmul@125
|
78
|
ivand_qmul@125
|
79 file->threadID = 0;
|
ivand_qmul@125
|
80
|
ivand_qmul@125
|
81 return file;
|
ivand_qmul@125
|
82 }
|
ivand_qmul@125
|
83
|
ivand_qmul@125
|
84 void SDL_ffmpegFree(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
85
|
ivand_qmul@125
|
86 SDL_ffmpegStopDecoding(file);
|
ivand_qmul@125
|
87
|
ivand_qmul@125
|
88 SDL_ffmpegFlush(file);
|
ivand_qmul@125
|
89
|
ivand_qmul@125
|
90 free(file);
|
ivand_qmul@125
|
91 }
|
ivand_qmul@125
|
92
|
ivand_qmul@125
|
93 SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {
|
ivand_qmul@125
|
94
|
ivand_qmul@125
|
95
|
ivand_qmul@125
|
96 // register all codecs
|
ivand_qmul@125
|
97 if(!FFMPEG_init_was_called) {
|
ivand_qmul@125
|
98 FFMPEG_init_was_called = 1;
|
ivand_qmul@125
|
99 av_register_all();
|
ivand_qmul@125
|
100 }
|
ivand_qmul@125
|
101
|
ivand_qmul@125
|
102 // open new ffmpegFile
|
ivand_qmul@125
|
103 SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
|
ivand_qmul@125
|
104 if(!file) return 0;
|
ivand_qmul@125
|
105
|
ivand_qmul@125
|
106 // information about format is stored in file->_ffmpeg
|
ivand_qmul@125
|
107
|
ivand_qmul@125
|
108 // open the file
|
ivand_qmul@125
|
109 if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
|
ivand_qmul@125
|
110 fprintf(stderr, "could not open \"%s\"\n", filename);
|
ivand_qmul@125
|
111 free(file);
|
ivand_qmul@125
|
112 return 0;
|
ivand_qmul@125
|
113 }
|
ivand_qmul@125
|
114
|
ivand_qmul@125
|
115 // retrieve format information
|
ivand_qmul@125
|
116 if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
|
ivand_qmul@125
|
117 fprintf(stderr, "could not retrieve video stream info");
|
ivand_qmul@125
|
118 free(file);
|
ivand_qmul@125
|
119 return 0;
|
ivand_qmul@125
|
120 }
|
ivand_qmul@125
|
121
|
ivand_qmul@125
|
122 // dump info to logfile
|
ivand_qmul@125
|
123 // dump_format(file->_ffmpeg, 0, filename, 0);
|
ivand_qmul@125
|
124
|
ivand_qmul@125
|
125 // find the streams in the file
|
ivand_qmul@125
|
126 file->VStreams = 0;
|
ivand_qmul@125
|
127 file->AStreams = 0;
|
ivand_qmul@125
|
128 file->threadActive = 0;
|
ivand_qmul@125
|
129
|
ivand_qmul@125
|
130 // iterate through all the streams and store audio/video streams
|
ivand_qmul@125
|
131 size_t i;
|
ivand_qmul@125
|
132 for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {
|
ivand_qmul@125
|
133
|
ivand_qmul@125
|
134 if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
|
ivand_qmul@125
|
135
|
ivand_qmul@125
|
136 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
137 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
138
|
ivand_qmul@125
|
139 if(stream) {
|
ivand_qmul@125
|
140 // we set our stream to zero
|
ivand_qmul@125
|
141 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
142
|
ivand_qmul@125
|
143 // save unique streamid
|
ivand_qmul@125
|
144 stream->id = i;
|
ivand_qmul@125
|
145
|
ivand_qmul@125
|
146 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
147 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
148
|
ivand_qmul@125
|
149 // save width, height and pixFmt of our outputframes
|
ivand_qmul@125
|
150 stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
|
ivand_qmul@125
|
151 stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
|
ivand_qmul@125
|
152 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
153
|
ivand_qmul@125
|
154 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
155 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
156
|
ivand_qmul@125
|
157 // get the correct decoder for this stream
|
ivand_qmul@125
|
158 AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);
|
ivand_qmul@125
|
159
|
ivand_qmul@125
|
160 if(!codec) {
|
ivand_qmul@125
|
161 free(stream);
|
ivand_qmul@125
|
162 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
163 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
164 free(stream);
|
ivand_qmul@125
|
165 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
166 } else {
|
ivand_qmul@125
|
167
|
ivand_qmul@125
|
168 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
169 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
170 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
171 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
172 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
173 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
174 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
175
|
ivand_qmul@125
|
176 stream->audio = 0;
|
ivand_qmul@125
|
177 stream->size = 0;
|
ivand_qmul@125
|
178 stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
|
ivand_qmul@125
|
179
|
ivand_qmul@125
|
180 file->vs[file->VStreams] = stream;
|
ivand_qmul@125
|
181 file->VStreams++;
|
ivand_qmul@125
|
182
|
ivand_qmul@125
|
183 // create semaphore for thread-safe use
|
ivand_qmul@125
|
184 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
185 }
|
ivand_qmul@125
|
186 }
|
ivand_qmul@125
|
187 } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
|
ivand_qmul@125
|
188
|
ivand_qmul@125
|
189 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
190 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
191
|
ivand_qmul@125
|
192 if(stream) {
|
ivand_qmul@125
|
193 // we set our stream to zero
|
ivand_qmul@125
|
194 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
195
|
ivand_qmul@125
|
196 // save unique streamid
|
ivand_qmul@125
|
197 stream->id = i;
|
ivand_qmul@125
|
198
|
ivand_qmul@125
|
199 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
200 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
201
|
ivand_qmul@125
|
202 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
203 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
204
|
ivand_qmul@125
|
205 stream->width = 0;
|
ivand_qmul@125
|
206 stream->height = 0;
|
ivand_qmul@125
|
207 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
208
|
ivand_qmul@125
|
209 // get the correct decoder for this stream
|
ivand_qmul@125
|
210 AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);
|
ivand_qmul@125
|
211
|
ivand_qmul@125
|
212 if(!codec) {
|
ivand_qmul@125
|
213 free( stream );
|
ivand_qmul@125
|
214 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
215 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
216 free( stream );
|
ivand_qmul@125
|
217 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
218 } else {
|
ivand_qmul@125
|
219
|
ivand_qmul@125
|
220 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
221 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
222 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
223 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
224 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
225 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
226 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
227
|
ivand_qmul@125
|
228 stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
|
ivand_qmul@125
|
229 stream->size = 0;
|
ivand_qmul@125
|
230 stream->imageBuffer = 0;
|
ivand_qmul@125
|
231
|
ivand_qmul@125
|
232 file->as[file->AStreams] = stream;
|
ivand_qmul@125
|
233 file->AStreams++;
|
ivand_qmul@125
|
234
|
ivand_qmul@125
|
235 // create semaphore for thread-safe use
|
ivand_qmul@125
|
236 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
237 }
|
ivand_qmul@125
|
238 }
|
ivand_qmul@125
|
239 }
|
ivand_qmul@125
|
240 }
|
ivand_qmul@125
|
241
|
ivand_qmul@125
|
242 return file;
|
ivand_qmul@125
|
243 }
|
ivand_qmul@125
|
244
|
ivand_qmul@125
|
/* Return the most recent buffered video frame whose timestamp is due,
 * according to the application audio clock (MainWindow::Get_CurAudioTime),
 * or 0 when no frame is ready, the file is paused, or video is skipped.
 *
 * Locking contract: on success this function returns WITHOUT posting the
 * video stream semaphore; the caller must hand the surface back through
 * SDL_ffmpegReleaseVideo, which posts it. On the no-frame path the
 * semaphore is released here. */
SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {

    // the main window acts as the master audio clock
    MainWindow * MWinsA=MainWindow::instance();
    if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;

    // lock the image buffer against the decode thread
    SDL_SemWait(file->vs[file->videoStream]->sem);

    // best candidate found so far (newest frame that is already due)
    bufferImage *option = 0;
    int i;

    for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {

        // if this entry does not exist, continue
        if(!file->vs[file->videoStream]->imageBuffer[i]) continue;

        // current playback position in milliseconds
        int64_t pos=MWinsA->Get_CurAudioTime();
        // do we have an image that should have been shown?
        // (start_time is in AV_TIME_BASE units, /1000 converts to ms)
        if(file->vs[file->videoStream]->imageBuffer[i]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000) {

            // if this is the first option we find, we simply save it
            if(!option) {

                option = file->vs[file->videoStream]->imageBuffer[i];

                // set to 0 so we know this position in the buffer is available again
                file->vs[file->videoStream]->imageBuffer[i] = 0;

            } else {

                // we found a newer possible timestamp, we delete the older one
                if( option->timestamp < file->vs[file->videoStream]->imageBuffer[i]->timestamp) {

                    // this image is too old, we discard it
                    SDL_FreeSurface( option->img );

                    // free old option
                    free( option );

                    // new pointer to position in container
                    option = file->vs[file->videoStream]->imageBuffer[i];

                    // set to 0 so we know this position in the buffer is available again
                    file->vs[file->videoStream]->imageBuffer[i] = 0;
                }
            }
        }
    }

    // if we did not find an option, release the lock and exit
    if(!option) {
        // release the lock
        SDL_SemPost(file->vs[file->videoStream]->sem);
        return 0;
    }

    // we did find an option, so we return the imagedata
    // NOTE(review): only option->img escapes here; the bufferImage
    // wrapper 'option' itself is never freed on this path and
    // SDL_ffmpegReleaseVideo frees only the surface — looks like a
    // small per-frame leak, confirm against the rest of the library
    return option->img;
}
|
ivand_qmul@125
|
304
|
ivand_qmul@125
|
305 int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {
|
ivand_qmul@125
|
306
|
ivand_qmul@125
|
307 // if there was no valid video stream, we should not release
|
ivand_qmul@125
|
308 if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;
|
ivand_qmul@125
|
309
|
ivand_qmul@125
|
310 // free surface
|
ivand_qmul@125
|
311 SDL_FreeSurface(bmp);
|
ivand_qmul@125
|
312
|
ivand_qmul@125
|
313 // release semaphore if needed
|
ivand_qmul@125
|
314 if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
|
ivand_qmul@125
|
315 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
316 }
|
ivand_qmul@125
|
317
|
ivand_qmul@125
|
318 return 0;
|
ivand_qmul@125
|
319 }
|
ivand_qmul@125
|
320
|
ivand_qmul@125
|
321 SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {
|
ivand_qmul@125
|
322
|
ivand_qmul@125
|
323 // check if we have any audiostreams
|
ivand_qmul@125
|
324 if(!file->AStreams) return 0;
|
ivand_qmul@125
|
325
|
ivand_qmul@125
|
326 // check if the requested id is possible
|
ivand_qmul@125
|
327 if(audioID >= file->AStreams) return 0;
|
ivand_qmul@125
|
328
|
ivand_qmul@125
|
329 // return ausiostream linked to audioID
|
ivand_qmul@125
|
330 return file->as[audioID];
|
ivand_qmul@125
|
331 }
|
ivand_qmul@125
|
332
|
ivand_qmul@125
|
333 int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {
|
ivand_qmul@125
|
334
|
ivand_qmul@125
|
335 // check if we have any audiostreams
|
ivand_qmul@125
|
336 if(!file->AStreams) return -1;
|
ivand_qmul@125
|
337
|
ivand_qmul@125
|
338 // check if the requested id is possible
|
ivand_qmul@125
|
339 if(audioID >= file->AStreams) return -1;
|
ivand_qmul@125
|
340
|
ivand_qmul@125
|
341 // set current audiostream to stream linked to audioID
|
ivand_qmul@125
|
342 file->audioStream = audioID;
|
ivand_qmul@125
|
343
|
ivand_qmul@125
|
344 return 0;
|
ivand_qmul@125
|
345 }
|
ivand_qmul@125
|
346
|
ivand_qmul@125
|
347 SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {
|
ivand_qmul@125
|
348
|
ivand_qmul@125
|
349 // check if we have any videostreams
|
ivand_qmul@125
|
350 if(!file->VStreams) return 0;
|
ivand_qmul@125
|
351
|
ivand_qmul@125
|
352 // check if the requested id is possible
|
ivand_qmul@125
|
353 if(videoID >= file->VStreams) return 0;
|
ivand_qmul@125
|
354
|
ivand_qmul@125
|
355 // return ausiostream linked to videoID
|
ivand_qmul@125
|
356 return file->vs[videoID];
|
ivand_qmul@125
|
357 }
|
ivand_qmul@125
|
358
|
ivand_qmul@125
|
359 int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {
|
ivand_qmul@125
|
360
|
ivand_qmul@125
|
361 // check if we have any videostreams
|
ivand_qmul@125
|
362 if(!file->VStreams) return -1;
|
ivand_qmul@125
|
363
|
ivand_qmul@125
|
364 // check if the requested id is possible
|
ivand_qmul@125
|
365 if(videoID >= file->VStreams) return -1;
|
ivand_qmul@125
|
366
|
ivand_qmul@125
|
367 // set current videostream to stream linked to videoID
|
ivand_qmul@125
|
368 file->videoStream = videoID;
|
ivand_qmul@125
|
369
|
ivand_qmul@125
|
370 return 0;
|
ivand_qmul@125
|
371 }
|
ivand_qmul@125
|
372
|
ivand_qmul@125
|
373 int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
374
|
ivand_qmul@125
|
375 // start a thread that continues to fill audio/video buffers
|
ivand_qmul@125
|
376 if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);
|
ivand_qmul@125
|
377
|
ivand_qmul@125
|
378 return 0;
|
ivand_qmul@125
|
379 }
|
ivand_qmul@125
|
380
|
ivand_qmul@125
|
381 int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
382
|
ivand_qmul@125
|
383 // stop decode thread
|
ivand_qmul@125
|
384 file->threadActive = 0;
|
ivand_qmul@125
|
385 if(file->threadID) SDL_WaitThread(file->threadID, 0);
|
ivand_qmul@125
|
386
|
ivand_qmul@125
|
387 // set threadID to zero, so we can check for concurrent threads
|
ivand_qmul@125
|
388 file->threadID = 0;
|
ivand_qmul@125
|
389
|
ivand_qmul@125
|
390 return -1;
|
ivand_qmul@125
|
391 }
|
ivand_qmul@125
|
392
|
ivand_qmul@125
|
/* Background decode loop (entry point for SDL_CreateThread).
 * Continuously reads packets from the file and fills the audio sample
 * buffer and the video image buffer until file->threadActive is cleared
 * by SDL_ffmpegStopDecoding. Always returns 0.
 *
 * data: the SDL_ffmpegFile* passed to SDL_CreateThread. */
int SDL_ffmpegDecodeThread(void* data) {
    // NOTE(review): this cache is 'static', so it is shared by ALL decode
    // threads/files in the process and is never freed — fine for a single
    // open file, a race and a wrong-geometry hazard with more than one
    static struct SwsContext *img_convert_ctx;
    // unpack the void pointer
    SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;

    // flag this thread as active, used for stopping
    file->threadActive = 1;

    // create a packet for our data
    // NOTE(review): 'pack' is not initialized here; if the loop exits
    // before av_read_frame ever succeeds, av_free_packet below operates
    // on an indeterminate packet — verify
    AVPacket pack;

    // reserve some pointers for use in loop
    AVFrame *inFrame, *inFrameRGB;

    // allocate a frame (decoder output)
    inFrame = avcodec_alloc_frame();

    // allocate another frame for unknown->RGB conversion
    inFrameRGB = avcodec_alloc_frame();

    if(SDL_ffmpegValidVideo(file)) {
        // allocate buffer backing the RGB frame
        // NOTE(review): inVideoBuffer is scoped to this if-block and its
        // pointer is only retained inside inFrameRGB; it is never freed
        uint8_t *inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
                                                   file->vs[file->videoStream]->width,
                                                   file->vs[file->videoStream]->height) );

        // put buffer into our reserved frame
        avpicture_fill( (AVPicture*)inFrameRGB,
                        inVideoBuffer,
                        file->vs[file->videoStream]->pixFmt,
                        file->vs[file->videoStream]->width,
                        file->vs[file->videoStream]->height);
    }

    // allocate temporary audiobuffer for one decoded chunk
    int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );

    // reserve integer for use in loop (decoder output size / frame flag)
    int got_frame;

    while(file->threadActive) {

        // read a packet from the file
        if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
            // nothing to read (EOF or error): thread idles
            SDL_Delay(10);
            continue;
        }
        // drop audio packets outright while audio is being skipped
        // NOTE(review): this dereferences file->as[file->audioStream]
        // without checking SDL_ffmpegValidAudio first — with skipAudio
        // set and audioStream still -1 this reads file->as[-1]; confirm
        // callers always select an audio stream before setting skipAudio.
        // The packet is also not freed before 'continue' (leak per packet)
        if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
            SDL_Delay(1);
            continue;
        }

        // we got a packet, lets handle it

        // serialize against SDL_ffmpegSeek / flush
        SDL_SemWait(file->decode);

        // If it's a audio packet from our stream...
        if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {

            uint8_t *data = pack.data;
            int size = pack.size;
            int len;

            // one packet may contain several audio frames
            while(size > 0 && file->threadActive) {

                // Decode the packet; len = input bytes consumed,
                // got_frame = output bytes written to 'samples'
                len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);

                // if error, we skip the frame
                if(len < 0 || !got_frame) {
                    size = 0;
                    break;
                }

                // change pointers
                // NOTE(review): the input cursor is advanced by got_frame
                // (the OUTPUT byte count) instead of len (the input bytes
                // consumed) — this looks like a bug for codecs where the
                // two differ; verify against the old avcodec_decode_audio
                // contract before changing
                data += got_frame;
                size -= got_frame;

                // if the audiobuffer is full, the thread waits
                while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
                       file->threadActive) {
                    SDL_Delay(5);
                }

                // packet pts converted to milliseconds via the stream timeBase
                int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;

                // is the audioBuffer is empty
                if(!file->as[file->audioStream]->size) {

                    // we set a new pts
                    file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;

                    // we set totalbytes to zero, as this represents the amount
                    // of bytes that were played since our last 'hardPts'
                    file->as[file->audioStream]->totalBytes = 0;
                }

                // no need to store old samples
                if(audiopts >= SDL_ffmpegGetPosition(file)) {

                    // enter audio semaphore
                    SDL_SemWait(file->as[file->audioStream]->sem);

                    // copy data from temporary buffer to streambuffer
                    memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);

                    // set the new size of the audiobuffer
                    file->as[file->audioStream]->size += got_frame;

                    // we leave the audio semaphore
                    SDL_SemPost(file->as[file->audioStream]->sem);
                }
            }
        }

        // If it's a video packet from our video stream...
        if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {

            got_frame = 0;

            // Decode the packet
            avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);

            if(got_frame) {

                // create imagebuffer entry for this decoded frame
                // NOTE(review): malloc result is not checked before the
                // buf->timestamp write below
                bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );

                // write timestamp (ms) into the buffer
                buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;

                // usefull when dealing with B frames
                if(pack.dts == AV_NOPTS_VALUE) {
                    // if we did not get a valid timestamp, we make one up based on the last
                    // valid timestamp + the duration of a frame
                    buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
                }

                // if new timestamp is from future, we proceed
                // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
                // {
                // lazily create the codec-format -> RGB24 scaler
                if (img_convert_ctx == NULL) {
                    img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                                                     file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     file->vs[file->videoStream]->pixFmt,
                                                     sws_flags, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL) {
                        fprintf(stderr, "Cannot initialize the conversion context\n");
                        exit(1);
                    }
                }

                // convert the decoded frame to RGB24 into inFrameRGB
                sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
                          0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);

                // we convert whatever type of data we got to RGB24
                /* img_convert((AVPicture*)inFrameRGB,
                               file->vs[file->videoStream]->pixFmt,
                               (AVPicture*)inFrame,
                               ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                               file->vs[file->videoStream]->width,
                               file->vs[file->videoStream]->height);
                */
                // allocate image room (24bpp RGB surface)
                buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
                                                file->vs[file->videoStream]->width,
                                                file->vs[file->videoStream]->height,
                                                24, 0x0000FF, 0x00FF00, 0xFF0000, 0);

                // copy image data to image room
                // NOTE(review): assumes the surface pitch equals width*3
                // (no row padding) — confirm for odd widths
                memcpy(buf->img->pixels, inFrameRGB->data[0],
                       file->vs[file->videoStream]->width * file->vs[file->videoStream]->height * 3);

                // we write the lastTimestamp we got
                file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;

                int i;
                int again = 1;

                // keep trying to fit in buffer, until the data was actually placed in the buffer
                while(again && file->threadActive) {

                    // we enter the video semaphore
                    SDL_SemWait(file->vs[file->videoStream]->sem);

                    // loop through all positions in buffer until an empty
                    // space was found
                    for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
                        // if this place in the buffer is empty we write our new frame
                        if(file->vs[file->videoStream]->imageBuffer[i] == 0) {
                            file->vs[file->videoStream]->imageBuffer[i] = buf;
                            // we placed our image in the buffer, moving on
                            again = 0;
                            break;
                        }
                    }

                    // we leave the video semaphore
                    SDL_SemPost(file->vs[file->videoStream]->sem);

                    // frames aren't being release every ms, so we can take some
                    // time before we try and fit our new image again
                    if(again) SDL_Delay(5);
                }
                // }
                //else {
                //    // if our decoded frame was too old, we don't bother putting
                //    // it in our buffer
                //    free( buf );
                //    }
            }
        }
        // we leave the decode semaphore
        SDL_SemPost(file->decode);
        // throttle the loop slightly when audio is skipped and a delay was requested
        if ((file->skipAudio)&&(file->delay))
            SDL_Delay(3);
    }
    // if we stop this thread, we can release the packet we reserved
    // NOTE(review): only the last packet is freed here; packets read in
    // earlier iterations are overwritten without av_free_packet, and
    // 'samples'/inFrame/inFrameRGB/inVideoBuffer are never freed either
    av_free_packet(&pack);

    return 0;
}
|
ivand_qmul@125
|
619
|
ivand_qmul@125
|
/**
 * Seek to an absolute position in the file.
 *
 * @param file      file handle, as created by the library
 * @param timestamp target position in milliseconds from the start of the file
 * @return 0 on success, -1 when the position is out of bounds or the seek failed
 *
 * Flushes the stream buffers, performs the seek inside the decode semaphore so
 * the decode thread cannot touch data being discarded, and resets the playback
 * clock (file->offset / file->startTime) on success.
 */
int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {

    // if the seekposition is out of bounds, return
    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;

    // start by flushing the buffers
    SDL_ffmpegFlush(file);

    // we enter the decode semaphore so the decode thread cannot be working on
    // data we are trying to flush
    SDL_SemWait(file->decode);

    // if the stream has an offset, add it to the start time
    int64_t startOffset = 0;
    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
        // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
    }
    //if (file->skipAudio) startOffset=0;
    // calculate the final timestamp for the seek action, this is in AV_TIME_BASE fractional seconds
    startOffset += (timestamp * AV_TIME_BASE) / 1000;

    // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
    // closest to the point we want, resulting in an earlier position if the jump
    // could not go to the exact point we wanted
    // NOTE(review): AVSEEK_FLAG_ANY permits landing on non-keyframes, which can
    // show decode artifacts until the next keyframe -- confirm this is intended
    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
        SDL_Delay(5);
        // set some values in our file so we know where to start playing
        file->offset = timestamp;
        file->startTime = av_gettime()/1000;//SDL_GetTicks();

        // if we have a valid video, we probably have some data we want to flush
        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

            // flushing happens inside the semaphore as not to interfere with the
            // decoding thread
            SDL_SemWait(file->vs[file->videoStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
            SDL_SemPost(file->vs[file->videoStream]->sem);
        }

        // same goes for audio, if there is data, we flush it
        if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {

            // make sure this is done thread-safe, so inside the appropriate
            // semaphore
            SDL_SemWait(file->as[file->audioStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
            SDL_SemPost(file->as[file->audioStream]->sem);
        }

        // then there is our flush call
        SDL_ffmpegFlush(file);

        // and we are done, lets release the decode semaphore so the decode
        // thread can move on, filling buffer from our new position
        SDL_SemPost(file->decode);

        return 0;
    }

    // if, for some reason, we could not seek, we still should flush our buffers
    SDL_ffmpegFlush(file);

    // and release our lock on the decodethread
    SDL_SemPost(file->decode);

    return -1;
}
|
ivand_qmul@125
|
689
|
ivand_qmul@125
|
690 int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {
|
ivand_qmul@125
|
691
|
ivand_qmul@125
|
692 // same thing as normal seek, just take into account the current position
|
ivand_qmul@125
|
693 return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
|
ivand_qmul@125
|
694 }
|
ivand_qmul@125
|
695
|
ivand_qmul@125
|
696 int SDL_ffmpegFlush(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
697
|
ivand_qmul@125
|
698 // if we have a valid audio stream, we flush is
|
ivand_qmul@125
|
699 if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
|
ivand_qmul@125
|
700
|
ivand_qmul@125
|
701 // flush audiobuffer from semaphore, be thread-safe!
|
ivand_qmul@125
|
702 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
703
|
ivand_qmul@125
|
704 file->as[file->audioStream]->size = 0;
|
ivand_qmul@125
|
705
|
ivand_qmul@125
|
706 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
707 }
|
ivand_qmul@125
|
708
|
ivand_qmul@125
|
709 // if we have a valid video stream, we flush some more
|
ivand_qmul@125
|
710 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
711
|
ivand_qmul@125
|
712 // flush videobuffer
|
ivand_qmul@125
|
713 int i;
|
ivand_qmul@125
|
714
|
ivand_qmul@125
|
715 // again, be thread safe!
|
ivand_qmul@125
|
716 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
717
|
ivand_qmul@125
|
718 // make sure we delete all frames from buffer
|
ivand_qmul@125
|
719 for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@125
|
720
|
ivand_qmul@125
|
721 // if this entry does not exist, continue
|
ivand_qmul@125
|
722 if(!file->vs[file->videoStream]->imageBuffer[i]) continue;
|
ivand_qmul@125
|
723
|
ivand_qmul@125
|
724 // free the actual image data
|
ivand_qmul@125
|
725 SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );
|
ivand_qmul@125
|
726
|
ivand_qmul@125
|
727 // and free the struct containing it
|
ivand_qmul@125
|
728 free( file->vs[file->videoStream]->imageBuffer[i] );
|
ivand_qmul@125
|
729
|
ivand_qmul@125
|
730 // set position in buffer to 0, so we know it is empty
|
ivand_qmul@125
|
731 file->vs[file->videoStream]->imageBuffer[i] = 0;
|
ivand_qmul@125
|
732 }
|
ivand_qmul@125
|
733
|
ivand_qmul@125
|
734 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
735 }
|
ivand_qmul@125
|
736
|
ivand_qmul@125
|
737 return 0;
|
ivand_qmul@125
|
738 }
|
ivand_qmul@125
|
739
|
ivand_qmul@125
|
/**
 * Fetch up to *len bytes of decoded audio data.
 *
 * @param file file handle
 * @param len  in: number of bytes requested; out: number of bytes actually available
 * @return pointer to the audio buffer, or 0 when there is no audio / playback is paused
 *
 * IMPORTANT: on a non-zero return this function leaves the audio semaphore
 * LOCKED; the caller must pair it with SDL_ffmpegReleaseAudio, which consumes
 * the data and releases the semaphore. It also updates file->videoOffset so
 * video frame selection stays in sync with the audio clock.
 */
int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // no valid audio, means no audio to get
    if( !SDL_ffmpegValidAudio(file) || file->pause||file->skipAudio ) return 0;

    // working on audiobuffer should always be done from semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

    // if we ask for more audiodata than we can give, we send what we can
    // actually give, writing the amount of bytes into len
    if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

    // decrease the size of our audiobuffer by len
    file->as[file->audioStream]->size -= *len;

    // len represents the nr of bytes we sent, so we increase the total
    file->as[file->audioStream]->totalBytes += *len;

    // the videooffset makes sure we are always in sync with the audio
    // it is actually the difference between the position where we are in the
    // stream (GetPosition) and where we should be (pts)
    // we use the same offset when selecting the current videoframe
    file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

    // we calculate the new pts for our audiodata based on the hardPts
    // (that is the one we got from ffmpeg) and then calculating how far we
    // have come since
    file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
    // since we use 16bit per sample, we divide totalbytes by 2 before dividing by samplerate
    file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);

    // we return the audiobuffer, notice we are still in the audiosemaphore!
    // we only leave this by calling SDL_ffmpegReleaseAudio
    return file->as[file->audioStream]->audio;
}
|
ivand_qmul@125
|
775
|
ivand_qmul@125
|
776 int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {
|
ivand_qmul@125
|
777
|
ivand_qmul@125
|
778 // no audio, means no releasing
|
ivand_qmul@125
|
779 if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;
|
ivand_qmul@125
|
780
|
ivand_qmul@125
|
781 // this call should be paired with SDL_ffmpegGetAudio, as it provides us
|
ivand_qmul@125
|
782 // with the correct length so we move the correct amount of data
|
ivand_qmul@125
|
783 memmove( file->as[file->audioStream]->audio,
|
ivand_qmul@125
|
784 file->as[file->audioStream]->audio+len,
|
ivand_qmul@125
|
785 file->as[file->audioStream]->size );
|
ivand_qmul@125
|
786
|
ivand_qmul@125
|
787 // work on audiodata is done, so we release the semaphore
|
ivand_qmul@125
|
788 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
789
|
ivand_qmul@125
|
790 return 0;
|
ivand_qmul@125
|
791 }
|
ivand_qmul@125
|
792
|
ivand_qmul@125
|
793 int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
794 //MainWindow * MWinsA=MainWindow::instance();
|
ivand_qmul@125
|
795
|
ivand_qmul@125
|
796 if (file->skipAudio){
|
ivand_qmul@125
|
797 return (av_gettime()/1000+ file->offset - file->startTime);
|
ivand_qmul@125
|
798 //int64_t pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@125
|
799 //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
|
ivand_qmul@125
|
800 }
|
ivand_qmul@125
|
801 else
|
ivand_qmul@125
|
802 return (av_gettime()/1000+ file->offset - file->startTime);//SDL_GetTicks();
|
ivand_qmul@125
|
803 // return the current playposition of our file
|
ivand_qmul@125
|
804
|
ivand_qmul@125
|
805 }
|
ivand_qmul@125
|
806
|
ivand_qmul@125
|
807 SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {
|
ivand_qmul@125
|
808
|
ivand_qmul@125
|
809 // create audio spec
|
ivand_qmul@125
|
810 SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );
|
ivand_qmul@125
|
811
|
ivand_qmul@125
|
812 if(spec) {
|
ivand_qmul@125
|
813 spec->format = AUDIO_S16SYS;
|
ivand_qmul@125
|
814 spec->samples = samples;
|
ivand_qmul@125
|
815 spec->userdata = file;
|
ivand_qmul@125
|
816 spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
|
ivand_qmul@125
|
817 spec->freq = 48000;
|
ivand_qmul@125
|
818 spec->channels = 2;
|
ivand_qmul@125
|
819
|
ivand_qmul@125
|
820 // if we have a valid audiofile, we can use its data to create a
|
ivand_qmul@125
|
821 // more appropriate audio spec
|
ivand_qmul@125
|
822 if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
|
ivand_qmul@125
|
823 spec->freq = file->as[file->audioStream]->sampleRate;
|
ivand_qmul@125
|
824 spec->channels = file->as[file->audioStream]->channels;
|
ivand_qmul@125
|
825 }
|
ivand_qmul@125
|
826 }
|
ivand_qmul@125
|
827
|
ivand_qmul@125
|
828 return spec;
|
ivand_qmul@125
|
829 }
|
ivand_qmul@125
|
830
|
ivand_qmul@125
|
831 int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
832
|
ivand_qmul@125
|
833 // returns the duration of the entire file, please note that ffmpeg doesn't
|
ivand_qmul@125
|
834 // always get this value right! so don't bet your life on it...
|
ivand_qmul@125
|
835 return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
|
ivand_qmul@125
|
836 }
|
ivand_qmul@125
|
837
|
ivand_qmul@125
|
838 int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {
|
ivand_qmul@125
|
839
|
ivand_qmul@125
|
840 if(!w || !h) return -1;
|
ivand_qmul@125
|
841
|
ivand_qmul@125
|
842 // if we have a valid video file selected, we use it
|
ivand_qmul@125
|
843 // if not, we send default values and return.
|
ivand_qmul@125
|
844 // by checking the return value you can check if you got a valid size
|
ivand_qmul@125
|
845 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
846 *w = file->vs[file->videoStream]->width;
|
ivand_qmul@125
|
847 *h = file->vs[file->videoStream]->height;
|
ivand_qmul@125
|
848 return 0;
|
ivand_qmul@125
|
849 }
|
ivand_qmul@125
|
850
|
ivand_qmul@125
|
851 *w = 320;
|
ivand_qmul@125
|
852 *h = 240;
|
ivand_qmul@125
|
853 return -1;
|
ivand_qmul@125
|
854 }
|
ivand_qmul@125
|
855
|
ivand_qmul@125
|
856 int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
857
|
ivand_qmul@125
|
858 // this function is used to check if we selected a valid audio stream
|
ivand_qmul@125
|
859 if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;
|
ivand_qmul@125
|
860
|
ivand_qmul@125
|
861 return 1;
|
ivand_qmul@125
|
862 }
|
ivand_qmul@125
|
863
|
ivand_qmul@125
|
864 int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
865
|
ivand_qmul@125
|
866 // this function is used to check if we selected a valid video stream
|
ivand_qmul@125
|
867 if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;
|
ivand_qmul@125
|
868
|
ivand_qmul@125
|
869 return 1;
|
ivand_qmul@125
|
870 }
|
ivand_qmul@125
|
871
|
ivand_qmul@125
|
872 int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {
|
ivand_qmul@125
|
873
|
ivand_qmul@125
|
874 // by putting 0 into state, we play the file
|
ivand_qmul@125
|
875 // this behaviour is analogue to SDL audio
|
ivand_qmul@125
|
876 file->pause = state;
|
ivand_qmul@125
|
877
|
ivand_qmul@125
|
878 if(!file->pause) {
|
ivand_qmul@125
|
879 file->startTime = av_gettime()/1000;//SDL_GetTicks();
|
ivand_qmul@125
|
880 }
|
ivand_qmul@125
|
881
|
ivand_qmul@125
|
882 return 0;
|
ivand_qmul@125
|
883 }
|
ivand_qmul@125
|
884
|
ivand_qmul@125
|
/**
 * Return the current pause state, as last set by SDL_ffmpegPause
 * (0 means playing, non-zero means paused).
 */
int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
    return file->pause;
}
|