/*******************************************************************************
 *
 *  SDL_ffmpeg is a library for basic multimedia functionality.
 *  SDL_ffmpeg is based on ffmpeg.
 *
 *  Copyright (C) 2007 Arjan Houben
 *
 *  SDL_ffmpeg is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published
 *  by the Free Software Foundation, either version 3 of the License, or any
 *  later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *******************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef WIN32
#include "SDL_ffmpeg.h"
#include <SDL.h>
#include <SDL_thread.h>
#include <stdio.h>
#include <Windows.h>
#endif

#ifdef __unix__
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#endif
#undef main
#ifdef __cplusplus
}
#endif
#include "../../sv/main/MainWindow.h"
#include <time.h>

//const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
//const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;

int FFMPEG_init_was_called = 0;
FILE *pFile, *tFile;
int64_t Time, Time1;
int64_t realt = 0;
SDL_ffmpegFile* SDL_ffmpegCreateFile() {

    // create SDL_ffmpegFile pointer
    SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
    if(!file) return 0;
    file->_ffmpeg = av_alloc_format_context(); //(AVFormatContext*)malloc(sizeof(AVFormatContext));
    // create a semaphore for every file
    file->decode = SDL_CreateSemaphore(1);

    Time = 0;
    Time1 = 0;
    fopen_s(&pFile, "myfile.txt", "w");
    fopen_s(&tFile, "Timestampfile.txt", "w");
    // allocate room for VStreams
    file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->vs) {
        free( file );
        return 0;
    }

    // allocate room for AStreams
    file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
    if(!file->as) {
        free( file );
        return 0;
    }

    // initialize variables with standard values
    file->audioStream = -1;
    file->videoStream = -1;

    file->offset = 0;
    file->videoOffset = 0;
    file->startTime = 0;

    file->threadID = 0;

    return file;
}

void SDL_ffmpegFree(SDL_ffmpegFile* file) {

    SDL_ffmpegStopDecoding(file);

    SDL_ffmpegFlush(file);

    free(file);
}

SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {

    // register all codecs
    if(!FFMPEG_init_was_called) {
        FFMPEG_init_was_called = 1;
        av_register_all();
    }

    // open new ffmpegFile
    SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
    if(!file) return 0;

    // information about format is stored in file->_ffmpeg

    // open the file
    if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
        fprintf(stderr, "could not open \"%s\"\n", filename);
        free(file);
        return 0;
    }

    // retrieve format information
    if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
        fprintf(stderr, "could not retrieve video stream info");
        free(file);
        return 0;
    }

    // dump info to logfile
    // dump_format(file->_ffmpeg, 0, filename, 0);

    // find the streams in the file
    file->VStreams = 0;
    file->AStreams = 0;
    file->threadActive = 0;

    // iterate through all the streams and store audio/video streams
    size_t i;
    for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {

        if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {

            // if this is a packet of the correct type we create a new stream
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // save width, height and pixFmt of our outputframes
                stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
                stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
                stream->pixFmt = PIX_FMT_RGB24;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);

                if(!codec) {
                    free(stream);
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free(stream);
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    stream->audio = 0;
                    stream->size = 0;
                    stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
                    stream->writeImage = 0;
                    stream->readImage = 0;
                    file->vs[file->VStreams] = stream;
                    file->VStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {

            // if this is a packet of the correct type we create a new stream
            SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );

            if(stream) {
                // we set our stream to zero
                memset(stream, 0, sizeof(SDL_ffmpegStream));

                // save unique streamid
                stream->id = i;

                // the timeBase is what we use to calculate from/to pts
                stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;

                // _ffmpeg holds data about streamcodec
                stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;

                stream->width = 0;
                stream->height = 0;
                stream->pixFmt = PIX_FMT_RGB24;

                // get the correct decoder for this stream
                AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);

                if(!codec) {
                    free( stream );
                    fprintf(stderr, "could not find codec\n");
                } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
                    free( stream );
                    fprintf(stderr, "could not open decoder\n");
                } else {

                    // copy metadata from AVStream into our stream
                    stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
                    stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
                    memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
                    stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
                    stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
                    memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);

                    stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
                    stream->size = 0;
                    stream->imageBuffer = 0;

                    file->as[file->AStreams] = stream;
                    file->AStreams++;

                    // create semaphore for thread-safe use
                    stream->sem = SDL_CreateSemaphore(1);
                }
            }
        }
    }

    return file;
}
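
/*
 * Illustrative usage sketch of the functions in this file (not part of the
 * library itself; "test.avi" is just a placeholder filename):
 *
 *   SDL_ffmpegFile *film = SDL_ffmpegOpen("test.avi");
 *   if(film) {
 *       SDL_ffmpegSelectVideoStream(film, 0);
 *       SDL_ffmpegSelectAudioStream(film, 0);
 *       SDL_ffmpegStartDecoding(film);
 *       // ...fetch data with SDL_ffmpegGetVideo / SDL_ffmpegGetAudio...
 *       SDL_ffmpegFree(film);
 *   }
 */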

SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {

    MainWindow * MWinsA = MainWindow::instance();
    if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;

    SDL_SemWait(file->vs[file->videoStream]->sem);

    bufferImage *option = 0;
    //int i;
    float ratio;
    int64_t pos, pos1, pos2, timestamp;
    //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
    pos = MWinsA->Get_CurAudioTime();

    fprintf(pFile, "p: \t %lld\t", (long long)pos);
    //if (MWinsA->Get_HardwareBufferTime()==0)
    //    pos1=0;
    //else {
    //    pos1=MWinsA->Get_HardwareBufferTime();
    //    //fprintf (tFile, "%u\t", pos1);
    //    int64_t timeTemp;
    //    QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
    //
    //    pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
    //    fprintf (pFile, "%u\t", pos1);
    //}
    //pos2=pos+pos1;
    fprintf(pFile, "%lld\n", (long long)pos);

    // consume buffered frames whose timestamp says they should already have been shown
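    // writeImage and readImage index the imageBuffer ring buffer: the decode
    // thread advances writeImage as it stores frames, this function advances
    // readImage as it consumes them, both modulo SDL_FFMPEG_MAX_BUFFERED_FRAMES.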
    while( ((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) > 0) &&
           (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000) )
           //&& (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp >= pos - file->timebase+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000))
    {
        //pos=MWinsA->Get_CurAudioTime();
        //timestamp=file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp;
        //fprintf (tFile, "try: %d %d\n", (pos+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000), timestamp);
        // do we have an image that should have been shown?
        //if(file->vs[file->videoStream]->imageBuffer[mod(file->vs[file->videoStream]->readImage,SDL_FFMPEG_MAX_BUFFERED_FRAMES)]->timestamp <= pos + (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000) {

        // if this is the first option we find, we simply save it
        if(!option) {

            option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES];

            // set to 0 so we know this position in the buffer is available again
            file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
            file->vs[file->videoStream]->readImage++;

        } else {

            // we found a newer possible timestamp, we delete the older one
            if( option->timestamp < file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp) {

                // this image is too old, we discard it
                SDL_FreeSurface( option->img );

                // free old option
                free( option );

                // new pointer to position in container
                option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES];

                // set to 0 so we know this position in the buffer is available again
                file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
                file->vs[file->videoStream]->readImage++;
            }
            else {
                file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
                file->vs[file->videoStream]->readImage++;
            }
        }


        pos = MWinsA->Get_CurAudioTime();
        fprintf(pFile, "e:\t%lld\t", (long long)pos);
        //if (MWinsA->Get_HardwareBufferTime()==0)
        //    pos1=0;
        //else {
        //    pos1=MWinsA->Get_HardwareBufferTime();
        //    //fprintf (tFile, "%u\t", pos1);
        //    int64_t timeTemp;
        //    QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));

        //    pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
        //    fprintf (pFile, "%u\t", pos1);
        //}
        //fprintf (pFile, "%u\n", pos2);
        //pos2=pos+pos1;
        //if (pos<pos2) pos=pos2;
    }
    //}
    //}
    int x = file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage;
    // if we did not find an option, we exit
    if(!option) {
        // release the lock
        /*timestamp=0;
        int64_t tt=av_gettime()/1000-file->timer;
        file->timer=av_gettime()/1000;
        realt+=tt;
        fprintf (tFile, "%u\t", realt);
        fprintf (tFile, "%u\t", tt);
        fprintf (tFile, "%u\t", pos);
        fprintf (tFile, "%u\n", timestamp);*/
        SDL_SemPost(file->vs[file->videoStream]->sem);
        return 0;
    }
    int64_t tt;
    QueryPerformanceCounter((LARGE_INTEGER *)(&tt));
    tt = tt/(file->countFreq) - file->timer;

    QueryPerformanceCounter((LARGE_INTEGER *)(&file->timer));
    file->timer = file->timer/(file->countFreq);
    realt += tt;
    fprintf(tFile, "%d\t", x);
    fprintf(tFile, "%lld\t", (long long)realt);
    fprintf(tFile, "%lld\t", (long long)tt);
    timestamp = (pos - option->timestamp + ((AVFormatContext*)file->_ffmpeg)->start_time/1000) / MWinsA->getPlaySpeedVal();
    fprintf(tFile, "%lld\t", (long long)pos); //+ (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000);
    fprintf(tFile, "%lld\n", (long long)timestamp);
    // we did find an option, so we return the image data
    return option->img;
}

int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {

    // if there was no valid video stream, we should not release
    if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;

    // free surface
    SDL_FreeSurface(bmp);

    // release semaphore if needed
    if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}
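
/*
 * Illustrative pairing of SDL_ffmpegGetVideo and SDL_ffmpegReleaseVideo
 * (a sketch, not part of the library; 'screen' stands for whatever
 * SDL_Surface the caller is drawing to):
 *
 *   SDL_Surface *frame = SDL_ffmpegGetVideo(film);
 *   if(frame) {
 *       SDL_BlitSurface(frame, 0, screen, 0);
 *       SDL_Flip(screen);
 *       SDL_ffmpegReleaseVideo(film, frame);
 *   }
 */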

SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {

    // check if we have any audiostreams
    if(!file->AStreams) return 0;

    // check if the requested id is possible
    if(audioID >= file->AStreams) return 0;

    // return audiostream linked to audioID
    return file->as[audioID];
}

int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {

    // check if we have any audiostreams
    if(!file->AStreams) return -1;

    // check if the requested id is possible
    if(audioID >= file->AStreams) return -1;

    // set current audiostream to stream linked to audioID
    file->audioStream = audioID;

    return 0;
}

SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {

    // check if we have any videostreams
    if(!file->VStreams) return 0;

    // check if the requested id is possible
    if(videoID >= file->VStreams) return 0;

    // return videostream linked to videoID
    return file->vs[videoID];
}

int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {

    // check if we have any videostreams
    if(!file->VStreams) return -1;

    // check if the requested id is possible
    if(videoID >= file->VStreams) return -1;

    // set current videostream to stream linked to videoID
    file->videoStream = videoID;

    return 0;
}

int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {

    // start a thread that continues to fill audio/video buffers
    if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);

    return 0;
}

int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {

    // stop decode thread
    file->threadActive = 0;
    if(file->threadID) SDL_WaitThread(file->threadID, 0);

    // set threadID to zero, so we can check for concurrent threads
    file->threadID = 0;

    return -1;
}

int SDL_ffmpegDecodeThread(void* data) {
    static struct SwsContext *img_convert_ctx;
    // unpack the void pointer
    SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;

    // flag this thread as active, used for stopping
    file->threadActive = 1;

    // create a packet for our data
    AVPacket pack;

    // reserve some pointers for use in loop
    AVFrame *inFrame, *inFrameRGB;

    // allocate a frame
    inFrame = avcodec_alloc_frame();

    // allocate another frame for unknown->RGB conversion
    inFrameRGB = avcodec_alloc_frame();

    if(SDL_ffmpegValidVideo(file)) {
        // allocate buffer
        uint8_t *inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
                                                                      file->vs[file->videoStream]->width,
                                                                      file->vs[file->videoStream]->height) );

        // put buffer into our reserved frame
        avpicture_fill( (AVPicture*)inFrameRGB,
                        inVideoBuffer,
                        file->vs[file->videoStream]->pixFmt,
                        file->vs[file->videoStream]->width,
                        file->vs[file->videoStream]->height);
    }

    // allocate temporary audiobuffer
    int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );

    // reserve integer for use in loop
    int got_frame;

    while(file->threadActive) {

        // read a packet from the file
        if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
            // thread is idle
            SDL_Delay(10);
            continue;
        }
        if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
            SDL_Delay(1);
            continue;
        }

        // we got a packet, lets handle it

        // let's start by entering the video semaphore
        SDL_SemWait(file->decode);
        // If it's an audio packet from our stream...
        if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {

            uint8_t *data = pack.data;
            int size = pack.size;
            int len;

            while(size > 0 && file->threadActive) {

                // Decode the packet
                len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);

                // if error, we skip the frame
                if(len < 0 || !got_frame) {
                    size = 0;
                    break;
                }

                // advance the input pointers by the number of bytes consumed from the packet
                data += len;
                size -= len;

                // if the audiobuffer is full, the thread waits
                while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
                       file->threadActive) {
                    SDL_Delay(5);
                }

                // write an audiopts
                int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;

                // if the audioBuffer is empty
                if(!file->as[file->audioStream]->size) {

                    // we set a new pts
                    file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;

                    // we set totalbytes to zero, as this represents the amount
                    // of bytes that were played since our last 'hardPts'
                    file->as[file->audioStream]->totalBytes = 0;
                }

                // no need to store old samples
                if(audiopts >= SDL_ffmpegGetPosition(file)) {

                    // enter audio semaphore
                    SDL_SemWait(file->as[file->audioStream]->sem);

                    // copy data from temporary buffer to streambuffer
                    memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);

                    // set the new size of the audiobuffer
                    file->as[file->audioStream]->size += got_frame;

                    // we leave the audio semaphore
                    SDL_SemPost(file->as[file->audioStream]->sem);
                }
            }
        }

        // If it's a video packet from our video stream...
        if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {

            got_frame = 0;
            //Time1=av_gettime();
            // Decode the packet
            avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);

            if(got_frame) {

                // create imagebuffer
                bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );

                // write timestamp into the buffer
                buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;

                // useful when dealing with B frames
                if(pack.dts == AV_NOPTS_VALUE) {
                    // if we did not get a valid timestamp, we make one up based on the last
                    // valid timestamp + the duration of a frame
                    buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
                }

                // if new timestamp is from future, we proceed
                // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
                // {
                if (img_convert_ctx == NULL) {
                    img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                                                     file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
                                                     file->vs[file->videoStream]->pixFmt,
                                                     sws_flags, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL) {
                        fprintf(stderr, "Cannot initialize the conversion context\n");
                        exit(1);
                    }
                }

                sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
                          0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);

                // we convert whatever type of data we got to RGB24
                /* img_convert((AVPicture*)inFrameRGB,
                               file->vs[file->videoStream]->pixFmt,
                               (AVPicture*)inFrame,
                               ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
                               file->vs[file->videoStream]->width,
                               file->vs[file->videoStream]->height);
                */
                // allocate image room
                buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
                                                file->vs[file->videoStream]->width,
                                                file->vs[file->videoStream]->height,
                                                24, 0x0000FF, 0x00FF00, 0xFF0000, 0);

                // copy image data to image room
                memcpy(buf->img->pixels, inFrameRGB->data[0],
                       file->vs[file->videoStream]->width * file->vs[file->videoStream]->height * 3);
                file->timebase = buf->timestamp - file->vs[file->videoStream]->lastTimeStamp;
                // we write the lastTimestamp we got
                file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;

                //int i;
                int again = 1;
                //Time=av_gettime()-Time1;

                //fprintf (pFile, "%d \n",Time);
                // keep trying to fit in buffer, until the data was actually placed in the buffer
                while(again && file->threadActive) {

                    // we enter the video semaphore
                    SDL_SemWait(file->vs[file->videoStream]->sem);

                    // loop through all positions in buffer until an empty
                    // space was found
                    //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
                    // if this place in the buffer is empty we write our new frame
                    if((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) < SDL_FFMPEG_MAX_BUFFERED_FRAMES) {
                        file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->writeImage % SDL_FFMPEG_MAX_BUFFERED_FRAMES] = buf;
                        file->vs[file->videoStream]->writeImage++;
                        // we placed our image in the buffer, moving on
                        again = 0;

                    }
                    //}

                    // we leave the video semaphore
                    SDL_SemPost(file->vs[file->videoStream]->sem);

                    // frames aren't being released every ms, so we can take some
                    // time before we try and fit our new image again
                    if(again)
                    {
                        SDL_SemPost(file->decode);
                        SDL_Delay(3);
                        SDL_SemWait(file->decode);
                    }
                }
                // }
                //else {
                //    // if our decoded frame was too old, we don't bother putting
                //    // it in our buffer
                //    free( buf );
                //}
            }
        }
        // we leave the decode semaphore
        SDL_SemPost(file->decode);
        if ((file->skipAudio)&&(file->delay))
            SDL_Delay(3);
    }
    // if we stop this thread, we can release the packet we reserved
    av_free_packet(&pack);

    return 0;
}
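
// SDL_ffmpegDecodeThread is the producer in this design: it keeps the audio
// buffer and the video ring buffer filled, sleeping briefly whenever a buffer
// is full, while SDL_ffmpegGetAudio and SDL_ffmpegGetVideo consume from those
// buffers under the per-stream semaphores.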

int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {

    // if the seekposition is out of bounds, return
    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;

    // start by flushing the buffers
    SDL_ffmpegFlush(file);

    // we enter the decode semaphore so the decode thread cannot be working on
    // data we are trying to flush
    SDL_SemWait(file->decode);

    // if the stream has an offset, add it to the start time
    int64_t startOffset = 0;
    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
        // inFormatCtx->start_time is in AV_TIME_BASE fractional seconds
        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
    }
    //if (file->skipAudio) startOffset=0;
    // calculate the final timestamp for the seek action, this is in AV_TIME_BASE fractional seconds
    startOffset += (timestamp * AV_TIME_BASE) / 1000;

    // do the actual seeking, AVSEEK_FLAG_BACKWARD means we jump to the point
    // closest to the point we want, resulting in an earlier position if the jump
    // could not go to the exact point we wanted
    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
        SDL_Delay(5);
        // set some values in our file so we know where to start playing
        file->offset = timestamp;
        file->startTime = av_gettime()/1000; //SDL_GetTicks();

        // if we have a valid video, we probably have some data we want to flush
        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

            // flushing happens inside the semaphore as not to interfere with the
            // decoding thread
            SDL_SemWait(file->vs[file->videoStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
            SDL_SemPost(file->vs[file->videoStream]->sem);
        }

        // same goes for audio, if there is data, we flush it
        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {

            // make sure this is done thread-safe, so inside the appropriate
            // semaphore
            SDL_SemWait(file->as[file->audioStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
            SDL_SemPost(file->as[file->audioStream]->sem);
        }

        // then there is our flush call
        SDL_ffmpegFlush(file);

        // and we are done, lets release the decode semaphore so the decode
        // thread can move on, filling buffer from our new position
        SDL_SemPost(file->decode);

        return 0;
    }

    // if, for some reason, we could not seek, we still should flush our buffers
    SDL_ffmpegFlush(file);

    // and release our lock on the decodethread
    SDL_SemPost(file->decode);

    return -1;
}

int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {

    // same thing as normal seek, just take into account the current position
    return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
}
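
/*
 * Illustrative example: jump ten seconds ahead of the current position
 * (a sketch, not part of the library; timestamps are in milliseconds):
 *
 *   SDL_ffmpegSeekRelative(film, 10000);
 */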

int SDL_ffmpegFlush(SDL_ffmpegFile *file) {

    // if we have a valid audio stream, we flush it
    if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {

        // flush audiobuffer from semaphore, be thread-safe!
        SDL_SemWait(file->as[file->audioStream]->sem);

        file->as[file->audioStream]->size = 0;

        SDL_SemPost(file->as[file->audioStream]->sem);
    }

    // if we have a valid video stream, we flush some more
    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

        // flush videobuffer
        int i;

        // again, be thread safe!
        SDL_SemWait(file->vs[file->videoStream]->sem);

        // make sure we delete all frames from buffer
        for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {

            // if this entry does not exist, continue
            if(!file->vs[file->videoStream]->imageBuffer[i]) continue;

            // free the actual image data
            SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );

            // and free the struct containing it
            free( file->vs[file->videoStream]->imageBuffer[i] );

            // set position in buffer to 0, so we know it is empty
            file->vs[file->videoStream]->imageBuffer[i] = 0;
        }
        file->vs[file->videoStream]->writeImage = 0;
        file->vs[file->videoStream]->readImage = 0;
        SDL_SemPost(file->vs[file->videoStream]->sem);
    }

    return 0;
}

int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // no valid audio, means no audio to get
    if( !SDL_ffmpegValidAudio(file) || file->pause || file->skipAudio ) return 0;

    // working on audiobuffer should always be done from semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

    // if we ask for more audiodata than we can give, we send what we can
    // actually give, writing the amount of bytes into len
    if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

    // decrease the size of our audiobuffer by len
    file->as[file->audioStream]->size -= *len;

    // len represents the nr of bytes we sent, so we increase the total
    file->as[file->audioStream]->totalBytes += *len;

    // the videooffset makes sure we are always in sync with the audio
    // it is actually the difference between the position where we are in the
    // stream (GetPosition) and where we should be (pts)
    // we use the same offset when selecting the current videoframe
    file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

    // we calculate the new pts for our audiodata based on the hardPts
    // (that is the one we got from ffmpeg) and then calculating how far we
    // have come since
    file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
    // since we use 16bit per sample, we divide totalbytes by 2 before dividing by samplerate
    file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);

    // we return the audiobuffer, notice we are still in the audiosemaphore!
    // we only leave this by calling SDL_ffmpegReleaseAudio
    return file->as[file->audioStream]->audio;
}

int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {

    // no audio, means no releasing
    if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;

    // this call should be paired with SDL_ffmpegGetAudio, as it provides us
    // with the correct length so we move the correct amount of data
    memmove( file->as[file->audioStream]->audio,
             file->as[file->audioStream]->audio+len,
             file->as[file->audioStream]->size );

    // work on audiodata is done, so we release the semaphore
    SDL_SemPost(file->as[file->audioStream]->sem);

    return 0;
}
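
/*
 * Illustrative pairing of SDL_ffmpegGetAudio and SDL_ffmpegReleaseAudio inside
 * an SDL audio callback (a sketch, not part of the library; audioCallback is a
 * made-up name):
 *
 *   void audioCallback(void *userdata, Uint8 *stream, int len) {
 *       SDL_ffmpegFile *film = (SDL_ffmpegFile*)userdata;
 *       int8_t *buf = SDL_ffmpegGetAudio(film, &len);  // enters the audio semaphore
 *       if(buf) {
 *           memcpy(stream, buf, len);
 *           SDL_ffmpegReleaseAudio(film, len);         // leaves the audio semaphore
 *       }
 *   }
 */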

int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
    //MainWindow * MWinsA=MainWindow::instance();

    if (file->skipAudio){
        return (av_gettime()/1000 + file->offset - file->startTime);
        //int64_t pos=MWinsA->Get_CurAudioTime();
        //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000); //SDL_GetTicks();
    }
    else
        // return the current playposition of our file
        return (av_gettime()/1000 + file->offset - file->startTime); //SDL_GetTicks();
}

SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {

    // create audio spec
    SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );

    if(spec) {
        spec->format = AUDIO_S16SYS;
        spec->samples = samples;
        spec->userdata = file;
        spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
        spec->freq = 48000;
        spec->channels = 2;

        // if we have a valid audiofile, we can use its data to create a
        // more appropriate audio spec
        if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
            spec->freq = file->as[file->audioStream]->sampleRate;
            spec->channels = file->as[file->audioStream]->channels;
        }
    }

    return spec;
}
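
/*
 * Illustrative use of SDL_ffmpegGetAudioSpec together with SDL_OpenAudio
 * (a sketch, not part of the library; audioCallback is the kind of callback
 * sketched above):
 *
 *   SDL_AudioSpec *spec = SDL_ffmpegGetAudioSpec(film, 512, audioCallback);
 *   if(spec && SDL_OpenAudio(spec, 0) == 0) {
 *       SDL_PauseAudio(0);  // start pulling audio through the callback
 *   }
 */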

int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {

    // returns the duration of the entire file, please note that ffmpeg doesn't
    // always get this value right! so don't bet your life on it...
    return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
}

int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {

    if(!w || !h) return -1;

    // if we have a valid video file selected, we use it
    // if not, we send default values and return.
    // by checking the return value you can check if you got a valid size
    if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
        *w = file->vs[file->videoStream]->width;
        *h = file->vs[file->videoStream]->height;
        return 0;
    }

    *w = 320;
    *h = 240;
    return -1;
}

int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {

    // this function is used to check if we selected a valid audio stream
    if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;

    return 1;
}

int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {

    // this function is used to check if we selected a valid video stream
    if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;

    return 1;
}

int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {

    // by putting 0 into state, we play the file
    // this behaviour is analogous to SDL audio
    file->pause = state;

    if(!file->pause) {
        file->startTime = av_gettime()/1000; //SDL_GetTicks();
    }

    return 0;
}

int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
    return file->pause;
}
|