ivand_qmul@125
|
1 /*******************************************************************************
|
ivand_qmul@125
|
2 * *
|
ivand_qmul@125
|
3 * SDL_ffmpeg is a library for basic multimedia functionality. *
|
ivand_qmul@125
|
4 * SDL_ffmpeg is based on ffmpeg. *
|
ivand_qmul@125
|
5 * *
|
ivand_qmul@125
|
6 * Copyright (C) 2007 Arjan Houben *
|
ivand_qmul@125
|
7 * *
|
ivand_qmul@125
|
8 * SDL_ffmpeg is free software: you can redistribute it and/or modify *
|
ivand_qmul@125
|
9 * it under the terms of the GNU Lesser General Public License as published *
|
ivand_qmul@125
|
10 * by the Free Software Foundation, either version 3 of the License, or any *
|
ivand_qmul@125
|
11 * later version. *
|
ivand_qmul@125
|
12 * *
|
ivand_qmul@125
|
13 * This program is distributed in the hope that it will be useful, *
|
ivand_qmul@125
|
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
|
ivand_qmul@125
|
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
|
ivand_qmul@125
|
16 * GNU Lesser General Public License for more details. *
|
ivand_qmul@125
|
17 * *
|
ivand_qmul@125
|
18 * You should have received a copy of the GNU Lesser General Public License *
|
ivand_qmul@125
|
19 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
|
ivand_qmul@125
|
20 * *
|
ivand_qmul@125
|
21 *******************************************************************************/
|
ivand_qmul@125
|
22
|
ivand_qmul@125
|
23 #include <stdio.h>
|
ivand_qmul@125
|
24 #include <stdlib.h>
|
ivand_qmul@125
|
25 #ifdef __cplusplus
|
ivand_qmul@125
|
26 extern "C" {
|
ivand_qmul@125
|
27 #endif
|
ivand_qmul@125
|
28 #ifdef WIN32
|
ivand_qmul@125
|
29 #include "SDL_ffmpeg.h"
|
ivand_qmul@125
|
30 #include <SDL.h>
|
ivand_qmul@125
|
31 #include <SDL_thread.h>
|
ivand_qmul@129
|
32 #include <stdio.h>
|
ivand_qmul@129
|
33 #include <Windows.h>
|
ivand_qmul@125
|
34 #endif
|
ivand_qmul@125
|
35
|
ivand_qmul@125
|
36 #ifdef __unix__
|
ivand_qmul@125
|
37 #include <SDL/SDL.h>
|
ivand_qmul@125
|
38 #include <SDL/SDL_thread.h>
|
ivand_qmul@125
|
39 #endif
|
ivand_qmul@125
|
40 #ifdef __cplusplus
|
ivand_qmul@125
|
41 }
|
ivand_qmul@125
|
42 #endif
|
ivand_qmul@125
|
43 #include "../../sv/main/MainWindow.h"
|
ivand_qmul@129
|
44 #include <time.h>
|
ivand_qmul@125
|
45
|
ivand_qmul@125
|
46 //const int SDL_FFMPEG_MAX_BUFFERED_FRAMES = 25;
|
ivand_qmul@125
|
47 //const int SDL_FFMPEG_MAX_BUFFERED_SAMPLES = 512 * 512;
|
ivand_qmul@129
|
48 extern float hopfactor;
|
ivand_qmul@125
|
49 int FFMPEG_init_was_called = 0;
|
ivand_qmul@129
|
50 FILE *pFile, *tFile;
|
ivand_qmul@129
|
51 int64_t Time,Time1;
|
ivand_qmul@129
|
52 int64_t realt=0;
|
ivand_qmul@129
|
53
|
ivand_qmul@125
|
54 SDL_ffmpegFile* SDL_ffmpegCreateFile() {
|
ivand_qmul@125
|
55
|
ivand_qmul@125
|
56 // create SDL_ffmpegFile pointer
|
ivand_qmul@125
|
57 SDL_ffmpegFile *file = (SDL_ffmpegFile*)malloc( sizeof(SDL_ffmpegFile) );
|
ivand_qmul@125
|
58 if(!file) return 0;
|
ivand_qmul@125
|
59 file->_ffmpeg=av_alloc_format_context();//(AVFormatContext*)malloc(sizeof(AVFormatContext));
|
ivand_qmul@125
|
60 // create a semaphore for every file
|
ivand_qmul@125
|
61 file->decode = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
62
|
ivand_qmul@129
|
63 Time=0;
|
ivand_qmul@129
|
64 Time1=0;
|
ivand_qmul@129
|
65 fopen_s (&pFile,"myfile.txt","w");
|
ivand_qmul@129
|
66 fopen_s (&tFile,"Timestampfile.txt","w");
|
ivand_qmul@125
|
67 // allocate room for VStreams
|
ivand_qmul@125
|
68 file->vs = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
69 if(!file->vs) {
|
ivand_qmul@125
|
70 free( file );
|
ivand_qmul@125
|
71 return 0;
|
ivand_qmul@125
|
72 }
|
ivand_qmul@125
|
73
|
ivand_qmul@125
|
74 // allocate room for AStreams
|
ivand_qmul@125
|
75 file->as = (SDL_ffmpegStream**)malloc( sizeof(SDL_ffmpegStream*) * MAX_STREAMS );
|
ivand_qmul@125
|
76 if(!file->as) {
|
ivand_qmul@125
|
77 free( file );
|
ivand_qmul@125
|
78 return 0;
|
ivand_qmul@125
|
79 }
|
ivand_qmul@125
|
80
|
ivand_qmul@125
|
81 // initialize variables with standard values
|
ivand_qmul@125
|
82 file->audioStream = -1;
|
ivand_qmul@125
|
83 file->videoStream = -1;
|
ivand_qmul@125
|
84
|
ivand_qmul@125
|
85 file->offset = 0;
|
ivand_qmul@125
|
86 file->videoOffset = 0;
|
ivand_qmul@125
|
87 file->startTime = 0;
|
ivand_qmul@125
|
88
|
ivand_qmul@125
|
89 file->threadID = 0;
|
ivand_qmul@125
|
90
|
ivand_qmul@125
|
91 return file;
|
ivand_qmul@125
|
92 }
|
ivand_qmul@125
|
93
|
ivand_qmul@125
|
94 void SDL_ffmpegFree(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
95
|
ivand_qmul@125
|
96 SDL_ffmpegStopDecoding(file);
|
ivand_qmul@125
|
97
|
ivand_qmul@125
|
98 SDL_ffmpegFlush(file);
|
ivand_qmul@125
|
99
|
ivand_qmul@125
|
100 free(file);
|
ivand_qmul@125
|
101 }
|
ivand_qmul@125
|
102
|
ivand_qmul@125
|
103 SDL_ffmpegFile* SDL_ffmpegOpen(const char* filename) {
|
ivand_qmul@125
|
104
|
ivand_qmul@125
|
105
|
ivand_qmul@125
|
106 // register all codecs
|
ivand_qmul@125
|
107 if(!FFMPEG_init_was_called) {
|
ivand_qmul@125
|
108 FFMPEG_init_was_called = 1;
|
ivand_qmul@125
|
109 av_register_all();
|
ivand_qmul@125
|
110 }
|
ivand_qmul@125
|
111
|
ivand_qmul@125
|
112 // open new ffmpegFile
|
ivand_qmul@125
|
113 SDL_ffmpegFile *file = SDL_ffmpegCreateFile();
|
ivand_qmul@125
|
114 if(!file) return 0;
|
ivand_qmul@125
|
115
|
ivand_qmul@125
|
116 // information about format is stored in file->_ffmpeg
|
ivand_qmul@125
|
117
|
ivand_qmul@125
|
118 // open the file
|
ivand_qmul@125
|
119 if(av_open_input_file( (AVFormatContext**)&file->_ffmpeg, filename, 0, 0, 0) != 0) {
|
ivand_qmul@125
|
120 fprintf(stderr, "could not open \"%s\"\n", filename);
|
ivand_qmul@125
|
121 free(file);
|
ivand_qmul@125
|
122 return 0;
|
ivand_qmul@125
|
123 }
|
ivand_qmul@125
|
124
|
ivand_qmul@125
|
125 // retrieve format information
|
ivand_qmul@125
|
126 if(av_find_stream_info((AVFormatContext *)(file->_ffmpeg)) < 0) {
|
ivand_qmul@125
|
127 fprintf(stderr, "could not retrieve video stream info");
|
ivand_qmul@125
|
128 free(file);
|
ivand_qmul@125
|
129 return 0;
|
ivand_qmul@125
|
130 }
|
ivand_qmul@125
|
131
|
ivand_qmul@125
|
132 // dump info to logfile
|
ivand_qmul@125
|
133 // dump_format(file->_ffmpeg, 0, filename, 0);
|
ivand_qmul@125
|
134
|
ivand_qmul@125
|
135 // find the streams in the file
|
ivand_qmul@125
|
136 file->VStreams = 0;
|
ivand_qmul@125
|
137 file->AStreams = 0;
|
ivand_qmul@125
|
138 file->threadActive = 0;
|
ivand_qmul@125
|
139
|
ivand_qmul@125
|
140 // iterate through all the streams and store audio/video streams
|
ivand_qmul@125
|
141 size_t i;
|
ivand_qmul@125
|
142 for(i=0; i<((AVFormatContext*)file->_ffmpeg)->nb_streams; i++) {
|
ivand_qmul@125
|
143
|
ivand_qmul@125
|
144 if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
|
ivand_qmul@125
|
145
|
ivand_qmul@125
|
146 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
147 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
148
|
ivand_qmul@125
|
149 if(stream) {
|
ivand_qmul@125
|
150 // we set our stream to zero
|
ivand_qmul@125
|
151 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
152
|
ivand_qmul@125
|
153 // save unique streamid
|
ivand_qmul@125
|
154 stream->id = i;
|
ivand_qmul@125
|
155
|
ivand_qmul@125
|
156 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
157 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
158
|
ivand_qmul@125
|
159 // save width, height and pixFmt of our outputframes
|
ivand_qmul@125
|
160 stream->width = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->width;
|
ivand_qmul@125
|
161 stream->height = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->height;
|
ivand_qmul@125
|
162 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
163
|
ivand_qmul@125
|
164 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
165 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
166
|
ivand_qmul@125
|
167 // get the correct decoder for this stream
|
ivand_qmul@125
|
168 AVCodec *codec = avcodec_find_decoder(((AVCodecContext*)stream->_ffmpeg)->codec_id);
|
ivand_qmul@125
|
169
|
ivand_qmul@125
|
170 if(!codec) {
|
ivand_qmul@125
|
171 free(stream);
|
ivand_qmul@125
|
172 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
173 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
174 free(stream);
|
ivand_qmul@125
|
175 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
176 } else {
|
ivand_qmul@125
|
177
|
ivand_qmul@125
|
178 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
179 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
180 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
181 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
182 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
183 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
184 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
185
|
ivand_qmul@125
|
186 stream->audio = 0;
|
ivand_qmul@125
|
187 stream->size = 0;
|
ivand_qmul@125
|
188 stream->imageBuffer = (bufferImage**)calloc( SDL_FFMPEG_MAX_BUFFERED_FRAMES, sizeof(bufferImage*) );
|
ivand_qmul@129
|
189 stream->writeImage = 0;
|
ivand_qmul@129
|
190 stream->readImage = 0;
|
ivand_qmul@125
|
191 file->vs[file->VStreams] = stream;
|
ivand_qmul@125
|
192 file->VStreams++;
|
ivand_qmul@125
|
193
|
ivand_qmul@125
|
194 // create semaphore for thread-safe use
|
ivand_qmul@125
|
195 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
196 }
|
ivand_qmul@125
|
197 }
|
ivand_qmul@125
|
198 } else if(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
|
ivand_qmul@125
|
199
|
ivand_qmul@125
|
200 // if this is a packet of the correct type we create a new stream
|
ivand_qmul@125
|
201 SDL_ffmpegStream* stream = (SDL_ffmpegStream*)malloc( sizeof(SDL_ffmpegStream) );
|
ivand_qmul@125
|
202
|
ivand_qmul@125
|
203 if(stream) {
|
ivand_qmul@125
|
204 // we set our stream to zero
|
ivand_qmul@125
|
205 memset(stream, 0, sizeof(SDL_ffmpegStream));
|
ivand_qmul@125
|
206
|
ivand_qmul@125
|
207 // save unique streamid
|
ivand_qmul@125
|
208 stream->id = i;
|
ivand_qmul@125
|
209
|
ivand_qmul@125
|
210 // the timeBase is what we use to calculate from/to pts
|
ivand_qmul@125
|
211 stream->timeBase = av_q2d(((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base) * 1000;
|
ivand_qmul@125
|
212
|
ivand_qmul@125
|
213 // _ffmpeg holds data about streamcodec
|
ivand_qmul@125
|
214 stream->_ffmpeg = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec;
|
ivand_qmul@125
|
215
|
ivand_qmul@125
|
216 stream->width = 0;
|
ivand_qmul@125
|
217 stream->height = 0;
|
ivand_qmul@125
|
218 stream->pixFmt = PIX_FMT_RGB24;
|
ivand_qmul@125
|
219
|
ivand_qmul@125
|
220 // get the correct decoder for this stream
|
ivand_qmul@125
|
221 AVCodec *codec = avcodec_find_decoder(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_id);
|
ivand_qmul@125
|
222
|
ivand_qmul@125
|
223 if(!codec) {
|
ivand_qmul@125
|
224 free( stream );
|
ivand_qmul@125
|
225 fprintf(stderr, "could not find codec\n");
|
ivand_qmul@125
|
226 } else if(avcodec_open(((AVFormatContext*)file->_ffmpeg)->streams[i]->codec, codec) < 0) {
|
ivand_qmul@125
|
227 free( stream );
|
ivand_qmul@125
|
228 fprintf(stderr, "could not open decoder\n");
|
ivand_qmul@125
|
229 } else {
|
ivand_qmul@125
|
230
|
ivand_qmul@125
|
231 // copy metadata from AVStream into our stream
|
ivand_qmul@125
|
232 stream->frameRate[0] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.num;
|
ivand_qmul@125
|
233 stream->frameRate[1] = ((AVFormatContext*)file->_ffmpeg)->streams[i]->time_base.den;
|
ivand_qmul@125
|
234 memcpy(stream->language, ((AVFormatContext*)file->_ffmpeg)->streams[i]->language, 4);
|
ivand_qmul@125
|
235 stream->sampleRate = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->sample_rate;
|
ivand_qmul@125
|
236 stream->channels = ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->channels;
|
ivand_qmul@125
|
237 memcpy(stream->codecName, ((AVFormatContext*)file->_ffmpeg)->streams[i]->codec->codec_name, 32);
|
ivand_qmul@125
|
238
|
ivand_qmul@125
|
239 stream->audio = (int8_t*)malloc( sizeof(int8_t) * SDL_FFMPEG_MAX_BUFFERED_SAMPLES );
|
ivand_qmul@125
|
240 stream->size = 0;
|
ivand_qmul@125
|
241 stream->imageBuffer = 0;
|
ivand_qmul@125
|
242
|
ivand_qmul@125
|
243 file->as[file->AStreams] = stream;
|
ivand_qmul@125
|
244 file->AStreams++;
|
ivand_qmul@125
|
245
|
ivand_qmul@125
|
246 // create semaphore for thread-safe use
|
ivand_qmul@125
|
247 stream->sem = SDL_CreateSemaphore(1);
|
ivand_qmul@125
|
248 }
|
ivand_qmul@125
|
249 }
|
ivand_qmul@125
|
250 }
|
ivand_qmul@125
|
251 }
|
ivand_qmul@125
|
252
|
ivand_qmul@125
|
253 return file;
|
ivand_qmul@125
|
254 }
|
ivand_qmul@125
|
255
|
ivand_qmul@125
|
256 SDL_Surface* SDL_ffmpegGetVideo(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
257
|
ivand_qmul@125
|
258 MainWindow * MWinsA=MainWindow::instance();
|
ivand_qmul@125
|
259 if( !SDL_ffmpegValidVideo(file) || file->pause || file->skipVideo) return 0;
|
ivand_qmul@125
|
260
|
ivand_qmul@125
|
261 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
262
|
ivand_qmul@125
|
263 bufferImage *option = 0;
|
ivand_qmul@129
|
264 //int i;
|
ivand_qmul@129
|
265 float ratio;
|
ivand_qmul@129
|
266 int64_t pos,pos1, pos2, timestamp;
|
ivand_qmul@129
|
267 //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@129
|
268 pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@125
|
269
|
ivand_qmul@129
|
270 fprintf (pFile, "p: \t %u\t", pos);
|
ivand_qmul@129
|
271 //if (MWinsA->Get_HardwareBufferTime()==0)
|
ivand_qmul@129
|
272 // pos1=0;
|
ivand_qmul@129
|
273 //else {
|
ivand_qmul@129
|
274 // pos1=MWinsA->Get_HardwareBufferTime();
|
ivand_qmul@129
|
275 // //fprintf (tFile, "%u\t", pos1);
|
ivand_qmul@129
|
276 // int64_t timeTemp;
|
ivand_qmul@129
|
277 // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
|
ivand_qmul@129
|
278 //
|
ivand_qmul@129
|
279 // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
|
ivand_qmul@129
|
280 // fprintf (pFile, "%u\t", pos1);
|
ivand_qmul@129
|
281 //}
|
ivand_qmul@129
|
282 //pos2=pos+pos1;
|
ivand_qmul@129
|
283 fprintf (pFile, "%u\n", pos);
|
ivand_qmul@129
|
284
|
ivand_qmul@129
|
285 // if this entry does not exist, continue
|
ivand_qmul@129
|
286 while(((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage)>0)&&(file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp <= pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000))//&& (file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp >= pos - file->timebase+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000))
|
ivand_qmul@129
|
287 {
|
ivand_qmul@129
|
288 //pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@129
|
289 //timestamp=file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp;
|
ivand_qmul@129
|
290 //fprintf (tFile, "try: %d %d\n", (pos+ ((AVFormatContext*)file->_ffmpeg)->start_time/1000), timestamp);
|
ivand_qmul@129
|
291 // do we have an image that should have been shown?
|
ivand_qmul@129
|
292 //if(file->vs[file->videoStream]->imageBuffer[mod(file->vs[file->videoStream]->readImage,SDL_FFMPEG_MAX_BUFFERED_FRAMES)]->timestamp <= pos + (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000) {
|
ivand_qmul@129
|
293
|
ivand_qmul@129
|
294 // if this is the first option we find, we simply save it
|
ivand_qmul@129
|
295 if(!option) {
|
ivand_qmul@125
|
296
|
ivand_qmul@129
|
297 option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
|
ivand_qmul@125
|
298
|
ivand_qmul@129
|
299 // set to 0 so we know this position in the buffer is available again
|
ivand_qmul@129
|
300 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
|
ivand_qmul@129
|
301 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@125
|
302
|
ivand_qmul@129
|
303 } else {
|
ivand_qmul@125
|
304
|
ivand_qmul@129
|
305 // we found a newer possible timestamp, we delete the older one
|
ivand_qmul@129
|
306 if( option->timestamp < file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]->timestamp) {
|
ivand_qmul@125
|
307
|
ivand_qmul@129
|
308 // this image is too old, we discard it
|
ivand_qmul@129
|
309 SDL_FreeSurface( option->img );
|
ivand_qmul@125
|
310
|
ivand_qmul@129
|
311 // free old option
|
ivand_qmul@129
|
312 free( option );
|
ivand_qmul@125
|
313
|
ivand_qmul@129
|
314 // new pointer to position in container
|
ivand_qmul@129
|
315 option = file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES];
|
ivand_qmul@125
|
316
|
ivand_qmul@129
|
317 // set to 0 so we know this position in the buffer is available again
|
ivand_qmul@129
|
318 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = 0;
|
ivand_qmul@129
|
319 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@129
|
320 }
|
ivand_qmul@129
|
321 else {
|
ivand_qmul@129
|
322 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->readImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES]=0;
|
ivand_qmul@129
|
323 file->vs[file->videoStream]->readImage++;
|
ivand_qmul@129
|
324 }
|
ivand_qmul@129
|
325 }
|
ivand_qmul@125
|
326
|
ivand_qmul@129
|
327
|
ivand_qmul@129
|
328 pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@129
|
329 fprintf (pFile, "e:\t%u\t", pos);
|
ivand_qmul@129
|
330 //if (MWinsA->Get_HardwareBufferTime()==0)
|
ivand_qmul@129
|
331 // pos1=0;
|
ivand_qmul@129
|
332 //else {
|
ivand_qmul@129
|
333 // pos1=MWinsA->Get_HardwareBufferTime();
|
ivand_qmul@129
|
334 // //fprintf (tFile, "%u\t", pos1);
|
ivand_qmul@129
|
335 // int64_t timeTemp;
|
ivand_qmul@129
|
336 // QueryPerformanceCounter((LARGE_INTEGER *)(&timeTemp));
|
ivand_qmul@129
|
337
|
ivand_qmul@129
|
338 // pos1=(timeTemp-pos1)/(file->countFreq*hopfactor);
|
ivand_qmul@129
|
339 // fprintf (pFile, "%u\t", pos1);
|
ivand_qmul@129
|
340 //}
|
ivand_qmul@129
|
341 //fprintf (pFile, "%u\n", pos2);
|
ivand_qmul@129
|
342 //pos2=pos+pos1;
|
ivand_qmul@129
|
343 //if (pos<pos2) pos=pos2;
|
ivand_qmul@129
|
344 }
|
ivand_qmul@129
|
345 //}
|
ivand_qmul@129
|
346 //}
|
ivand_qmul@129
|
347 int x=file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage;
|
ivand_qmul@125
|
348 // if we did not found an option, we exit
|
ivand_qmul@125
|
349 if(!option) {
|
ivand_qmul@125
|
350 // release the lock
|
ivand_qmul@129
|
351 /*timestamp=0;
|
ivand_qmul@129
|
352 int64_t tt=av_gettime()/1000-file->timer;
|
ivand_qmul@129
|
353 file->timer=av_gettime()/1000;
|
ivand_qmul@129
|
354 realt+=tt;
|
ivand_qmul@129
|
355 fprintf (tFile, "%u\t", realt);
|
ivand_qmul@129
|
356 fprintf (tFile, "%u\t", tt);
|
ivand_qmul@129
|
357 fprintf (tFile, "%u\t", pos);
|
ivand_qmul@129
|
358 fprintf (tFile, "%u\n", timestamp);*/
|
ivand_qmul@125
|
359 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
360 return 0;
|
ivand_qmul@125
|
361 }
|
ivand_qmul@129
|
362 int64_t tt;
|
ivand_qmul@129
|
363 QueryPerformanceCounter((LARGE_INTEGER *)(&tt));
|
ivand_qmul@129
|
364 tt=tt/(file->countFreq)-file->timer;
|
ivand_qmul@129
|
365
|
ivand_qmul@129
|
366 QueryPerformanceCounter((LARGE_INTEGER *)(&file->timer));
|
ivand_qmul@129
|
367 file->timer=file->timer/(file->countFreq);
|
ivand_qmul@129
|
368 realt+=tt;
|
ivand_qmul@129
|
369 fprintf (tFile, "%u\t", x);
|
ivand_qmul@129
|
370 fprintf (tFile, "%u\t", realt);
|
ivand_qmul@129
|
371 fprintf (tFile, "%u\t", tt);
|
ivand_qmul@129
|
372 timestamp=(pos-option->timestamp+((AVFormatContext*)file->_ffmpeg)->start_time/1000)/hopfactor;
|
ivand_qmul@129
|
373 fprintf (tFile, "%u\t", pos);//+ (file->vs[file->videoStream]->timeBase)/4+((AVFormatContext*)file->_ffmpeg)->start_time/1000);
|
ivand_qmul@129
|
374 fprintf (tFile, "%d\n", timestamp);
|
ivand_qmul@125
|
375 // we did found an option, so we return the imagedata
|
ivand_qmul@125
|
376 return option->img;
|
ivand_qmul@125
|
377 }
|
ivand_qmul@125
|
378
|
ivand_qmul@125
|
379 int SDL_ffmpegReleaseVideo(SDL_ffmpegFile *file, SDL_Surface *bmp) {
|
ivand_qmul@125
|
380
|
ivand_qmul@125
|
381 // if there was no valid video stream, we should not release
|
ivand_qmul@125
|
382 if( !SDL_ffmpegValidVideo(file) || file->skipVideo) return -1;
|
ivand_qmul@125
|
383
|
ivand_qmul@125
|
384 // free surface
|
ivand_qmul@125
|
385 SDL_FreeSurface(bmp);
|
ivand_qmul@125
|
386
|
ivand_qmul@125
|
387 // release semaphore if needed
|
ivand_qmul@125
|
388 if( !SDL_SemValue(file->vs[file->videoStream]->sem) ) {
|
ivand_qmul@125
|
389 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
390 }
|
ivand_qmul@125
|
391
|
ivand_qmul@125
|
392 return 0;
|
ivand_qmul@125
|
393 }
|
ivand_qmul@125
|
394
|
ivand_qmul@125
|
395 SDL_ffmpegStream* SDL_ffmpegGetAudioStream(SDL_ffmpegFile *file, int audioID) {
|
ivand_qmul@125
|
396
|
ivand_qmul@125
|
397 // check if we have any audiostreams
|
ivand_qmul@125
|
398 if(!file->AStreams) return 0;
|
ivand_qmul@125
|
399
|
ivand_qmul@125
|
400 // check if the requested id is possible
|
ivand_qmul@125
|
401 if(audioID >= file->AStreams) return 0;
|
ivand_qmul@125
|
402
|
ivand_qmul@125
|
403 // return ausiostream linked to audioID
|
ivand_qmul@125
|
404 return file->as[audioID];
|
ivand_qmul@125
|
405 }
|
ivand_qmul@125
|
406
|
ivand_qmul@125
|
407 int SDL_ffmpegSelectAudioStream(SDL_ffmpegFile* file, int audioID) {
|
ivand_qmul@125
|
408
|
ivand_qmul@125
|
409 // check if we have any audiostreams
|
ivand_qmul@125
|
410 if(!file->AStreams) return -1;
|
ivand_qmul@125
|
411
|
ivand_qmul@125
|
412 // check if the requested id is possible
|
ivand_qmul@125
|
413 if(audioID >= file->AStreams) return -1;
|
ivand_qmul@125
|
414
|
ivand_qmul@125
|
415 // set current audiostream to stream linked to audioID
|
ivand_qmul@125
|
416 file->audioStream = audioID;
|
ivand_qmul@125
|
417
|
ivand_qmul@125
|
418 return 0;
|
ivand_qmul@125
|
419 }
|
ivand_qmul@125
|
420
|
ivand_qmul@125
|
421 SDL_ffmpegStream* SDL_ffmpegGetVideoStream(SDL_ffmpegFile *file, int videoID) {
|
ivand_qmul@125
|
422
|
ivand_qmul@125
|
423 // check if we have any videostreams
|
ivand_qmul@125
|
424 if(!file->VStreams) return 0;
|
ivand_qmul@125
|
425
|
ivand_qmul@125
|
426 // check if the requested id is possible
|
ivand_qmul@125
|
427 if(videoID >= file->VStreams) return 0;
|
ivand_qmul@125
|
428
|
ivand_qmul@125
|
429 // return ausiostream linked to videoID
|
ivand_qmul@125
|
430 return file->vs[videoID];
|
ivand_qmul@125
|
431 }
|
ivand_qmul@125
|
432
|
ivand_qmul@125
|
433 int SDL_ffmpegSelectVideoStream(SDL_ffmpegFile* file, int videoID) {
|
ivand_qmul@125
|
434
|
ivand_qmul@125
|
435 // check if we have any videostreams
|
ivand_qmul@125
|
436 if(!file->VStreams) return -1;
|
ivand_qmul@125
|
437
|
ivand_qmul@125
|
438 // check if the requested id is possible
|
ivand_qmul@125
|
439 if(videoID >= file->VStreams) return -1;
|
ivand_qmul@125
|
440
|
ivand_qmul@125
|
441 // set current videostream to stream linked to videoID
|
ivand_qmul@125
|
442 file->videoStream = videoID;
|
ivand_qmul@125
|
443
|
ivand_qmul@125
|
444 return 0;
|
ivand_qmul@125
|
445 }
|
ivand_qmul@125
|
446
|
ivand_qmul@125
|
447 int SDL_ffmpegStartDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
448
|
ivand_qmul@125
|
449 // start a thread that continues to fill audio/video buffers
|
ivand_qmul@125
|
450 if(!file->threadID) file->threadID = SDL_CreateThread(SDL_ffmpegDecodeThread, file);
|
ivand_qmul@125
|
451
|
ivand_qmul@125
|
452 return 0;
|
ivand_qmul@125
|
453 }
|
ivand_qmul@125
|
454
|
ivand_qmul@125
|
455 int SDL_ffmpegStopDecoding(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
456
|
ivand_qmul@125
|
457 // stop decode thread
|
ivand_qmul@125
|
458 file->threadActive = 0;
|
ivand_qmul@125
|
459 if(file->threadID) SDL_WaitThread(file->threadID, 0);
|
ivand_qmul@125
|
460
|
ivand_qmul@125
|
461 // set threadID to zero, so we can check for concurrent threads
|
ivand_qmul@125
|
462 file->threadID = 0;
|
ivand_qmul@125
|
463
|
ivand_qmul@125
|
464 return -1;
|
ivand_qmul@125
|
465 }
|
ivand_qmul@125
|
466
|
ivand_qmul@125
|
467 int SDL_ffmpegDecodeThread(void* data) {
|
ivand_qmul@125
|
468 static struct SwsContext *img_convert_ctx;
|
ivand_qmul@125
|
469 // unpack the void pointer
|
ivand_qmul@125
|
470 SDL_ffmpegFile* file = (SDL_ffmpegFile*)data;
|
ivand_qmul@125
|
471
|
ivand_qmul@125
|
472 // flag this thread as active, used for stopping
|
ivand_qmul@125
|
473 file->threadActive = 1;
|
ivand_qmul@125
|
474
|
ivand_qmul@125
|
475 // create a packet for our data
|
ivand_qmul@125
|
476 AVPacket pack;
|
ivand_qmul@125
|
477
|
ivand_qmul@125
|
478 // reserve some pointers for use in loop
|
ivand_qmul@125
|
479 AVFrame *inFrame, *inFrameRGB;
|
ivand_qmul@125
|
480
|
ivand_qmul@125
|
481 // allocate a frame
|
ivand_qmul@125
|
482 inFrame = avcodec_alloc_frame();
|
ivand_qmul@125
|
483
|
ivand_qmul@125
|
484 // allocate another frame for unknown->RGB conversion
|
ivand_qmul@125
|
485 inFrameRGB = avcodec_alloc_frame();
|
ivand_qmul@125
|
486
|
ivand_qmul@125
|
487 if(SDL_ffmpegValidVideo(file)) {
|
ivand_qmul@125
|
488 // allocate buffer
|
ivand_qmul@125
|
489 uint8_t *inVideoBuffer = (uint8_t*)malloc( avpicture_get_size(file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
490 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
491 file->vs[file->videoStream]->height) );
|
ivand_qmul@125
|
492
|
ivand_qmul@125
|
493 // put buffer into our reserved frame
|
ivand_qmul@125
|
494 avpicture_fill( (AVPicture*)inFrameRGB,
|
ivand_qmul@125
|
495 inVideoBuffer,
|
ivand_qmul@125
|
496 file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
497 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
498 file->vs[file->videoStream]->height);
|
ivand_qmul@125
|
499 }
|
ivand_qmul@125
|
500
|
ivand_qmul@125
|
501 // allocate temporary audiobuffer
|
ivand_qmul@125
|
502 int16_t *samples = (int16_t*)malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE );
|
ivand_qmul@125
|
503
|
ivand_qmul@125
|
504 // reserve integer for use in loop
|
ivand_qmul@125
|
505 int got_frame;
|
ivand_qmul@125
|
506
|
ivand_qmul@125
|
507 while(file->threadActive) {
|
ivand_qmul@125
|
508
|
ivand_qmul@125
|
509 // read a packet from the file
|
ivand_qmul@125
|
510 if(av_read_frame((AVFormatContext *)(file->_ffmpeg), &pack) < 0) {
|
ivand_qmul@125
|
511 // thread is idle
|
ivand_qmul@125
|
512 SDL_Delay(10);
|
ivand_qmul@125
|
513 continue;
|
ivand_qmul@125
|
514 }
|
ivand_qmul@125
|
515 if (file->skipAudio && pack.stream_index == file->as[file->audioStream]->id){
|
ivand_qmul@125
|
516 SDL_Delay(1);
|
ivand_qmul@125
|
517 continue;
|
ivand_qmul@125
|
518 }
|
ivand_qmul@125
|
519
|
ivand_qmul@125
|
520 // we got a packet, lets handle it
|
ivand_qmul@125
|
521
|
ivand_qmul@125
|
522 // let's start by entering the video semaphore
|
ivand_qmul@125
|
523 SDL_SemWait(file->decode);
|
ivand_qmul@125
|
524
|
ivand_qmul@125
|
525 // If it's a audio packet from our stream...
|
ivand_qmul@125
|
526 if( SDL_ffmpegValidAudio(file) && pack.stream_index == file->as[file->audioStream]->id && !file->skipAudio) {
|
ivand_qmul@125
|
527
|
ivand_qmul@125
|
528 uint8_t *data = pack.data;
|
ivand_qmul@125
|
529 int size = pack.size;
|
ivand_qmul@125
|
530 int len;
|
ivand_qmul@125
|
531
|
ivand_qmul@125
|
532 while(size > 0 && file->threadActive) {
|
ivand_qmul@125
|
533
|
ivand_qmul@125
|
534 // Decode the packet
|
ivand_qmul@125
|
535 len = avcodec_decode_audio((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg), samples, &got_frame, data, size);
|
ivand_qmul@125
|
536
|
ivand_qmul@125
|
537 // if error, we skip the frame
|
ivand_qmul@125
|
538 if(len < 0 || !got_frame) {
|
ivand_qmul@125
|
539 size = 0;
|
ivand_qmul@125
|
540 break;
|
ivand_qmul@125
|
541 }
|
ivand_qmul@125
|
542
|
ivand_qmul@125
|
543 // change pointers
|
ivand_qmul@125
|
544 data += got_frame;
|
ivand_qmul@125
|
545 size -= got_frame;
|
ivand_qmul@125
|
546
|
ivand_qmul@125
|
547 // if the audiobuffer is full, the thread waits
|
ivand_qmul@125
|
548 while( file->as[file->audioStream]->size + got_frame > SDL_FFMPEG_MAX_BUFFERED_SAMPLES &&
|
ivand_qmul@125
|
549 file->threadActive) {
|
ivand_qmul@125
|
550 SDL_Delay(5);
|
ivand_qmul@125
|
551 }
|
ivand_qmul@125
|
552
|
ivand_qmul@125
|
553 // write an audiopts
|
ivand_qmul@125
|
554 int64_t audiopts = pack.pts * file->as[file->audioStream]->timeBase;
|
ivand_qmul@125
|
555
|
ivand_qmul@125
|
556 // is the audioBuffer is empty
|
ivand_qmul@125
|
557 if(!file->as[file->audioStream]->size) {
|
ivand_qmul@125
|
558
|
ivand_qmul@125
|
559 // we set a new pts
|
ivand_qmul@125
|
560 file->as[file->audioStream]->hardPts = file->as[file->audioStream]->pts = audiopts;
|
ivand_qmul@125
|
561
|
ivand_qmul@125
|
562 // we set totalbytes to zero, as this represents the amount
|
ivand_qmul@125
|
563 // of bytes that were played since our last 'hardPts'
|
ivand_qmul@125
|
564 file->as[file->audioStream]->totalBytes = 0;
|
ivand_qmul@125
|
565 }
|
ivand_qmul@125
|
566
|
ivand_qmul@125
|
567 // no need to store old samples
|
ivand_qmul@125
|
568 if(audiopts >= SDL_ffmpegGetPosition(file)) {
|
ivand_qmul@125
|
569
|
ivand_qmul@125
|
570 // enter audio semaphore
|
ivand_qmul@125
|
571 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
572
|
ivand_qmul@125
|
573 // copy data from temporary buffer to streambuffer
|
ivand_qmul@125
|
574 memcpy(file->as[file->audioStream]->audio+file->as[file->audioStream]->size, samples, got_frame);
|
ivand_qmul@125
|
575
|
ivand_qmul@125
|
576 // set the new size of the audiobuffer
|
ivand_qmul@125
|
577 file->as[file->audioStream]->size += got_frame;
|
ivand_qmul@125
|
578
|
ivand_qmul@125
|
579 // we leave the audio semaphore
|
ivand_qmul@125
|
580 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
581 }
|
ivand_qmul@125
|
582 }
|
ivand_qmul@125
|
583 }
|
ivand_qmul@125
|
584
|
ivand_qmul@125
|
585 // If it's a video packet from our video stream...
|
ivand_qmul@125
|
586 if( SDL_ffmpegValidVideo(file) && pack.stream_index == file->vs[file->videoStream]->id && !file->skipVideo) {
|
ivand_qmul@125
|
587
|
ivand_qmul@125
|
588 got_frame = 0;
|
ivand_qmul@129
|
589 //Time1=av_gettime();
|
ivand_qmul@125
|
590 // Decode the packet
|
ivand_qmul@125
|
591 avcodec_decode_video((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg), inFrame, &got_frame, pack.data, pack.size);
|
ivand_qmul@125
|
592
|
ivand_qmul@125
|
593 if(got_frame) {
|
ivand_qmul@125
|
594
|
ivand_qmul@125
|
595 // create imagebuffer
|
ivand_qmul@125
|
596 bufferImage *buf = (bufferImage*)malloc( sizeof(bufferImage) );
|
ivand_qmul@125
|
597
|
ivand_qmul@125
|
598 // write timestamp into the buffer
|
ivand_qmul@125
|
599 buf->timestamp = file->vs[file->videoStream]->timeBase * pack.dts;
|
ivand_qmul@125
|
600
|
ivand_qmul@125
|
601 // usefull when dealing with B frames
|
ivand_qmul@125
|
602 if(pack.dts == AV_NOPTS_VALUE) {
|
ivand_qmul@125
|
603 // if we did not get a valid timestamp, we make one up based on the last
|
ivand_qmul@125
|
604 // valid timestamp + the duration of a frame
|
ivand_qmul@125
|
605 buf->timestamp = file->vs[file->videoStream]->lastTimeStamp + file->vs[file->videoStream]->timeBase;
|
ivand_qmul@125
|
606 }
|
ivand_qmul@125
|
607
|
ivand_qmul@125
|
608 // if new timestamp is from future, we proceed
|
ivand_qmul@125
|
609 // if(buf->timestamp >= SDL_ffmpegGetPosition(file))
|
ivand_qmul@125
|
610 // {
|
ivand_qmul@125
|
611 if (img_convert_ctx == NULL) {
|
ivand_qmul@125
|
612 img_convert_ctx = sws_getContext(file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
|
ivand_qmul@125
|
613 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
|
ivand_qmul@125
|
614 file->vs[file->videoStream]->width, file->vs[file->videoStream]->height,
|
ivand_qmul@125
|
615 file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
616 sws_flags, NULL, NULL, NULL);
|
ivand_qmul@125
|
617 if (img_convert_ctx == NULL) {
|
ivand_qmul@125
|
618 fprintf(stderr, "Cannot initialize the conversion context\n");
|
ivand_qmul@125
|
619 exit(1);
|
ivand_qmul@125
|
620 }
|
ivand_qmul@125
|
621 }
|
ivand_qmul@125
|
622
|
ivand_qmul@125
|
623 sws_scale(img_convert_ctx, ((AVPicture*)inFrame)->data, ((AVPicture*)inFrame)->linesize,
|
ivand_qmul@125
|
624 0, file->vs[file->videoStream]->height, ((AVPicture*)inFrameRGB)->data, ((AVPicture*)inFrameRGB)->linesize);
|
ivand_qmul@125
|
625
|
ivand_qmul@125
|
626 // we convert whatever type of data we got to RGB24
|
ivand_qmul@125
|
627 /* img_convert((AVPicture*)inFrameRGB,
|
ivand_qmul@125
|
628 file->vs[file->videoStream]->pixFmt,
|
ivand_qmul@125
|
629 (AVPicture*)inFrame,
|
ivand_qmul@125
|
630 ((AVCodecContext*)file->vs[file->videoStream]->_ffmpeg)->pix_fmt,
|
ivand_qmul@125
|
631 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
632 file->vs[file->videoStream]->height);
|
ivand_qmul@125
|
633 */
|
ivand_qmul@125
|
634 // allocate image room
|
ivand_qmul@125
|
635 buf->img = SDL_CreateRGBSurface(SDL_SWSURFACE,
|
ivand_qmul@125
|
636 file->vs[file->videoStream]->width,
|
ivand_qmul@125
|
637 file->vs[file->videoStream]->height,
|
ivand_qmul@125
|
638 24, 0x0000FF, 0x00FF00, 0xFF0000, 0);
|
ivand_qmul@125
|
639
|
ivand_qmul@125
|
640 // copy image data to image room
|
ivand_qmul@125
|
641 memcpy(buf->img->pixels, inFrameRGB->data[0],
|
ivand_qmul@125
|
642 file->vs[file->videoStream]->width * file->vs[file->videoStream]->height * 3);
|
ivand_qmul@129
|
643 file->timebase=buf->timestamp-file->vs[file->videoStream]->lastTimeStamp;
|
ivand_qmul@125
|
644 // we write the lastTimestamp we got
|
ivand_qmul@125
|
645 file->vs[file->videoStream]->lastTimeStamp = buf->timestamp;
|
ivand_qmul@125
|
646
|
ivand_qmul@129
|
647 //int i;
|
ivand_qmul@125
|
648 int again = 1;
|
ivand_qmul@129
|
649 //Time=av_gettime()-Time1;
|
ivand_qmul@125
|
650
|
ivand_qmul@129
|
651 //fprintf (pFile, "%d \n",Time);
|
ivand_qmul@125
|
652 // keep trying to fit in buffer, until the data was actually placed in the buffer
|
ivand_qmul@125
|
653 while(again && file->threadActive) {
|
ivand_qmul@125
|
654
|
ivand_qmul@125
|
655 // we enter the video semaphore
|
ivand_qmul@125
|
656 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
657
|
ivand_qmul@125
|
658 // loop through all positions in buffer until an empty
|
ivand_qmul@125
|
659 // space was found
|
ivand_qmul@129
|
660 //for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@125
|
661 // if this place in the buffer is empty we write our new frame
|
ivand_qmul@129
|
662 if((file->vs[file->videoStream]->writeImage - file->vs[file->videoStream]->readImage) < SDL_FFMPEG_MAX_BUFFERED_FRAMES) {
|
ivand_qmul@129
|
663 file->vs[file->videoStream]->imageBuffer[file->vs[file->videoStream]->writeImage%SDL_FFMPEG_MAX_BUFFERED_FRAMES] = buf;
|
ivand_qmul@129
|
664 file->vs[file->videoStream]->writeImage++;
|
ivand_qmul@125
|
665 // we placed our image in the buffer, moving on
|
ivand_qmul@125
|
666 again = 0;
|
ivand_qmul@129
|
667
|
ivand_qmul@125
|
668 }
|
ivand_qmul@129
|
669 //}
|
ivand_qmul@125
|
670
|
ivand_qmul@125
|
671 // we leave the video semaphore
|
ivand_qmul@125
|
672 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@129
|
673
|
ivand_qmul@125
|
674 // frames aren't being release every ms, so we can take some
|
ivand_qmul@125
|
675 // time before we try and fit our new image again
|
ivand_qmul@129
|
676 if(again)
|
ivand_qmul@129
|
677 {
|
ivand_qmul@129
|
678 SDL_SemPost(file->decode);
|
ivand_qmul@129
|
679 SDL_Delay(3);
|
ivand_qmul@129
|
680 SDL_SemWait(file->decode);
|
ivand_qmul@129
|
681 }
|
ivand_qmul@125
|
682 }
|
ivand_qmul@125
|
683 // }
|
ivand_qmul@125
|
684 //else {
|
ivand_qmul@125
|
685 // // if our decoded frame was too old, we don't bother putting
|
ivand_qmul@125
|
686 // // it in our buffer
|
ivand_qmul@125
|
687 // free( buf );
|
ivand_qmul@125
|
688 // }
|
ivand_qmul@125
|
689 }
|
ivand_qmul@125
|
690 }
|
ivand_qmul@125
|
691 // we leave the decode semaphore
|
ivand_qmul@125
|
692 SDL_SemPost(file->decode);
|
ivand_qmul@125
|
693 if ((file->skipAudio)&&(file->delay))
|
ivand_qmul@125
|
694 SDL_Delay(3);
|
ivand_qmul@125
|
695 }
|
ivand_qmul@125
|
696 // if we stop this thread, we can release the packet we reserved
|
ivand_qmul@125
|
697 av_free_packet(&pack);
|
ivand_qmul@125
|
698
|
ivand_qmul@125
|
699 return 0;
|
ivand_qmul@125
|
700 }
|
ivand_qmul@125
|
701
|
ivand_qmul@125
|
/**
 * Seek to an absolute position in the file.
 *
 * \param file       file to seek in
 * \param timestamp  target position in milliseconds from the start of the stream
 * \return 0 on success, -1 if the position is out of bounds or av_seek_frame failed
 *
 * Locking: takes file->decode for the whole operation so the decode thread
 * cannot refill buffers while they are being flushed; the per-stream
 * semaphores are taken only around avcodec_flush_buffers.
 */
int SDL_ffmpegSeek(SDL_ffmpegFile* file, int64_t timestamp) {

    // if the seek position is past the end of the file, refuse it
    if(timestamp >= SDL_ffmpegGetDuration(file)) return -1;

    // start by flushing the buffers
    SDL_ffmpegFlush(file);

    // we enter the decode semaphore so the decode thread cannot be working on
    // data we are trying to flush
    SDL_SemWait(file->decode);

    // if the stream has an offset, add it to the start time
    int64_t startOffset = 0;
    if(((AVFormatContext*)file->_ffmpeg)->start_time != AV_NOPTS_VALUE) {
        // start_time is in AV_TIME_BASE fractional seconds
        startOffset = ((AVFormatContext*)file->_ffmpeg)->start_time;
    }
    //if (file->skipAudio) startOffset=0;
    // calculate the final timestamp for the seek action, converting the
    // millisecond argument into AV_TIME_BASE fractional seconds
    startOffset += (timestamp * AV_TIME_BASE) / 1000;

    // do the actual seeking; AVSEEK_FLAG_BACKWARD means we jump to the point
    // closest to the point we want, resulting in an earlier position if the
    // jump could not go to the exact point we wanted
    if(av_seek_frame((AVFormatContext *)(file->_ffmpeg), -1, startOffset, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_ANY) >= 0) {
        // NOTE(review): this delay presumably gives the demuxer time to
        // settle after the seek -- no visible synchronization justifies it
        SDL_Delay(5);
        // record the new playback origin so SDL_ffmpegGetPosition reports
        // positions relative to the seek target
        file->offset = timestamp;
        file->startTime = av_gettime()/1000;//SDL_GetTicks();

        // if we have a valid video, we probably have some data we want to flush
        if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {

            // flushing happens inside the semaphore so as not to interfere
            // with the decoding thread
            SDL_SemWait(file->vs[file->videoStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->vs[file->videoStream]->_ffmpeg));
            SDL_SemPost(file->vs[file->videoStream]->sem);
        }

        // same goes for audio: if there is data, we flush it
        if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {

            // make sure this is done thread-safe, so inside the appropriate
            // semaphore
            SDL_SemWait(file->as[file->audioStream]->sem);
            avcodec_flush_buffers((AVCodecContext *)(file->as[file->audioStream]->_ffmpeg));
            SDL_SemPost(file->as[file->audioStream]->sem);
        }

        // then there is our flush call, emptying the application-side buffers
        SDL_ffmpegFlush(file);

        // and we are done, lets release the decode semaphore so the decode
        // thread can move on, filling buffers from our new position
        SDL_SemPost(file->decode);

        return 0;
    }

    // if, for some reason, we could not seek, we still should flush our buffers
    SDL_ffmpegFlush(file);

    // and release our lock on the decode thread
    SDL_SemPost(file->decode);

    return -1;
}
|
ivand_qmul@125
|
771
|
ivand_qmul@125
|
772 int SDL_ffmpegSeekRelative(SDL_ffmpegFile *file, int64_t timestamp) {
|
ivand_qmul@125
|
773
|
ivand_qmul@125
|
774 // same thing as normal seek, just take into account the current position
|
ivand_qmul@125
|
775 return SDL_ffmpegSeek(file, SDL_ffmpegGetPosition(file) + timestamp);
|
ivand_qmul@125
|
776 }
|
ivand_qmul@125
|
777
|
ivand_qmul@125
|
778 int SDL_ffmpegFlush(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
779
|
ivand_qmul@125
|
780 // if we have a valid audio stream, we flush is
|
ivand_qmul@125
|
781 if( SDL_ffmpegValidAudio(file)&& !file->skipAudio ) {
|
ivand_qmul@125
|
782
|
ivand_qmul@125
|
783 // flush audiobuffer from semaphore, be thread-safe!
|
ivand_qmul@125
|
784 SDL_SemWait(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
785
|
ivand_qmul@125
|
786 file->as[file->audioStream]->size = 0;
|
ivand_qmul@125
|
787
|
ivand_qmul@125
|
788 SDL_SemPost(file->as[file->audioStream]->sem);
|
ivand_qmul@125
|
789 }
|
ivand_qmul@125
|
790
|
ivand_qmul@125
|
791 // if we have a valid video stream, we flush some more
|
ivand_qmul@125
|
792 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
793
|
ivand_qmul@125
|
794 // flush videobuffer
|
ivand_qmul@125
|
795 int i;
|
ivand_qmul@125
|
796
|
ivand_qmul@125
|
797 // again, be thread safe!
|
ivand_qmul@125
|
798 SDL_SemWait(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
799
|
ivand_qmul@125
|
800 // make sure we delete all frames from buffer
|
ivand_qmul@125
|
801 for(i=0; i<SDL_FFMPEG_MAX_BUFFERED_FRAMES; i++) {
|
ivand_qmul@125
|
802
|
ivand_qmul@125
|
803 // if this entry does not exist, continue
|
ivand_qmul@125
|
804 if(!file->vs[file->videoStream]->imageBuffer[i]) continue;
|
ivand_qmul@125
|
805
|
ivand_qmul@125
|
806 // free the actual image data
|
ivand_qmul@125
|
807 SDL_FreeSurface( file->vs[file->videoStream]->imageBuffer[i]->img );
|
ivand_qmul@125
|
808
|
ivand_qmul@125
|
809 // and free the struct containing it
|
ivand_qmul@125
|
810 free( file->vs[file->videoStream]->imageBuffer[i] );
|
ivand_qmul@125
|
811
|
ivand_qmul@125
|
812 // set position in buffer to 0, so we know it is empty
|
ivand_qmul@125
|
813 file->vs[file->videoStream]->imageBuffer[i] = 0;
|
ivand_qmul@125
|
814 }
|
ivand_qmul@129
|
815 file->vs[file->videoStream]->writeImage=0;
|
ivand_qmul@129
|
816 file->vs[file->videoStream]->readImage=0;
|
ivand_qmul@125
|
817 SDL_SemPost(file->vs[file->videoStream]->sem);
|
ivand_qmul@125
|
818 }
|
ivand_qmul@125
|
819
|
ivand_qmul@125
|
820 return 0;
|
ivand_qmul@125
|
821 }
|
ivand_qmul@125
|
822
|
ivand_qmul@125
|
/**
 * Get a pointer to buffered audio data, at most *len bytes.
 *
 * \param file  file to read audio from
 * \param len   in: bytes requested; out: bytes actually available
 * \return pointer to the audio buffer, or 0 when there is no usable audio
 *         (invalid stream, paused, or audio skipped)
 *
 * IMPORTANT: on a non-zero return this function leaves the audio semaphore
 * LOCKED; the caller must call SDL_ffmpegReleaseAudio to consume the data
 * and release the lock.
 */
int8_t* SDL_ffmpegGetAudio(SDL_ffmpegFile *file, int *len) {

    // no valid audio, paused, or audio skipped: nothing to hand out
    if( !SDL_ffmpegValidAudio(file) || file->pause||file->skipAudio ) return 0;

    // working on the audio buffer should always be done from the semaphore
    SDL_SemWait(file->as[file->audioStream]->sem);

    // if more audio data is requested than is buffered, clamp the request,
    // writing the number of bytes we can actually provide into len
    if(*len > file->as[file->audioStream]->size) *len = file->as[file->audioStream]->size;

    // decrease the size of our audio buffer by len
    file->as[file->audioStream]->size -= *len;

    // len represents the number of bytes we hand out, so increase the total
    file->as[file->audioStream]->totalBytes += *len;

    // the video offset keeps video in sync with the audio:
    // it is the difference between where we are in the stream
    // (GetPosition) and where we should be (pts);
    // the same offset is used when selecting the current video frame
    file->videoOffset = SDL_ffmpegGetPosition(file) - file->as[file->audioStream]->pts;

    // recompute the pts of the buffered audio from the hardPts
    // (the one we got from ffmpeg) plus how far we have played since
    file->as[file->audioStream]->pts = file->as[file->audioStream]->hardPts;
    // samples are 16 bit, so divide totalBytes by 2 (and by the channel
    // count) before dividing by the sample rate to get milliseconds
    file->as[file->audioStream]->pts += ((double)file->as[file->audioStream]->totalBytes / (2 * file->as[file->audioStream]->channels)) / (file->as[file->audioStream]->sampleRate / 1000.0);

    // we return the audio buffer -- note we are still inside the audio
    // semaphore! we only leave it by calling SDL_ffmpegReleaseAudio
    return file->as[file->audioStream]->audio;
}
|
ivand_qmul@125
|
858
|
ivand_qmul@125
|
/**
 * Consume len bytes handed out by SDL_ffmpegGetAudio and release the
 * audio semaphore that GetAudio left locked.
 *
 * \param file  file the audio came from
 * \param len   number of bytes consumed (use the *len GetAudio reported)
 * \return 0 on success, -1 when there is no valid / non-skipped audio stream
 *
 * NOTE(review): this guard does not check file->pause, while
 * SDL_ffmpegGetAudio's does. If GetAudio returned 0 because the file was
 * paused and the caller still invokes this function, the semaphore is
 * posted without a matching wait -- verify callers always pair the two.
 */
int SDL_ffmpegReleaseAudio(SDL_ffmpegFile *file, int len) {

    // no audio means no releasing
    if( !SDL_ffmpegValidAudio(file) || file->skipAudio) return -1;

    // this call should be paired with SDL_ffmpegGetAudio, as it provides us
    // with the correct length so we move the correct amount of data;
    // the consumed bytes are shifted out of the front of the buffer
    memmove( file->as[file->audioStream]->audio,
             file->as[file->audioStream]->audio+len,
             file->as[file->audioStream]->size );

    // work on audio data is done, so we release the semaphore
    SDL_SemPost(file->as[file->audioStream]->sem);

    return 0;
}
|
ivand_qmul@125
|
875
|
ivand_qmul@125
|
876 int64_t SDL_ffmpegGetPosition(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
877 //MainWindow * MWinsA=MainWindow::instance();
|
ivand_qmul@125
|
878
|
ivand_qmul@125
|
879 if (file->skipAudio){
|
ivand_qmul@125
|
880 return (av_gettime()/1000+ file->offset - file->startTime);
|
ivand_qmul@125
|
881 //int64_t pos=MWinsA->Get_CurAudioTime();
|
ivand_qmul@125
|
882 //return (pos + ((AVFormatContext*)file->_ffmpeg)->start_time/1000);//SDL_GetTicks();
|
ivand_qmul@125
|
883 }
|
ivand_qmul@125
|
884 else
|
ivand_qmul@125
|
885 return (av_gettime()/1000+ file->offset - file->startTime);//SDL_GetTicks();
|
ivand_qmul@125
|
886 // return the current playposition of our file
|
ivand_qmul@125
|
887
|
ivand_qmul@125
|
888 }
|
ivand_qmul@125
|
889
|
ivand_qmul@125
|
890 SDL_AudioSpec* SDL_ffmpegGetAudioSpec(SDL_ffmpegFile *file, int samples, void *callback) {
|
ivand_qmul@125
|
891
|
ivand_qmul@125
|
892 // create audio spec
|
ivand_qmul@125
|
893 SDL_AudioSpec *spec = (SDL_AudioSpec*)malloc( sizeof(SDL_AudioSpec) );
|
ivand_qmul@125
|
894
|
ivand_qmul@125
|
895 if(spec) {
|
ivand_qmul@125
|
896 spec->format = AUDIO_S16SYS;
|
ivand_qmul@125
|
897 spec->samples = samples;
|
ivand_qmul@125
|
898 spec->userdata = file;
|
ivand_qmul@125
|
899 spec->callback = (void (__cdecl *)(void *,Uint8 *,int))(callback);
|
ivand_qmul@125
|
900 spec->freq = 48000;
|
ivand_qmul@125
|
901 spec->channels = 2;
|
ivand_qmul@125
|
902
|
ivand_qmul@125
|
903 // if we have a valid audiofile, we can use its data to create a
|
ivand_qmul@125
|
904 // more appropriate audio spec
|
ivand_qmul@125
|
905 if( SDL_ffmpegValidAudio(file) && !file->skipAudio ) {
|
ivand_qmul@125
|
906 spec->freq = file->as[file->audioStream]->sampleRate;
|
ivand_qmul@125
|
907 spec->channels = file->as[file->audioStream]->channels;
|
ivand_qmul@125
|
908 }
|
ivand_qmul@125
|
909 }
|
ivand_qmul@125
|
910
|
ivand_qmul@125
|
911 return spec;
|
ivand_qmul@125
|
912 }
|
ivand_qmul@125
|
913
|
ivand_qmul@125
|
914 int64_t SDL_ffmpegGetDuration(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
915
|
ivand_qmul@125
|
916 // returns the duration of the entire file, please note that ffmpeg doesn't
|
ivand_qmul@125
|
917 // always get this value right! so don't bet your life on it...
|
ivand_qmul@125
|
918 return ((AVFormatContext*)file->_ffmpeg)->duration / (AV_TIME_BASE / 1000);
|
ivand_qmul@125
|
919 }
|
ivand_qmul@125
|
920
|
ivand_qmul@125
|
921 int SDL_ffmpegGetVideoSize(SDL_ffmpegFile *file, int *w, int *h) {
|
ivand_qmul@125
|
922
|
ivand_qmul@125
|
923 if(!w || !h) return -1;
|
ivand_qmul@125
|
924
|
ivand_qmul@125
|
925 // if we have a valid video file selected, we use it
|
ivand_qmul@125
|
926 // if not, we send default values and return.
|
ivand_qmul@125
|
927 // by checking the return value you can check if you got a valid size
|
ivand_qmul@125
|
928 if( SDL_ffmpegValidVideo(file) && !file->skipVideo) {
|
ivand_qmul@125
|
929 *w = file->vs[file->videoStream]->width;
|
ivand_qmul@125
|
930 *h = file->vs[file->videoStream]->height;
|
ivand_qmul@125
|
931 return 0;
|
ivand_qmul@125
|
932 }
|
ivand_qmul@125
|
933
|
ivand_qmul@125
|
934 *w = 320;
|
ivand_qmul@125
|
935 *h = 240;
|
ivand_qmul@125
|
936 return -1;
|
ivand_qmul@125
|
937 }
|
ivand_qmul@125
|
938
|
ivand_qmul@125
|
939 int SDL_ffmpegValidAudio(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
940
|
ivand_qmul@125
|
941 // this function is used to check if we selected a valid audio stream
|
ivand_qmul@125
|
942 if(file->audioStream < 0 || file->audioStream >= file->AStreams) return 0;
|
ivand_qmul@125
|
943
|
ivand_qmul@125
|
944 return 1;
|
ivand_qmul@125
|
945 }
|
ivand_qmul@125
|
946
|
ivand_qmul@125
|
947 int SDL_ffmpegValidVideo(SDL_ffmpegFile* file) {
|
ivand_qmul@125
|
948
|
ivand_qmul@125
|
949 // this function is used to check if we selected a valid video stream
|
ivand_qmul@125
|
950 if(file->videoStream < 0 || file->videoStream >= file->VStreams) return 0;
|
ivand_qmul@125
|
951
|
ivand_qmul@125
|
952 return 1;
|
ivand_qmul@125
|
953 }
|
ivand_qmul@125
|
954
|
ivand_qmul@125
|
955 int SDL_ffmpegPause(SDL_ffmpegFile *file, int state) {
|
ivand_qmul@125
|
956
|
ivand_qmul@125
|
957 // by putting 0 into state, we play the file
|
ivand_qmul@125
|
958 // this behaviour is analogue to SDL audio
|
ivand_qmul@125
|
959 file->pause = state;
|
ivand_qmul@125
|
960
|
ivand_qmul@125
|
961 if(!file->pause) {
|
ivand_qmul@125
|
962 file->startTime = av_gettime()/1000;//SDL_GetTicks();
|
ivand_qmul@125
|
963 }
|
ivand_qmul@125
|
964
|
ivand_qmul@125
|
965 return 0;
|
ivand_qmul@125
|
966 }
|
ivand_qmul@125
|
967
|
ivand_qmul@125
|
968 int SDL_ffmpegGetState(SDL_ffmpegFile *file) {
|
ivand_qmul@125
|
969 return file->pause;
|
ivand_qmul@125
|
970 }
|