f@0
|
1 /************************************************************************/
|
f@0
|
2 /*! \class RtAudio
|
f@0
|
3 \brief Realtime audio i/o C++ classes.
|
f@0
|
4
|
f@0
|
5 RtAudio provides a common API (Application Programming Interface)
|
f@0
|
6 for realtime audio input/output across Linux (native ALSA, Jack,
|
f@0
|
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
|
f@0
|
8 (DirectSound, ASIO and WASAPI) operating systems.
|
f@0
|
9
|
f@0
|
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
|
f@0
|
11
|
f@0
|
12 RtAudio: realtime audio i/o C++ classes
|
f@0
|
13 Copyright (c) 2001-2014 Gary P. Scavone
|
f@0
|
14
|
f@0
|
15 Permission is hereby granted, free of charge, to any person
|
f@0
|
16 obtaining a copy of this software and associated documentation files
|
f@0
|
17 (the "Software"), to deal in the Software without restriction,
|
f@0
|
18 including without limitation the rights to use, copy, modify, merge,
|
f@0
|
19 publish, distribute, sublicense, and/or sell copies of the Software,
|
f@0
|
20 and to permit persons to whom the Software is furnished to do so,
|
f@0
|
21 subject to the following conditions:
|
f@0
|
22
|
f@0
|
23 The above copyright notice and this permission notice shall be
|
f@0
|
24 included in all copies or substantial portions of the Software.
|
f@0
|
25
|
f@0
|
26 Any person wishing to distribute modifications to the Software is
|
f@0
|
27 asked to send the modifications to the original developer so that
|
f@0
|
28 they can be incorporated into the canonical version. This is,
|
f@0
|
29 however, not a binding provision of this license.
|
f@0
|
30
|
f@0
|
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
f@0
|
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
f@0
|
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
f@0
|
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
|
f@0
|
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
|
f@0
|
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
f@0
|
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
f@0
|
38 */
|
f@0
|
39 /************************************************************************/
|
f@0
|
40
|
f@0
|
41 // RtAudio: Version 4.1.1
|
f@0
|
42 #pragma once
|
f@0
|
43
|
f@0
|
44
|
f@0
|
45 #include "../include/RtAudio.h"
|
f@0
|
46 #include <iostream>
|
f@0
|
47 #include <cstdlib>
|
f@0
|
48 #include <cstring>
|
f@0
|
49 #include <climits>
|
f@0
|
50
|
f@0
|
// Return a reference to the larger of two comparable values.  Ties
// resolve to the first argument (same contract as std::max).
template <class T> const T& max(const T& a, const T& b) {
  if ( a < b )
    return b;
  return a;
}
|
f@0
|
54
|
f@0
|
// Static variable definitions.
// The table of sample rates probed when querying device capabilities.
// NOTE: MAX_SAMPLE_RATES must equal the number of entries in SAMPLE_RATES.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
|
f@0
|
61
|
f@0
|
// Platform mutex wrappers.  All four macros must be defined on every
// platform so API implementations can lock/unlock unconditionally.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows critical-section API
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // Dummy definitions for the no-API build.  MUTEX_LOCK/MUTEX_UNLOCK
  // were previously left undefined here, which would break compilation
  // of any code path that locks in the dummy configuration.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)       abs(*A) // dummy definitions
  #define MUTEX_UNLOCK(A)     abs(*A) // dummy definitions
#endif
|
f@0
|
77
|
f@0
|
78 // *************************************************** //
|
f@0
|
79 //
|
f@0
|
80 // RtAudio definitions.
|
f@0
|
81 //
|
f@0
|
82 // *************************************************** //
|
f@0
|
83
|
f@0
|
84 std::string RtAudio :: getVersion( void ) throw()
|
f@0
|
85 {
|
f@0
|
86 return RTAUDIO_VERSION;
|
f@0
|
87 }
|
f@0
|
88
|
f@0
|
// Fill 'apis' with the set of audio APIs compiled into this build, as
// selected by the __XXX__ preprocessor definitions.  Any previous
// contents of 'apis' are discarded.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
|
f@0
|
123
|
f@0
|
// Replace the current API instance (if any) with a newly constructed
// instance for 'api'.  If 'api' was not compiled into this build,
// rtapi_ is left null so the caller can detect the failed request.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  // Dispose of any previously opened API object first.
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
|
f@0
|
167
|
f@0
|
168 RtAudio :: RtAudio( RtAudio::Api api )
|
f@0
|
169 {
|
f@0
|
170 rtapi_ = 0;
|
f@0
|
171
|
f@0
|
172 if ( api != UNSPECIFIED ) {
|
f@0
|
173 // Attempt to open the specified API.
|
f@0
|
174 openRtApi( api );
|
f@0
|
175 if ( rtapi_ ) return;
|
f@0
|
176
|
f@0
|
177 // No compiled support for specified API value. Issue a debug
|
f@0
|
178 // warning and continue as if no API was specified.
|
f@0
|
179 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
|
f@0
|
180 }
|
f@0
|
181
|
f@0
|
182 // Iterate through the compiled APIs and return as soon as we find
|
f@0
|
183 // one with at least one device or we reach the end of the list.
|
f@0
|
184 std::vector< RtAudio::Api > apis;
|
f@0
|
185 getCompiledApi( apis );
|
f@0
|
186 for ( unsigned int i=0; i<apis.size(); i++ ) {
|
f@0
|
187 openRtApi( apis[i] );
|
f@0
|
188 if ( rtapi_->getDeviceCount() ) break;
|
f@0
|
189 }
|
f@0
|
190
|
f@0
|
191 if ( rtapi_ ) return;
|
f@0
|
192
|
f@0
|
193 // It should not be possible to get here because the preprocessor
|
f@0
|
194 // definition __RTAUDIO_DUMMY__ is automatically defined if no
|
f@0
|
195 // API-specific definitions are passed to the compiler. But just in
|
f@0
|
196 // case something weird happens, we'll thow an error.
|
f@0
|
197 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
|
f@0
|
198 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
|
f@0
|
199 }
|
f@0
|
200
|
f@0
|
201 RtAudio :: ~RtAudio() throw()
|
f@0
|
202 {
|
f@0
|
203 if ( rtapi_ )
|
f@0
|
204 delete rtapi_;
|
f@0
|
205 }
|
f@0
|
206
|
f@0
|
207 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
|
f@0
|
208 RtAudio::StreamParameters *inputParameters,
|
f@0
|
209 RtAudioFormat format, unsigned int sampleRate,
|
f@0
|
210 unsigned int *bufferFrames,
|
f@0
|
211 RtAudioCallback callback, void *userData,
|
f@0
|
212 RtAudio::StreamOptions *options,
|
f@0
|
213 RtAudioErrorCallback errorCallback )
|
f@0
|
214 {
|
f@0
|
215 return rtapi_->openStream( outputParameters, inputParameters, format,
|
f@0
|
216 sampleRate, bufferFrames, callback,
|
f@0
|
217 userData, options, errorCallback );
|
f@0
|
218 }
|
f@0
|
219
|
f@0
|
220 // *************************************************** //
|
f@0
|
221 //
|
f@0
|
222 // Public RtApi definitions (see end of file for
|
f@0
|
223 // private or protected utility functions).
|
f@0
|
224 //
|
f@0
|
225 // *************************************************** //
|
f@0
|
226
|
f@0
|
// Base class constructor: put the stream structure into a known
// "closed" state and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;   // user conversion buffers start unallocated
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;        // warning reporting is on by default
  firstErrorOccurred_ = false;
}
|
f@0
|
238
|
f@0
|
// Base class destructor: release the mutex created in the constructor.
// Subclass destructors run first and are responsible for closing any
// open stream.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
|
f@0
|
243
|
f@0
|
// Common front-end for RtAudio::openStream(): validate all user
// arguments, then delegate device setup to the API-specific
// probeDeviceOpen().
//
// oParams/iParams may each be NULL, but not both.  On any validation
// or probe failure, error() is invoked and the function returns with
// the stream left closed.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  // Only one open stream per RtApi instance is supported.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for an unrecognized format flag.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Validate device indices against the current device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Probe/open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo a successful output-side open before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callbacks as opaque pointers for the callback thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report the actual buffer count back to the caller via options.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
|
f@0
|
336
|
f@0
|
337 unsigned int RtApi :: getDefaultInputDevice( void )
|
f@0
|
338 {
|
f@0
|
339 // Should be implemented in subclasses if possible.
|
f@0
|
340 return 0;
|
f@0
|
341 }
|
f@0
|
342
|
f@0
|
343 unsigned int RtApi :: getDefaultOutputDevice( void )
|
f@0
|
344 {
|
f@0
|
345 // Should be implemented in subclasses if possible.
|
f@0
|
346 return 0;
|
f@0
|
347 }
|
f@0
|
348
|
f@0
|
349 void RtApi :: closeStream( void )
|
f@0
|
350 {
|
f@0
|
351 // MUST be implemented in subclasses!
|
f@0
|
352 return;
|
f@0
|
353 }
|
f@0
|
354
|
f@0
|
// Default device-open probe.  MUST be implemented in subclasses!
// The base implementation always reports failure so that an API class
// lacking an override cannot appear to open a stream successfully.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  return FAILURE;
}
|
f@0
|
363
|
f@0
|
// Advance the stream time by one buffer's duration (in seconds).
//
// Subclasses that do not provide their own implementation of
// getStreamTime should call this function once per buffer I/O to
// provide basic stream time support.
void RtApi :: tickStreamTime( void )
{
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record a wall-clock timestamp so getStreamTime() can interpolate
  // between callback ticks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
|
f@0
|
376
|
f@0
|
377 long RtApi :: getStreamLatency( void )
|
f@0
|
378 {
|
f@0
|
379 verifyStream();
|
f@0
|
380
|
f@0
|
381 long totalLatency = 0;
|
f@0
|
382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
|
f@0
|
383 totalLatency = stream_.latency[0];
|
f@0
|
384 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
|
f@0
|
385 totalLatency += stream_.latency[1];
|
f@0
|
386
|
f@0
|
387 return totalLatency;
|
f@0
|
388 }
|
f@0
|
389
|
f@0
|
// Return the number of elapsed seconds of processed stream time.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Do not extrapolate when the stream is not running or has not yet
  // processed any audio.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  // Without gettimeofday(), report the last tick value directly.
  return stream_.streamTime;
#endif
}
|
f@0
|
412
|
f@0
|
413 void RtApi :: setStreamTime( double time )
|
f@0
|
414 {
|
f@0
|
415 verifyStream();
|
f@0
|
416
|
f@0
|
417 if ( time >= 0.0 )
|
f@0
|
418 stream_.streamTime = time;
|
f@0
|
419 }
|
f@0
|
420
|
f@0
|
// Return the sample rate of the currently open stream.
// verifyStream() is expected to report an error if no stream is open.
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream();

  return stream_.sampleRate;
}
|
f@0
|
427
|
f@0
|
428
|
f@0
|
429 // *************************************************** //
|
f@0
|
430 //
|
f@0
|
431 // OS/API-specific methods.
|
f@0
|
432 //
|
f@0
|
433 // *************************************************** //
|
f@0
|
434
|
f@0
|
435 #if defined(__MACOSX_CORE__)
|
f@0
|
436
|
f@0
|
437 // The OS X CoreAudio API is designed to use a separate callback
|
f@0
|
438 // procedure for each of its audio devices. A single RtAudio duplex
|
f@0
|
439 // stream using two different devices is supported here, though it
|
f@0
|
440 // cannot be guaranteed to always behave correctly because we cannot
|
f@0
|
441 // synchronize these two callbacks.
|
f@0
|
442 //
|
f@0
|
443 // A property listener is installed for over/underrun information.
|
f@0
|
444 // However, no functionality is currently provided to allow property
|
f@0
|
445 // listeners to trigger user handlers because it is unclear what could
|
f@0
|
446 // be done if a critical stream parameter (buffer size, sample rate,
|
f@0
|
447 // device disconnect) notification arrived. The listeners entail
|
f@0
|
448 // quite a bit of extra code and most likely, a user program wouldn't
|
f@0
|
449 // be prepared for the result anyway. However, we do provide a flag
|
f@0
|
450 // to the client callback function to inform of an over/underrun.
|
f@0
|
451
|
f@0
|
// A structure to hold various information related to the CoreAudio API
// implementation.
struct CoreHandle {
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2]; // IOProc ids (10.5+ IOProcID API)
#endif
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  bool xrun[2]; // per-direction over/underrun flags
  char *deviceBuffer;
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // NOTE(review): iStream[] (and procId[] when compiled in) are not
  // initialized here — presumably assigned during device probe/open
  // before use; confirm against the stream-open code.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
|
f@0
|
470
|
f@0
|
// CoreAudio API constructor.  The only work needed here is a run-loop
// tweak required on OS-X 10.6 and later.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    // Non-fatal: report as a warning and continue.
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
|
f@0
|
489
|
f@0
|
// CoreAudio API destructor.
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
|
f@0
|
497
|
f@0
|
// Return the number of CoreAudio devices, derived from the size of the
// kAudioHardwarePropertyDevices property (an array of AudioDeviceIDs).
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    // Failure is reported as a warning and an (empty) count of zero.
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
|
f@0
|
512
|
f@0
|
// Map the system default input device to RtAudio's device index:
// the position of the default AudioDeviceID within the system's
// device ID list.  Returns 0 on any failure (warning issued).
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is the only possible answer.
  if ( nDevices <= 1 ) return 0;

  // Query the default input device's AudioDeviceID.
  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device ID list and search it for the default id.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // Default id not present in the enumerated list.
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
|
f@0
|
545
|
f@0
|
// Map the system default output device to RtAudio's device index:
// the position of the default AudioDeviceID within the system's
// device ID list.  Returns 0 on any failure (warning issued).
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is the only possible answer.
  if ( nDevices <= 1 ) return 0;

  // Query the default output device's AudioDeviceID.
  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device ID list and search it for the default id.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // Default id not present in the enumerated list.
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
|
f@0
|
578
|
f@0
|
579 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
|
f@0
|
580 {
|
f@0
|
581 RtAudio::DeviceInfo info;
|
f@0
|
582 info.probed = false;
|
f@0
|
583
|
f@0
|
584 // Get device ID
|
f@0
|
585 unsigned int nDevices = getDeviceCount();
|
f@0
|
586 if ( nDevices == 0 ) {
|
f@0
|
587 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
|
f@0
|
588 error( RtAudioError::INVALID_USE );
|
f@0
|
589 return info;
|
f@0
|
590 }
|
f@0
|
591
|
f@0
|
592 if ( device >= nDevices ) {
|
f@0
|
593 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
|
f@0
|
594 error( RtAudioError::INVALID_USE );
|
f@0
|
595 return info;
|
f@0
|
596 }
|
f@0
|
597
|
f@0
|
598 AudioDeviceID deviceList[ nDevices ];
|
f@0
|
599 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
|
f@0
|
600 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
|
f@0
|
601 kAudioObjectPropertyScopeGlobal,
|
f@0
|
602 kAudioObjectPropertyElementMaster };
|
f@0
|
603 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
|
f@0
|
604 0, NULL, &dataSize, (void *) &deviceList );
|
f@0
|
605 if ( result != noErr ) {
|
f@0
|
606 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
|
f@0
|
607 error( RtAudioError::WARNING );
|
f@0
|
608 return info;
|
f@0
|
609 }
|
f@0
|
610
|
f@0
|
611 AudioDeviceID id = deviceList[ device ];
|
f@0
|
612
|
f@0
|
613 // Get the device name.
|
f@0
|
614 info.name.erase();
|
f@0
|
615 CFStringRef cfname;
|
f@0
|
616 dataSize = sizeof( CFStringRef );
|
f@0
|
617 property.mSelector = kAudioObjectPropertyManufacturer;
|
f@0
|
618 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
|
f@0
|
619 if ( result != noErr ) {
|
f@0
|
620 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
|
f@0
|
621 errorText_ = errorStream_.str();
|
f@0
|
622 error( RtAudioError::WARNING );
|
f@0
|
623 return info;
|
f@0
|
624 }
|
f@0
|
625
|
f@0
|
626 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
|
f@0
|
627 int length = CFStringGetLength(cfname);
|
f@0
|
628 char *mname = (char *)malloc(length * 3 + 1);
|
f@0
|
629 #if defined( UNICODE ) || defined( _UNICODE )
|
f@0
|
630 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
|
f@0
|
631 #else
|
f@0
|
632 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
|
f@0
|
633 #endif
|
f@0
|
634 info.name.append( (const char *)mname, strlen(mname) );
|
f@0
|
635 info.name.append( ": " );
|
f@0
|
636 CFRelease( cfname );
|
f@0
|
637 free(mname);
|
f@0
|
638
|
f@0
|
639 property.mSelector = kAudioObjectPropertyName;
|
f@0
|
640 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
|
f@0
|
641 if ( result != noErr ) {
|
f@0
|
642 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
|
f@0
|
643 errorText_ = errorStream_.str();
|
f@0
|
644 error( RtAudioError::WARNING );
|
f@0
|
645 return info;
|
f@0
|
646 }
|
f@0
|
647
|
f@0
|
648 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
|
f@0
|
649 length = CFStringGetLength(cfname);
|
f@0
|
650 char *name = (char *)malloc(length * 3 + 1);
|
f@0
|
651 #if defined( UNICODE ) || defined( _UNICODE )
|
f@0
|
652 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
|
f@0
|
653 #else
|
f@0
|
654 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
|
f@0
|
655 #endif
|
f@0
|
656 info.name.append( (const char *)name, strlen(name) );
|
f@0
|
657 CFRelease( cfname );
|
f@0
|
658 free(name);
|
f@0
|
659
|
f@0
|
660 // Get the output stream "configuration".
|
f@0
|
661 AudioBufferList *bufferList = nil;
|
f@0
|
662 property.mSelector = kAudioDevicePropertyStreamConfiguration;
|
f@0
|
663 property.mScope = kAudioDevicePropertyScopeOutput;
|
f@0
|
664 // property.mElement = kAudioObjectPropertyElementWildcard;
|
f@0
|
665 dataSize = 0;
|
f@0
|
666 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
|
f@0
|
667 if ( result != noErr || dataSize == 0 ) {
|
f@0
|
668 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
|
f@0
|
669 errorText_ = errorStream_.str();
|
f@0
|
670 error( RtAudioError::WARNING );
|
f@0
|
671 return info;
|
f@0
|
672 }
|
f@0
|
673
|
f@0
|
674 // Allocate the AudioBufferList.
|
f@0
|
675 bufferList = (AudioBufferList *) malloc( dataSize );
|
f@0
|
676 if ( bufferList == NULL ) {
|
f@0
|
677 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
|
f@0
|
678 error( RtAudioError::WARNING );
|
f@0
|
679 return info;
|
f@0
|
680 }
|
f@0
|
681
|
f@0
|
682 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
|
f@0
|
683 if ( result != noErr || dataSize == 0 ) {
|
f@0
|
684 free( bufferList );
|
f@0
|
685 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
|
f@0
|
686 errorText_ = errorStream_.str();
|
f@0
|
687 error( RtAudioError::WARNING );
|
f@0
|
688 return info;
|
f@0
|
689 }
|
f@0
|
690
|
f@0
|
691 // Get output channel information.
|
f@0
|
692 unsigned int i, nStreams = bufferList->mNumberBuffers;
|
f@0
|
693 for ( i=0; i<nStreams; i++ )
|
f@0
|
694 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
|
f@0
|
695 free( bufferList );
|
f@0
|
696
|
f@0
|
697 // Get the input stream "configuration".
|
f@0
|
698 property.mScope = kAudioDevicePropertyScopeInput;
|
f@0
|
699 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
|
f@0
|
700 if ( result != noErr || dataSize == 0 ) {
|
f@0
|
701 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
|
f@0
|
702 errorText_ = errorStream_.str();
|
f@0
|
703 error( RtAudioError::WARNING );
|
f@0
|
704 return info;
|
f@0
|
705 }
|
f@0
|
706
|
f@0
|
707 // Allocate the AudioBufferList.
|
f@0
|
708 bufferList = (AudioBufferList *) malloc( dataSize );
|
f@0
|
709 if ( bufferList == NULL ) {
|
f@0
|
710 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
|
f@0
|
711 error( RtAudioError::WARNING );
|
f@0
|
712 return info;
|
f@0
|
713 }
|
f@0
|
714
|
f@0
|
715 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
|
f@0
|
716 if (result != noErr || dataSize == 0) {
|
f@0
|
717 free( bufferList );
|
f@0
|
718 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
|
f@0
|
719 errorText_ = errorStream_.str();
|
f@0
|
720 error( RtAudioError::WARNING );
|
f@0
|
721 return info;
|
f@0
|
722 }
|
f@0
|
723
|
f@0
|
724 // Get input channel information.
|
f@0
|
725 nStreams = bufferList->mNumberBuffers;
|
f@0
|
726 for ( i=0; i<nStreams; i++ )
|
f@0
|
727 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
|
f@0
|
728 free( bufferList );
|
f@0
|
729
|
f@0
|
730 // If device opens for both playback and capture, we determine the channels.
|
f@0
|
731 if ( info.outputChannels > 0 && info.inputChannels > 0 )
|
f@0
|
732 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
|
f@0
|
733
|
f@0
|
734 // Probe the device sample rates.
|
f@0
|
735 bool isInput = false;
|
f@0
|
736 if ( info.outputChannels == 0 ) isInput = true;
|
f@0
|
737
|
f@0
|
738 // Determine the supported sample rates.
|
f@0
|
739 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
|
f@0
|
740 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
|
f@0
|
741 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
|
f@0
|
742 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
|
f@0
|
743 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
|
f@0
|
744 errorText_ = errorStream_.str();
|
f@0
|
745 error( RtAudioError::WARNING );
|
f@0
|
746 return info;
|
f@0
|
747 }
|
f@0
|
748
|
f@0
|
749 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
|
f@0
|
750 AudioValueRange rangeList[ nRanges ];
|
f@0
|
751 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
|
f@0
|
752 if ( result != kAudioHardwareNoError ) {
|
f@0
|
753 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
|
f@0
|
754 errorText_ = errorStream_.str();
|
f@0
|
755 error( RtAudioError::WARNING );
|
f@0
|
756 return info;
|
f@0
|
757 }
|
f@0
|
758
|
f@0
|
759 // The sample rate reporting mechanism is a bit of a mystery. It
|
f@0
|
760 // seems that it can either return individual rates or a range of
|
f@0
|
761 // rates. I assume that if the min / max range values are the same,
|
f@0
|
762 // then that represents a single supported rate and if the min / max
|
f@0
|
763 // range values are different, the device supports an arbitrary
|
f@0
|
764 // range of values (though there might be multiple ranges, so we'll
|
f@0
|
765 // use the most conservative range).
|
f@0
|
766 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
|
f@0
|
767 bool haveValueRange = false;
|
f@0
|
768 info.sampleRates.clear();
|
f@0
|
769 for ( UInt32 i=0; i<nRanges; i++ ) {
|
f@0
|
770 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
|
f@0
|
771 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
|
f@0
|
772 else {
|
f@0
|
773 haveValueRange = true;
|
f@0
|
774 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
|
f@0
|
775 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
|
f@0
|
776 }
|
f@0
|
777 }
|
f@0
|
778
|
f@0
|
779 if ( haveValueRange ) {
|
f@0
|
780 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
|
f@0
|
781 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
|
f@0
|
782 info.sampleRates.push_back( SAMPLE_RATES[k] );
|
f@0
|
783 }
|
f@0
|
784 }
|
f@0
|
785
|
f@0
|
786 // Sort and remove any redundant values
|
f@0
|
787 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
|
f@0
|
788 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
|
f@0
|
789
|
f@0
|
790 if ( info.sampleRates.size() == 0 ) {
|
f@0
|
791 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
|
f@0
|
792 errorText_ = errorStream_.str();
|
f@0
|
793 error( RtAudioError::WARNING );
|
f@0
|
794 return info;
|
f@0
|
795 }
|
f@0
|
796
|
f@0
|
797 // CoreAudio always uses 32-bit floating point data for PCM streams.
|
f@0
|
798 // Thus, any other "physical" formats supported by the device are of
|
f@0
|
799 // no interest to the client.
|
f@0
|
800 info.nativeFormats = RTAUDIO_FLOAT32;
|
f@0
|
801
|
f@0
|
802 if ( info.outputChannels > 0 )
|
f@0
|
803 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
|
f@0
|
804 if ( info.inputChannels > 0 )
|
f@0
|
805 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
|
f@0
|
806
|
f@0
|
807 info.probed = true;
|
f@0
|
808 return info;
|
f@0
|
809 }
|
f@0
|
810
|
f@0
|
811 static OSStatus callbackHandler( AudioDeviceID inDevice,
|
f@0
|
812 const AudioTimeStamp* /*inNow*/,
|
f@0
|
813 const AudioBufferList* inInputData,
|
f@0
|
814 const AudioTimeStamp* /*inInputTime*/,
|
f@0
|
815 AudioBufferList* outOutputData,
|
f@0
|
816 const AudioTimeStamp* /*inOutputTime*/,
|
f@0
|
817 void* infoPointer )
|
f@0
|
818 {
|
f@0
|
819 CallbackInfo *info = (CallbackInfo *) infoPointer;
|
f@0
|
820
|
f@0
|
821 RtApiCore *object = (RtApiCore *) info->object;
|
f@0
|
822 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
|
f@0
|
823 return kAudioHardwareUnspecifiedError;
|
f@0
|
824 else
|
f@0
|
825 return kAudioHardwareNoError;
|
f@0
|
826 }
|
f@0
|
827
|
f@0
|
828 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
|
f@0
|
829 UInt32 nAddresses,
|
f@0
|
830 const AudioObjectPropertyAddress properties[],
|
f@0
|
831 void* handlePointer )
|
f@0
|
832 {
|
f@0
|
833 CoreHandle *handle = (CoreHandle *) handlePointer;
|
f@0
|
834 for ( UInt32 i=0; i<nAddresses; i++ ) {
|
f@0
|
835 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
|
f@0
|
836 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
|
f@0
|
837 handle->xrun[1] = true;
|
f@0
|
838 else
|
f@0
|
839 handle->xrun[0] = true;
|
f@0
|
840 }
|
f@0
|
841 }
|
f@0
|
842
|
f@0
|
843 return kAudioHardwareNoError;
|
f@0
|
844 }
|
f@0
|
845
|
f@0
|
846 static OSStatus rateListener( AudioObjectID inDevice,
|
f@0
|
847 UInt32 /*nAddresses*/,
|
f@0
|
848 const AudioObjectPropertyAddress /*properties*/[],
|
f@0
|
849 void* ratePointer )
|
f@0
|
850 {
|
f@0
|
851 Float64 *rate = (Float64 *) ratePointer;
|
f@0
|
852 UInt32 dataSize = sizeof( Float64 );
|
f@0
|
853 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
|
f@0
|
854 kAudioObjectPropertyScopeGlobal,
|
f@0
|
855 kAudioObjectPropertyElementMaster };
|
f@0
|
856 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
|
f@0
|
857 return kAudioHardwareNoError;
|
f@0
|
858 }
|
f@0
|
859
|
f@0
|
860 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
861 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
862 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
863 RtAudio::StreamOptions *options )
|
f@0
|
864 {
|
f@0
|
865 // Get device ID
|
f@0
|
866 unsigned int nDevices = getDeviceCount();
|
f@0
|
867 if ( nDevices == 0 ) {
|
f@0
|
868 // This should not happen because a check is made before this function is called.
|
f@0
|
869 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
|
f@0
|
870 return FAILURE;
|
f@0
|
871 }
|
f@0
|
872
|
f@0
|
873 if ( device >= nDevices ) {
|
f@0
|
874 // This should not happen because a check is made before this function is called.
|
f@0
|
875 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
|
f@0
|
876 return FAILURE;
|
f@0
|
877 }
|
f@0
|
878
|
f@0
|
879 AudioDeviceID deviceList[ nDevices ];
|
f@0
|
880 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
|
f@0
|
881 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
|
f@0
|
882 kAudioObjectPropertyScopeGlobal,
|
f@0
|
883 kAudioObjectPropertyElementMaster };
|
f@0
|
884 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
|
f@0
|
885 0, NULL, &dataSize, (void *) &deviceList );
|
f@0
|
886 if ( result != noErr ) {
|
f@0
|
887 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
|
f@0
|
888 return FAILURE;
|
f@0
|
889 }
|
f@0
|
890
|
f@0
|
891 AudioDeviceID id = deviceList[ device ];
|
f@0
|
892
|
f@0
|
893 // Setup for stream mode.
|
f@0
|
894 bool isInput = false;
|
f@0
|
895 if ( mode == INPUT ) {
|
f@0
|
896 isInput = true;
|
f@0
|
897 property.mScope = kAudioDevicePropertyScopeInput;
|
f@0
|
898 }
|
f@0
|
899 else
|
f@0
|
900 property.mScope = kAudioDevicePropertyScopeOutput;
|
f@0
|
901
|
f@0
|
902 // Get the stream "configuration".
|
f@0
|
903 AudioBufferList *bufferList = nil;
|
f@0
|
904 dataSize = 0;
|
f@0
|
905 property.mSelector = kAudioDevicePropertyStreamConfiguration;
|
f@0
|
906 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
|
f@0
|
907 if ( result != noErr || dataSize == 0 ) {
|
f@0
|
908 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
|
f@0
|
909 errorText_ = errorStream_.str();
|
f@0
|
910 return FAILURE;
|
f@0
|
911 }
|
f@0
|
912
|
f@0
|
913 // Allocate the AudioBufferList.
|
f@0
|
914 bufferList = (AudioBufferList *) malloc( dataSize );
|
f@0
|
915 if ( bufferList == NULL ) {
|
f@0
|
916 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
|
f@0
|
917 return FAILURE;
|
f@0
|
918 }
|
f@0
|
919
|
f@0
|
920 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
|
f@0
|
921 if (result != noErr || dataSize == 0) {
|
f@0
|
922 free( bufferList );
|
f@0
|
923 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
|
f@0
|
924 errorText_ = errorStream_.str();
|
f@0
|
925 return FAILURE;
|
f@0
|
926 }
|
f@0
|
927
|
f@0
|
928 // Search for one or more streams that contain the desired number of
|
f@0
|
929 // channels. CoreAudio devices can have an arbitrary number of
|
f@0
|
930 // streams and each stream can have an arbitrary number of channels.
|
f@0
|
931 // For each stream, a single buffer of interleaved samples is
|
f@0
|
932 // provided. RtAudio prefers the use of one stream of interleaved
|
f@0
|
933 // data or multiple consecutive single-channel streams. However, we
|
f@0
|
934 // now support multiple consecutive multi-channel streams of
|
f@0
|
935 // interleaved data as well.
|
f@0
|
936 UInt32 iStream, offsetCounter = firstChannel;
|
f@0
|
937 UInt32 nStreams = bufferList->mNumberBuffers;
|
f@0
|
938 bool monoMode = false;
|
f@0
|
939 bool foundStream = false;
|
f@0
|
940
|
f@0
|
941 // First check that the device supports the requested number of
|
f@0
|
942 // channels.
|
f@0
|
943 UInt32 deviceChannels = 0;
|
f@0
|
944 for ( iStream=0; iStream<nStreams; iStream++ )
|
f@0
|
945 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
|
f@0
|
946
|
f@0
|
947 if ( deviceChannels < ( channels + firstChannel ) ) {
|
f@0
|
948 free( bufferList );
|
f@0
|
949 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
|
f@0
|
950 errorText_ = errorStream_.str();
|
f@0
|
951 return FAILURE;
|
f@0
|
952 }
|
f@0
|
953
|
f@0
|
954 // Look for a single stream meeting our needs.
|
f@0
|
955 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
|
f@0
|
956 for ( iStream=0; iStream<nStreams; iStream++ ) {
|
f@0
|
957 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
|
f@0
|
958 if ( streamChannels >= channels + offsetCounter ) {
|
f@0
|
959 firstStream = iStream;
|
f@0
|
960 channelOffset = offsetCounter;
|
f@0
|
961 foundStream = true;
|
f@0
|
962 break;
|
f@0
|
963 }
|
f@0
|
964 if ( streamChannels > offsetCounter ) break;
|
f@0
|
965 offsetCounter -= streamChannels;
|
f@0
|
966 }
|
f@0
|
967
|
f@0
|
968 // If we didn't find a single stream above, then we should be able
|
f@0
|
969 // to meet the channel specification with multiple streams.
|
f@0
|
970 if ( foundStream == false ) {
|
f@0
|
971 monoMode = true;
|
f@0
|
972 offsetCounter = firstChannel;
|
f@0
|
973 for ( iStream=0; iStream<nStreams; iStream++ ) {
|
f@0
|
974 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
|
f@0
|
975 if ( streamChannels > offsetCounter ) break;
|
f@0
|
976 offsetCounter -= streamChannels;
|
f@0
|
977 }
|
f@0
|
978
|
f@0
|
979 firstStream = iStream;
|
f@0
|
980 channelOffset = offsetCounter;
|
f@0
|
981 Int32 channelCounter = channels + offsetCounter - streamChannels;
|
f@0
|
982
|
f@0
|
983 if ( streamChannels > 1 ) monoMode = false;
|
f@0
|
984 while ( channelCounter > 0 ) {
|
f@0
|
985 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
|
f@0
|
986 if ( streamChannels > 1 ) monoMode = false;
|
f@0
|
987 channelCounter -= streamChannels;
|
f@0
|
988 streamCount++;
|
f@0
|
989 }
|
f@0
|
990 }
|
f@0
|
991
|
f@0
|
992 free( bufferList );
|
f@0
|
993
|
f@0
|
994 // Determine the buffer size.
|
f@0
|
995 AudioValueRange bufferRange;
|
f@0
|
996 dataSize = sizeof( AudioValueRange );
|
f@0
|
997 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
|
f@0
|
998 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
|
f@0
|
999
|
f@0
|
1000 if ( result != noErr ) {
|
f@0
|
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
|
f@0
|
1002 errorText_ = errorStream_.str();
|
f@0
|
1003 return FAILURE;
|
f@0
|
1004 }
|
f@0
|
1005
|
f@0
|
1006 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
|
f@0
|
1007 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
|
f@0
|
1008 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
|
f@0
|
1009
|
f@0
|
1010 // Set the buffer size. For multiple streams, I'm assuming we only
|
f@0
|
1011 // need to make this setting for the master channel.
|
f@0
|
1012 UInt32 theSize = (UInt32) *bufferSize;
|
f@0
|
1013 dataSize = sizeof( UInt32 );
|
f@0
|
1014 property.mSelector = kAudioDevicePropertyBufferFrameSize;
|
f@0
|
1015 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
|
f@0
|
1016
|
f@0
|
1017 if ( result != noErr ) {
|
f@0
|
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
|
f@0
|
1019 errorText_ = errorStream_.str();
|
f@0
|
1020 return FAILURE;
|
f@0
|
1021 }
|
f@0
|
1022
|
f@0
|
1023 // If attempting to setup a duplex stream, the bufferSize parameter
|
f@0
|
1024 // MUST be the same in both directions!
|
f@0
|
1025 *bufferSize = theSize;
|
f@0
|
1026 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
|
f@0
|
1027 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
|
f@0
|
1028 errorText_ = errorStream_.str();
|
f@0
|
1029 return FAILURE;
|
f@0
|
1030 }
|
f@0
|
1031
|
f@0
|
1032 stream_.bufferSize = *bufferSize;
|
f@0
|
1033 stream_.nBuffers = 1;
|
f@0
|
1034
|
f@0
|
1035 // Try to set "hog" mode ... it's not clear to me this is working.
|
f@0
|
1036 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
|
f@0
|
1037 pid_t hog_pid;
|
f@0
|
1038 dataSize = sizeof( hog_pid );
|
f@0
|
1039 property.mSelector = kAudioDevicePropertyHogMode;
|
f@0
|
1040 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
|
f@0
|
1041 if ( result != noErr ) {
|
f@0
|
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
|
f@0
|
1043 errorText_ = errorStream_.str();
|
f@0
|
1044 return FAILURE;
|
f@0
|
1045 }
|
f@0
|
1046
|
f@0
|
1047 if ( hog_pid != getpid() ) {
|
f@0
|
1048 hog_pid = getpid();
|
f@0
|
1049 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
|
f@0
|
1050 if ( result != noErr ) {
|
f@0
|
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
|
f@0
|
1052 errorText_ = errorStream_.str();
|
f@0
|
1053 return FAILURE;
|
f@0
|
1054 }
|
f@0
|
1055 }
|
f@0
|
1056 }
|
f@0
|
1057
|
f@0
|
1058 // Check and if necessary, change the sample rate for the device.
|
f@0
|
1059 Float64 nominalRate;
|
f@0
|
1060 dataSize = sizeof( Float64 );
|
f@0
|
1061 property.mSelector = kAudioDevicePropertyNominalSampleRate;
|
f@0
|
1062 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
|
f@0
|
1063 if ( result != noErr ) {
|
f@0
|
1064 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
|
f@0
|
1065 errorText_ = errorStream_.str();
|
f@0
|
1066 return FAILURE;
|
f@0
|
1067 }
|
f@0
|
1068
|
f@0
|
1069 // Only change the sample rate if off by more than 1 Hz.
|
f@0
|
1070 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
|
f@0
|
1071
|
f@0
|
1072 // Set a property listener for the sample rate change
|
f@0
|
1073 Float64 reportedRate = 0.0;
|
f@0
|
1074 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
|
f@0
|
1075 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
|
f@0
|
1076 if ( result != noErr ) {
|
f@0
|
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
|
f@0
|
1078 errorText_ = errorStream_.str();
|
f@0
|
1079 return FAILURE;
|
f@0
|
1080 }
|
f@0
|
1081
|
f@0
|
1082 nominalRate = (Float64) sampleRate;
|
f@0
|
1083 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
|
f@0
|
1084 if ( result != noErr ) {
|
f@0
|
1085 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
|
f@0
|
1086 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
|
f@0
|
1087 errorText_ = errorStream_.str();
|
f@0
|
1088 return FAILURE;
|
f@0
|
1089 }
|
f@0
|
1090
|
f@0
|
1091 // Now wait until the reported nominal rate is what we just set.
|
f@0
|
1092 UInt32 microCounter = 0;
|
f@0
|
1093 while ( reportedRate != nominalRate ) {
|
f@0
|
1094 microCounter += 5000;
|
f@0
|
1095 if ( microCounter > 5000000 ) break;
|
f@0
|
1096 usleep( 5000 );
|
f@0
|
1097 }
|
f@0
|
1098
|
f@0
|
1099 // Remove the property listener.
|
f@0
|
1100 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
|
f@0
|
1101
|
f@0
|
1102 if ( microCounter > 5000000 ) {
|
f@0
|
1103 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
|
f@0
|
1104 errorText_ = errorStream_.str();
|
f@0
|
1105 return FAILURE;
|
f@0
|
1106 }
|
f@0
|
1107 }
|
f@0
|
1108
|
f@0
|
1109 // Now set the stream format for all streams. Also, check the
|
f@0
|
1110 // physical format of the device and change that if necessary.
|
f@0
|
1111 AudioStreamBasicDescription description;
|
f@0
|
1112 dataSize = sizeof( AudioStreamBasicDescription );
|
f@0
|
1113 property.mSelector = kAudioStreamPropertyVirtualFormat;
|
f@0
|
1114 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
|
f@0
|
1115 if ( result != noErr ) {
|
f@0
|
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
|
f@0
|
1117 errorText_ = errorStream_.str();
|
f@0
|
1118 return FAILURE;
|
f@0
|
1119 }
|
f@0
|
1120
|
f@0
|
1121 // Set the sample rate and data format id. However, only make the
|
f@0
|
1122 // change if the sample rate is not within 1.0 of the desired
|
f@0
|
1123 // rate and the format is not linear pcm.
|
f@0
|
1124 bool updateFormat = false;
|
f@0
|
1125 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
|
f@0
|
1126 description.mSampleRate = (Float64) sampleRate;
|
f@0
|
1127 updateFormat = true;
|
f@0
|
1128 }
|
f@0
|
1129
|
f@0
|
1130 if ( description.mFormatID != kAudioFormatLinearPCM ) {
|
f@0
|
1131 description.mFormatID = kAudioFormatLinearPCM;
|
f@0
|
1132 updateFormat = true;
|
f@0
|
1133 }
|
f@0
|
1134
|
f@0
|
1135 if ( updateFormat ) {
|
f@0
|
1136 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
|
f@0
|
1137 if ( result != noErr ) {
|
f@0
|
1138 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
|
f@0
|
1139 errorText_ = errorStream_.str();
|
f@0
|
1140 return FAILURE;
|
f@0
|
1141 }
|
f@0
|
1142 }
|
f@0
|
1143
|
f@0
|
1144 // Now check the physical format.
|
f@0
|
1145 property.mSelector = kAudioStreamPropertyPhysicalFormat;
|
f@0
|
1146 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
|
f@0
|
1147 if ( result != noErr ) {
|
f@0
|
1148 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
|
f@0
|
1149 errorText_ = errorStream_.str();
|
f@0
|
1150 return FAILURE;
|
f@0
|
1151 }
|
f@0
|
1152
|
f@0
|
1153 //std::cout << "Current physical stream format:" << std::endl;
|
f@0
|
1154 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
|
f@0
|
1155 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
|
f@0
|
1156 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
|
f@0
|
1157 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
|
f@0
|
1158
|
f@0
|
1159 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
|
f@0
|
1160 description.mFormatID = kAudioFormatLinearPCM;
|
f@0
|
1161 //description.mSampleRate = (Float64) sampleRate;
|
f@0
|
1162 AudioStreamBasicDescription testDescription = description;
|
f@0
|
1163 UInt32 formatFlags;
|
f@0
|
1164
|
f@0
|
1165 // We'll try higher bit rates first and then work our way down.
|
f@0
|
1166 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
|
f@0
|
1167 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
|
f@0
|
1168 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
|
f@0
|
1169 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
|
f@0
|
1170 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
|
f@0
|
1171 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
|
f@0
|
1172 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
|
f@0
|
1173 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
|
f@0
|
1174 formatFlags |= kAudioFormatFlagIsAlignedHigh;
|
f@0
|
1175 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
|
f@0
|
1176 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
|
f@0
|
1177 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
|
f@0
|
1178 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
|
f@0
|
1179
|
f@0
|
1180 bool setPhysicalFormat = false;
|
f@0
|
1181 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
|
f@0
|
1182 testDescription = description;
|
f@0
|
1183 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
|
f@0
|
1184 testDescription.mFormatFlags = physicalFormats[i].second;
|
f@0
|
1185 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
|
f@0
|
1186 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
|
f@0
|
1187 else
|
f@0
|
1188 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
|
f@0
|
1189 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
|
f@0
|
1190 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
|
f@0
|
1191 if ( result == noErr ) {
|
f@0
|
1192 setPhysicalFormat = true;
|
f@0
|
1193 //std::cout << "Updated physical stream format:" << std::endl;
|
f@0
|
1194 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
|
f@0
|
1195 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
|
f@0
|
1196 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
|
f@0
|
1197 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
|
f@0
|
1198 break;
|
f@0
|
1199 }
|
f@0
|
1200 }
|
f@0
|
1201
|
f@0
|
1202 if ( !setPhysicalFormat ) {
|
f@0
|
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
|
f@0
|
1204 errorText_ = errorStream_.str();
|
f@0
|
1205 return FAILURE;
|
f@0
|
1206 }
|
f@0
|
1207 } // done setting virtual/physical formats.
|
f@0
|
1208
|
f@0
|
1209 // Get the stream / device latency.
|
f@0
|
1210 UInt32 latency;
|
f@0
|
1211 dataSize = sizeof( UInt32 );
|
f@0
|
1212 property.mSelector = kAudioDevicePropertyLatency;
|
f@0
|
1213 if ( AudioObjectHasProperty( id, &property ) == true ) {
|
f@0
|
1214 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
|
f@0
|
1215 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
|
f@0
|
1216 else {
|
f@0
|
1217 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
|
f@0
|
1218 errorText_ = errorStream_.str();
|
f@0
|
1219 error( RtAudioError::WARNING );
|
f@0
|
1220 }
|
f@0
|
1221 }
|
f@0
|
1222
|
f@0
|
1223 // Byte-swapping: According to AudioHardware.h, the stream data will
|
f@0
|
1224 // always be presented in native-endian format, so we should never
|
f@0
|
1225 // need to byte swap.
|
f@0
|
1226 stream_.doByteSwap[mode] = false;
|
f@0
|
1227
|
f@0
|
1228 // From the CoreAudio documentation, PCM data must be supplied as
|
f@0
|
1229 // 32-bit floats.
|
f@0
|
1230 stream_.userFormat = format;
|
f@0
|
1231 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
|
f@0
|
1232
|
f@0
|
1233 if ( streamCount == 1 )
|
f@0
|
1234 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
|
f@0
|
1235 else // multiple streams
|
f@0
|
1236 stream_.nDeviceChannels[mode] = channels;
|
f@0
|
1237 stream_.nUserChannels[mode] = channels;
|
f@0
|
1238 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
|
f@0
|
1239 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
|
f@0
|
1240 else stream_.userInterleaved = true;
|
f@0
|
1241 stream_.deviceInterleaved[mode] = true;
|
f@0
|
1242 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
|
f@0
|
1243
|
f@0
|
1244 // Set flags for buffer conversion.
|
f@0
|
1245 stream_.doConvertBuffer[mode] = false;
|
f@0
|
1246 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
1247 stream_.doConvertBuffer[mode] = true;
|
f@0
|
1248 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
|
f@0
|
1249 stream_.doConvertBuffer[mode] = true;
|
f@0
|
1250 if ( streamCount == 1 ) {
|
f@0
|
1251 if ( stream_.nUserChannels[mode] > 1 &&
|
f@0
|
1252 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
|
f@0
|
1253 stream_.doConvertBuffer[mode] = true;
|
f@0
|
1254 }
|
f@0
|
1255 else if ( monoMode && stream_.userInterleaved )
|
f@0
|
1256 stream_.doConvertBuffer[mode] = true;
|
f@0
|
1257
|
f@0
|
1258 // Allocate our CoreHandle structure for the stream.
|
f@0
|
1259 CoreHandle *handle = 0;
|
f@0
|
1260 if ( stream_.apiHandle == 0 ) {
|
f@0
|
1261 try {
|
f@0
|
1262 handle = new CoreHandle;
|
f@0
|
1263 }
|
f@0
|
1264 catch ( std::bad_alloc& ) {
|
f@0
|
1265 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
|
f@0
|
1266 goto error;
|
f@0
|
1267 }
|
f@0
|
1268
|
f@0
|
1269 if ( pthread_cond_init( &handle->condition, NULL ) ) {
|
f@0
|
1270 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
|
f@0
|
1271 goto error;
|
f@0
|
1272 }
|
f@0
|
1273 stream_.apiHandle = (void *) handle;
|
f@0
|
1274 }
|
f@0
|
1275 else
|
f@0
|
1276 handle = (CoreHandle *) stream_.apiHandle;
|
f@0
|
1277 handle->iStream[mode] = firstStream;
|
f@0
|
1278 handle->nStreams[mode] = streamCount;
|
f@0
|
1279 handle->id[mode] = id;
|
f@0
|
1280
|
f@0
|
1281 // Allocate necessary internal buffers.
|
f@0
|
1282 unsigned long bufferBytes;
|
f@0
|
1283 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
1284 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
1285 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
|
f@0
|
1286 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
|
f@0
|
1287 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
1288 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
1289 goto error;
|
f@0
|
1290 }
|
f@0
|
1291
|
f@0
|
1292 // If possible, we will make use of the CoreAudio stream buffers as
|
f@0
|
1293 // "device buffers". However, we can't do this if using multiple
|
f@0
|
1294 // streams.
|
f@0
|
1295 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
|
f@0
|
1296
|
f@0
|
1297 bool makeBuffer = true;
|
f@0
|
1298 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
1299 if ( mode == INPUT ) {
|
f@0
|
1300 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
1301 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
1302 if ( bufferBytes <= bytesOut ) makeBuffer = false;
|
f@0
|
1303 }
|
f@0
|
1304 }
|
f@0
|
1305
|
f@0
|
1306 if ( makeBuffer ) {
|
f@0
|
1307 bufferBytes *= *bufferSize;
|
f@0
|
1308 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
1309 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
1310 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
1311 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
1312 goto error;
|
f@0
|
1313 }
|
f@0
|
1314 }
|
f@0
|
1315 }
|
f@0
|
1316
|
f@0
|
1317 stream_.sampleRate = sampleRate;
|
f@0
|
1318 stream_.device[mode] = device;
|
f@0
|
1319 stream_.state = STREAM_STOPPED;
|
f@0
|
1320 stream_.callbackInfo.object = (void *) this;
|
f@0
|
1321
|
f@0
|
1322 // Setup the buffer conversion information structure.
|
f@0
|
1323 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
1324 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
|
f@0
|
1325 else setConvertInfo( mode, channelOffset );
|
f@0
|
1326 }
|
f@0
|
1327
|
f@0
|
1328 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
|
f@0
|
1329 // Only one callback procedure per device.
|
f@0
|
1330 stream_.mode = DUPLEX;
|
f@0
|
1331 else {
|
f@0
|
1332 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
|
f@0
|
1333 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
|
f@0
|
1334 #else
|
f@0
|
1335 // deprecated in favor of AudioDeviceCreateIOProcID()
|
f@0
|
1336 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
|
f@0
|
1337 #endif
|
f@0
|
1338 if ( result != noErr ) {
|
f@0
|
1339 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
|
f@0
|
1340 errorText_ = errorStream_.str();
|
f@0
|
1341 goto error;
|
f@0
|
1342 }
|
f@0
|
1343 if ( stream_.mode == OUTPUT && mode == INPUT )
|
f@0
|
1344 stream_.mode = DUPLEX;
|
f@0
|
1345 else
|
f@0
|
1346 stream_.mode = mode;
|
f@0
|
1347 }
|
f@0
|
1348
|
f@0
|
1349 // Setup the device property listener for over/underload.
|
f@0
|
1350 property.mSelector = kAudioDeviceProcessorOverload;
|
f@0
|
1351 property.mScope = kAudioObjectPropertyScopeGlobal;
|
f@0
|
1352 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
|
f@0
|
1353
|
f@0
|
1354 return SUCCESS;
|
f@0
|
1355
|
f@0
|
1356 error:
|
f@0
|
1357 if ( handle ) {
|
f@0
|
1358 pthread_cond_destroy( &handle->condition );
|
f@0
|
1359 delete handle;
|
f@0
|
1360 stream_.apiHandle = 0;
|
f@0
|
1361 }
|
f@0
|
1362
|
f@0
|
1363 for ( int i=0; i<2; i++ ) {
|
f@0
|
1364 if ( stream_.userBuffer[i] ) {
|
f@0
|
1365 free( stream_.userBuffer[i] );
|
f@0
|
1366 stream_.userBuffer[i] = 0;
|
f@0
|
1367 }
|
f@0
|
1368 }
|
f@0
|
1369
|
f@0
|
1370 if ( stream_.deviceBuffer ) {
|
f@0
|
1371 free( stream_.deviceBuffer );
|
f@0
|
1372 stream_.deviceBuffer = 0;
|
f@0
|
1373 }
|
f@0
|
1374
|
f@0
|
1375 stream_.state = STREAM_CLOSED;
|
f@0
|
1376 return FAILURE;
|
f@0
|
1377 }
|
f@0
|
1378
|
f@0
|
// Close an open CoreAudio stream: stop the device(s) if running, remove
// the registered IOProc(s), release user/device buffers and the API
// handle, and mark the stream CLOSED.  Issues only a WARNING if no
// stream is open.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): handle is dereferenced below without a null check —
  // presumably probeDeviceOpen() guarantees apiHandle is set whenever
  // state != STREAM_CLOSED; confirm against the open/close sequence.
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // For duplex streams on a single device, the one IOProc above serves
  // both directions; only tear down the input side separately when the
  // devices differ.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  // Release the user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Release the internal format-conversion buffer, if one was allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
|
f@0
|
1430
|
f@0
|
1431 void RtApiCore :: startStream( void )
|
f@0
|
1432 {
|
f@0
|
1433 verifyStream();
|
f@0
|
1434 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
1435 errorText_ = "RtApiCore::startStream(): the stream is already running!";
|
f@0
|
1436 error( RtAudioError::WARNING );
|
f@0
|
1437 return;
|
f@0
|
1438 }
|
f@0
|
1439
|
f@0
|
1440 OSStatus result = noErr;
|
f@0
|
1441 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
|
f@0
|
1442 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
1443
|
f@0
|
1444 result = AudioDeviceStart( handle->id[0], callbackHandler );
|
f@0
|
1445 if ( result != noErr ) {
|
f@0
|
1446 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
|
f@0
|
1447 errorText_ = errorStream_.str();
|
f@0
|
1448 goto unlock;
|
f@0
|
1449 }
|
f@0
|
1450 }
|
f@0
|
1451
|
f@0
|
1452 if ( stream_.mode == INPUT ||
|
f@0
|
1453 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
|
f@0
|
1454
|
f@0
|
1455 result = AudioDeviceStart( handle->id[1], callbackHandler );
|
f@0
|
1456 if ( result != noErr ) {
|
f@0
|
1457 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
|
f@0
|
1458 errorText_ = errorStream_.str();
|
f@0
|
1459 goto unlock;
|
f@0
|
1460 }
|
f@0
|
1461 }
|
f@0
|
1462
|
f@0
|
1463 handle->drainCounter = 0;
|
f@0
|
1464 handle->internalDrain = false;
|
f@0
|
1465 stream_.state = STREAM_RUNNING;
|
f@0
|
1466
|
f@0
|
1467 unlock:
|
f@0
|
1468 if ( result == noErr ) return;
|
f@0
|
1469 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
1470 }
|
f@0
|
1471
|
f@0
|
// Stop a running CoreAudio stream, first letting queued output play out
// (drain) unless a drain is already in progress, then stopping the
// callback procedure on each active device.  Stopping an already
// stopped stream only issues a WARNING.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means no drain was requested yet (e.g. not
    // coming from abortStream(), which pre-sets it to 2).  Request one
    // and wait for the callback to signal that draining is finished.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // caller of pthread_cond_wait(); no lock is visible in this
      // function — confirm the locking protocol with callbackEvent().
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input device separately only when it is distinct from the
  // output device (single-device duplex shares one callback).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
1514
|
f@0
|
1515 void RtApiCore :: abortStream( void )
|
f@0
|
1516 {
|
f@0
|
1517 verifyStream();
|
f@0
|
1518 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
1519 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
|
f@0
|
1520 error( RtAudioError::WARNING );
|
f@0
|
1521 return;
|
f@0
|
1522 }
|
f@0
|
1523
|
f@0
|
1524 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
|
f@0
|
1525 handle->drainCounter = 2;
|
f@0
|
1526
|
f@0
|
1527 stopStream();
|
f@0
|
1528 }
|
f@0
|
1529
|
f@0
|
1530 // This function will be called by a spawned thread when the user
|
f@0
|
1531 // callback function signals that the stream should be stopped or
|
f@0
|
1532 // aborted. It is better to handle it this way because the
|
f@0
|
1533 // callbackEvent() function probably should return before the AudioDeviceStop()
|
f@0
|
1534 // function is called.
|
f@0
|
1535 static void *coreStopStream( void *ptr )
|
f@0
|
1536 {
|
f@0
|
1537 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
1538 RtApiCore *object = (RtApiCore *) info->object;
|
f@0
|
1539
|
f@0
|
1540 object->stopStream();
|
f@0
|
1541 pthread_exit( NULL );
|
f@0
|
1542 }
|
f@0
|
1543
|
f@0
|
// Per-buffer I/O handler invoked from the CoreAudio IOProc for either
// the input or the output device of this stream.  Runs the user
// callback to produce/consume one buffer of audio, then copies,
// converts and (de)interleaves data between the user buffers and the
// CoreAudio stream buffers.  Returns SUCCESS in all normal cases
// (including while draining); FAILURE only when called on a closed
// stream.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  // (drainCounter is incremented once per callback below, so > 3 means
  // enough zero-filled buffers have been emitted to flush the device.)
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was requested by the user callback's return value: stop
      // the stream from a helper thread so this callback can return
      // before AudioDeviceStop() is called.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any over/underflow flagged by the xrun listener.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return of 2 requests an immediate abort; 1 requests a
    // drain (play out remaining output, then stop).
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  // ---- Output side: move user data into the CoreAudio output buffers.
  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One single-channel stream per user channel: block-copy each
        // channel's contiguous samples into its own stream buffer.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        // inOffset = distance (in samples) between successive channels
        // of the same frame in the source buffer.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE(review): this inner loop reuses `i`, shadowing the
          // stream-index `i` of the enclosing loop; behavior is correct
          // but the shadowing is easy to misread.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  // ---- Input side: move CoreAudio input buffers into the user buffer.
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One single-channel stream per channel: block-copy each stream
        // buffer into its own contiguous slice of the destination.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        // outOffset = distance (in samples) between successive channels
        // of the same frame in the destination buffer.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // NOTE(review): as on the output side, this inner loop's `i`
          // shadows the stream-index `i` of the enclosing loop.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  return SUCCESS;
}
|
f@0
|
1803
|
f@0
|
1804 const char* RtApiCore :: getErrorCode( OSStatus code )
|
f@0
|
1805 {
|
f@0
|
1806 switch( code ) {
|
f@0
|
1807
|
f@0
|
1808 case kAudioHardwareNotRunningError:
|
f@0
|
1809 return "kAudioHardwareNotRunningError";
|
f@0
|
1810
|
f@0
|
1811 case kAudioHardwareUnspecifiedError:
|
f@0
|
1812 return "kAudioHardwareUnspecifiedError";
|
f@0
|
1813
|
f@0
|
1814 case kAudioHardwareUnknownPropertyError:
|
f@0
|
1815 return "kAudioHardwareUnknownPropertyError";
|
f@0
|
1816
|
f@0
|
1817 case kAudioHardwareBadPropertySizeError:
|
f@0
|
1818 return "kAudioHardwareBadPropertySizeError";
|
f@0
|
1819
|
f@0
|
1820 case kAudioHardwareIllegalOperationError:
|
f@0
|
1821 return "kAudioHardwareIllegalOperationError";
|
f@0
|
1822
|
f@0
|
1823 case kAudioHardwareBadObjectError:
|
f@0
|
1824 return "kAudioHardwareBadObjectError";
|
f@0
|
1825
|
f@0
|
1826 case kAudioHardwareBadDeviceError:
|
f@0
|
1827 return "kAudioHardwareBadDeviceError";
|
f@0
|
1828
|
f@0
|
1829 case kAudioHardwareBadStreamError:
|
f@0
|
1830 return "kAudioHardwareBadStreamError";
|
f@0
|
1831
|
f@0
|
1832 case kAudioHardwareUnsupportedOperationError:
|
f@0
|
1833 return "kAudioHardwareUnsupportedOperationError";
|
f@0
|
1834
|
f@0
|
1835 case kAudioDeviceUnsupportedFormatError:
|
f@0
|
1836 return "kAudioDeviceUnsupportedFormatError";
|
f@0
|
1837
|
f@0
|
1838 case kAudioDevicePermissionsError:
|
f@0
|
1839 return "kAudioDevicePermissionsError";
|
f@0
|
1840
|
f@0
|
1841 default:
|
f@0
|
1842 return "CoreAudio unknown error";
|
f@0
|
1843 }
|
f@0
|
1844 }
|
f@0
|
1845
|
f@0
|
1846 //******************** End of __MACOSX_CORE__ *********************//
|
f@0
|
1847 #endif
|
f@0
|
1848
|
f@0
|
1849 #if defined(__UNIX_JACK__)
|
f@0
|
1850
|
f@0
|
1851 // JACK is a low-latency audio server, originally written for the
|
f@0
|
1852 // GNU/Linux operating system and now also ported to OS-X. It can
|
f@0
|
1853 // connect a number of different applications to an audio device, as
|
f@0
|
1854 // well as allowing them to share audio between themselves.
|
f@0
|
1855 //
|
f@0
|
1856 // When using JACK with RtAudio, "devices" refer to JACK clients that
|
f@0
|
1857 // have ports connected to the server. The JACK server is typically
|
f@0
|
1858 // started in a terminal as follows:
|
f@0
|
1859 //
|
f@0
|
1860 // .jackd -d alsa -d hw:0
|
f@0
|
1861 //
|
f@0
|
1862 // or through an interface program such as qjackctl. Many of the
|
f@0
|
1863 // parameters normally set for a stream are fixed by the JACK server
|
f@0
|
1864 // and can be specified when the JACK server is started. In
|
f@0
|
1865 // particular,
|
f@0
|
1866 //
|
f@0
|
1867 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
|
f@0
|
1868 //
|
f@0
|
1869 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
|
f@0
|
1870 // frames, and number of buffers = 4. Once the server is running, it
|
f@0
|
1871 // is not possible to override these values. If the values are not
|
f@0
|
1872 // specified in the command-line, the JACK server uses default values.
|
f@0
|
1873 //
|
f@0
|
1874 // The JACK server does not have to be running when an instance of
|
f@0
|
1875 // RtApiJack is created, though the function getDeviceCount() will
|
f@0
|
1876 // report 0 devices found until JACK has been started. When no
|
f@0
|
1877 // devices are available (i.e., the JACK server is not running), a
|
f@0
|
1878 // stream cannot be opened.
|
f@0
|
1879
|
f@0
|
1880 #include <jack/jack.h>
|
f@0
|
1881 #include <unistd.h>
|
f@0
|
1882 #include <cstdio>
|
f@0
|
1883
|
f@0
|
1884 // A structure to hold various information related to the Jack API
|
f@0
|
1885 // implementation.
|
f@0
|
struct JackHandle {
  jack_client_t *client;      // connection to the JACK server, shared by both stream directions
  jack_port_t **ports[2];     // registered port arrays; presumably [0] = playback, [1] = capture — TODO confirm against probeDeviceOpen
  std::string deviceName[2];  // client-name prefix of the connected device for each direction
  bool xrun[2];               // set by jackXrun() when the server reports an xrun; cleared after reporting to the user callback
  pthread_cond_t condition;   // used to coordinate stream draining with the callback
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
|
f@0
|
1898
|
f@0
|
// No-op JACK error handler: installed via jack_set_error_function() in
// the RtApiJack constructor (when __RTAUDIO_DEBUG__ is not defined) to
// suppress JACK's internal error printing.  Note: the stray semicolon
// after the function body has been removed (it triggers -Wextra-semi
// style warnings and serves no purpose).
static void jackSilentError( const char * ) {}
|
f@0
|
1900
|
f@0
|
// Constructor: no stream state to initialize here; optionally silences
// JACK's internal error reporting in non-debug builds.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting by routing it to the
  // no-op handler jackSilentError().
  jack_set_error_function( &jackSilentError );
#endif
}
|
f@0
|
1909
|
f@0
|
1910 RtApiJack :: ~RtApiJack()
|
f@0
|
1911 {
|
f@0
|
1912 if ( stream_.state != STREAM_CLOSED ) closeStream();
|
f@0
|
1913 }
|
f@0
|
1914
|
f@0
|
1915 unsigned int RtApiJack :: getDeviceCount( void )
|
f@0
|
1916 {
|
f@0
|
1917 // See if we can become a jack client.
|
f@0
|
1918 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
|
f@0
|
1919 jack_status_t *status = NULL;
|
f@0
|
1920 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
|
f@0
|
1921 if ( client == 0 ) return 0;
|
f@0
|
1922
|
f@0
|
1923 const char **ports;
|
f@0
|
1924 std::string port, previousPort;
|
f@0
|
1925 unsigned int nChannels = 0, nDevices = 0;
|
f@0
|
1926 ports = jack_get_ports( client, NULL, NULL, 0 );
|
f@0
|
1927 if ( ports ) {
|
f@0
|
1928 // Parse the port names up to the first colon (:).
|
f@0
|
1929 size_t iColon = 0;
|
f@0
|
1930 do {
|
f@0
|
1931 port = (char *) ports[ nChannels ];
|
f@0
|
1932 iColon = port.find(":");
|
f@0
|
1933 if ( iColon != std::string::npos ) {
|
f@0
|
1934 port = port.substr( 0, iColon + 1 );
|
f@0
|
1935 if ( port != previousPort ) {
|
f@0
|
1936 nDevices++;
|
f@0
|
1937 previousPort = port;
|
f@0
|
1938 }
|
f@0
|
1939 }
|
f@0
|
1940 } while ( ports[++nChannels] );
|
f@0
|
1941 free( ports );
|
f@0
|
1942 }
|
f@0
|
1943
|
f@0
|
1944 jack_client_close( client );
|
f@0
|
1945 return nDevices;
|
f@0
|
1946 }
|
f@0
|
1947
|
f@0
|
// Query information about JACK "device" number `device` (a unique
// client-name prefix among registered ports).  Fills in the device
// name, channel counts, the server's current sample rate, native
// format, and default-device flags.  On any failure, returns with
// info.probed == false after issuing a warning or INVALID_USE error.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Connect to the server as a temporary client (no auto-start).
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    // Each unique prefix is a device; the prefix matching the requested
    // index becomes the device name.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // (The server's rate is fixed at server start, so it is the only one
  // reported.)
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
|
f@0
|
2038
|
f@0
|
2039 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
|
f@0
|
2040 {
|
f@0
|
2041 CallbackInfo *info = (CallbackInfo *) infoPointer;
|
f@0
|
2042
|
f@0
|
2043 RtApiJack *object = (RtApiJack *) info->object;
|
f@0
|
2044 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
|
f@0
|
2045
|
f@0
|
2046 return 0;
|
f@0
|
2047 }
|
f@0
|
2048
|
f@0
|
2049 // This function will be called by a spawned thread when the Jack
|
f@0
|
2050 // server signals that it is shutting down. It is necessary to handle
|
f@0
|
2051 // it this way because the jackShutdown() function must return before
|
f@0
|
2052 // the jack_deactivate() function (in closeStream()) will return.
|
f@0
|
2053 static void *jackCloseStream( void *ptr )
|
f@0
|
2054 {
|
f@0
|
2055 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
2056 RtApiJack *object = (RtApiJack *) info->object;
|
f@0
|
2057
|
f@0
|
2058 object->closeStream();
|
f@0
|
2059
|
f@0
|
2060 pthread_exit( NULL );
|
f@0
|
2061 }
|
f@0
|
2062 static void jackShutdown( void *infoPointer )
|
f@0
|
2063 {
|
f@0
|
2064 CallbackInfo *info = (CallbackInfo *) infoPointer;
|
f@0
|
2065 RtApiJack *object = (RtApiJack *) info->object;
|
f@0
|
2066
|
f@0
|
2067 // Check current stream state. If stopped, then we'll assume this
|
f@0
|
2068 // was called as a result of a call to RtApiJack::stopStream (the
|
f@0
|
2069 // deactivation of a client handle causes this function to be called).
|
f@0
|
2070 // If not, we'll assume the Jack server is shutting down or some
|
f@0
|
2071 // other problem occurred and we should close the stream.
|
f@0
|
2072 if ( object->isStreamRunning() == false ) return;
|
f@0
|
2073
|
f@0
|
2074 ThreadHandle threadId;
|
f@0
|
2075 pthread_create( &threadId, NULL, jackCloseStream, info );
|
f@0
|
2076 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
|
f@0
|
2077 }
|
f@0
|
2078
|
f@0
|
2079 static int jackXrun( void *infoPointer )
|
f@0
|
2080 {
|
f@0
|
2081 JackHandle *handle = (JackHandle *) infoPointer;
|
f@0
|
2082
|
f@0
|
2083 if ( handle->ports[0] ) handle->xrun[0] = true;
|
f@0
|
2084 if ( handle->ports[1] ) handle->xrun[1] = true;
|
f@0
|
2085
|
f@0
|
2086 return 0;
|
f@0
|
2087 }
|
f@0
|
2088
|
f@0
|
2089 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
2090 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
2091 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
2092 RtAudio::StreamOptions *options )
|
f@0
|
2093 {
|
f@0
|
2094 JackHandle *handle = (JackHandle *) stream_.apiHandle;
|
f@0
|
2095
|
f@0
|
2096 // Look for jack server and try to become a client (only do once per stream).
|
f@0
|
2097 jack_client_t *client = 0;
|
f@0
|
2098 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
|
f@0
|
2099 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
|
f@0
|
2100 jack_status_t *status = NULL;
|
f@0
|
2101 if ( options && !options->streamName.empty() )
|
f@0
|
2102 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
|
f@0
|
2103 else
|
f@0
|
2104 client = jack_client_open( "RtApiJack", jackoptions, status );
|
f@0
|
2105 if ( client == 0 ) {
|
f@0
|
2106 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
|
f@0
|
2107 error( RtAudioError::WARNING );
|
f@0
|
2108 return FAILURE;
|
f@0
|
2109 }
|
f@0
|
2110 }
|
f@0
|
2111 else {
|
f@0
|
2112 // The handle must have been created on an earlier pass.
|
f@0
|
2113 client = handle->client;
|
f@0
|
2114 }
|
f@0
|
2115
|
f@0
|
2116 const char **ports;
|
f@0
|
2117 std::string port, previousPort, deviceName;
|
f@0
|
2118 unsigned int nPorts = 0, nDevices = 0;
|
f@0
|
2119 ports = jack_get_ports( client, NULL, NULL, 0 );
|
f@0
|
2120 if ( ports ) {
|
f@0
|
2121 // Parse the port names up to the first colon (:).
|
f@0
|
2122 size_t iColon = 0;
|
f@0
|
2123 do {
|
f@0
|
2124 port = (char *) ports[ nPorts ];
|
f@0
|
2125 iColon = port.find(":");
|
f@0
|
2126 if ( iColon != std::string::npos ) {
|
f@0
|
2127 port = port.substr( 0, iColon );
|
f@0
|
2128 if ( port != previousPort ) {
|
f@0
|
2129 if ( nDevices == device ) deviceName = port;
|
f@0
|
2130 nDevices++;
|
f@0
|
2131 previousPort = port;
|
f@0
|
2132 }
|
f@0
|
2133 }
|
f@0
|
2134 } while ( ports[++nPorts] );
|
f@0
|
2135 free( ports );
|
f@0
|
2136 }
|
f@0
|
2137
|
f@0
|
2138 if ( device >= nDevices ) {
|
f@0
|
2139 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
|
f@0
|
2140 return FAILURE;
|
f@0
|
2141 }
|
f@0
|
2142
|
f@0
|
2143 // Count the available ports containing the client name as device
|
f@0
|
2144 // channels. Jack "input ports" equal RtAudio output channels.
|
f@0
|
2145 unsigned int nChannels = 0;
|
f@0
|
2146 unsigned long flag = JackPortIsInput;
|
f@0
|
2147 if ( mode == INPUT ) flag = JackPortIsOutput;
|
f@0
|
2148 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
|
f@0
|
2149 if ( ports ) {
|
f@0
|
2150 while ( ports[ nChannels ] ) nChannels++;
|
f@0
|
2151 free( ports );
|
f@0
|
2152 }
|
f@0
|
2153
|
f@0
|
2154 // Compare the jack ports for specified client to the requested number of channels.
|
f@0
|
2155 if ( nChannels < (channels + firstChannel) ) {
|
f@0
|
2156 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
|
f@0
|
2157 errorText_ = errorStream_.str();
|
f@0
|
2158 return FAILURE;
|
f@0
|
2159 }
|
f@0
|
2160
|
f@0
|
2161 // Check the jack server sample rate.
|
f@0
|
2162 unsigned int jackRate = jack_get_sample_rate( client );
|
f@0
|
2163 if ( sampleRate != jackRate ) {
|
f@0
|
2164 jack_client_close( client );
|
f@0
|
2165 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
|
f@0
|
2166 errorText_ = errorStream_.str();
|
f@0
|
2167 return FAILURE;
|
f@0
|
2168 }
|
f@0
|
2169 stream_.sampleRate = jackRate;
|
f@0
|
2170
|
f@0
|
2171 // Get the latency of the JACK port.
|
f@0
|
2172 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
|
f@0
|
2173 if ( ports[ firstChannel ] ) {
|
f@0
|
2174 // Added by Ge Wang
|
f@0
|
2175 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
|
f@0
|
2176 // the range (usually the min and max are equal)
|
f@0
|
2177 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
|
f@0
|
2178 // get the latency range
|
f@0
|
2179 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
|
f@0
|
2180 // be optimistic, use the min!
|
f@0
|
2181 stream_.latency[mode] = latrange.min;
|
f@0
|
2182 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
|
f@0
|
2183 }
|
f@0
|
2184 free( ports );
|
f@0
|
2185
|
f@0
|
2186 // The jack server always uses 32-bit floating-point data.
|
f@0
|
2187 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
|
f@0
|
2188 stream_.userFormat = format;
|
f@0
|
2189
|
f@0
|
2190 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
|
f@0
|
2191 else stream_.userInterleaved = true;
|
f@0
|
2192
|
f@0
|
2193 // Jack always uses non-interleaved buffers.
|
f@0
|
2194 stream_.deviceInterleaved[mode] = false;
|
f@0
|
2195
|
f@0
|
2196 // Jack always provides host byte-ordered data.
|
f@0
|
2197 stream_.doByteSwap[mode] = false;
|
f@0
|
2198
|
f@0
|
2199 // Get the buffer size. The buffer size and number of buffers
|
f@0
|
2200 // (periods) is set when the jack server is started.
|
f@0
|
2201 stream_.bufferSize = (int) jack_get_buffer_size( client );
|
f@0
|
2202 *bufferSize = stream_.bufferSize;
|
f@0
|
2203
|
f@0
|
2204 stream_.nDeviceChannels[mode] = channels;
|
f@0
|
2205 stream_.nUserChannels[mode] = channels;
|
f@0
|
2206
|
f@0
|
2207 // Set flags for buffer conversion.
|
f@0
|
2208 stream_.doConvertBuffer[mode] = false;
|
f@0
|
2209 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
2210 stream_.doConvertBuffer[mode] = true;
|
f@0
|
2211 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
2212 stream_.nUserChannels[mode] > 1 )
|
f@0
|
2213 stream_.doConvertBuffer[mode] = true;
|
f@0
|
2214
|
f@0
|
2215 // Allocate our JackHandle structure for the stream.
|
f@0
|
2216 if ( handle == 0 ) {
|
f@0
|
2217 try {
|
f@0
|
2218 handle = new JackHandle;
|
f@0
|
2219 }
|
f@0
|
2220 catch ( std::bad_alloc& ) {
|
f@0
|
2221 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
|
f@0
|
2222 goto error;
|
f@0
|
2223 }
|
f@0
|
2224
|
f@0
|
2225 if ( pthread_cond_init(&handle->condition, NULL) ) {
|
f@0
|
2226 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
|
f@0
|
2227 goto error;
|
f@0
|
2228 }
|
f@0
|
2229 stream_.apiHandle = (void *) handle;
|
f@0
|
2230 handle->client = client;
|
f@0
|
2231 }
|
f@0
|
2232 handle->deviceName[mode] = deviceName;
|
f@0
|
2233
|
f@0
|
2234 // Allocate necessary internal buffers.
|
f@0
|
2235 unsigned long bufferBytes;
|
f@0
|
2236 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
2237 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
2238 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
2239 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
2240 goto error;
|
f@0
|
2241 }
|
f@0
|
2242
|
f@0
|
2243 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
2244
|
f@0
|
2245 bool makeBuffer = true;
|
f@0
|
2246 if ( mode == OUTPUT )
|
f@0
|
2247 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
2248 else { // mode == INPUT
|
f@0
|
2249 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
|
f@0
|
2250 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
2251 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
|
f@0
|
2252 if ( bufferBytes < bytesOut ) makeBuffer = false;
|
f@0
|
2253 }
|
f@0
|
2254 }
|
f@0
|
2255
|
f@0
|
2256 if ( makeBuffer ) {
|
f@0
|
2257 bufferBytes *= *bufferSize;
|
f@0
|
2258 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
2259 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
2260 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
2262 goto error;
|
f@0
|
2263 }
|
f@0
|
2264 }
|
f@0
|
2265 }
|
f@0
|
2266
|
f@0
|
2267 // Allocate memory for the Jack ports (channels) identifiers.
|
f@0
|
2268 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
|
f@0
|
2269 if ( handle->ports[mode] == NULL ) {
|
f@0
|
2270 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
|
f@0
|
2271 goto error;
|
f@0
|
2272 }
|
f@0
|
2273
|
f@0
|
2274 stream_.device[mode] = device;
|
f@0
|
2275 stream_.channelOffset[mode] = firstChannel;
|
f@0
|
2276 stream_.state = STREAM_STOPPED;
|
f@0
|
2277 stream_.callbackInfo.object = (void *) this;
|
f@0
|
2278
|
f@0
|
2279 if ( stream_.mode == OUTPUT && mode == INPUT )
|
f@0
|
2280 // We had already set up the stream for output.
|
f@0
|
2281 stream_.mode = DUPLEX;
|
f@0
|
2282 else {
|
f@0
|
2283 stream_.mode = mode;
|
f@0
|
2284 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
|
f@0
|
2285 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
|
f@0
|
2286 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
|
f@0
|
2287 }
|
f@0
|
2288
|
f@0
|
2289 // Register our ports.
|
f@0
|
2290 char label[64];
|
f@0
|
2291 if ( mode == OUTPUT ) {
|
f@0
|
2292 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
|
f@0
|
2293 snprintf( label, 64, "outport %d", i );
|
f@0
|
2294 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
|
f@0
|
2295 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
|
f@0
|
2296 }
|
f@0
|
2297 }
|
f@0
|
2298 else {
|
f@0
|
2299 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
|
f@0
|
2300 snprintf( label, 64, "inport %d", i );
|
f@0
|
2301 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
|
f@0
|
2302 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
|
f@0
|
2303 }
|
f@0
|
2304 }
|
f@0
|
2305
|
f@0
|
2306 // Setup the buffer conversion information structure. We don't use
|
f@0
|
2307 // buffers to do channel offsets, so we override that parameter
|
f@0
|
2308 // here.
|
f@0
|
2309 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
|
f@0
|
2310
|
f@0
|
2311 return SUCCESS;
|
f@0
|
2312
|
f@0
|
2313 error:
|
f@0
|
2314 if ( handle ) {
|
f@0
|
2315 pthread_cond_destroy( &handle->condition );
|
f@0
|
2316 jack_client_close( handle->client );
|
f@0
|
2317
|
f@0
|
2318 if ( handle->ports[0] ) free( handle->ports[0] );
|
f@0
|
2319 if ( handle->ports[1] ) free( handle->ports[1] );
|
f@0
|
2320
|
f@0
|
2321 delete handle;
|
f@0
|
2322 stream_.apiHandle = 0;
|
f@0
|
2323 }
|
f@0
|
2324
|
f@0
|
2325 for ( int i=0; i<2; i++ ) {
|
f@0
|
2326 if ( stream_.userBuffer[i] ) {
|
f@0
|
2327 free( stream_.userBuffer[i] );
|
f@0
|
2328 stream_.userBuffer[i] = 0;
|
f@0
|
2329 }
|
f@0
|
2330 }
|
f@0
|
2331
|
f@0
|
2332 if ( stream_.deviceBuffer ) {
|
f@0
|
2333 free( stream_.deviceBuffer );
|
f@0
|
2334 stream_.deviceBuffer = 0;
|
f@0
|
2335 }
|
f@0
|
2336
|
f@0
|
2337 return FAILURE;
|
f@0
|
2338 }
|
f@0
|
2339
|
f@0
|
2340 void RtApiJack :: closeStream( void )
|
f@0
|
2341 {
|
f@0
|
2342 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
2343 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
|
f@0
|
2344 error( RtAudioError::WARNING );
|
f@0
|
2345 return;
|
f@0
|
2346 }
|
f@0
|
2347
|
f@0
|
2348 JackHandle *handle = (JackHandle *) stream_.apiHandle;
|
f@0
|
2349 if ( handle ) {
|
f@0
|
2350
|
f@0
|
2351 if ( stream_.state == STREAM_RUNNING )
|
f@0
|
2352 jack_deactivate( handle->client );
|
f@0
|
2353
|
f@0
|
2354 jack_client_close( handle->client );
|
f@0
|
2355 }
|
f@0
|
2356
|
f@0
|
2357 if ( handle ) {
|
f@0
|
2358 if ( handle->ports[0] ) free( handle->ports[0] );
|
f@0
|
2359 if ( handle->ports[1] ) free( handle->ports[1] );
|
f@0
|
2360 pthread_cond_destroy( &handle->condition );
|
f@0
|
2361 delete handle;
|
f@0
|
2362 stream_.apiHandle = 0;
|
f@0
|
2363 }
|
f@0
|
2364
|
f@0
|
2365 for ( int i=0; i<2; i++ ) {
|
f@0
|
2366 if ( stream_.userBuffer[i] ) {
|
f@0
|
2367 free( stream_.userBuffer[i] );
|
f@0
|
2368 stream_.userBuffer[i] = 0;
|
f@0
|
2369 }
|
f@0
|
2370 }
|
f@0
|
2371
|
f@0
|
2372 if ( stream_.deviceBuffer ) {
|
f@0
|
2373 free( stream_.deviceBuffer );
|
f@0
|
2374 stream_.deviceBuffer = 0;
|
f@0
|
2375 }
|
f@0
|
2376
|
f@0
|
2377 stream_.mode = UNINITIALIZED;
|
f@0
|
2378 stream_.state = STREAM_CLOSED;
|
f@0
|
2379 }
|
f@0
|
2380
|
f@0
|
// Activate the JACK client (which starts the process callback) and
// connect our registered ports to the device's physical ports, then
// mark the stream running.  On any failure after activation, falls
// through to the unlock label and raises a SYSTEM_ERROR.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1;  // assume failure until a connection succeeds
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );  // release the port list before bailing out
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1;  // assume failure until a connection succeeds
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.  For input, the
    // device's output ports feed our registered input ports.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );  // release the port list before bailing out
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset drain bookkeeping for the new run.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  // result == 0 means every step succeeded; otherwise report the
  // errorText_ set above as a system error.
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
2454
|
f@0
|
2455 void RtApiJack :: stopStream( void )
|
f@0
|
2456 {
|
f@0
|
2457 verifyStream();
|
f@0
|
2458 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
2459 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
|
f@0
|
2460 error( RtAudioError::WARNING );
|
f@0
|
2461 return;
|
f@0
|
2462 }
|
f@0
|
2463
|
f@0
|
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
|
f@0
|
2465 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
2466
|
f@0
|
2467 if ( handle->drainCounter == 0 ) {
|
f@0
|
2468 handle->drainCounter = 2;
|
f@0
|
2469 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
|
f@0
|
2470 }
|
f@0
|
2471 }
|
f@0
|
2472
|
f@0
|
2473 jack_deactivate( handle->client );
|
f@0
|
2474 stream_.state = STREAM_STOPPED;
|
f@0
|
2475 }
|
f@0
|
2476
|
f@0
|
2477 void RtApiJack :: abortStream( void )
|
f@0
|
2478 {
|
f@0
|
2479 verifyStream();
|
f@0
|
2480 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
2481 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
|
f@0
|
2482 error( RtAudioError::WARNING );
|
f@0
|
2483 return;
|
f@0
|
2484 }
|
f@0
|
2485
|
f@0
|
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
|
f@0
|
2487 handle->drainCounter = 2;
|
f@0
|
2488
|
f@0
|
2489 stopStream();
|
f@0
|
2490 }
|
f@0
|
2491
|
f@0
|
2492 // This function will be called by a spawned thread when the user
|
f@0
|
2493 // callback function signals that the stream should be stopped or
|
f@0
|
2494 // aborted. It is necessary to handle it this way because the
|
f@0
|
2495 // callbackEvent() function must return before the jack_deactivate()
|
f@0
|
2496 // function will return.
|
f@0
|
2497 static void *jackStopStream( void *ptr )
|
f@0
|
2498 {
|
f@0
|
2499 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
2500 RtApiJack *object = (RtApiJack *) info->object;
|
f@0
|
2501
|
f@0
|
2502 object->stopStream();
|
f@0
|
2503 pthread_exit( NULL );
|
f@0
|
2504 }
|
f@0
|
2505
|
f@0
|
2506 bool RtApiJack :: callbackEvent( unsigned long nframes )
|
f@0
|
2507 {
|
f@0
|
2508 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
|
f@0
|
2509 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
2510 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
|
f@0
|
2511 error( RtAudioError::WARNING );
|
f@0
|
2512 return FAILURE;
|
f@0
|
2513 }
|
f@0
|
2514 if ( stream_.bufferSize != nframes ) {
|
f@0
|
2515 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
|
f@0
|
2516 error( RtAudioError::WARNING );
|
f@0
|
2517 return FAILURE;
|
f@0
|
2518 }
|
f@0
|
2519
|
f@0
|
2520 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
|
f@0
|
2521 JackHandle *handle = (JackHandle *) stream_.apiHandle;
|
f@0
|
2522
|
f@0
|
2523 // Check if we were draining the stream and signal is finished.
|
f@0
|
2524 if ( handle->drainCounter > 3 ) {
|
f@0
|
2525 ThreadHandle threadId;
|
f@0
|
2526
|
f@0
|
2527 stream_.state = STREAM_STOPPING;
|
f@0
|
2528 if ( handle->internalDrain == true )
|
f@0
|
2529 pthread_create( &threadId, NULL, jackStopStream, info );
|
f@0
|
2530 else
|
f@0
|
2531 pthread_cond_signal( &handle->condition );
|
f@0
|
2532 return SUCCESS;
|
f@0
|
2533 }
|
f@0
|
2534
|
f@0
|
2535 // Invoke user callback first, to get fresh output data.
|
f@0
|
2536 if ( handle->drainCounter == 0 ) {
|
f@0
|
2537 RtAudioCallback callback = (RtAudioCallback) info->callback;
|
f@0
|
2538 double streamTime = getStreamTime();
|
f@0
|
2539 RtAudioStreamStatus status = 0;
|
f@0
|
2540 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
|
f@0
|
2541 status |= RTAUDIO_OUTPUT_UNDERFLOW;
|
f@0
|
2542 handle->xrun[0] = false;
|
f@0
|
2543 }
|
f@0
|
2544 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
|
f@0
|
2545 status |= RTAUDIO_INPUT_OVERFLOW;
|
f@0
|
2546 handle->xrun[1] = false;
|
f@0
|
2547 }
|
f@0
|
2548 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
|
f@0
|
2549 stream_.bufferSize, streamTime, status, info->userData );
|
f@0
|
2550 if ( cbReturnValue == 2 ) {
|
f@0
|
2551 stream_.state = STREAM_STOPPING;
|
f@0
|
2552 handle->drainCounter = 2;
|
f@0
|
2553 ThreadHandle id;
|
f@0
|
2554 pthread_create( &id, NULL, jackStopStream, info );
|
f@0
|
2555 return SUCCESS;
|
f@0
|
2556 }
|
f@0
|
2557 else if ( cbReturnValue == 1 ) {
|
f@0
|
2558 handle->drainCounter = 1;
|
f@0
|
2559 handle->internalDrain = true;
|
f@0
|
2560 }
|
f@0
|
2561 }
|
f@0
|
2562
|
f@0
|
2563 jack_default_audio_sample_t *jackbuffer;
|
f@0
|
2564 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
|
f@0
|
2565 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
2566
|
f@0
|
2567 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
|
f@0
|
2568
|
f@0
|
2569 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
|
f@0
|
2570 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
|
f@0
|
2571 memset( jackbuffer, 0, bufferBytes );
|
f@0
|
2572 }
|
f@0
|
2573
|
f@0
|
2574 }
|
f@0
|
2575 else if ( stream_.doConvertBuffer[0] ) {
|
f@0
|
2576
|
f@0
|
2577 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
|
f@0
|
2578
|
f@0
|
2579 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
|
f@0
|
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
|
f@0
|
2581 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
|
f@0
|
2582 }
|
f@0
|
2583 }
|
f@0
|
2584 else { // no buffer conversion
|
f@0
|
2585 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
|
f@0
|
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
|
f@0
|
2587 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
|
f@0
|
2588 }
|
f@0
|
2589 }
|
f@0
|
2590 }
|
f@0
|
2591
|
f@0
|
2592 // Don't bother draining input
|
f@0
|
2593 if ( handle->drainCounter ) {
|
f@0
|
2594 handle->drainCounter++;
|
f@0
|
2595 goto unlock;
|
f@0
|
2596 }
|
f@0
|
2597
|
f@0
|
2598 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
|
f@0
|
2599
|
f@0
|
2600 if ( stream_.doConvertBuffer[1] ) {
|
f@0
|
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
|
f@0
|
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
|
f@0
|
2603 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
|
f@0
|
2604 }
|
f@0
|
2605 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
|
f@0
|
2606 }
|
f@0
|
2607 else { // no buffer conversion
|
f@0
|
2608 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
|
f@0
|
2609 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
|
f@0
|
2610 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
|
f@0
|
2611 }
|
f@0
|
2612 }
|
f@0
|
2613 }
|
f@0
|
2614
|
f@0
|
2615 unlock:
|
f@0
|
2616 RtApi::tickStreamTime();
|
f@0
|
2617 return SUCCESS;
|
f@0
|
2618 }
|
f@0
|
2619 //******************** End of __UNIX_JACK__ *********************//
|
f@0
|
2620 #endif
|
f@0
|
2621
|
f@0
|
2622 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
|
f@0
|
2623
|
f@0
|
2624 // The ASIO API is designed around a callback scheme, so this
|
f@0
|
2625 // implementation is similar to that used for OS-X CoreAudio and Linux
|
f@0
|
2626 // Jack. The primary constraint with ASIO is that it only allows
|
f@0
|
2627 // access to a single driver at a time. Thus, it is not possible to
|
f@0
|
2628 // have more than one simultaneous RtAudio stream.
|
f@0
|
2629 //
|
f@0
|
2630 // This implementation also requires a number of external ASIO files
|
f@0
|
2631 // and a few global variables. The ASIO callback scheme does not
|
f@0
|
2632 // allow for the passing of user data, so we must create a global
|
f@0
|
2633 // pointer to our callbackInfo structure.
|
f@0
|
2634 //
|
f@0
|
2635 // On unix systems, we make use of a pthread condition variable.
|
f@0
|
2636 // Since there is no equivalent in Windows, I hacked something based
|
f@0
|
2637 // on information found in
|
f@0
|
2638 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
|
f@0
|
2639
|
f@0
|
2640 #include "asiosys.h"
|
f@0
|
2641 #include "asio.h"
|
f@0
|
2642 #include "iasiothiscallresolver.h"
|
f@0
|
2643 #include "asiodrivers.h"
|
f@0
|
2644 #include <cmath>
|
f@0
|
2645
|
f@0
|
// File-scope state for the ASIO backend.  The ASIO callback scheme
// carries no user data, so a global CallbackInfo pointer is required
// (see the note above).
static AsioDrivers drivers;             // driver enumeration/loading helper
static ASIOCallbacks asioCallbacks;     // callback table registered with the driver
static ASIODriverInfo driverInfo;       // populated via ASIOInit()
static CallbackInfo *asioCallbackInfo;  // global stand-in for per-callback user data
static bool asioXRun;                   // presumably set on driver xrun notification — TODO confirm where it is written
|
f@0
|
2651
|
f@0
|
// Per-stream private data for the ASIO backend (stored in
// stream_.apiHandle), mirroring the JackHandle used by the JACK API.
struct AsioHandle {
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // per-channel buffer descriptors for the driver
  HANDLE condition;       // Win32 event standing in for a pthread condition variable (see note above)

  AsioHandle()
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
|
f@0
|
2661
|
f@0
|
2662 // Function declarations (definitions at end of section)
|
f@0
|
2663 static const char* getAsioErrorString( ASIOError result );
|
f@0
|
2664 static void sampleRateChanged( ASIOSampleRate sRate );
|
f@0
|
2665 static long asioMessages( long selector, long value, void* message, double* opt );
|
f@0
|
2666
|
f@0
|
2667 RtApiAsio :: RtApiAsio()
|
f@0
|
2668 {
|
f@0
|
2669 // ASIO cannot run on a multi-threaded appartment. You can call
|
f@0
|
2670 // CoInitialize beforehand, but it must be for appartment threading
|
f@0
|
2671 // (in which case, CoInitilialize will return S_FALSE here).
|
f@0
|
2672 coInitialized_ = false;
|
f@0
|
2673 HRESULT hr = CoInitialize( NULL );
|
f@0
|
2674 if ( FAILED(hr) ) {
|
f@0
|
2675 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
|
f@0
|
2676 error( RtAudioError::WARNING );
|
f@0
|
2677 }
|
f@0
|
2678 coInitialized_ = true;
|
f@0
|
2679
|
f@0
|
2680 drivers.removeCurrentDriver();
|
f@0
|
2681 driverInfo.asioVersion = 2;
|
f@0
|
2682
|
f@0
|
2683 // See note in DirectSound implementation about GetDesktopWindow().
|
f@0
|
2684 driverInfo.sysRef = GetForegroundWindow();
|
f@0
|
2685 }
|
f@0
|
2686
|
f@0
|
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() call.
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}
|
f@0
|
2692
|
f@0
|
// Returns the number of ASIO drivers installed on the system; each
// driver is presented as one RtAudio device.
unsigned int RtApiAsio :: getDeviceCount( void )
{
  return (unsigned int) drivers.asioGetNumDev();
}
|
f@0
|
2697
|
f@0
|
2698 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
|
f@0
|
2699 {
|
f@0
|
2700 RtAudio::DeviceInfo info;
|
f@0
|
2701 info.probed = false;
|
f@0
|
2702
|
f@0
|
2703 // Get device ID
|
f@0
|
2704 unsigned int nDevices = getDeviceCount();
|
f@0
|
2705 if ( nDevices == 0 ) {
|
f@0
|
2706 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
|
f@0
|
2707 error( RtAudioError::INVALID_USE );
|
f@0
|
2708 return info;
|
f@0
|
2709 }
|
f@0
|
2710
|
f@0
|
2711 if ( device >= nDevices ) {
|
f@0
|
2712 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
|
f@0
|
2713 error( RtAudioError::INVALID_USE );
|
f@0
|
2714 return info;
|
f@0
|
2715 }
|
f@0
|
2716
|
f@0
|
2717 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
|
f@0
|
2718 if ( stream_.state != STREAM_CLOSED ) {
|
f@0
|
2719 if ( device >= devices_.size() ) {
|
f@0
|
2720 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
|
f@0
|
2721 error( RtAudioError::WARNING );
|
f@0
|
2722 return info;
|
f@0
|
2723 }
|
f@0
|
2724 return devices_[ device ];
|
f@0
|
2725 }
|
f@0
|
2726
|
f@0
|
2727 char driverName[32];
|
f@0
|
2728 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
|
f@0
|
2729 if ( result != ASE_OK ) {
|
f@0
|
2730 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
|
f@0
|
2731 errorText_ = errorStream_.str();
|
f@0
|
2732 error( RtAudioError::WARNING );
|
f@0
|
2733 return info;
|
f@0
|
2734 }
|
f@0
|
2735
|
f@0
|
2736 info.name = driverName;
|
f@0
|
2737
|
f@0
|
2738 if ( !drivers.loadDriver( driverName ) ) {
|
f@0
|
2739 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
|
f@0
|
2740 errorText_ = errorStream_.str();
|
f@0
|
2741 error( RtAudioError::WARNING );
|
f@0
|
2742 return info;
|
f@0
|
2743 }
|
f@0
|
2744
|
f@0
|
2745 result = ASIOInit( &driverInfo );
|
f@0
|
2746 if ( result != ASE_OK ) {
|
f@0
|
2747 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
|
f@0
|
2748 errorText_ = errorStream_.str();
|
f@0
|
2749 error( RtAudioError::WARNING );
|
f@0
|
2750 return info;
|
f@0
|
2751 }
|
f@0
|
2752
|
f@0
|
2753 // Determine the device channel information.
|
f@0
|
2754 long inputChannels, outputChannels;
|
f@0
|
2755 result = ASIOGetChannels( &inputChannels, &outputChannels );
|
f@0
|
2756 if ( result != ASE_OK ) {
|
f@0
|
2757 drivers.removeCurrentDriver();
|
f@0
|
2758 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
|
f@0
|
2759 errorText_ = errorStream_.str();
|
f@0
|
2760 error( RtAudioError::WARNING );
|
f@0
|
2761 return info;
|
f@0
|
2762 }
|
f@0
|
2763
|
f@0
|
2764 info.outputChannels = outputChannels;
|
f@0
|
2765 info.inputChannels = inputChannels;
|
f@0
|
2766 if ( info.outputChannels > 0 && info.inputChannels > 0 )
|
f@0
|
2767 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
|
f@0
|
2768
|
f@0
|
2769 // Determine the supported sample rates.
|
f@0
|
2770 info.sampleRates.clear();
|
f@0
|
2771 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
|
f@0
|
2772 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
|
f@0
|
2773 if ( result == ASE_OK )
|
f@0
|
2774 info.sampleRates.push_back( SAMPLE_RATES[i] );
|
f@0
|
2775 }
|
f@0
|
2776
|
f@0
|
2777 // Determine supported data types ... just check first channel and assume rest are the same.
|
f@0
|
2778 ASIOChannelInfo channelInfo;
|
f@0
|
2779 channelInfo.channel = 0;
|
f@0
|
2780 channelInfo.isInput = true;
|
f@0
|
2781 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
|
f@0
|
2782 result = ASIOGetChannelInfo( &channelInfo );
|
f@0
|
2783 if ( result != ASE_OK ) {
|
f@0
|
2784 drivers.removeCurrentDriver();
|
f@0
|
2785 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
|
f@0
|
2786 errorText_ = errorStream_.str();
|
f@0
|
2787 error( RtAudioError::WARNING );
|
f@0
|
2788 return info;
|
f@0
|
2789 }
|
f@0
|
2790
|
f@0
|
2791 info.nativeFormats = 0;
|
f@0
|
2792 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
|
f@0
|
2793 info.nativeFormats |= RTAUDIO_SINT16;
|
f@0
|
2794 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
|
f@0
|
2795 info.nativeFormats |= RTAUDIO_SINT32;
|
f@0
|
2796 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
|
f@0
|
2797 info.nativeFormats |= RTAUDIO_FLOAT32;
|
f@0
|
2798 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
|
f@0
|
2799 info.nativeFormats |= RTAUDIO_FLOAT64;
|
f@0
|
2800 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
|
f@0
|
2801 info.nativeFormats |= RTAUDIO_SINT24;
|
f@0
|
2802
|
f@0
|
2803 if ( info.outputChannels > 0 )
|
f@0
|
2804 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
|
f@0
|
2805 if ( info.inputChannels > 0 )
|
f@0
|
2806 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
|
f@0
|
2807
|
f@0
|
2808 info.probed = true;
|
f@0
|
2809 drivers.removeCurrentDriver();
|
f@0
|
2810 return info;
|
f@0
|
2811 }
|
f@0
|
2812
|
f@0
|
2813 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
|
f@0
|
2814 {
|
f@0
|
2815 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
|
f@0
|
2816 object->callbackEvent( index );
|
f@0
|
2817 }
|
f@0
|
2818
|
f@0
|
2819 void RtApiAsio :: saveDeviceInfo( void )
|
f@0
|
2820 {
|
f@0
|
2821 devices_.clear();
|
f@0
|
2822
|
f@0
|
2823 unsigned int nDevices = getDeviceCount();
|
f@0
|
2824 devices_.resize( nDevices );
|
f@0
|
2825 for ( unsigned int i=0; i<nDevices; i++ )
|
f@0
|
2826 devices_[i] = getDeviceInfo( i );
|
f@0
|
2827 }
|
f@0
|
2828
|
f@0
|
2829 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
2830 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
2831 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
2832 RtAudio::StreamOptions *options )
|
f@0
|
2833 {
|
f@0
|
2834 // For ASIO, a duplex stream MUST use the same driver.
|
f@0
|
2835 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
|
f@0
|
2836 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
|
f@0
|
2837 return FAILURE;
|
f@0
|
2838 }
|
f@0
|
2839
|
f@0
|
2840 char driverName[32];
|
f@0
|
2841 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
|
f@0
|
2842 if ( result != ASE_OK ) {
|
f@0
|
2843 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
|
f@0
|
2844 errorText_ = errorStream_.str();
|
f@0
|
2845 return FAILURE;
|
f@0
|
2846 }
|
f@0
|
2847
|
f@0
|
2848 // Only load the driver once for duplex stream.
|
f@0
|
2849 if ( mode != INPUT || stream_.mode != OUTPUT ) {
|
f@0
|
2850 // The getDeviceInfo() function will not work when a stream is open
|
f@0
|
2851 // because ASIO does not allow multiple devices to run at the same
|
f@0
|
2852 // time. Thus, we'll probe the system before opening a stream and
|
f@0
|
2853 // save the results for use by getDeviceInfo().
|
f@0
|
2854 this->saveDeviceInfo();
|
f@0
|
2855
|
f@0
|
2856 if ( !drivers.loadDriver( driverName ) ) {
|
f@0
|
2857 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
|
f@0
|
2858 errorText_ = errorStream_.str();
|
f@0
|
2859 return FAILURE;
|
f@0
|
2860 }
|
f@0
|
2861
|
f@0
|
2862 result = ASIOInit( &driverInfo );
|
f@0
|
2863 if ( result != ASE_OK ) {
|
f@0
|
2864 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
|
f@0
|
2865 errorText_ = errorStream_.str();
|
f@0
|
2866 return FAILURE;
|
f@0
|
2867 }
|
f@0
|
2868 }
|
f@0
|
2869
|
f@0
|
2870 // Check the device channel count.
|
f@0
|
2871 long inputChannels, outputChannels;
|
f@0
|
2872 result = ASIOGetChannels( &inputChannels, &outputChannels );
|
f@0
|
2873 if ( result != ASE_OK ) {
|
f@0
|
2874 drivers.removeCurrentDriver();
|
f@0
|
2875 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
|
f@0
|
2876 errorText_ = errorStream_.str();
|
f@0
|
2877 return FAILURE;
|
f@0
|
2878 }
|
f@0
|
2879
|
f@0
|
2880 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
|
f@0
|
2881 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
|
f@0
|
2882 drivers.removeCurrentDriver();
|
f@0
|
2883 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
|
f@0
|
2884 errorText_ = errorStream_.str();
|
f@0
|
2885 return FAILURE;
|
f@0
|
2886 }
|
f@0
|
2887 stream_.nDeviceChannels[mode] = channels;
|
f@0
|
2888 stream_.nUserChannels[mode] = channels;
|
f@0
|
2889 stream_.channelOffset[mode] = firstChannel;
|
f@0
|
2890
|
f@0
|
2891 // Verify the sample rate is supported.
|
f@0
|
2892 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
|
f@0
|
2893 if ( result != ASE_OK ) {
|
f@0
|
2894 drivers.removeCurrentDriver();
|
f@0
|
2895 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
|
f@0
|
2896 errorText_ = errorStream_.str();
|
f@0
|
2897 return FAILURE;
|
f@0
|
2898 }
|
f@0
|
2899
|
f@0
|
2900 // Get the current sample rate
|
f@0
|
2901 ASIOSampleRate currentRate;
|
f@0
|
2902 result = ASIOGetSampleRate( ¤tRate );
|
f@0
|
2903 if ( result != ASE_OK ) {
|
f@0
|
2904 drivers.removeCurrentDriver();
|
f@0
|
2905 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
|
f@0
|
2906 errorText_ = errorStream_.str();
|
f@0
|
2907 return FAILURE;
|
f@0
|
2908 }
|
f@0
|
2909
|
f@0
|
2910 // Set the sample rate only if necessary
|
f@0
|
2911 if ( currentRate != sampleRate ) {
|
f@0
|
2912 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
|
f@0
|
2913 if ( result != ASE_OK ) {
|
f@0
|
2914 drivers.removeCurrentDriver();
|
f@0
|
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
|
f@0
|
2916 errorText_ = errorStream_.str();
|
f@0
|
2917 return FAILURE;
|
f@0
|
2918 }
|
f@0
|
2919 }
|
f@0
|
2920
|
f@0
|
2921 // Determine the driver data type.
|
f@0
|
2922 ASIOChannelInfo channelInfo;
|
f@0
|
2923 channelInfo.channel = 0;
|
f@0
|
2924 if ( mode == OUTPUT ) channelInfo.isInput = false;
|
f@0
|
2925 else channelInfo.isInput = true;
|
f@0
|
2926 result = ASIOGetChannelInfo( &channelInfo );
|
f@0
|
2927 if ( result != ASE_OK ) {
|
f@0
|
2928 drivers.removeCurrentDriver();
|
f@0
|
2929 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
|
f@0
|
2930 errorText_ = errorStream_.str();
|
f@0
|
2931 return FAILURE;
|
f@0
|
2932 }
|
f@0
|
2933
|
f@0
|
2934 // Assuming WINDOWS host is always little-endian.
|
f@0
|
2935 stream_.doByteSwap[mode] = false;
|
f@0
|
2936 stream_.userFormat = format;
|
f@0
|
2937 stream_.deviceFormat[mode] = 0;
|
f@0
|
2938 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
|
f@0
|
2939 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
2940 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
|
f@0
|
2941 }
|
f@0
|
2942 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
|
f@0
|
2943 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
2944 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
|
f@0
|
2945 }
|
f@0
|
2946 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
|
f@0
|
2947 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
|
f@0
|
2948 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
|
f@0
|
2949 }
|
f@0
|
2950 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
|
f@0
|
2951 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
|
f@0
|
2952 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
|
f@0
|
2953 }
|
f@0
|
2954 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
|
f@0
|
2955 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
2956 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
|
f@0
|
2957 }
|
f@0
|
2958
|
f@0
|
2959 if ( stream_.deviceFormat[mode] == 0 ) {
|
f@0
|
2960 drivers.removeCurrentDriver();
|
f@0
|
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
|
f@0
|
2962 errorText_ = errorStream_.str();
|
f@0
|
2963 return FAILURE;
|
f@0
|
2964 }
|
f@0
|
2965
|
f@0
|
2966 // Set the buffer size. For a duplex stream, this will end up
|
f@0
|
2967 // setting the buffer size based on the input constraints, which
|
f@0
|
2968 // should be ok.
|
f@0
|
2969 long minSize, maxSize, preferSize, granularity;
|
f@0
|
2970 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
|
f@0
|
2971 if ( result != ASE_OK ) {
|
f@0
|
2972 drivers.removeCurrentDriver();
|
f@0
|
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
|
f@0
|
2974 errorText_ = errorStream_.str();
|
f@0
|
2975 return FAILURE;
|
f@0
|
2976 }
|
f@0
|
2977
|
f@0
|
2978 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
|
f@0
|
2979 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
|
f@0
|
2980 else if ( granularity == -1 ) {
|
f@0
|
2981 // Make sure bufferSize is a power of two.
|
f@0
|
2982 int log2_of_min_size = 0;
|
f@0
|
2983 int log2_of_max_size = 0;
|
f@0
|
2984
|
f@0
|
2985 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
|
f@0
|
2986 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
|
f@0
|
2987 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
|
f@0
|
2988 }
|
f@0
|
2989
|
f@0
|
2990 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
|
f@0
|
2991 int min_delta_num = log2_of_min_size;
|
f@0
|
2992
|
f@0
|
2993 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
|
f@0
|
2994 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
|
f@0
|
2995 if (current_delta < min_delta) {
|
f@0
|
2996 min_delta = current_delta;
|
f@0
|
2997 min_delta_num = i;
|
f@0
|
2998 }
|
f@0
|
2999 }
|
f@0
|
3000
|
f@0
|
3001 *bufferSize = ( (unsigned int)1 << min_delta_num );
|
f@0
|
3002 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
|
f@0
|
3003 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
|
f@0
|
3004 }
|
f@0
|
3005 else if ( granularity != 0 ) {
|
f@0
|
3006 // Set to an even multiple of granularity, rounding up.
|
f@0
|
3007 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
|
f@0
|
3008 }
|
f@0
|
3009
|
f@0
|
3010 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
|
f@0
|
3011 drivers.removeCurrentDriver();
|
f@0
|
3012 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
|
f@0
|
3013 return FAILURE;
|
f@0
|
3014 }
|
f@0
|
3015
|
f@0
|
3016 stream_.bufferSize = *bufferSize;
|
f@0
|
3017 stream_.nBuffers = 2;
|
f@0
|
3018
|
f@0
|
3019 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
|
f@0
|
3020 else stream_.userInterleaved = true;
|
f@0
|
3021
|
f@0
|
3022 // ASIO always uses non-interleaved buffers.
|
f@0
|
3023 stream_.deviceInterleaved[mode] = false;
|
f@0
|
3024
|
f@0
|
3025 // Allocate, if necessary, our AsioHandle structure for the stream.
|
f@0
|
3026 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
|
f@0
|
3027 if ( handle == 0 ) {
|
f@0
|
3028 try {
|
f@0
|
3029 handle = new AsioHandle;
|
f@0
|
3030 }
|
f@0
|
3031 catch ( std::bad_alloc& ) {
|
f@0
|
3032 //if ( handle == NULL ) {
|
f@0
|
3033 drivers.removeCurrentDriver();
|
f@0
|
3034 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
|
f@0
|
3035 return FAILURE;
|
f@0
|
3036 }
|
f@0
|
3037 handle->bufferInfos = 0;
|
f@0
|
3038
|
f@0
|
3039 // Create a manual-reset event.
|
f@0
|
3040 handle->condition = CreateEvent( NULL, // no security
|
f@0
|
3041 TRUE, // manual-reset
|
f@0
|
3042 FALSE, // non-signaled initially
|
f@0
|
3043 NULL ); // unnamed
|
f@0
|
3044 stream_.apiHandle = (void *) handle;
|
f@0
|
3045 }
|
f@0
|
3046
|
f@0
|
3047 // Create the ASIO internal buffers. Since RtAudio sets up input
|
f@0
|
3048 // and output separately, we'll have to dispose of previously
|
f@0
|
3049 // created output buffers for a duplex stream.
|
f@0
|
3050 long inputLatency, outputLatency;
|
f@0
|
3051 if ( mode == INPUT && stream_.mode == OUTPUT ) {
|
f@0
|
3052 ASIODisposeBuffers();
|
f@0
|
3053 if ( handle->bufferInfos ) free( handle->bufferInfos );
|
f@0
|
3054 }
|
f@0
|
3055
|
f@0
|
3056 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
|
f@0
|
3057 bool buffersAllocated = false;
|
f@0
|
3058 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
|
f@0
|
3059 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
|
f@0
|
3060 if ( handle->bufferInfos == NULL ) {
|
f@0
|
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
|
f@0
|
3062 errorText_ = errorStream_.str();
|
f@0
|
3063 goto error;
|
f@0
|
3064 }
|
f@0
|
3065
|
f@0
|
3066 ASIOBufferInfo *infos;
|
f@0
|
3067 infos = handle->bufferInfos;
|
f@0
|
3068 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
|
f@0
|
3069 infos->isInput = ASIOFalse;
|
f@0
|
3070 infos->channelNum = i + stream_.channelOffset[0];
|
f@0
|
3071 infos->buffers[0] = infos->buffers[1] = 0;
|
f@0
|
3072 }
|
f@0
|
3073 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
|
f@0
|
3074 infos->isInput = ASIOTrue;
|
f@0
|
3075 infos->channelNum = i + stream_.channelOffset[1];
|
f@0
|
3076 infos->buffers[0] = infos->buffers[1] = 0;
|
f@0
|
3077 }
|
f@0
|
3078
|
f@0
|
3079 // Set up the ASIO callback structure and create the ASIO data buffers.
|
f@0
|
3080 asioCallbacks.bufferSwitch = &bufferSwitch;
|
f@0
|
3081 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
|
f@0
|
3082 asioCallbacks.asioMessage = &asioMessages;
|
f@0
|
3083 asioCallbacks.bufferSwitchTimeInfo = NULL;
|
f@0
|
3084 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
|
f@0
|
3085 if ( result != ASE_OK ) {
|
f@0
|
3086 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
|
f@0
|
3087 errorText_ = errorStream_.str();
|
f@0
|
3088 goto error;
|
f@0
|
3089 }
|
f@0
|
3090 buffersAllocated = true;
|
f@0
|
3091
|
f@0
|
3092 // Set flags for buffer conversion.
|
f@0
|
3093 stream_.doConvertBuffer[mode] = false;
|
f@0
|
3094 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
3095 stream_.doConvertBuffer[mode] = true;
|
f@0
|
3096 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
3097 stream_.nUserChannels[mode] > 1 )
|
f@0
|
3098 stream_.doConvertBuffer[mode] = true;
|
f@0
|
3099
|
f@0
|
3100 // Allocate necessary internal buffers
|
f@0
|
3101 unsigned long bufferBytes;
|
f@0
|
3102 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
3103 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
3104 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
3106 goto error;
|
f@0
|
3107 }
|
f@0
|
3108
|
f@0
|
3109 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
3110
|
f@0
|
3111 bool makeBuffer = true;
|
f@0
|
3112 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
3113 if ( mode == INPUT ) {
|
f@0
|
3114 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
3115 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
3116 if ( bufferBytes <= bytesOut ) makeBuffer = false;
|
f@0
|
3117 }
|
f@0
|
3118 }
|
f@0
|
3119
|
f@0
|
3120 if ( makeBuffer ) {
|
f@0
|
3121 bufferBytes *= *bufferSize;
|
f@0
|
3122 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
3123 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
3124 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
3125 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
3126 goto error;
|
f@0
|
3127 }
|
f@0
|
3128 }
|
f@0
|
3129 }
|
f@0
|
3130
|
f@0
|
3131 stream_.sampleRate = sampleRate;
|
f@0
|
3132 stream_.device[mode] = device;
|
f@0
|
3133 stream_.state = STREAM_STOPPED;
|
f@0
|
3134 asioCallbackInfo = &stream_.callbackInfo;
|
f@0
|
3135 stream_.callbackInfo.object = (void *) this;
|
f@0
|
3136 if ( stream_.mode == OUTPUT && mode == INPUT )
|
f@0
|
3137 // We had already set up an output stream.
|
f@0
|
3138 stream_.mode = DUPLEX;
|
f@0
|
3139 else
|
f@0
|
3140 stream_.mode = mode;
|
f@0
|
3141
|
f@0
|
3142 // Determine device latencies
|
f@0
|
3143 result = ASIOGetLatencies( &inputLatency, &outputLatency );
|
f@0
|
3144 if ( result != ASE_OK ) {
|
f@0
|
3145 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
|
f@0
|
3146 errorText_ = errorStream_.str();
|
f@0
|
3147 error( RtAudioError::WARNING); // warn but don't fail
|
f@0
|
3148 }
|
f@0
|
3149 else {
|
f@0
|
3150 stream_.latency[0] = outputLatency;
|
f@0
|
3151 stream_.latency[1] = inputLatency;
|
f@0
|
3152 }
|
f@0
|
3153
|
f@0
|
3154 // Setup the buffer conversion information structure. We don't use
|
f@0
|
3155 // buffers to do channel offsets, so we override that parameter
|
f@0
|
3156 // here.
|
f@0
|
3157 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
|
f@0
|
3158
|
f@0
|
3159 return SUCCESS;
|
f@0
|
3160
|
f@0
|
3161 error:
|
f@0
|
3162 if ( buffersAllocated )
|
f@0
|
3163 ASIODisposeBuffers();
|
f@0
|
3164 drivers.removeCurrentDriver();
|
f@0
|
3165
|
f@0
|
3166 if ( handle ) {
|
f@0
|
3167 CloseHandle( handle->condition );
|
f@0
|
3168 if ( handle->bufferInfos )
|
f@0
|
3169 free( handle->bufferInfos );
|
f@0
|
3170 delete handle;
|
f@0
|
3171 stream_.apiHandle = 0;
|
f@0
|
3172 }
|
f@0
|
3173
|
f@0
|
3174 for ( int i=0; i<2; i++ ) {
|
f@0
|
3175 if ( stream_.userBuffer[i] ) {
|
f@0
|
3176 free( stream_.userBuffer[i] );
|
f@0
|
3177 stream_.userBuffer[i] = 0;
|
f@0
|
3178 }
|
f@0
|
3179 }
|
f@0
|
3180
|
f@0
|
3181 if ( stream_.deviceBuffer ) {
|
f@0
|
3182 free( stream_.deviceBuffer );
|
f@0
|
3183 stream_.deviceBuffer = 0;
|
f@0
|
3184 }
|
f@0
|
3185
|
f@0
|
3186 return FAILURE;
|
f@0
|
3187 }
|
f@0
|
3188
|
f@0
|
3189 void RtApiAsio :: closeStream()
|
f@0
|
3190 {
|
f@0
|
3191 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
3192 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
|
f@0
|
3193 error( RtAudioError::WARNING );
|
f@0
|
3194 return;
|
f@0
|
3195 }
|
f@0
|
3196
|
f@0
|
3197 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
3198 stream_.state = STREAM_STOPPED;
|
f@0
|
3199 ASIOStop();
|
f@0
|
3200 }
|
f@0
|
3201 ASIODisposeBuffers();
|
f@0
|
3202 drivers.removeCurrentDriver();
|
f@0
|
3203
|
f@0
|
3204 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
|
f@0
|
3205 if ( handle ) {
|
f@0
|
3206 CloseHandle( handle->condition );
|
f@0
|
3207 if ( handle->bufferInfos )
|
f@0
|
3208 free( handle->bufferInfos );
|
f@0
|
3209 delete handle;
|
f@0
|
3210 stream_.apiHandle = 0;
|
f@0
|
3211 }
|
f@0
|
3212
|
f@0
|
3213 for ( int i=0; i<2; i++ ) {
|
f@0
|
3214 if ( stream_.userBuffer[i] ) {
|
f@0
|
3215 free( stream_.userBuffer[i] );
|
f@0
|
3216 stream_.userBuffer[i] = 0;
|
f@0
|
3217 }
|
f@0
|
3218 }
|
f@0
|
3219
|
f@0
|
3220 if ( stream_.deviceBuffer ) {
|
f@0
|
3221 free( stream_.deviceBuffer );
|
f@0
|
3222 stream_.deviceBuffer = 0;
|
f@0
|
3223 }
|
f@0
|
3224
|
f@0
|
3225 stream_.mode = UNINITIALIZED;
|
f@0
|
3226 stream_.state = STREAM_CLOSED;
|
f@0
|
3227 }
|
f@0
|
3228
|
f@0
|
// Cleared by startStream(); NOTE(review): nothing in this translation-unit
// view sets it true — presumably the stop-thread path elsewhere does, to
// avoid spawning duplicate stop threads. Confirm against the full file.
bool stopThreadCalled = false;
|
f@0
|
3230
|
f@0
|
3231 void RtApiAsio :: startStream()
|
f@0
|
3232 {
|
f@0
|
3233 verifyStream();
|
f@0
|
3234 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
3235 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
|
f@0
|
3236 error( RtAudioError::WARNING );
|
f@0
|
3237 return;
|
f@0
|
3238 }
|
f@0
|
3239
|
f@0
|
3240 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
|
f@0
|
3241 ASIOError result = ASIOStart();
|
f@0
|
3242 if ( result != ASE_OK ) {
|
f@0
|
3243 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
|
f@0
|
3244 errorText_ = errorStream_.str();
|
f@0
|
3245 goto unlock;
|
f@0
|
3246 }
|
f@0
|
3247
|
f@0
|
3248 handle->drainCounter = 0;
|
f@0
|
3249 handle->internalDrain = false;
|
f@0
|
3250 ResetEvent( handle->condition );
|
f@0
|
3251 stream_.state = STREAM_RUNNING;
|
f@0
|
3252 asioXRun = false;
|
f@0
|
3253
|
f@0
|
3254 unlock:
|
f@0
|
3255 stopThreadCalled = false;
|
f@0
|
3256
|
f@0
|
3257 if ( result == ASE_OK ) return;
|
f@0
|
3258 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
3259 }
|
f@0
|
3260
|
f@0
|
3261 void RtApiAsio :: stopStream()
|
f@0
|
3262 {
|
f@0
|
3263 verifyStream();
|
f@0
|
3264 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
3265 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
|
f@0
|
3266 error( RtAudioError::WARNING );
|
f@0
|
3267 return;
|
f@0
|
3268 }
|
f@0
|
3269
|
f@0
|
3270 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
|
f@0
|
3271 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
3272 if ( handle->drainCounter == 0 ) {
|
f@0
|
3273 handle->drainCounter = 2;
|
f@0
|
3274 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
|
f@0
|
3275 }
|
f@0
|
3276 }
|
f@0
|
3277
|
f@0
|
3278 stream_.state = STREAM_STOPPED;
|
f@0
|
3279
|
f@0
|
3280 ASIOError result = ASIOStop();
|
f@0
|
3281 if ( result != ASE_OK ) {
|
f@0
|
3282 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
|
f@0
|
3283 errorText_ = errorStream_.str();
|
f@0
|
3284 }
|
f@0
|
3285
|
f@0
|
3286 if ( result == ASE_OK ) return;
|
f@0
|
3287 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
3288 }
|
f@0
|
3289
|
f@0
|
3290 void RtApiAsio :: abortStream()
|
f@0
|
3291 {
|
f@0
|
3292 verifyStream();
|
f@0
|
3293 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
3294 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
|
f@0
|
3295 error( RtAudioError::WARNING );
|
f@0
|
3296 return;
|
f@0
|
3297 }
|
f@0
|
3298
|
f@0
|
3299 // The following lines were commented-out because some behavior was
|
f@0
|
3300 // noted where the device buffers need to be zeroed to avoid
|
f@0
|
3301 // continuing sound, even when the device buffers are completely
|
f@0
|
3302 // disposed. So now, calling abort is the same as calling stop.
|
f@0
|
3303 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
|
f@0
|
3304 // handle->drainCounter = 2;
|
f@0
|
3305 stopStream();
|
f@0
|
3306 }
|
f@0
|
3307
|
f@0
|
3308 // This function will be called by a spawned thread when the user
|
f@0
|
3309 // callback function signals that the stream should be stopped or
|
f@0
|
3310 // aborted. It is necessary to handle it this way because the
|
f@0
|
3311 // callbackEvent() function must return before the ASIOStop()
|
f@0
|
3312 // function will return.
|
f@0
|
3313 static unsigned __stdcall asioStopStream( void *ptr )
|
f@0
|
3314 {
|
f@0
|
3315 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
3316 RtApiAsio *object = (RtApiAsio *) info->object;
|
f@0
|
3317
|
f@0
|
3318 object->stopStream();
|
f@0
|
3319 _endthreadex( 0 );
|
f@0
|
3320 return 0;
|
f@0
|
3321 }
|
f@0
|
3322
|
f@0
|
// Handle one ASIO buffer switch: run the user callback, perform any
// format conversion / byte swapping, and move audio between the user
// buffers and the driver's per-channel buffers selected by bufferIndex.
// Also drives the drain protocol used by stopStream(): drainCounter
// 1 = internal drain requested, 2+ = output silence, >3 = drain complete.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  // Nothing to do once a stop is underway; a closed stream is a logic error.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // (callbackEvent must return before ASIOStop() can complete,
      // hence the separate thread.)
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged by the driver (via asioMessages) and clear it.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately (no drain) ...
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    // ... return 1 = stop after draining the output.
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per single device output channel.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data to the device format, swap bytes if the device
      // is big-endian, then scatter channel-by-channel to the driver.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy straight from the user buffer
      // (already non-interleaved, matching ASIO's layout).
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Bytes per single device input channel.
    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Direct copy per channel into the user input buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen. While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
|
f@0
|
3476
|
f@0
|
3477 static void sampleRateChanged( ASIOSampleRate sRate )
|
f@0
|
3478 {
|
f@0
|
3479 // The ASIO documentation says that this usually only happens during
|
f@0
|
3480 // external sync. Audio processing is not stopped by the driver,
|
f@0
|
3481 // actual sample rate might not have even changed, maybe only the
|
f@0
|
3482 // sample rate status of an AES/EBU or S/PDIF digital input at the
|
f@0
|
3483 // audio device.
|
f@0
|
3484
|
f@0
|
3485 RtApi *object = (RtApi *) asioCallbackInfo->object;
|
f@0
|
3486 try {
|
f@0
|
3487 object->stopStream();
|
f@0
|
3488 }
|
f@0
|
3489 catch ( RtAudioError &exception ) {
|
f@0
|
3490 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
|
f@0
|
3491 return;
|
f@0
|
3492 }
|
f@0
|
3493
|
f@0
|
3494 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
|
f@0
|
3495 }
|
f@0
|
3496
|
f@0
|
3497 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
|
f@0
|
3498 {
|
f@0
|
3499 long ret = 0;
|
f@0
|
3500
|
f@0
|
3501 switch( selector ) {
|
f@0
|
3502 case kAsioSelectorSupported:
|
f@0
|
3503 if ( value == kAsioResetRequest
|
f@0
|
3504 || value == kAsioEngineVersion
|
f@0
|
3505 || value == kAsioResyncRequest
|
f@0
|
3506 || value == kAsioLatenciesChanged
|
f@0
|
3507 // The following three were added for ASIO 2.0, you don't
|
f@0
|
3508 // necessarily have to support them.
|
f@0
|
3509 || value == kAsioSupportsTimeInfo
|
f@0
|
3510 || value == kAsioSupportsTimeCode
|
f@0
|
3511 || value == kAsioSupportsInputMonitor)
|
f@0
|
3512 ret = 1L;
|
f@0
|
3513 break;
|
f@0
|
3514 case kAsioResetRequest:
|
f@0
|
3515 // Defer the task and perform the reset of the driver during the
|
f@0
|
3516 // next "safe" situation. You cannot reset the driver right now,
|
f@0
|
3517 // as this code is called from the driver. Reset the driver is
|
f@0
|
3518 // done by completely destruct is. I.e. ASIOStop(),
|
f@0
|
3519 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
|
f@0
|
3520 // driver again.
|
f@0
|
3521 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
|
f@0
|
3522 ret = 1L;
|
f@0
|
3523 break;
|
f@0
|
3524 case kAsioResyncRequest:
|
f@0
|
3525 // This informs the application that the driver encountered some
|
f@0
|
3526 // non-fatal data loss. It is used for synchronization purposes
|
f@0
|
3527 // of different media. Added mainly to work around the Win16Mutex
|
f@0
|
3528 // problems in Windows 95/98 with the Windows Multimedia system,
|
f@0
|
3529 // which could lose data because the Mutex was held too long by
|
f@0
|
3530 // another thread. However a driver can issue it in other
|
f@0
|
3531 // situations, too.
|
f@0
|
3532 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
|
f@0
|
3533 asioXRun = true;
|
f@0
|
3534 ret = 1L;
|
f@0
|
3535 break;
|
f@0
|
3536 case kAsioLatenciesChanged:
|
f@0
|
3537 // This will inform the host application that the drivers were
|
f@0
|
3538 // latencies changed. Beware, it this does not mean that the
|
f@0
|
3539 // buffer sizes have changed! You might need to update internal
|
f@0
|
3540 // delay data.
|
f@0
|
3541 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
|
f@0
|
3542 ret = 1L;
|
f@0
|
3543 break;
|
f@0
|
3544 case kAsioEngineVersion:
|
f@0
|
3545 // Return the supported ASIO version of the host application. If
|
f@0
|
3546 // a host application does not implement this selector, ASIO 1.0
|
f@0
|
3547 // is assumed by the driver.
|
f@0
|
3548 ret = 2L;
|
f@0
|
3549 break;
|
f@0
|
3550 case kAsioSupportsTimeInfo:
|
f@0
|
3551 // Informs the driver whether the
|
f@0
|
3552 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
|
f@0
|
3553 // For compatibility with ASIO 1.0 drivers the host application
|
f@0
|
3554 // should always support the "old" bufferSwitch method, too.
|
f@0
|
3555 ret = 0;
|
f@0
|
3556 break;
|
f@0
|
3557 case kAsioSupportsTimeCode:
|
f@0
|
3558 // Informs the driver whether application is interested in time
|
f@0
|
3559 // code info. If an application does not need to know about time
|
f@0
|
3560 // code, the driver has less work to do.
|
f@0
|
3561 ret = 0;
|
f@0
|
3562 break;
|
f@0
|
3563 }
|
f@0
|
3564 return ret;
|
f@0
|
3565 }
|
f@0
|
3566
|
f@0
|
3567 static const char* getAsioErrorString( ASIOError result )
|
f@0
|
3568 {
|
f@0
|
3569 struct Messages
|
f@0
|
3570 {
|
f@0
|
3571 ASIOError value;
|
f@0
|
3572 const char*message;
|
f@0
|
3573 };
|
f@0
|
3574
|
f@0
|
3575 static const Messages m[] =
|
f@0
|
3576 {
|
f@0
|
3577 { ASE_NotPresent, "Hardware input or output is not present or available." },
|
f@0
|
3578 { ASE_HWMalfunction, "Hardware is malfunctioning." },
|
f@0
|
3579 { ASE_InvalidParameter, "Invalid input parameter." },
|
f@0
|
3580 { ASE_InvalidMode, "Invalid mode." },
|
f@0
|
3581 { ASE_SPNotAdvancing, "Sample position not advancing." },
|
f@0
|
3582 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
|
f@0
|
3583 { ASE_NoMemory, "Not enough memory to complete the request." }
|
f@0
|
3584 };
|
f@0
|
3585
|
f@0
|
3586 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
|
f@0
|
3587 if ( m[i].value == result ) return m[i].message;
|
f@0
|
3588
|
f@0
|
3589 return "Unknown error.";
|
f@0
|
3590 }
|
f@0
|
3591
|
f@0
|
3592 //******************** End of __WINDOWS_ASIO__ *********************//
|
f@0
|
3593 #endif
|
f@0
|
3594
|
f@0
|
3595
|
f@0
|
3596 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
|
f@0
|
3597
|
f@0
|
3598 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
|
f@0
|
3599 // - Introduces support for the Windows WASAPI API
|
f@0
|
3600 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
|
f@0
|
3601 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
|
f@0
|
3602 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
|
f@0
|
3603
|
f@0
|
3604 #ifndef INITGUID
|
f@0
|
3605 #define INITGUID
|
f@0
|
3606 #endif
|
f@0
|
3607 #include <audioclient.h>
|
f@0
|
3608 #include <avrt.h>
|
f@0
|
3609 #include <mmdeviceapi.h>
|
f@0
|
3610 #include <functiondiscoverykeys_devpkey.h>
|
f@0
|
3611
|
f@0
|
3612 //=============================================================================
|
f@0
|
3613
|
f@0
|
// Release a COM interface pointer and reset it to NULL if it is non-null.
// NOTE: deliberately a bare if-block rather than the usual do { } while(0)
// wrapper -- call sites in this file (e.g. RtApiWasapi::closeStream) invoke
// SAFE_RELEASE(...) without a trailing semicolon and rely on the braces
// forming a complete statement.  Do not "fix" the wrapper without also
// updating every call site.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}

// Function-pointer type matching AvSetMmThreadCharacteristics (avrt.dll),
// used to register the stream thread with MMCSS scheduling (presumably
// resolved at runtime via GetProcAddress -- confirm against the thread code).
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
|
f@0
|
3622
|
f@0
|
3623 //-----------------------------------------------------------------------------
|
f@0
|
3624
|
f@0
|
3625 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
|
f@0
|
3626 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
|
f@0
|
3627 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
|
f@0
|
3628 // provide intermediate storage for read / write synchronization.
|
f@0
|
3629 class WasapiBuffer
|
f@0
|
3630 {
|
f@0
|
3631 public:
|
f@0
|
3632 WasapiBuffer()
|
f@0
|
3633 : buffer_( NULL ),
|
f@0
|
3634 bufferSize_( 0 ),
|
f@0
|
3635 inIndex_( 0 ),
|
f@0
|
3636 outIndex_( 0 ) {}
|
f@0
|
3637
|
f@0
|
3638 ~WasapiBuffer() {
|
f@0
|
3639 delete buffer_;
|
f@0
|
3640 }
|
f@0
|
3641
|
f@0
|
3642 // sets the length of the internal ring buffer
|
f@0
|
3643 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
|
f@0
|
3644 delete buffer_;
|
f@0
|
3645
|
f@0
|
3646 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
|
f@0
|
3647
|
f@0
|
3648 bufferSize_ = bufferSize;
|
f@0
|
3649 inIndex_ = 0;
|
f@0
|
3650 outIndex_ = 0;
|
f@0
|
3651 }
|
f@0
|
3652
|
f@0
|
3653 // attempt to push a buffer into the ring buffer at the current "in" index
|
f@0
|
3654 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
|
f@0
|
3655 {
|
f@0
|
3656 if ( !buffer || // incoming buffer is NULL
|
f@0
|
3657 bufferSize == 0 || // incoming buffer has no data
|
f@0
|
3658 bufferSize > bufferSize_ ) // incoming buffer too large
|
f@0
|
3659 {
|
f@0
|
3660 return false;
|
f@0
|
3661 }
|
f@0
|
3662
|
f@0
|
3663 unsigned int relOutIndex = outIndex_;
|
f@0
|
3664 unsigned int inIndexEnd = inIndex_ + bufferSize;
|
f@0
|
3665 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
|
f@0
|
3666 relOutIndex += bufferSize_;
|
f@0
|
3667 }
|
f@0
|
3668
|
f@0
|
3669 // "in" index can end on the "out" index but cannot begin at it
|
f@0
|
3670 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
|
f@0
|
3671 return false; // not enough space between "in" index and "out" index
|
f@0
|
3672 }
|
f@0
|
3673
|
f@0
|
3674 // copy buffer from external to internal
|
f@0
|
3675 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
|
f@0
|
3676 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
|
f@0
|
3677 int fromInSize = bufferSize - fromZeroSize;
|
f@0
|
3678
|
f@0
|
3679 switch( format )
|
f@0
|
3680 {
|
f@0
|
3681 case RTAUDIO_SINT8:
|
f@0
|
3682 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
|
f@0
|
3683 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
|
f@0
|
3684 break;
|
f@0
|
3685 case RTAUDIO_SINT16:
|
f@0
|
3686 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
|
f@0
|
3687 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
|
f@0
|
3688 break;
|
f@0
|
3689 case RTAUDIO_SINT24:
|
f@0
|
3690 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
|
f@0
|
3691 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
|
f@0
|
3692 break;
|
f@0
|
3693 case RTAUDIO_SINT32:
|
f@0
|
3694 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
|
f@0
|
3695 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
|
f@0
|
3696 break;
|
f@0
|
3697 case RTAUDIO_FLOAT32:
|
f@0
|
3698 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
|
f@0
|
3699 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
|
f@0
|
3700 break;
|
f@0
|
3701 case RTAUDIO_FLOAT64:
|
f@0
|
3702 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
|
f@0
|
3703 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
|
f@0
|
3704 break;
|
f@0
|
3705 }
|
f@0
|
3706
|
f@0
|
3707 // update "in" index
|
f@0
|
3708 inIndex_ += bufferSize;
|
f@0
|
3709 inIndex_ %= bufferSize_;
|
f@0
|
3710
|
f@0
|
3711 return true;
|
f@0
|
3712 }
|
f@0
|
3713
|
f@0
|
3714 // attempt to pull a buffer from the ring buffer from the current "out" index
|
f@0
|
3715 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
|
f@0
|
3716 {
|
f@0
|
3717 if ( !buffer || // incoming buffer is NULL
|
f@0
|
3718 bufferSize == 0 || // incoming buffer has no data
|
f@0
|
3719 bufferSize > bufferSize_ ) // incoming buffer too large
|
f@0
|
3720 {
|
f@0
|
3721 return false;
|
f@0
|
3722 }
|
f@0
|
3723
|
f@0
|
3724 unsigned int relInIndex = inIndex_;
|
f@0
|
3725 unsigned int outIndexEnd = outIndex_ + bufferSize;
|
f@0
|
3726 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
|
f@0
|
3727 relInIndex += bufferSize_;
|
f@0
|
3728 }
|
f@0
|
3729
|
f@0
|
3730 // "out" index can begin at and end on the "in" index
|
f@0
|
3731 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
|
f@0
|
3732 return false; // not enough space between "out" index and "in" index
|
f@0
|
3733 }
|
f@0
|
3734
|
f@0
|
3735 // copy buffer from internal to external
|
f@0
|
3736 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
|
f@0
|
3737 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
|
f@0
|
3738 int fromOutSize = bufferSize - fromZeroSize;
|
f@0
|
3739
|
f@0
|
3740 switch( format )
|
f@0
|
3741 {
|
f@0
|
3742 case RTAUDIO_SINT8:
|
f@0
|
3743 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
|
f@0
|
3744 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
|
f@0
|
3745 break;
|
f@0
|
3746 case RTAUDIO_SINT16:
|
f@0
|
3747 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
|
f@0
|
3748 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
|
f@0
|
3749 break;
|
f@0
|
3750 case RTAUDIO_SINT24:
|
f@0
|
3751 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
|
f@0
|
3752 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
|
f@0
|
3753 break;
|
f@0
|
3754 case RTAUDIO_SINT32:
|
f@0
|
3755 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
|
f@0
|
3756 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
|
f@0
|
3757 break;
|
f@0
|
3758 case RTAUDIO_FLOAT32:
|
f@0
|
3759 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
|
f@0
|
3760 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
|
f@0
|
3761 break;
|
f@0
|
3762 case RTAUDIO_FLOAT64:
|
f@0
|
3763 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
|
f@0
|
3764 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
|
f@0
|
3765 break;
|
f@0
|
3766 }
|
f@0
|
3767
|
f@0
|
3768 // update "out" index
|
f@0
|
3769 outIndex_ += bufferSize;
|
f@0
|
3770 outIndex_ %= bufferSize_;
|
f@0
|
3771
|
f@0
|
3772 return true;
|
f@0
|
3773 }
|
f@0
|
3774
|
f@0
|
3775 private:
|
f@0
|
3776 char* buffer_;
|
f@0
|
3777 unsigned int bufferSize_;
|
f@0
|
3778 unsigned int inIndex_;
|
f@0
|
3779 unsigned int outIndex_;
|
f@0
|
3780 };
|
f@0
|
3781
|
f@0
|
3782 //-----------------------------------------------------------------------------
|
f@0
|
3783
|
f@0
|
3784 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
|
f@0
|
3785 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
|
f@0
|
3786 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
|
f@0
|
3787 // This sample rate converter favors speed over quality, and works best with conversions between
|
f@0
|
3788 // one rate and its multiple.
|
f@0
|
3789 void convertBufferWasapi( char* outBuffer,
|
f@0
|
3790 const char* inBuffer,
|
f@0
|
3791 const unsigned int& channelCount,
|
f@0
|
3792 const unsigned int& inSampleRate,
|
f@0
|
3793 const unsigned int& outSampleRate,
|
f@0
|
3794 const unsigned int& inSampleCount,
|
f@0
|
3795 unsigned int& outSampleCount,
|
f@0
|
3796 const RtAudioFormat& format )
|
f@0
|
3797 {
|
f@0
|
3798 // calculate the new outSampleCount and relative sampleStep
|
f@0
|
3799 float sampleRatio = ( float ) outSampleRate / inSampleRate;
|
f@0
|
3800 float sampleStep = 1.0f / sampleRatio;
|
f@0
|
3801 float inSampleFraction = 0.0f;
|
f@0
|
3802
|
f@0
|
3803 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
|
f@0
|
3804
|
f@0
|
3805 // frame-by-frame, copy each relative input sample into it's corresponding output sample
|
f@0
|
3806 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
|
f@0
|
3807 {
|
f@0
|
3808 unsigned int inSample = ( unsigned int ) inSampleFraction;
|
f@0
|
3809
|
f@0
|
3810 switch ( format )
|
f@0
|
3811 {
|
f@0
|
3812 case RTAUDIO_SINT8:
|
f@0
|
3813 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
|
f@0
|
3814 break;
|
f@0
|
3815 case RTAUDIO_SINT16:
|
f@0
|
3816 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
|
f@0
|
3817 break;
|
f@0
|
3818 case RTAUDIO_SINT24:
|
f@0
|
3819 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
|
f@0
|
3820 break;
|
f@0
|
3821 case RTAUDIO_SINT32:
|
f@0
|
3822 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
|
f@0
|
3823 break;
|
f@0
|
3824 case RTAUDIO_FLOAT32:
|
f@0
|
3825 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
|
f@0
|
3826 break;
|
f@0
|
3827 case RTAUDIO_FLOAT64:
|
f@0
|
3828 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
|
f@0
|
3829 break;
|
f@0
|
3830 }
|
f@0
|
3831
|
f@0
|
3832 // jump to next in sample
|
f@0
|
3833 inSampleFraction += sampleStep;
|
f@0
|
3834 }
|
f@0
|
3835 }
|
f@0
|
3836
|
f@0
|
3837 //-----------------------------------------------------------------------------
|
f@0
|
3838
|
f@0
|
3839 // A structure to hold various information related to the WASAPI implementation.
|
f@0
|
struct WasapiHandle
{
  IAudioClient* captureAudioClient;    // audio client for the capture (input) endpoint; NULL if unused
  IAudioClient* renderAudioClient;     // audio client for the render (output) endpoint; NULL if unused
  IAudioCaptureClient* captureClient;  // capture buffer service (presumably obtained from captureAudioClient -- confirm at the setup site)
  IAudioRenderClient* renderClient;    // render buffer service (presumably obtained from renderAudioClient -- confirm at the setup site)
  HANDLE captureEvent;                 // event handle associated with the capture side
  HANDLE renderEvent;                  // event handle associated with the render side

  // All members start NULL; RtApiWasapi::closeStream() releases the COM
  // interfaces and closes the event handles, then deletes this struct.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
|
f@0
|
3857
|
f@0
|
3858 //=============================================================================
|
f@0
|
3859
|
f@0
|
3860 RtApiWasapi::RtApiWasapi()
|
f@0
|
3861 : coInitialized_( false ), deviceEnumerator_( NULL )
|
f@0
|
3862 {
|
f@0
|
3863 // WASAPI can run either apartment or multi-threaded
|
f@0
|
3864 HRESULT hr = CoInitialize( NULL );
|
f@0
|
3865 if ( !FAILED( hr ) )
|
f@0
|
3866 coInitialized_ = true;
|
f@0
|
3867
|
f@0
|
3868 // Instantiate device enumerator
|
f@0
|
3869 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
|
f@0
|
3870 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
|
f@0
|
3871 ( void** ) &deviceEnumerator_ );
|
f@0
|
3872
|
f@0
|
3873 if ( FAILED( hr ) ) {
|
f@0
|
3874 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
|
f@0
|
3875 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
3876 }
|
f@0
|
3877 }
|
f@0
|
3878
|
f@0
|
3879 //-----------------------------------------------------------------------------
|
f@0
|
3880
|
f@0
|
// Tear down the WASAPI backend: close any open stream, release the device
// enumerator, and balance the constructor's CoInitialize() call.
RtApiWasapi::~RtApiWasapi()
{
  // Shut the stream down first -- closeStream() releases COM objects that
  // must go before CoUninitialize().
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
|
f@0
|
3892
|
f@0
|
3893 //=============================================================================
|
f@0
|
3894
|
f@0
|
3895 unsigned int RtApiWasapi::getDeviceCount( void )
|
f@0
|
3896 {
|
f@0
|
3897 unsigned int captureDeviceCount = 0;
|
f@0
|
3898 unsigned int renderDeviceCount = 0;
|
f@0
|
3899
|
f@0
|
3900 IMMDeviceCollection* captureDevices = NULL;
|
f@0
|
3901 IMMDeviceCollection* renderDevices = NULL;
|
f@0
|
3902
|
f@0
|
3903 // Count capture devices
|
f@0
|
3904 errorText_.clear();
|
f@0
|
3905 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
|
f@0
|
3906 if ( FAILED( hr ) ) {
|
f@0
|
3907 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
|
f@0
|
3908 goto Exit;
|
f@0
|
3909 }
|
f@0
|
3910
|
f@0
|
3911 hr = captureDevices->GetCount( &captureDeviceCount );
|
f@0
|
3912 if ( FAILED( hr ) ) {
|
f@0
|
3913 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
|
f@0
|
3914 goto Exit;
|
f@0
|
3915 }
|
f@0
|
3916
|
f@0
|
3917 // Count render devices
|
f@0
|
3918 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
|
f@0
|
3919 if ( FAILED( hr ) ) {
|
f@0
|
3920 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
|
f@0
|
3921 goto Exit;
|
f@0
|
3922 }
|
f@0
|
3923
|
f@0
|
3924 hr = renderDevices->GetCount( &renderDeviceCount );
|
f@0
|
3925 if ( FAILED( hr ) ) {
|
f@0
|
3926 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
|
f@0
|
3927 goto Exit;
|
f@0
|
3928 }
|
f@0
|
3929
|
f@0
|
3930 Exit:
|
f@0
|
3931 // release all references
|
f@0
|
3932 SAFE_RELEASE( captureDevices );
|
f@0
|
3933 SAFE_RELEASE( renderDevices );
|
f@0
|
3934
|
f@0
|
3935 if ( errorText_.empty() )
|
f@0
|
3936 return captureDeviceCount + renderDeviceCount;
|
f@0
|
3937
|
f@0
|
3938 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
3939 return 0;
|
f@0
|
3940 }
|
f@0
|
3941
|
f@0
|
3942 //-----------------------------------------------------------------------------
|
f@0
|
3943
|
f@0
|
3944 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
|
f@0
|
3945 {
|
f@0
|
3946 RtAudio::DeviceInfo info;
|
f@0
|
3947 unsigned int captureDeviceCount = 0;
|
f@0
|
3948 unsigned int renderDeviceCount = 0;
|
f@0
|
3949 std::wstring deviceName;
|
f@0
|
3950 std::string defaultDeviceName;
|
f@0
|
3951 bool isCaptureDevice = false;
|
f@0
|
3952
|
f@0
|
3953 PROPVARIANT deviceNameProp;
|
f@0
|
3954 PROPVARIANT defaultDeviceNameProp;
|
f@0
|
3955
|
f@0
|
3956 IMMDeviceCollection* captureDevices = NULL;
|
f@0
|
3957 IMMDeviceCollection* renderDevices = NULL;
|
f@0
|
3958 IMMDevice* devicePtr = NULL;
|
f@0
|
3959 IMMDevice* defaultDevicePtr = NULL;
|
f@0
|
3960 IAudioClient* audioClient = NULL;
|
f@0
|
3961 IPropertyStore* devicePropStore = NULL;
|
f@0
|
3962 IPropertyStore* defaultDevicePropStore = NULL;
|
f@0
|
3963
|
f@0
|
3964 WAVEFORMATEX* deviceFormat = NULL;
|
f@0
|
3965 WAVEFORMATEX* closestMatchFormat = NULL;
|
f@0
|
3966
|
f@0
|
3967 // probed
|
f@0
|
3968 info.probed = false;
|
f@0
|
3969
|
f@0
|
3970 // Count capture devices
|
f@0
|
3971 errorText_.clear();
|
f@0
|
3972 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
|
f@0
|
3973 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
|
f@0
|
3974 if ( FAILED( hr ) ) {
|
f@0
|
3975 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
|
f@0
|
3976 goto Exit;
|
f@0
|
3977 }
|
f@0
|
3978
|
f@0
|
3979 hr = captureDevices->GetCount( &captureDeviceCount );
|
f@0
|
3980 if ( FAILED( hr ) ) {
|
f@0
|
3981 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
|
f@0
|
3982 goto Exit;
|
f@0
|
3983 }
|
f@0
|
3984
|
f@0
|
3985 // Count render devices
|
f@0
|
3986 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
|
f@0
|
3987 if ( FAILED( hr ) ) {
|
f@0
|
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
|
f@0
|
3989 goto Exit;
|
f@0
|
3990 }
|
f@0
|
3991
|
f@0
|
3992 hr = renderDevices->GetCount( &renderDeviceCount );
|
f@0
|
3993 if ( FAILED( hr ) ) {
|
f@0
|
3994 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
|
f@0
|
3995 goto Exit;
|
f@0
|
3996 }
|
f@0
|
3997
|
f@0
|
3998 // validate device index
|
f@0
|
3999 if ( device >= captureDeviceCount + renderDeviceCount ) {
|
f@0
|
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
|
f@0
|
4001 errorType = RtAudioError::INVALID_USE;
|
f@0
|
4002 goto Exit;
|
f@0
|
4003 }
|
f@0
|
4004
|
f@0
|
4005 // determine whether index falls within capture or render devices
|
f@0
|
4006 if ( device >= renderDeviceCount ) {
|
f@0
|
4007 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
|
f@0
|
4008 if ( FAILED( hr ) ) {
|
f@0
|
4009 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
|
f@0
|
4010 goto Exit;
|
f@0
|
4011 }
|
f@0
|
4012 isCaptureDevice = true;
|
f@0
|
4013 }
|
f@0
|
4014 else {
|
f@0
|
4015 hr = renderDevices->Item( device, &devicePtr );
|
f@0
|
4016 if ( FAILED( hr ) ) {
|
f@0
|
4017 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
|
f@0
|
4018 goto Exit;
|
f@0
|
4019 }
|
f@0
|
4020 isCaptureDevice = false;
|
f@0
|
4021 }
|
f@0
|
4022
|
f@0
|
4023 // get default device name
|
f@0
|
4024 if ( isCaptureDevice ) {
|
f@0
|
4025 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
|
f@0
|
4026 if ( FAILED( hr ) ) {
|
f@0
|
4027 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
|
f@0
|
4028 goto Exit;
|
f@0
|
4029 }
|
f@0
|
4030 }
|
f@0
|
4031 else {
|
f@0
|
4032 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
|
f@0
|
4033 if ( FAILED( hr ) ) {
|
f@0
|
4034 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
|
f@0
|
4035 goto Exit;
|
f@0
|
4036 }
|
f@0
|
4037 }
|
f@0
|
4038
|
f@0
|
4039 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
|
f@0
|
4040 if ( FAILED( hr ) ) {
|
f@0
|
4041 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
|
f@0
|
4042 goto Exit;
|
f@0
|
4043 }
|
f@0
|
4044 PropVariantInit( &defaultDeviceNameProp );
|
f@0
|
4045
|
f@0
|
4046 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
|
f@0
|
4047 if ( FAILED( hr ) ) {
|
f@0
|
4048 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
|
f@0
|
4049 goto Exit;
|
f@0
|
4050 }
|
f@0
|
4051
|
f@0
|
4052 deviceName = defaultDeviceNameProp.pwszVal;
|
f@0
|
4053 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
|
f@0
|
4054
|
f@0
|
4055 // name
|
f@0
|
4056 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
|
f@0
|
4057 if ( FAILED( hr ) ) {
|
f@0
|
4058 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
|
f@0
|
4059 goto Exit;
|
f@0
|
4060 }
|
f@0
|
4061
|
f@0
|
4062 PropVariantInit( &deviceNameProp );
|
f@0
|
4063
|
f@0
|
4064 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
|
f@0
|
4065 if ( FAILED( hr ) ) {
|
f@0
|
4066 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
|
f@0
|
4067 goto Exit;
|
f@0
|
4068 }
|
f@0
|
4069
|
f@0
|
4070 deviceName = deviceNameProp.pwszVal;
|
f@0
|
4071 info.name = std::string( deviceName.begin(), deviceName.end() );
|
f@0
|
4072
|
f@0
|
4073 // is default
|
f@0
|
4074 if ( isCaptureDevice ) {
|
f@0
|
4075 info.isDefaultInput = info.name == defaultDeviceName;
|
f@0
|
4076 info.isDefaultOutput = false;
|
f@0
|
4077 }
|
f@0
|
4078 else {
|
f@0
|
4079 info.isDefaultInput = false;
|
f@0
|
4080 info.isDefaultOutput = info.name == defaultDeviceName;
|
f@0
|
4081 }
|
f@0
|
4082
|
f@0
|
4083 // channel count
|
f@0
|
4084 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
|
f@0
|
4085 if ( FAILED( hr ) ) {
|
f@0
|
4086 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
|
f@0
|
4087 goto Exit;
|
f@0
|
4088 }
|
f@0
|
4089
|
f@0
|
4090 hr = audioClient->GetMixFormat( &deviceFormat );
|
f@0
|
4091 if ( FAILED( hr ) ) {
|
f@0
|
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
|
f@0
|
4093 goto Exit;
|
f@0
|
4094 }
|
f@0
|
4095
|
f@0
|
4096 if ( isCaptureDevice ) {
|
f@0
|
4097 info.inputChannels = deviceFormat->nChannels;
|
f@0
|
4098 info.outputChannels = 0;
|
f@0
|
4099 info.duplexChannels = 0;
|
f@0
|
4100 }
|
f@0
|
4101 else {
|
f@0
|
4102 info.inputChannels = 0;
|
f@0
|
4103 info.outputChannels = deviceFormat->nChannels;
|
f@0
|
4104 info.duplexChannels = 0;
|
f@0
|
4105 }
|
f@0
|
4106
|
f@0
|
4107 // sample rates
|
f@0
|
4108 info.sampleRates.clear();
|
f@0
|
4109
|
f@0
|
4110 // allow support for all sample rates as we have a built-in sample rate converter
|
f@0
|
4111 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
|
f@0
|
4112 info.sampleRates.push_back( SAMPLE_RATES[i] );
|
f@0
|
4113 }
|
f@0
|
4114
|
f@0
|
4115 // native format
|
f@0
|
4116 info.nativeFormats = 0;
|
f@0
|
4117
|
f@0
|
4118 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
|
f@0
|
4119 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
|
f@0
|
4120 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
|
f@0
|
4121 {
|
f@0
|
4122 if ( deviceFormat->wBitsPerSample == 32 ) {
|
f@0
|
4123 info.nativeFormats |= RTAUDIO_FLOAT32;
|
f@0
|
4124 }
|
f@0
|
4125 else if ( deviceFormat->wBitsPerSample == 64 ) {
|
f@0
|
4126 info.nativeFormats |= RTAUDIO_FLOAT64;
|
f@0
|
4127 }
|
f@0
|
4128 }
|
f@0
|
4129 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
|
f@0
|
4130 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
|
f@0
|
4131 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
|
f@0
|
4132 {
|
f@0
|
4133 if ( deviceFormat->wBitsPerSample == 8 ) {
|
f@0
|
4134 info.nativeFormats |= RTAUDIO_SINT8;
|
f@0
|
4135 }
|
f@0
|
4136 else if ( deviceFormat->wBitsPerSample == 16 ) {
|
f@0
|
4137 info.nativeFormats |= RTAUDIO_SINT16;
|
f@0
|
4138 }
|
f@0
|
4139 else if ( deviceFormat->wBitsPerSample == 24 ) {
|
f@0
|
4140 info.nativeFormats |= RTAUDIO_SINT24;
|
f@0
|
4141 }
|
f@0
|
4142 else if ( deviceFormat->wBitsPerSample == 32 ) {
|
f@0
|
4143 info.nativeFormats |= RTAUDIO_SINT32;
|
f@0
|
4144 }
|
f@0
|
4145 }
|
f@0
|
4146
|
f@0
|
4147 // probed
|
f@0
|
4148 info.probed = true;
|
f@0
|
4149
|
f@0
|
4150 Exit:
|
f@0
|
4151 // release all references
|
f@0
|
4152 PropVariantClear( &deviceNameProp );
|
f@0
|
4153 PropVariantClear( &defaultDeviceNameProp );
|
f@0
|
4154
|
f@0
|
4155 SAFE_RELEASE( captureDevices );
|
f@0
|
4156 SAFE_RELEASE( renderDevices );
|
f@0
|
4157 SAFE_RELEASE( devicePtr );
|
f@0
|
4158 SAFE_RELEASE( defaultDevicePtr );
|
f@0
|
4159 SAFE_RELEASE( audioClient );
|
f@0
|
4160 SAFE_RELEASE( devicePropStore );
|
f@0
|
4161 SAFE_RELEASE( defaultDevicePropStore );
|
f@0
|
4162
|
f@0
|
4163 CoTaskMemFree( deviceFormat );
|
f@0
|
4164 CoTaskMemFree( closestMatchFormat );
|
f@0
|
4165
|
f@0
|
4166 if ( !errorText_.empty() )
|
f@0
|
4167 error( errorType );
|
f@0
|
4168 return info;
|
f@0
|
4169 }
|
f@0
|
4170
|
f@0
|
4171 //-----------------------------------------------------------------------------
|
f@0
|
4172
|
f@0
|
4173 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
|
f@0
|
4174 {
|
f@0
|
4175 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
|
f@0
|
4176 if ( getDeviceInfo( i ).isDefaultOutput ) {
|
f@0
|
4177 return i;
|
f@0
|
4178 }
|
f@0
|
4179 }
|
f@0
|
4180
|
f@0
|
4181 return 0;
|
f@0
|
4182 }
|
f@0
|
4183
|
f@0
|
4184 //-----------------------------------------------------------------------------
|
f@0
|
4185
|
f@0
|
4186 unsigned int RtApiWasapi::getDefaultInputDevice( void )
|
f@0
|
4187 {
|
f@0
|
4188 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
|
f@0
|
4189 if ( getDeviceInfo( i ).isDefaultInput ) {
|
f@0
|
4190 return i;
|
f@0
|
4191 }
|
f@0
|
4192 }
|
f@0
|
4193
|
f@0
|
4194 return 0;
|
f@0
|
4195 }
|
f@0
|
4196
|
f@0
|
4197 //-----------------------------------------------------------------------------
|
f@0
|
4198
|
f@0
|
4199 void RtApiWasapi::closeStream( void )
|
f@0
|
4200 {
|
f@0
|
4201 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
4202 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
|
f@0
|
4203 error( RtAudioError::WARNING );
|
f@0
|
4204 return;
|
f@0
|
4205 }
|
f@0
|
4206
|
f@0
|
4207 if ( stream_.state != STREAM_STOPPED )
|
f@0
|
4208 stopStream();
|
f@0
|
4209
|
f@0
|
4210 // clean up stream memory
|
f@0
|
4211 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
|
f@0
|
4212 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
|
f@0
|
4213
|
f@0
|
4214 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
|
f@0
|
4215 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
|
f@0
|
4216
|
f@0
|
4217 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
|
f@0
|
4218 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
|
f@0
|
4219
|
f@0
|
4220 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
|
f@0
|
4221 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
|
f@0
|
4222
|
f@0
|
4223 delete ( WasapiHandle* ) stream_.apiHandle;
|
f@0
|
4224 stream_.apiHandle = NULL;
|
f@0
|
4225
|
f@0
|
4226 for ( int i = 0; i < 2; i++ ) {
|
f@0
|
4227 if ( stream_.userBuffer[i] ) {
|
f@0
|
4228 free( stream_.userBuffer[i] );
|
f@0
|
4229 stream_.userBuffer[i] = 0;
|
f@0
|
4230 }
|
f@0
|
4231 }
|
f@0
|
4232
|
f@0
|
4233 if ( stream_.deviceBuffer ) {
|
f@0
|
4234 free( stream_.deviceBuffer );
|
f@0
|
4235 stream_.deviceBuffer = 0;
|
f@0
|
4236 }
|
f@0
|
4237
|
f@0
|
4238 // update stream state
|
f@0
|
4239 stream_.state = STREAM_CLOSED;
|
f@0
|
4240 }
|
f@0
|
4241
|
f@0
|
4242 //-----------------------------------------------------------------------------
|
f@0
|
4243
|
f@0
|
4244 void RtApiWasapi::startStream( void )
|
f@0
|
4245 {
|
f@0
|
4246 verifyStream();
|
f@0
|
4247
|
f@0
|
4248 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
4249 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
|
f@0
|
4250 error( RtAudioError::WARNING );
|
f@0
|
4251 return;
|
f@0
|
4252 }
|
f@0
|
4253
|
f@0
|
4254 // update stream state
|
f@0
|
4255 stream_.state = STREAM_RUNNING;
|
f@0
|
4256
|
f@0
|
4257 // create WASAPI stream thread
|
f@0
|
4258 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
|
f@0
|
4259
|
f@0
|
4260 if ( !stream_.callbackInfo.thread ) {
|
f@0
|
4261 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
|
f@0
|
4262 error( RtAudioError::THREAD_ERROR );
|
f@0
|
4263 }
|
f@0
|
4264 else {
|
f@0
|
4265 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
|
f@0
|
4266 ResumeThread( ( void* ) stream_.callbackInfo.thread );
|
f@0
|
4267 }
|
f@0
|
4268 }
|
f@0
|
4269
|
f@0
|
4270 //-----------------------------------------------------------------------------
|
f@0
|
4271
|
f@0
|
4272 void RtApiWasapi::stopStream( void )
|
f@0
|
4273 {
|
f@0
|
4274 verifyStream();
|
f@0
|
4275
|
f@0
|
4276 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
4277 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
|
f@0
|
4278 error( RtAudioError::WARNING );
|
f@0
|
4279 return;
|
f@0
|
4280 }
|
f@0
|
4281
|
f@0
|
4282 // inform stream thread by setting stream state to STREAM_STOPPING
|
f@0
|
4283 stream_.state = STREAM_STOPPING;
|
f@0
|
4284
|
f@0
|
4285 // wait until stream thread is stopped
|
f@0
|
4286 while( stream_.state != STREAM_STOPPED ) {
|
f@0
|
4287 Sleep( 1 );
|
f@0
|
4288 }
|
f@0
|
4289
|
f@0
|
4290 // Wait for the last buffer to play before stopping.
|
f@0
|
4291 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
|
f@0
|
4292
|
f@0
|
4293 // stop capture client if applicable
|
f@0
|
4294 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
|
f@0
|
4295 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
|
f@0
|
4296 if ( FAILED( hr ) ) {
|
f@0
|
4297 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
|
f@0
|
4298 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
4299 return;
|
f@0
|
4300 }
|
f@0
|
4301 }
|
f@0
|
4302
|
f@0
|
4303 // stop render client if applicable
|
f@0
|
4304 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
|
f@0
|
4305 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
|
f@0
|
4306 if ( FAILED( hr ) ) {
|
f@0
|
4307 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
|
f@0
|
4308 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
4309 return;
|
f@0
|
4310 }
|
f@0
|
4311 }
|
f@0
|
4312
|
f@0
|
4313 // close thread handle
|
f@0
|
4314 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
|
f@0
|
4315 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
|
f@0
|
4316 error( RtAudioError::THREAD_ERROR );
|
f@0
|
4317 return;
|
f@0
|
4318 }
|
f@0
|
4319
|
f@0
|
4320 stream_.callbackInfo.thread = (ThreadHandle) NULL;
|
f@0
|
4321 }
|
f@0
|
4322
|
f@0
|
4323 //-----------------------------------------------------------------------------
|
f@0
|
4324
|
f@0
|
4325 void RtApiWasapi::abortStream( void )
|
f@0
|
4326 {
|
f@0
|
4327 verifyStream();
|
f@0
|
4328
|
f@0
|
4329 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
4330 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
|
f@0
|
4331 error( RtAudioError::WARNING );
|
f@0
|
4332 return;
|
f@0
|
4333 }
|
f@0
|
4334
|
f@0
|
4335 // inform stream thread by setting stream state to STREAM_STOPPING
|
f@0
|
4336 stream_.state = STREAM_STOPPING;
|
f@0
|
4337
|
f@0
|
4338 // wait until stream thread is stopped
|
f@0
|
4339 while ( stream_.state != STREAM_STOPPED ) {
|
f@0
|
4340 Sleep( 1 );
|
f@0
|
4341 }
|
f@0
|
4342
|
f@0
|
4343 // stop capture client if applicable
|
f@0
|
4344 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
|
f@0
|
4345 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
|
f@0
|
4346 if ( FAILED( hr ) ) {
|
f@0
|
4347 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
|
f@0
|
4348 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
4349 return;
|
f@0
|
4350 }
|
f@0
|
4351 }
|
f@0
|
4352
|
f@0
|
4353 // stop render client if applicable
|
f@0
|
4354 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
|
f@0
|
4355 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
|
f@0
|
4356 if ( FAILED( hr ) ) {
|
f@0
|
4357 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
|
f@0
|
4358 error( RtAudioError::DRIVER_ERROR );
|
f@0
|
4359 return;
|
f@0
|
4360 }
|
f@0
|
4361 }
|
f@0
|
4362
|
f@0
|
4363 // close thread handle
|
f@0
|
4364 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
|
f@0
|
4365 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
|
f@0
|
4366 error( RtAudioError::THREAD_ERROR );
|
f@0
|
4367 return;
|
f@0
|
4368 }
|
f@0
|
4369
|
f@0
|
4370 stream_.callbackInfo.thread = (ThreadHandle) NULL;
|
f@0
|
4371 }
|
f@0
|
4372
|
f@0
|
4373 //-----------------------------------------------------------------------------
|
f@0
|
4374
|
f@0
|
4375 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
4376 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
4377 RtAudioFormat format, unsigned int* bufferSize,
|
f@0
|
4378 RtAudio::StreamOptions* options )
|
f@0
|
4379 {
|
f@0
|
4380 bool methodResult = FAILURE;
|
f@0
|
4381 unsigned int captureDeviceCount = 0;
|
f@0
|
4382 unsigned int renderDeviceCount = 0;
|
f@0
|
4383
|
f@0
|
4384 IMMDeviceCollection* captureDevices = NULL;
|
f@0
|
4385 IMMDeviceCollection* renderDevices = NULL;
|
f@0
|
4386 IMMDevice* devicePtr = NULL;
|
f@0
|
4387 WAVEFORMATEX* deviceFormat = NULL;
|
f@0
|
4388 unsigned int bufferBytes;
|
f@0
|
4389 stream_.state = STREAM_STOPPED;
|
f@0
|
4390
|
f@0
|
4391 // create API Handle if not already created
|
f@0
|
4392 if ( !stream_.apiHandle )
|
f@0
|
4393 stream_.apiHandle = ( void* ) new WasapiHandle();
|
f@0
|
4394
|
f@0
|
4395 // Count capture devices
|
f@0
|
4396 errorText_.clear();
|
f@0
|
4397 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
|
f@0
|
4398 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
|
f@0
|
4399 if ( FAILED( hr ) ) {
|
f@0
|
4400 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
|
f@0
|
4401 goto Exit;
|
f@0
|
4402 }
|
f@0
|
4403
|
f@0
|
4404 hr = captureDevices->GetCount( &captureDeviceCount );
|
f@0
|
4405 if ( FAILED( hr ) ) {
|
f@0
|
4406 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
|
f@0
|
4407 goto Exit;
|
f@0
|
4408 }
|
f@0
|
4409
|
f@0
|
4410 // Count render devices
|
f@0
|
4411 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
|
f@0
|
4412 if ( FAILED( hr ) ) {
|
f@0
|
4413 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
|
f@0
|
4414 goto Exit;
|
f@0
|
4415 }
|
f@0
|
4416
|
f@0
|
4417 hr = renderDevices->GetCount( &renderDeviceCount );
|
f@0
|
4418 if ( FAILED( hr ) ) {
|
f@0
|
4419 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
|
f@0
|
4420 goto Exit;
|
f@0
|
4421 }
|
f@0
|
4422
|
f@0
|
4423 // validate device index
|
f@0
|
4424 if ( device >= captureDeviceCount + renderDeviceCount ) {
|
f@0
|
4425 errorType = RtAudioError::INVALID_USE;
|
f@0
|
4426 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
|
f@0
|
4427 goto Exit;
|
f@0
|
4428 }
|
f@0
|
4429
|
f@0
|
4430 // determine whether index falls within capture or render devices
|
f@0
|
4431 if ( device >= renderDeviceCount ) {
|
f@0
|
4432 if ( mode != INPUT ) {
|
f@0
|
4433 errorType = RtAudioError::INVALID_USE;
|
f@0
|
4434 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
|
f@0
|
4435 goto Exit;
|
f@0
|
4436 }
|
f@0
|
4437
|
f@0
|
4438 // retrieve captureAudioClient from devicePtr
|
f@0
|
4439 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
|
f@0
|
4440
|
f@0
|
4441 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
|
f@0
|
4442 if ( FAILED( hr ) ) {
|
f@0
|
4443 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
|
f@0
|
4444 goto Exit;
|
f@0
|
4445 }
|
f@0
|
4446
|
f@0
|
4447 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
|
f@0
|
4448 NULL, ( void** ) &captureAudioClient );
|
f@0
|
4449 if ( FAILED( hr ) ) {
|
f@0
|
4450 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
|
f@0
|
4451 goto Exit;
|
f@0
|
4452 }
|
f@0
|
4453
|
f@0
|
4454 hr = captureAudioClient->GetMixFormat( &deviceFormat );
|
f@0
|
4455 if ( FAILED( hr ) ) {
|
f@0
|
4456 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
|
f@0
|
4457 goto Exit;
|
f@0
|
4458 }
|
f@0
|
4459
|
f@0
|
4460 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
|
f@0
|
4461 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
|
f@0
|
4462 }
|
f@0
|
4463 else {
|
f@0
|
4464 if ( mode != OUTPUT ) {
|
f@0
|
4465 errorType = RtAudioError::INVALID_USE;
|
f@0
|
4466 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
|
f@0
|
4467 goto Exit;
|
f@0
|
4468 }
|
f@0
|
4469
|
f@0
|
4470 // retrieve renderAudioClient from devicePtr
|
f@0
|
4471 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
|
f@0
|
4472
|
f@0
|
4473 hr = renderDevices->Item( device, &devicePtr );
|
f@0
|
4474 if ( FAILED( hr ) ) {
|
f@0
|
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
|
f@0
|
4476 goto Exit;
|
f@0
|
4477 }
|
f@0
|
4478
|
f@0
|
4479 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
|
f@0
|
4480 NULL, ( void** ) &renderAudioClient );
|
f@0
|
4481 if ( FAILED( hr ) ) {
|
f@0
|
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
|
f@0
|
4483 goto Exit;
|
f@0
|
4484 }
|
f@0
|
4485
|
f@0
|
4486 hr = renderAudioClient->GetMixFormat( &deviceFormat );
|
f@0
|
4487 if ( FAILED( hr ) ) {
|
f@0
|
4488 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
|
f@0
|
4489 goto Exit;
|
f@0
|
4490 }
|
f@0
|
4491
|
f@0
|
4492 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
|
f@0
|
4493 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
|
f@0
|
4494 }
|
f@0
|
4495
|
f@0
|
4496 // fill stream data
|
f@0
|
4497 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
|
f@0
|
4498 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
|
f@0
|
4499 stream_.mode = DUPLEX;
|
f@0
|
4500 }
|
f@0
|
4501 else {
|
f@0
|
4502 stream_.mode = mode;
|
f@0
|
4503 }
|
f@0
|
4504
|
f@0
|
4505 stream_.device[mode] = device;
|
f@0
|
4506 stream_.doByteSwap[mode] = false;
|
f@0
|
4507 stream_.sampleRate = sampleRate;
|
f@0
|
4508 stream_.bufferSize = *bufferSize;
|
f@0
|
4509 stream_.nBuffers = 1;
|
f@0
|
4510 stream_.nUserChannels[mode] = channels;
|
f@0
|
4511 stream_.channelOffset[mode] = firstChannel;
|
f@0
|
4512 stream_.userFormat = format;
|
f@0
|
4513 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
|
f@0
|
4514
|
f@0
|
4515 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
|
f@0
|
4516 stream_.userInterleaved = false;
|
f@0
|
4517 else
|
f@0
|
4518 stream_.userInterleaved = true;
|
f@0
|
4519 stream_.deviceInterleaved[mode] = true;
|
f@0
|
4520
|
f@0
|
4521 // Set flags for buffer conversion.
|
f@0
|
4522 stream_.doConvertBuffer[mode] = false;
|
f@0
|
4523 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
|
f@0
|
4524 stream_.nUserChannels != stream_.nDeviceChannels )
|
f@0
|
4525 stream_.doConvertBuffer[mode] = true;
|
f@0
|
4526 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
4527 stream_.nUserChannels[mode] > 1 )
|
f@0
|
4528 stream_.doConvertBuffer[mode] = true;
|
f@0
|
4529
|
f@0
|
4530 if ( stream_.doConvertBuffer[mode] )
|
f@0
|
4531 setConvertInfo( mode, 0 );
|
f@0
|
4532
|
f@0
|
4533 // Allocate necessary internal buffers
|
f@0
|
4534 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
4535
|
f@0
|
4536 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
|
f@0
|
4537 if ( !stream_.userBuffer[mode] ) {
|
f@0
|
4538 errorType = RtAudioError::MEMORY_ERROR;
|
f@0
|
4539 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
|
f@0
|
4540 goto Exit;
|
f@0
|
4541 }
|
f@0
|
4542
|
f@0
|
4543 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
|
f@0
|
4544 stream_.callbackInfo.priority = 15;
|
f@0
|
4545 else
|
f@0
|
4546 stream_.callbackInfo.priority = 0;
|
f@0
|
4547
|
f@0
|
4548 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
|
f@0
|
4549 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
|
f@0
|
4550
|
f@0
|
4551 methodResult = SUCCESS;
|
f@0
|
4552
|
f@0
|
4553 Exit:
|
f@0
|
4554 //clean up
|
f@0
|
4555 SAFE_RELEASE( captureDevices );
|
f@0
|
4556 SAFE_RELEASE( renderDevices );
|
f@0
|
4557 SAFE_RELEASE( devicePtr );
|
f@0
|
4558 CoTaskMemFree( deviceFormat );
|
f@0
|
4559
|
f@0
|
4560 // if method failed, close the stream
|
f@0
|
4561 if ( methodResult == FAILURE )
|
f@0
|
4562 closeStream();
|
f@0
|
4563
|
f@0
|
4564 if ( !errorText_.empty() )
|
f@0
|
4565 error( errorType );
|
f@0
|
4566 return methodResult;
|
f@0
|
4567 }
|
f@0
|
4568
|
f@0
|
4569 //=============================================================================
|
f@0
|
4570
|
f@0
|
4571 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
|
f@0
|
4572 {
|
f@0
|
4573 if ( wasapiPtr )
|
f@0
|
4574 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
|
f@0
|
4575
|
f@0
|
4576 return 0;
|
f@0
|
4577 }
|
f@0
|
4578
|
f@0
|
4579 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
|
f@0
|
4580 {
|
f@0
|
4581 if ( wasapiPtr )
|
f@0
|
4582 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
|
f@0
|
4583
|
f@0
|
4584 return 0;
|
f@0
|
4585 }
|
f@0
|
4586
|
f@0
|
4587 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
|
f@0
|
4588 {
|
f@0
|
4589 if ( wasapiPtr )
|
f@0
|
4590 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
|
f@0
|
4591
|
f@0
|
4592 return 0;
|
f@0
|
4593 }
|
f@0
|
4594
|
f@0
|
4595 //-----------------------------------------------------------------------------
|
f@0
|
4596
|
f@0
|
4597 void RtApiWasapi::wasapiThread()
|
f@0
|
4598 {
|
f@0
|
4599 // as this is a new thread, we must CoInitialize it
|
f@0
|
4600 CoInitialize( NULL );
|
f@0
|
4601
|
f@0
|
4602 HRESULT hr;
|
f@0
|
4603
|
f@0
|
4604 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
|
f@0
|
4605 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
|
f@0
|
4606 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
|
f@0
|
4607 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
|
f@0
|
4608 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
|
f@0
|
4609 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
|
f@0
|
4610
|
f@0
|
4611 WAVEFORMATEX* captureFormat = NULL;
|
f@0
|
4612 WAVEFORMATEX* renderFormat = NULL;
|
f@0
|
4613 float captureSrRatio = 0.0f;
|
f@0
|
4614 float renderSrRatio = 0.0f;
|
f@0
|
4615 WasapiBuffer captureBuffer;
|
f@0
|
4616 WasapiBuffer renderBuffer;
|
f@0
|
4617
|
f@0
|
4618 // declare local stream variables
|
f@0
|
4619 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
|
f@0
|
4620 BYTE* streamBuffer = NULL;
|
f@0
|
4621 unsigned long captureFlags = 0;
|
f@0
|
4622 unsigned int bufferFrameCount = 0;
|
f@0
|
4623 unsigned int numFramesPadding = 0;
|
f@0
|
4624 unsigned int convBufferSize = 0;
|
f@0
|
4625 bool callbackPushed = false;
|
f@0
|
4626 bool callbackPulled = false;
|
f@0
|
4627 bool callbackStopped = false;
|
f@0
|
4628 int callbackResult = 0;
|
f@0
|
4629
|
f@0
|
4630 // convBuffer is used to store converted buffers between WASAPI and the user
|
f@0
|
4631 char* convBuffer = NULL;
|
f@0
|
4632 unsigned int convBuffSize = 0;
|
f@0
|
4633 unsigned int deviceBuffSize = 0;
|
f@0
|
4634
|
f@0
|
4635 errorText_.clear();
|
f@0
|
4636 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
|
f@0
|
4637
|
f@0
|
4638 // Attempt to assign "Pro Audio" characteristic to thread
|
f@0
|
4639 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
|
f@0
|
4640 if ( AvrtDll ) {
|
f@0
|
4641 DWORD taskIndex = 0;
|
f@0
|
4642 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
|
f@0
|
4643 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
|
f@0
|
4644 FreeLibrary( AvrtDll );
|
f@0
|
4645 }
|
f@0
|
4646
|
f@0
|
4647 // start capture stream if applicable
|
f@0
|
4648 if ( captureAudioClient ) {
|
f@0
|
4649 hr = captureAudioClient->GetMixFormat( &captureFormat );
|
f@0
|
4650 if ( FAILED( hr ) ) {
|
f@0
|
4651 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
|
f@0
|
4652 goto Exit;
|
f@0
|
4653 }
|
f@0
|
4654
|
f@0
|
4655 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
|
f@0
|
4656
|
f@0
|
4657 // initialize capture stream according to desire buffer size
|
f@0
|
4658 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
|
f@0
|
4659 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
|
f@0
|
4660
|
f@0
|
4661 if ( !captureClient ) {
|
f@0
|
4662 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
|
f@0
|
4663 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
|
f@0
|
4664 desiredBufferPeriod,
|
f@0
|
4665 desiredBufferPeriod,
|
f@0
|
4666 captureFormat,
|
f@0
|
4667 NULL );
|
f@0
|
4668 if ( FAILED( hr ) ) {
|
f@0
|
4669 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
|
f@0
|
4670 goto Exit;
|
f@0
|
4671 }
|
f@0
|
4672
|
f@0
|
4673 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
|
f@0
|
4674 ( void** ) &captureClient );
|
f@0
|
4675 if ( FAILED( hr ) ) {
|
f@0
|
4676 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
|
f@0
|
4677 goto Exit;
|
f@0
|
4678 }
|
f@0
|
4679
|
f@0
|
4680 // configure captureEvent to trigger on every available capture buffer
|
f@0
|
4681 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
|
f@0
|
4682 if ( !captureEvent ) {
|
f@0
|
4683 errorType = RtAudioError::SYSTEM_ERROR;
|
f@0
|
4684 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
|
f@0
|
4685 goto Exit;
|
f@0
|
4686 }
|
f@0
|
4687
|
f@0
|
4688 hr = captureAudioClient->SetEventHandle( captureEvent );
|
f@0
|
4689 if ( FAILED( hr ) ) {
|
f@0
|
4690 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
|
f@0
|
4691 goto Exit;
|
f@0
|
4692 }
|
f@0
|
4693
|
f@0
|
4694 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
|
f@0
|
4695 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
|
f@0
|
4696 }
|
f@0
|
4697
|
f@0
|
4698 unsigned int inBufferSize = 0;
|
f@0
|
4699 hr = captureAudioClient->GetBufferSize( &inBufferSize );
|
f@0
|
4700 if ( FAILED( hr ) ) {
|
f@0
|
4701 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
|
f@0
|
4702 goto Exit;
|
f@0
|
4703 }
|
f@0
|
4704
|
f@0
|
4705 // scale outBufferSize according to stream->user sample rate ratio
|
f@0
|
4706 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
|
f@0
|
4707 inBufferSize *= stream_.nDeviceChannels[INPUT];
|
f@0
|
4708
|
f@0
|
4709 // set captureBuffer size
|
f@0
|
4710 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
|
f@0
|
4711
|
f@0
|
4712 // reset the capture stream
|
f@0
|
4713 hr = captureAudioClient->Reset();
|
f@0
|
4714 if ( FAILED( hr ) ) {
|
f@0
|
4715 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
|
f@0
|
4716 goto Exit;
|
f@0
|
4717 }
|
f@0
|
4718
|
f@0
|
4719 // start the capture stream
|
f@0
|
4720 hr = captureAudioClient->Start();
|
f@0
|
4721 if ( FAILED( hr ) ) {
|
f@0
|
4722 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
|
f@0
|
4723 goto Exit;
|
f@0
|
4724 }
|
f@0
|
4725 }
|
f@0
|
4726
|
f@0
|
4727 // start render stream if applicable
|
f@0
|
4728 if ( renderAudioClient ) {
|
f@0
|
4729 hr = renderAudioClient->GetMixFormat( &renderFormat );
|
f@0
|
4730 if ( FAILED( hr ) ) {
|
f@0
|
4731 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
|
f@0
|
4732 goto Exit;
|
f@0
|
4733 }
|
f@0
|
4734
|
f@0
|
4735 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
|
f@0
|
4736
|
f@0
|
4737 // initialize render stream according to desire buffer size
|
f@0
|
4738 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
|
f@0
|
4739 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
|
f@0
|
4740
|
f@0
|
4741 if ( !renderClient ) {
|
f@0
|
4742 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
|
f@0
|
4743 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
|
f@0
|
4744 desiredBufferPeriod,
|
f@0
|
4745 desiredBufferPeriod,
|
f@0
|
4746 renderFormat,
|
f@0
|
4747 NULL );
|
f@0
|
4748 if ( FAILED( hr ) ) {
|
f@0
|
4749 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
|
f@0
|
4750 goto Exit;
|
f@0
|
4751 }
|
f@0
|
4752
|
f@0
|
4753 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
|
f@0
|
4754 ( void** ) &renderClient );
|
f@0
|
4755 if ( FAILED( hr ) ) {
|
f@0
|
4756 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
|
f@0
|
4757 goto Exit;
|
f@0
|
4758 }
|
f@0
|
4759
|
f@0
|
4760 // configure renderEvent to trigger on every available render buffer
|
f@0
|
4761 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
|
f@0
|
4762 if ( !renderEvent ) {
|
f@0
|
4763 errorType = RtAudioError::SYSTEM_ERROR;
|
f@0
|
4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
|
f@0
|
4765 goto Exit;
|
f@0
|
4766 }
|
f@0
|
4767
|
f@0
|
4768 hr = renderAudioClient->SetEventHandle( renderEvent );
|
f@0
|
4769 if ( FAILED( hr ) ) {
|
f@0
|
4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
|
f@0
|
4771 goto Exit;
|
f@0
|
4772 }
|
f@0
|
4773
|
f@0
|
4774 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
|
f@0
|
4775 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
|
f@0
|
4776 }
|
f@0
|
4777
|
f@0
|
4778 unsigned int outBufferSize = 0;
|
f@0
|
4779 hr = renderAudioClient->GetBufferSize( &outBufferSize );
|
f@0
|
4780 if ( FAILED( hr ) ) {
|
f@0
|
4781 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
|
f@0
|
4782 goto Exit;
|
f@0
|
4783 }
|
f@0
|
4784
|
f@0
|
4785 // scale inBufferSize according to user->stream sample rate ratio
|
f@0
|
4786 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
|
f@0
|
4787 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
|
f@0
|
4788
|
f@0
|
4789 // set renderBuffer size
|
f@0
|
4790 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
|
f@0
|
4791
|
f@0
|
4792 // reset the render stream
|
f@0
|
4793 hr = renderAudioClient->Reset();
|
f@0
|
4794 if ( FAILED( hr ) ) {
|
f@0
|
4795 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
|
f@0
|
4796 goto Exit;
|
f@0
|
4797 }
|
f@0
|
4798
|
f@0
|
4799 // start the render stream
|
f@0
|
4800 hr = renderAudioClient->Start();
|
f@0
|
4801 if ( FAILED( hr ) ) {
|
f@0
|
4802 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
|
f@0
|
4803 goto Exit;
|
f@0
|
4804 }
|
f@0
|
4805 }
|
f@0
|
4806
|
f@0
|
4807 if ( stream_.mode == INPUT ) {
|
f@0
|
4808 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
|
f@0
|
4809 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
|
f@0
|
4810 }
|
f@0
|
4811 else if ( stream_.mode == OUTPUT ) {
|
f@0
|
4812 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
|
f@0
|
4813 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
|
f@0
|
4814 }
|
f@0
|
4815 else if ( stream_.mode == DUPLEX ) {
|
f@0
|
4816 convBuffSize = max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
|
f@0
|
4817 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
|
f@0
|
4818 deviceBuffSize = max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
|
f@0
|
4819 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
|
f@0
|
4820 }
|
f@0
|
4821
|
f@0
|
4822 convBuffer = ( char* ) malloc( convBuffSize );
|
f@0
|
4823 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
|
f@0
|
4824 if ( !convBuffer || !stream_.deviceBuffer ) {
|
f@0
|
4825 errorType = RtAudioError::MEMORY_ERROR;
|
f@0
|
4826 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
|
f@0
|
4827 goto Exit;
|
f@0
|
4828 }
|
f@0
|
4829
|
f@0
|
4830 // stream process loop
|
f@0
|
4831 while ( stream_.state != STREAM_STOPPING ) {
|
f@0
|
4832 if ( !callbackPulled ) {
|
f@0
|
4833 // Callback Input
|
f@0
|
4834 // ==============
|
f@0
|
4835 // 1. Pull callback buffer from inputBuffer
|
f@0
|
4836 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
|
f@0
|
4837 // Convert callback buffer to user format
|
f@0
|
4838
|
f@0
|
4839 if ( captureAudioClient ) {
|
f@0
|
4840 // Pull callback buffer from inputBuffer
|
f@0
|
4841 callbackPulled = captureBuffer.pullBuffer( convBuffer,
|
f@0
|
4842 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
|
f@0
|
4843 stream_.deviceFormat[INPUT] );
|
f@0
|
4844
|
f@0
|
4845 if ( callbackPulled ) {
|
f@0
|
4846 // Convert callback buffer to user sample rate
|
f@0
|
4847 convertBufferWasapi( stream_.deviceBuffer,
|
f@0
|
4848 convBuffer,
|
f@0
|
4849 stream_.nDeviceChannels[INPUT],
|
f@0
|
4850 captureFormat->nSamplesPerSec,
|
f@0
|
4851 stream_.sampleRate,
|
f@0
|
4852 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
|
f@0
|
4853 convBufferSize,
|
f@0
|
4854 stream_.deviceFormat[INPUT] );
|
f@0
|
4855
|
f@0
|
4856 if ( stream_.doConvertBuffer[INPUT] ) {
|
f@0
|
4857 // Convert callback buffer to user format
|
f@0
|
4858 convertBuffer( stream_.userBuffer[INPUT],
|
f@0
|
4859 stream_.deviceBuffer,
|
f@0
|
4860 stream_.convertInfo[INPUT] );
|
f@0
|
4861 }
|
f@0
|
4862 else {
|
f@0
|
4863 // no further conversion, simple copy deviceBuffer to userBuffer
|
f@0
|
4864 memcpy( stream_.userBuffer[INPUT],
|
f@0
|
4865 stream_.deviceBuffer,
|
f@0
|
4866 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
|
f@0
|
4867 }
|
f@0
|
4868 }
|
f@0
|
4869 }
|
f@0
|
4870 else {
|
f@0
|
4871 // if there is no capture stream, set callbackPulled flag
|
f@0
|
4872 callbackPulled = true;
|
f@0
|
4873 }
|
f@0
|
4874
|
f@0
|
4875 // Execute Callback
|
f@0
|
4876 // ================
|
f@0
|
4877 // 1. Execute user callback method
|
f@0
|
4878 // 2. Handle return value from callback
|
f@0
|
4879
|
f@0
|
4880 // if callback has not requested the stream to stop
|
f@0
|
4881 if ( callbackPulled && !callbackStopped ) {
|
f@0
|
4882 // Execute user callback method
|
f@0
|
4883 callbackResult = callback( stream_.userBuffer[OUTPUT],
|
f@0
|
4884 stream_.userBuffer[INPUT],
|
f@0
|
4885 stream_.bufferSize,
|
f@0
|
4886 getStreamTime(),
|
f@0
|
4887 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
|
f@0
|
4888 stream_.callbackInfo.userData );
|
f@0
|
4889
|
f@0
|
4890 // Handle return value from callback
|
f@0
|
4891 if ( callbackResult == 1 ) {
|
f@0
|
4892 // instantiate a thread to stop this thread
|
f@0
|
4893 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
|
f@0
|
4894 if ( !threadHandle ) {
|
f@0
|
4895 errorType = RtAudioError::THREAD_ERROR;
|
f@0
|
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
|
f@0
|
4897 goto Exit;
|
f@0
|
4898 }
|
f@0
|
4899 else if ( !CloseHandle( threadHandle ) ) {
|
f@0
|
4900 errorType = RtAudioError::THREAD_ERROR;
|
f@0
|
4901 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
|
f@0
|
4902 goto Exit;
|
f@0
|
4903 }
|
f@0
|
4904
|
f@0
|
4905 callbackStopped = true;
|
f@0
|
4906 }
|
f@0
|
4907 else if ( callbackResult == 2 ) {
|
f@0
|
4908 // instantiate a thread to stop this thread
|
f@0
|
4909 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
|
f@0
|
4910 if ( !threadHandle ) {
|
f@0
|
4911 errorType = RtAudioError::THREAD_ERROR;
|
f@0
|
4912 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
|
f@0
|
4913 goto Exit;
|
f@0
|
4914 }
|
f@0
|
4915 else if ( !CloseHandle( threadHandle ) ) {
|
f@0
|
4916 errorType = RtAudioError::THREAD_ERROR;
|
f@0
|
4917 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
|
f@0
|
4918 goto Exit;
|
f@0
|
4919 }
|
f@0
|
4920
|
f@0
|
4921 callbackStopped = true;
|
f@0
|
4922 }
|
f@0
|
4923 }
|
f@0
|
4924 }
|
f@0
|
4925
|
f@0
|
4926 // Callback Output
|
f@0
|
4927 // ===============
|
f@0
|
4928 // 1. Convert callback buffer to stream format
|
f@0
|
4929 // 2. Convert callback buffer to stream sample rate and channel count
|
f@0
|
4930 // 3. Push callback buffer into outputBuffer
|
f@0
|
4931
|
f@0
|
4932 if ( renderAudioClient && callbackPulled ) {
|
f@0
|
4933 if ( stream_.doConvertBuffer[OUTPUT] ) {
|
f@0
|
4934 // Convert callback buffer to stream format
|
f@0
|
4935 convertBuffer( stream_.deviceBuffer,
|
f@0
|
4936 stream_.userBuffer[OUTPUT],
|
f@0
|
4937 stream_.convertInfo[OUTPUT] );
|
f@0
|
4938
|
f@0
|
4939 }
|
f@0
|
4940
|
f@0
|
4941 // Convert callback buffer to stream sample rate
|
f@0
|
4942 convertBufferWasapi( convBuffer,
|
f@0
|
4943 stream_.deviceBuffer,
|
f@0
|
4944 stream_.nDeviceChannels[OUTPUT],
|
f@0
|
4945 stream_.sampleRate,
|
f@0
|
4946 renderFormat->nSamplesPerSec,
|
f@0
|
4947 stream_.bufferSize,
|
f@0
|
4948 convBufferSize,
|
f@0
|
4949 stream_.deviceFormat[OUTPUT] );
|
f@0
|
4950
|
f@0
|
4951 // Push callback buffer into outputBuffer
|
f@0
|
4952 callbackPushed = renderBuffer.pushBuffer( convBuffer,
|
f@0
|
4953 convBufferSize * stream_.nDeviceChannels[OUTPUT],
|
f@0
|
4954 stream_.deviceFormat[OUTPUT] );
|
f@0
|
4955 }
|
f@0
|
4956 else {
|
f@0
|
4957 // if there is no render stream, set callbackPushed flag
|
f@0
|
4958 callbackPushed = true;
|
f@0
|
4959 }
|
f@0
|
4960
|
f@0
|
4961 // Stream Capture
|
f@0
|
4962 // ==============
|
f@0
|
4963 // 1. Get capture buffer from stream
|
f@0
|
4964 // 2. Push capture buffer into inputBuffer
|
f@0
|
4965 // 3. If 2. was successful: Release capture buffer
|
f@0
|
4966
|
f@0
|
4967 if ( captureAudioClient ) {
|
f@0
|
4968 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
|
f@0
|
4969 if ( !callbackPulled ) {
|
f@0
|
4970 WaitForSingleObject( captureEvent, INFINITE );
|
f@0
|
4971 }
|
f@0
|
4972
|
f@0
|
4973 // Get capture buffer from stream
|
f@0
|
4974 hr = captureClient->GetBuffer( &streamBuffer,
|
f@0
|
4975 &bufferFrameCount,
|
f@0
|
4976 &captureFlags, NULL, NULL );
|
f@0
|
4977 if ( FAILED( hr ) ) {
|
f@0
|
4978 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
|
f@0
|
4979 goto Exit;
|
f@0
|
4980 }
|
f@0
|
4981
|
f@0
|
4982 if ( bufferFrameCount != 0 ) {
|
f@0
|
4983 // Push capture buffer into inputBuffer
|
f@0
|
4984 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
|
f@0
|
4985 bufferFrameCount * stream_.nDeviceChannels[INPUT],
|
f@0
|
4986 stream_.deviceFormat[INPUT] ) )
|
f@0
|
4987 {
|
f@0
|
4988 // Release capture buffer
|
f@0
|
4989 hr = captureClient->ReleaseBuffer( bufferFrameCount );
|
f@0
|
4990 if ( FAILED( hr ) ) {
|
f@0
|
4991 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
|
f@0
|
4992 goto Exit;
|
f@0
|
4993 }
|
f@0
|
4994 }
|
f@0
|
4995 else
|
f@0
|
4996 {
|
f@0
|
4997 // Inform WASAPI that capture was unsuccessful
|
f@0
|
4998 hr = captureClient->ReleaseBuffer( 0 );
|
f@0
|
4999 if ( FAILED( hr ) ) {
|
f@0
|
5000 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
|
f@0
|
5001 goto Exit;
|
f@0
|
5002 }
|
f@0
|
5003 }
|
f@0
|
5004 }
|
f@0
|
5005 else
|
f@0
|
5006 {
|
f@0
|
5007 // Inform WASAPI that capture was unsuccessful
|
f@0
|
5008 hr = captureClient->ReleaseBuffer( 0 );
|
f@0
|
5009 if ( FAILED( hr ) ) {
|
f@0
|
5010 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
|
f@0
|
5011 goto Exit;
|
f@0
|
5012 }
|
f@0
|
5013 }
|
f@0
|
5014 }
|
f@0
|
5015
|
f@0
|
5016 // Stream Render
|
f@0
|
5017 // =============
|
f@0
|
5018 // 1. Get render buffer from stream
|
f@0
|
5019 // 2. Pull next buffer from outputBuffer
|
f@0
|
5020 // 3. If 2. was successful: Fill render buffer with next buffer
|
f@0
|
5021 // Release render buffer
|
f@0
|
5022
|
f@0
|
5023 if ( renderAudioClient ) {
|
f@0
|
5024 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
|
f@0
|
5025 if ( callbackPulled && !callbackPushed ) {
|
f@0
|
5026 WaitForSingleObject( renderEvent, INFINITE );
|
f@0
|
5027 }
|
f@0
|
5028
|
f@0
|
5029 // Get render buffer from stream
|
f@0
|
5030 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
|
f@0
|
5031 if ( FAILED( hr ) ) {
|
f@0
|
5032 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
|
f@0
|
5033 goto Exit;
|
f@0
|
5034 }
|
f@0
|
5035
|
f@0
|
5036 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
|
f@0
|
5037 if ( FAILED( hr ) ) {
|
f@0
|
5038 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
|
f@0
|
5039 goto Exit;
|
f@0
|
5040 }
|
f@0
|
5041
|
f@0
|
5042 bufferFrameCount -= numFramesPadding;
|
f@0
|
5043
|
f@0
|
5044 if ( bufferFrameCount != 0 ) {
|
f@0
|
5045 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
|
f@0
|
5046 if ( FAILED( hr ) ) {
|
f@0
|
5047 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
|
f@0
|
5048 goto Exit;
|
f@0
|
5049 }
|
f@0
|
5050
|
f@0
|
5051 // Pull next buffer from outputBuffer
|
f@0
|
5052 // Fill render buffer with next buffer
|
f@0
|
5053 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
|
f@0
|
5054 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
|
f@0
|
5055 stream_.deviceFormat[OUTPUT] ) )
|
f@0
|
5056 {
|
f@0
|
5057 // Release render buffer
|
f@0
|
5058 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
|
f@0
|
5059 if ( FAILED( hr ) ) {
|
f@0
|
5060 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
|
f@0
|
5061 goto Exit;
|
f@0
|
5062 }
|
f@0
|
5063 }
|
f@0
|
5064 else
|
f@0
|
5065 {
|
f@0
|
5066 // Inform WASAPI that render was unsuccessful
|
f@0
|
5067 hr = renderClient->ReleaseBuffer( 0, 0 );
|
f@0
|
5068 if ( FAILED( hr ) ) {
|
f@0
|
5069 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
|
f@0
|
5070 goto Exit;
|
f@0
|
5071 }
|
f@0
|
5072 }
|
f@0
|
5073 }
|
f@0
|
5074 else
|
f@0
|
5075 {
|
f@0
|
5076 // Inform WASAPI that render was unsuccessful
|
f@0
|
5077 hr = renderClient->ReleaseBuffer( 0, 0 );
|
f@0
|
5078 if ( FAILED( hr ) ) {
|
f@0
|
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
|
f@0
|
5080 goto Exit;
|
f@0
|
5081 }
|
f@0
|
5082 }
|
f@0
|
5083 }
|
f@0
|
5084
|
f@0
|
5085 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
|
f@0
|
5086 if ( callbackPushed ) {
|
f@0
|
5087 callbackPulled = false;
|
f@0
|
5088 }
|
f@0
|
5089
|
f@0
|
5090 // tick stream time
|
f@0
|
5091 RtApi::tickStreamTime();
|
f@0
|
5092 }
|
f@0
|
5093
|
f@0
|
5094 Exit:
|
f@0
|
5095 // clean up
|
f@0
|
5096 CoTaskMemFree( captureFormat );
|
f@0
|
5097 CoTaskMemFree( renderFormat );
|
f@0
|
5098
|
f@0
|
5099 free ( convBuffer );
|
f@0
|
5100
|
f@0
|
5101 CoUninitialize();
|
f@0
|
5102
|
f@0
|
5103 // update stream state
|
f@0
|
5104 stream_.state = STREAM_STOPPED;
|
f@0
|
5105
|
f@0
|
5106 if ( errorText_.empty() )
|
f@0
|
5107 return;
|
f@0
|
5108 else
|
f@0
|
5109 error( errorType );
|
f@0
|
5110 }
|
f@0
|
5111
|
f@0
|
5112 //******************** End of __WINDOWS_WASAPI__ *********************//
|
f@0
|
5113 #endif
|
f@0
|
5114
|
f@0
|
5115
|
f@0
|
5116 #if defined(__WINDOWS_DS__) // Windows DirectSound API
|
f@0
|
5117
|
f@0
|
5118 // Modified by Robin Davies, October 2005
|
f@0
|
5119 // - Improvements to DirectX pointer chasing.
|
f@0
|
5120 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
|
f@0
|
5121 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
|
f@0
|
5122 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
|
f@0
|
5123 // Changed device query structure for RtAudio 4.0.7, January 2010
|
f@0
|
5124
|
f@0
|
5125 #include <dsound.h>
|
f@0
|
5126 #include <assert.h>
|
f@0
|
5127 #include <algorithm>
|
f@0
|
5128
|
f@0
|
5129 #if defined(__MINGW32__)
|
f@0
|
5130 // missing from latest mingw winapi
|
f@0
|
5131 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
|
f@0
|
5132 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
|
f@0
|
5133 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
|
f@0
|
5134 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
|
f@0
|
5135 #endif
|
f@0
|
5136
|
f@0
|
5137 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
|
f@0
|
5138
|
f@0
|
5139 #ifdef _MSC_VER // if Microsoft Visual C++
|
f@0
|
5140 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
|
f@0
|
5141 #endif
|
f@0
|
5142
|
f@0
|
5143 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
|
f@0
|
5144 {
|
f@0
|
5145 if ( pointer > bufferSize ) pointer -= bufferSize;
|
f@0
|
5146 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
|
f@0
|
5147 if ( pointer < earlierPointer ) pointer += bufferSize;
|
f@0
|
5148 return pointer >= earlierPointer && pointer < laterPointer;
|
f@0
|
5149 }
|
f@0
|
5150
|
f@0
|
5151 // A structure to hold various information related to the DirectSound
|
f@0
|
5152 // API implementation.
|
f@0
|
5153 struct DsHandle {
|
f@0
|
5154 unsigned int drainCounter; // Tracks callback counts when draining
|
f@0
|
5155 bool internalDrain; // Indicates if stop is initiated from callback or not.
|
f@0
|
5156 void *id[2];
|
f@0
|
5157 void *buffer[2];
|
f@0
|
5158 bool xrun[2];
|
f@0
|
5159 UINT bufferPointer[2];
|
f@0
|
5160 DWORD dsBufferSize[2];
|
f@0
|
5161 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
|
f@0
|
5162 HANDLE condition;
|
f@0
|
5163
|
f@0
|
5164 DsHandle()
|
f@0
|
5165 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
|
f@0
|
5166 };
|
f@0
|
5167
|
f@0
|
5168 // Declarations for utility functions, callbacks, and structures
|
f@0
|
5169 // specific to the DirectSound implementation.
|
f@0
|
5170 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
|
f@0
|
5171 LPCTSTR description,
|
f@0
|
5172 LPCTSTR module,
|
f@0
|
5173 LPVOID lpContext );
|
f@0
|
5174
|
f@0
|
5175 static const char* getErrorString( int code );
|
f@0
|
5176
|
f@0
|
5177 static unsigned __stdcall callbackHandler( void *ptr );
|
f@0
|
5178
|
f@0
|
5179 struct DsDevice {
|
f@0
|
5180 LPGUID id[2];
|
f@0
|
5181 bool validId[2];
|
f@0
|
5182 bool found;
|
f@0
|
5183 std::string name;
|
f@0
|
5184
|
f@0
|
5185 DsDevice()
|
f@0
|
5186 : found(false) { validId[0] = false; validId[1] = false; }
|
f@0
|
5187 };
|
f@0
|
5188
|
f@0
|
// Context passed to the DirectSound device-enumeration callback:
// selects whether capture or playback devices are being probed and
// where the results are accumulated.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // list to populate (non-owning pointer)
};
|
f@0
|
5193
|
f@0
|
5194 RtApiDs :: RtApiDs()
|
f@0
|
5195 {
|
f@0
|
5196 // Dsound will run both-threaded. If CoInitialize fails, then just
|
f@0
|
5197 // accept whatever the mainline chose for a threading model.
|
f@0
|
5198 coInitialized_ = false;
|
f@0
|
5199 HRESULT hr = CoInitialize( NULL );
|
f@0
|
5200 if ( !FAILED( hr ) ) coInitialized_ = true;
|
f@0
|
5201 }
|
f@0
|
5202
|
f@0
|
5203 RtApiDs :: ~RtApiDs()
|
f@0
|
5204 {
|
f@0
|
5205 if ( coInitialized_ ) CoUninitialize(); // balanced call.
|
f@0
|
5206 if ( stream_.state != STREAM_CLOSED ) closeStream();
|
f@0
|
5207 }
|
f@0
|
5208
|
f@0
|
// The DirectSound default output is always the first device, i.e. the
// first playback device returned by DirectSoundEnumerate().
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0;
}
|
f@0
|
5214
|
f@0
|
// The DirectSound default input is always the first input device,
// which is the first capture device enumerated.
unsigned int RtApiDs :: getDefaultInputDevice( void )
{
  return 0;
}
|
f@0
|
5221
|
f@0
|
5222 unsigned int RtApiDs :: getDeviceCount( void )
|
f@0
|
5223 {
|
f@0
|
5224 // Set query flag for previously found devices to false, so that we
|
f@0
|
5225 // can check for any devices that have disappeared.
|
f@0
|
5226 for ( unsigned int i=0; i<dsDevices.size(); i++ )
|
f@0
|
5227 dsDevices[i].found = false;
|
f@0
|
5228
|
f@0
|
5229 // Query DirectSound devices.
|
f@0
|
5230 struct DsProbeData probeInfo;
|
f@0
|
5231 probeInfo.isInput = false;
|
f@0
|
5232 probeInfo.dsDevices = &dsDevices;
|
f@0
|
5233 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
|
f@0
|
5234 if ( FAILED( result ) ) {
|
f@0
|
5235 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
|
f@0
|
5236 errorText_ = errorStream_.str();
|
f@0
|
5237 error( RtAudioError::WARNING );
|
f@0
|
5238 }
|
f@0
|
5239
|
f@0
|
5240 // Query DirectSoundCapture devices.
|
f@0
|
5241 probeInfo.isInput = true;
|
f@0
|
5242 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
|
f@0
|
5243 if ( FAILED( result ) ) {
|
f@0
|
5244 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
|
f@0
|
5245 errorText_ = errorStream_.str();
|
f@0
|
5246 error( RtAudioError::WARNING );
|
f@0
|
5247 }
|
f@0
|
5248
|
f@0
|
5249 // Clean out any devices that may have disappeared.
|
f@0
|
5250 std::vector< int > indices;
|
f@0
|
5251 for ( unsigned int i=0; i<dsDevices.size(); i++ )
|
f@0
|
5252 if ( dsDevices[i].found == false ) indices.push_back( i );
|
f@0
|
5253 //unsigned int nErased = 0;
|
f@0
|
5254 for ( unsigned int i=0; i<indices.size(); i++ )
|
f@0
|
5255 dsDevices.erase( dsDevices.begin()+indices[i] );
|
f@0
|
5256 //dsDevices.erase( dsDevices.begin()-nErased++ );
|
f@0
|
5257
|
f@0
|
5258 return static_cast<unsigned int>(dsDevices.size());
|
f@0
|
5259 }
|
f@0
|
5260
|
f@0
|
// Probe a single DirectSound device and fill in an RtAudio::DeviceInfo
// for it.  Output capabilities are probed first; capture capabilities
// are probed at the 'probeInput' label (reached directly when the
// device has no valid playback id, or after any playback-probe
// failure).  On failure paths info.probed stays false.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false; // remains false unless a probe below completes

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No valid playback id for this device: go straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information: keep every candidate rate that lies
  // inside the device's secondary-buffer rate range.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
      info.sampleRates.push_back( SAMPLE_RATES[k] );
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // Playback-only device: we are done.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.  The dwFormats bitmask
  // encodes (rate x channels x bit depth) combinations; stereo and mono
  // masks are examined separately below.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Prefer 16-bit rates when any 16-bit stereo format is supported.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Prefer 16-bit rates when any 16-bit mono format is supported.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
|
f@0
|
5435
|
f@0
|
5436 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
5437 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
5438 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
5439 RtAudio::StreamOptions *options )
|
f@0
|
5440 {
|
f@0
|
5441 if ( channels + firstChannel > 2 ) {
|
f@0
|
5442 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
|
f@0
|
5443 return FAILURE;
|
f@0
|
5444 }
|
f@0
|
5445
|
f@0
|
5446 size_t nDevices = dsDevices.size();
|
f@0
|
5447 if ( nDevices == 0 ) {
|
f@0
|
5448 // This should not happen because a check is made before this function is called.
|
f@0
|
5449 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
|
f@0
|
5450 return FAILURE;
|
f@0
|
5451 }
|
f@0
|
5452
|
f@0
|
5453 if ( device >= nDevices ) {
|
f@0
|
5454 // This should not happen because a check is made before this function is called.
|
f@0
|
5455 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
|
f@0
|
5456 return FAILURE;
|
f@0
|
5457 }
|
f@0
|
5458
|
f@0
|
5459 if ( mode == OUTPUT ) {
|
f@0
|
5460 if ( dsDevices[ device ].validId[0] == false ) {
|
f@0
|
5461 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
|
f@0
|
5462 errorText_ = errorStream_.str();
|
f@0
|
5463 return FAILURE;
|
f@0
|
5464 }
|
f@0
|
5465 }
|
f@0
|
5466 else { // mode == INPUT
|
f@0
|
5467 if ( dsDevices[ device ].validId[1] == false ) {
|
f@0
|
5468 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
|
f@0
|
5469 errorText_ = errorStream_.str();
|
f@0
|
5470 return FAILURE;
|
f@0
|
5471 }
|
f@0
|
5472 }
|
f@0
|
5473
|
f@0
|
5474 // According to a note in PortAudio, using GetDesktopWindow()
|
f@0
|
5475 // instead of GetForegroundWindow() is supposed to avoid problems
|
f@0
|
5476 // that occur when the application's window is not the foreground
|
f@0
|
5477 // window. Also, if the application window closes before the
|
f@0
|
5478 // DirectSound buffer, DirectSound can crash. In the past, I had
|
f@0
|
5479 // problems when using GetDesktopWindow() but it seems fine now
|
f@0
|
5480 // (January 2010). I'll leave it commented here.
|
f@0
|
5481 // HWND hWnd = GetForegroundWindow();
|
f@0
|
5482 HWND hWnd = GetDesktopWindow();
|
f@0
|
5483
|
f@0
|
5484 // Check the numberOfBuffers parameter and limit the lowest value to
|
f@0
|
5485 // two. This is a judgement call and a value of two is probably too
|
f@0
|
5486 // low for capture, but it should work for playback.
|
f@0
|
5487 int nBuffers = 0;
|
f@0
|
5488 if ( options ) nBuffers = options->numberOfBuffers;
|
f@0
|
5489 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
|
f@0
|
5490 if ( nBuffers < 2 ) nBuffers = 3;
|
f@0
|
5491
|
f@0
|
5492 // Check the lower range of the user-specified buffer size and set
|
f@0
|
5493 // (arbitrarily) to a lower bound of 32.
|
f@0
|
5494 if ( *bufferSize < 32 ) *bufferSize = 32;
|
f@0
|
5495
|
f@0
|
5496 // Create the wave format structure. The data format setting will
|
f@0
|
5497 // be determined later.
|
f@0
|
5498 WAVEFORMATEX waveFormat;
|
f@0
|
5499 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
|
f@0
|
5500 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
|
f@0
|
5501 waveFormat.nChannels = channels + firstChannel;
|
f@0
|
5502 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
|
f@0
|
5503
|
f@0
|
5504 // Determine the device buffer size. By default, we'll use the value
|
f@0
|
5505 // defined above (32K), but we will grow it to make allowances for
|
f@0
|
5506 // very large software buffer sizes.
|
f@0
|
5507 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
|
f@0
|
5508 DWORD dsPointerLeadTime = 0;
|
f@0
|
5509
|
f@0
|
5510 void *ohandle = 0, *bhandle = 0;
|
f@0
|
5511 HRESULT result;
|
f@0
|
5512 if ( mode == OUTPUT ) {
|
f@0
|
5513
|
f@0
|
5514 LPDIRECTSOUND output;
|
f@0
|
5515 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
|
f@0
|
5516 if ( FAILED( result ) ) {
|
f@0
|
5517 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
|
f@0
|
5518 errorText_ = errorStream_.str();
|
f@0
|
5519 return FAILURE;
|
f@0
|
5520 }
|
f@0
|
5521
|
f@0
|
5522 DSCAPS outCaps;
|
f@0
|
5523 outCaps.dwSize = sizeof( outCaps );
|
f@0
|
5524 result = output->GetCaps( &outCaps );
|
f@0
|
5525 if ( FAILED( result ) ) {
|
f@0
|
5526 output->Release();
|
f@0
|
5527 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
|
f@0
|
5528 errorText_ = errorStream_.str();
|
f@0
|
5529 return FAILURE;
|
f@0
|
5530 }
|
f@0
|
5531
|
f@0
|
5532 // Check channel information.
|
f@0
|
5533 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
|
f@0
|
5534 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
|
f@0
|
5535 errorText_ = errorStream_.str();
|
f@0
|
5536 return FAILURE;
|
f@0
|
5537 }
|
f@0
|
5538
|
f@0
|
5539 // Check format information. Use 16-bit format unless not
|
f@0
|
5540 // supported or user requests 8-bit.
|
f@0
|
5541 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
|
f@0
|
5542 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
|
f@0
|
5543 waveFormat.wBitsPerSample = 16;
|
f@0
|
5544 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
5545 }
|
f@0
|
5546 else {
|
f@0
|
5547 waveFormat.wBitsPerSample = 8;
|
f@0
|
5548 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
5549 }
|
f@0
|
5550 stream_.userFormat = format;
|
f@0
|
5551
|
f@0
|
5552 // Update wave format structure and buffer information.
|
f@0
|
5553 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
|
f@0
|
5554 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
|
f@0
|
5555 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
|
f@0
|
5556
|
f@0
|
5557 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
|
f@0
|
5558 while ( dsPointerLeadTime * 2U > dsBufferSize )
|
f@0
|
5559 dsBufferSize *= 2;
|
f@0
|
5560
|
f@0
|
5561 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
|
f@0
|
5562 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
|
f@0
|
5563 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
|
f@0
|
5564 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
|
f@0
|
5565 if ( FAILED( result ) ) {
|
f@0
|
5566 output->Release();
|
f@0
|
5567 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
|
f@0
|
5568 errorText_ = errorStream_.str();
|
f@0
|
5569 return FAILURE;
|
f@0
|
5570 }
|
f@0
|
5571
|
f@0
|
5572 // Even though we will write to the secondary buffer, we need to
|
f@0
|
5573 // access the primary buffer to set the correct output format
|
f@0
|
5574 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
|
f@0
|
5575 // buffer description.
|
f@0
|
5576 DSBUFFERDESC bufferDescription;
|
f@0
|
5577 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
|
f@0
|
5578 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
|
f@0
|
5579 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
|
f@0
|
5580
|
f@0
|
5581 // Obtain the primary buffer
|
f@0
|
5582 LPDIRECTSOUNDBUFFER buffer;
|
f@0
|
5583 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
|
f@0
|
5584 if ( FAILED( result ) ) {
|
f@0
|
5585 output->Release();
|
f@0
|
5586 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5587 errorText_ = errorStream_.str();
|
f@0
|
5588 return FAILURE;
|
f@0
|
5589 }
|
f@0
|
5590
|
f@0
|
5591 // Set the primary DS buffer sound format.
|
f@0
|
5592 result = buffer->SetFormat( &waveFormat );
|
f@0
|
5593 if ( FAILED( result ) ) {
|
f@0
|
5594 output->Release();
|
f@0
|
5595 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
|
f@0
|
5596 errorText_ = errorStream_.str();
|
f@0
|
5597 return FAILURE;
|
f@0
|
5598 }
|
f@0
|
5599
|
f@0
|
5600 // Setup the secondary DS buffer description.
|
f@0
|
5601 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
|
f@0
|
5602 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
|
f@0
|
5603 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
|
f@0
|
5604 DSBCAPS_GLOBALFOCUS |
|
f@0
|
5605 DSBCAPS_GETCURRENTPOSITION2 |
|
f@0
|
5606 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
|
f@0
|
5607 bufferDescription.dwBufferBytes = dsBufferSize;
|
f@0
|
5608 bufferDescription.lpwfxFormat = &waveFormat;
|
f@0
|
5609
|
f@0
|
5610 // Try to create the secondary DS buffer. If that doesn't work,
|
f@0
|
5611 // try to use software mixing. Otherwise, there's a problem.
|
f@0
|
5612 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
|
f@0
|
5613 if ( FAILED( result ) ) {
|
f@0
|
5614 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
|
f@0
|
5615 DSBCAPS_GLOBALFOCUS |
|
f@0
|
5616 DSBCAPS_GETCURRENTPOSITION2 |
|
f@0
|
5617 DSBCAPS_LOCSOFTWARE ); // Force software mixing
|
f@0
|
5618 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
|
f@0
|
5619 if ( FAILED( result ) ) {
|
f@0
|
5620 output->Release();
|
f@0
|
5621 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5622 errorText_ = errorStream_.str();
|
f@0
|
5623 return FAILURE;
|
f@0
|
5624 }
|
f@0
|
5625 }
|
f@0
|
5626
|
f@0
|
5627 // Get the buffer size ... might be different from what we specified.
|
f@0
|
5628 DSBCAPS dsbcaps;
|
f@0
|
5629 dsbcaps.dwSize = sizeof( DSBCAPS );
|
f@0
|
5630 result = buffer->GetCaps( &dsbcaps );
|
f@0
|
5631 if ( FAILED( result ) ) {
|
f@0
|
5632 output->Release();
|
f@0
|
5633 buffer->Release();
|
f@0
|
5634 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
|
f@0
|
5635 errorText_ = errorStream_.str();
|
f@0
|
5636 return FAILURE;
|
f@0
|
5637 }
|
f@0
|
5638
|
f@0
|
5639 dsBufferSize = dsbcaps.dwBufferBytes;
|
f@0
|
5640
|
f@0
|
5641 // Lock the DS buffer
|
f@0
|
5642 LPVOID audioPtr;
|
f@0
|
5643 DWORD dataLen;
|
f@0
|
5644 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
|
f@0
|
5645 if ( FAILED( result ) ) {
|
f@0
|
5646 output->Release();
|
f@0
|
5647 buffer->Release();
|
f@0
|
5648 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5649 errorText_ = errorStream_.str();
|
f@0
|
5650 return FAILURE;
|
f@0
|
5651 }
|
f@0
|
5652
|
f@0
|
5653 // Zero the DS buffer
|
f@0
|
5654 ZeroMemory( audioPtr, dataLen );
|
f@0
|
5655
|
f@0
|
5656 // Unlock the DS buffer
|
f@0
|
5657 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
|
f@0
|
5658 if ( FAILED( result ) ) {
|
f@0
|
5659 output->Release();
|
f@0
|
5660 buffer->Release();
|
f@0
|
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5662 errorText_ = errorStream_.str();
|
f@0
|
5663 return FAILURE;
|
f@0
|
5664 }
|
f@0
|
5665
|
f@0
|
5666 ohandle = (void *) output;
|
f@0
|
5667 bhandle = (void *) buffer;
|
f@0
|
5668 }
|
f@0
|
5669
|
f@0
|
5670 if ( mode == INPUT ) {
|
f@0
|
5671
|
f@0
|
5672 LPDIRECTSOUNDCAPTURE input;
|
f@0
|
5673 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
|
f@0
|
5674 if ( FAILED( result ) ) {
|
f@0
|
5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
|
f@0
|
5676 errorText_ = errorStream_.str();
|
f@0
|
5677 return FAILURE;
|
f@0
|
5678 }
|
f@0
|
5679
|
f@0
|
5680 DSCCAPS inCaps;
|
f@0
|
5681 inCaps.dwSize = sizeof( inCaps );
|
f@0
|
5682 result = input->GetCaps( &inCaps );
|
f@0
|
5683 if ( FAILED( result ) ) {
|
f@0
|
5684 input->Release();
|
f@0
|
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
|
f@0
|
5686 errorText_ = errorStream_.str();
|
f@0
|
5687 return FAILURE;
|
f@0
|
5688 }
|
f@0
|
5689
|
f@0
|
5690 // Check channel information.
|
f@0
|
5691 if ( inCaps.dwChannels < channels + firstChannel ) {
|
f@0
|
5692 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
|
f@0
|
5693 return FAILURE;
|
f@0
|
5694 }
|
f@0
|
5695
|
f@0
|
5696 // Check format information. Use 16-bit format unless user
|
f@0
|
5697 // requests 8-bit.
|
f@0
|
5698 DWORD deviceFormats;
|
f@0
|
5699 if ( channels + firstChannel == 2 ) {
|
f@0
|
5700 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
|
f@0
|
5701 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
|
f@0
|
5702 waveFormat.wBitsPerSample = 8;
|
f@0
|
5703 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
5704 }
|
f@0
|
5705 else { // assume 16-bit is supported
|
f@0
|
5706 waveFormat.wBitsPerSample = 16;
|
f@0
|
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
5708 }
|
f@0
|
5709 }
|
f@0
|
5710 else { // channel == 1
|
f@0
|
5711 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
|
f@0
|
5712 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
|
f@0
|
5713 waveFormat.wBitsPerSample = 8;
|
f@0
|
5714 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
5715 }
|
f@0
|
5716 else { // assume 16-bit is supported
|
f@0
|
5717 waveFormat.wBitsPerSample = 16;
|
f@0
|
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
5719 }
|
f@0
|
5720 }
|
f@0
|
5721 stream_.userFormat = format;
|
f@0
|
5722
|
f@0
|
5723 // Update wave format structure and buffer information.
|
f@0
|
5724 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
|
f@0
|
5725 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
|
f@0
|
5726 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
|
f@0
|
5727
|
f@0
|
5728 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
|
f@0
|
5729 while ( dsPointerLeadTime * 2U > dsBufferSize )
|
f@0
|
5730 dsBufferSize *= 2;
|
f@0
|
5731
|
f@0
|
5732 // Setup the secondary DS buffer description.
|
f@0
|
5733 DSCBUFFERDESC bufferDescription;
|
f@0
|
5734 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
|
f@0
|
5735 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
|
f@0
|
5736 bufferDescription.dwFlags = 0;
|
f@0
|
5737 bufferDescription.dwReserved = 0;
|
f@0
|
5738 bufferDescription.dwBufferBytes = dsBufferSize;
|
f@0
|
5739 bufferDescription.lpwfxFormat = &waveFormat;
|
f@0
|
5740
|
f@0
|
5741 // Create the capture buffer.
|
f@0
|
5742 LPDIRECTSOUNDCAPTUREBUFFER buffer;
|
f@0
|
5743 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
|
f@0
|
5744 if ( FAILED( result ) ) {
|
f@0
|
5745 input->Release();
|
f@0
|
5746 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5747 errorText_ = errorStream_.str();
|
f@0
|
5748 return FAILURE;
|
f@0
|
5749 }
|
f@0
|
5750
|
f@0
|
5751 // Get the buffer size ... might be different from what we specified.
|
f@0
|
5752 DSCBCAPS dscbcaps;
|
f@0
|
5753 dscbcaps.dwSize = sizeof( DSCBCAPS );
|
f@0
|
5754 result = buffer->GetCaps( &dscbcaps );
|
f@0
|
5755 if ( FAILED( result ) ) {
|
f@0
|
5756 input->Release();
|
f@0
|
5757 buffer->Release();
|
f@0
|
5758 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
|
f@0
|
5759 errorText_ = errorStream_.str();
|
f@0
|
5760 return FAILURE;
|
f@0
|
5761 }
|
f@0
|
5762
|
f@0
|
5763 dsBufferSize = dscbcaps.dwBufferBytes;
|
f@0
|
5764
|
f@0
|
5765 // NOTE: We could have a problem here if this is a duplex stream
|
f@0
|
5766 // and the play and capture hardware buffer sizes are different
|
f@0
|
5767 // (I'm actually not sure if that is a problem or not).
|
f@0
|
5768 // Currently, we are not verifying that.
|
f@0
|
5769
|
f@0
|
5770 // Lock the capture buffer
|
f@0
|
5771 LPVOID audioPtr;
|
f@0
|
5772 DWORD dataLen;
|
f@0
|
5773 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
|
f@0
|
5774 if ( FAILED( result ) ) {
|
f@0
|
5775 input->Release();
|
f@0
|
5776 buffer->Release();
|
f@0
|
5777 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5778 errorText_ = errorStream_.str();
|
f@0
|
5779 return FAILURE;
|
f@0
|
5780 }
|
f@0
|
5781
|
f@0
|
5782 // Zero the buffer
|
f@0
|
5783 ZeroMemory( audioPtr, dataLen );
|
f@0
|
5784
|
f@0
|
5785 // Unlock the buffer
|
f@0
|
5786 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
|
f@0
|
5787 if ( FAILED( result ) ) {
|
f@0
|
5788 input->Release();
|
f@0
|
5789 buffer->Release();
|
f@0
|
5790 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
|
f@0
|
5791 errorText_ = errorStream_.str();
|
f@0
|
5792 return FAILURE;
|
f@0
|
5793 }
|
f@0
|
5794
|
f@0
|
5795 ohandle = (void *) input;
|
f@0
|
5796 bhandle = (void *) buffer;
|
f@0
|
5797 }
|
f@0
|
5798
|
f@0
|
5799 // Set various stream parameters
|
f@0
|
5800 DsHandle *handle = 0;
|
f@0
|
5801 stream_.nDeviceChannels[mode] = channels + firstChannel;
|
f@0
|
5802 stream_.nUserChannels[mode] = channels;
|
f@0
|
5803 stream_.bufferSize = *bufferSize;
|
f@0
|
5804 stream_.channelOffset[mode] = firstChannel;
|
f@0
|
5805 stream_.deviceInterleaved[mode] = true;
|
f@0
|
5806 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
|
f@0
|
5807 else stream_.userInterleaved = true;
|
f@0
|
5808
|
f@0
|
5809 // Set flag for buffer conversion
|
f@0
|
5810 stream_.doConvertBuffer[mode] = false;
|
f@0
|
5811 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
|
f@0
|
5812 stream_.doConvertBuffer[mode] = true;
|
f@0
|
5813 if (stream_.userFormat != stream_.deviceFormat[mode])
|
f@0
|
5814 stream_.doConvertBuffer[mode] = true;
|
f@0
|
5815 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
5816 stream_.nUserChannels[mode] > 1 )
|
f@0
|
5817 stream_.doConvertBuffer[mode] = true;
|
f@0
|
5818
|
f@0
|
5819 // Allocate necessary internal buffers
|
f@0
|
5820 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
5821 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
5822 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
5823 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
5824 goto error;
|
f@0
|
5825 }
|
f@0
|
5826
|
f@0
|
5827 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
5828
|
f@0
|
5829 bool makeBuffer = true;
|
f@0
|
5830 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
5831 if ( mode == INPUT ) {
|
f@0
|
5832 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
5833 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
5834 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
|
f@0
|
5835 }
|
f@0
|
5836 }
|
f@0
|
5837
|
f@0
|
5838 if ( makeBuffer ) {
|
f@0
|
5839 bufferBytes *= *bufferSize;
|
f@0
|
5840 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
5841 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
5842 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
5843 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
5844 goto error;
|
f@0
|
5845 }
|
f@0
|
5846 }
|
f@0
|
5847 }
|
f@0
|
5848
|
f@0
|
5849 // Allocate our DsHandle structures for the stream.
|
f@0
|
5850 if ( stream_.apiHandle == 0 ) {
|
f@0
|
5851 try {
|
f@0
|
5852 handle = new DsHandle;
|
f@0
|
5853 }
|
f@0
|
5854 catch ( std::bad_alloc& ) {
|
f@0
|
5855 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
|
f@0
|
5856 goto error;
|
f@0
|
5857 }
|
f@0
|
5858
|
f@0
|
5859 // Create a manual-reset event.
|
f@0
|
5860 handle->condition = CreateEvent( NULL, // no security
|
f@0
|
5861 TRUE, // manual-reset
|
f@0
|
5862 FALSE, // non-signaled initially
|
f@0
|
5863 NULL ); // unnamed
|
f@0
|
5864 stream_.apiHandle = (void *) handle;
|
f@0
|
5865 }
|
f@0
|
5866 else
|
f@0
|
5867 handle = (DsHandle *) stream_.apiHandle;
|
f@0
|
5868 handle->id[mode] = ohandle;
|
f@0
|
5869 handle->buffer[mode] = bhandle;
|
f@0
|
5870 handle->dsBufferSize[mode] = dsBufferSize;
|
f@0
|
5871 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
|
f@0
|
5872
|
f@0
|
5873 stream_.device[mode] = device;
|
f@0
|
5874 stream_.state = STREAM_STOPPED;
|
f@0
|
5875 if ( stream_.mode == OUTPUT && mode == INPUT )
|
f@0
|
5876 // We had already set up an output stream.
|
f@0
|
5877 stream_.mode = DUPLEX;
|
f@0
|
5878 else
|
f@0
|
5879 stream_.mode = mode;
|
f@0
|
5880 stream_.nBuffers = nBuffers;
|
f@0
|
5881 stream_.sampleRate = sampleRate;
|
f@0
|
5882
|
f@0
|
5883 // Setup the buffer conversion information structure.
|
f@0
|
5884 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
|
f@0
|
5885
|
f@0
|
5886 // Setup the callback thread.
|
f@0
|
5887 if ( stream_.callbackInfo.isRunning == false ) {
|
f@0
|
5888 unsigned threadId;
|
f@0
|
5889 stream_.callbackInfo.isRunning = true;
|
f@0
|
5890 stream_.callbackInfo.object = (void *) this;
|
f@0
|
5891 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
|
f@0
|
5892 &stream_.callbackInfo, 0, &threadId );
|
f@0
|
5893 if ( stream_.callbackInfo.thread == 0 ) {
|
f@0
|
5894 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
|
f@0
|
5895 goto error;
|
f@0
|
5896 }
|
f@0
|
5897
|
f@0
|
5898 // Boost DS thread priority
|
f@0
|
5899 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
|
f@0
|
5900 }
|
f@0
|
5901 return SUCCESS;
|
f@0
|
5902
|
f@0
|
5903 error:
|
f@0
|
5904 if ( handle ) {
|
f@0
|
5905 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
|
f@0
|
5906 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
|
f@0
|
5907 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
5908 if ( buffer ) buffer->Release();
|
f@0
|
5909 object->Release();
|
f@0
|
5910 }
|
f@0
|
5911 if ( handle->buffer[1] ) {
|
f@0
|
5912 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
|
f@0
|
5913 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
|
f@0
|
5914 if ( buffer ) buffer->Release();
|
f@0
|
5915 object->Release();
|
f@0
|
5916 }
|
f@0
|
5917 CloseHandle( handle->condition );
|
f@0
|
5918 delete handle;
|
f@0
|
5919 stream_.apiHandle = 0;
|
f@0
|
5920 }
|
f@0
|
5921
|
f@0
|
5922 for ( int i=0; i<2; i++ ) {
|
f@0
|
5923 if ( stream_.userBuffer[i] ) {
|
f@0
|
5924 free( stream_.userBuffer[i] );
|
f@0
|
5925 stream_.userBuffer[i] = 0;
|
f@0
|
5926 }
|
f@0
|
5927 }
|
f@0
|
5928
|
f@0
|
5929 if ( stream_.deviceBuffer ) {
|
f@0
|
5930 free( stream_.deviceBuffer );
|
f@0
|
5931 stream_.deviceBuffer = 0;
|
f@0
|
5932 }
|
f@0
|
5933
|
f@0
|
5934 stream_.state = STREAM_CLOSED;
|
f@0
|
5935 return FAILURE;
|
f@0
|
5936 }
|
f@0
|
5937
|
f@0
|
5938 void RtApiDs :: closeStream()
|
f@0
|
5939 {
|
f@0
|
5940 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
5941 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
|
f@0
|
5942 error( RtAudioError::WARNING );
|
f@0
|
5943 return;
|
f@0
|
5944 }
|
f@0
|
5945
|
f@0
|
5946 // Stop the callback thread.
|
f@0
|
5947 stream_.callbackInfo.isRunning = false;
|
f@0
|
5948 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
|
f@0
|
5949 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
|
f@0
|
5950
|
f@0
|
5951 DsHandle *handle = (DsHandle *) stream_.apiHandle;
|
f@0
|
5952 if ( handle ) {
|
f@0
|
5953 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
|
f@0
|
5954 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
|
f@0
|
5955 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
5956 if ( buffer ) {
|
f@0
|
5957 buffer->Stop();
|
f@0
|
5958 buffer->Release();
|
f@0
|
5959 }
|
f@0
|
5960 object->Release();
|
f@0
|
5961 }
|
f@0
|
5962 if ( handle->buffer[1] ) {
|
f@0
|
5963 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
|
f@0
|
5964 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
|
f@0
|
5965 if ( buffer ) {
|
f@0
|
5966 buffer->Stop();
|
f@0
|
5967 buffer->Release();
|
f@0
|
5968 }
|
f@0
|
5969 object->Release();
|
f@0
|
5970 }
|
f@0
|
5971 CloseHandle( handle->condition );
|
f@0
|
5972 delete handle;
|
f@0
|
5973 stream_.apiHandle = 0;
|
f@0
|
5974 }
|
f@0
|
5975
|
f@0
|
5976 for ( int i=0; i<2; i++ ) {
|
f@0
|
5977 if ( stream_.userBuffer[i] ) {
|
f@0
|
5978 free( stream_.userBuffer[i] );
|
f@0
|
5979 stream_.userBuffer[i] = 0;
|
f@0
|
5980 }
|
f@0
|
5981 }
|
f@0
|
5982
|
f@0
|
5983 if ( stream_.deviceBuffer ) {
|
f@0
|
5984 free( stream_.deviceBuffer );
|
f@0
|
5985 stream_.deviceBuffer = 0;
|
f@0
|
5986 }
|
f@0
|
5987
|
f@0
|
5988 stream_.mode = UNINITIALIZED;
|
f@0
|
5989 stream_.state = STREAM_CLOSED;
|
f@0
|
5990 }
|
f@0
|
5991
|
f@0
|
5992 void RtApiDs :: startStream()
|
f@0
|
5993 {
|
f@0
|
5994 verifyStream();
|
f@0
|
5995 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
5996 errorText_ = "RtApiDs::startStream(): the stream is already running!";
|
f@0
|
5997 error( RtAudioError::WARNING );
|
f@0
|
5998 return;
|
f@0
|
5999 }
|
f@0
|
6000
|
f@0
|
6001 DsHandle *handle = (DsHandle *) stream_.apiHandle;
|
f@0
|
6002
|
f@0
|
6003 // Increase scheduler frequency on lesser windows (a side-effect of
|
f@0
|
6004 // increasing timer accuracy). On greater windows (Win2K or later),
|
f@0
|
6005 // this is already in effect.
|
f@0
|
6006 timeBeginPeriod( 1 );
|
f@0
|
6007
|
f@0
|
6008 buffersRolling = false;
|
f@0
|
6009 duplexPrerollBytes = 0;
|
f@0
|
6010
|
f@0
|
6011 if ( stream_.mode == DUPLEX ) {
|
f@0
|
6012 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
|
f@0
|
6013 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
|
f@0
|
6014 }
|
f@0
|
6015
|
f@0
|
6016 HRESULT result = 0;
|
f@0
|
6017 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
6018
|
f@0
|
6019 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
6020 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
|
f@0
|
6021 if ( FAILED( result ) ) {
|
f@0
|
6022 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
|
f@0
|
6023 errorText_ = errorStream_.str();
|
f@0
|
6024 goto unlock;
|
f@0
|
6025 }
|
f@0
|
6026 }
|
f@0
|
6027
|
f@0
|
6028 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
|
f@0
|
6029
|
f@0
|
6030 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
|
f@0
|
6031 result = buffer->Start( DSCBSTART_LOOPING );
|
f@0
|
6032 if ( FAILED( result ) ) {
|
f@0
|
6033 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
|
f@0
|
6034 errorText_ = errorStream_.str();
|
f@0
|
6035 goto unlock;
|
f@0
|
6036 }
|
f@0
|
6037 }
|
f@0
|
6038
|
f@0
|
6039 handle->drainCounter = 0;
|
f@0
|
6040 handle->internalDrain = false;
|
f@0
|
6041 ResetEvent( handle->condition );
|
f@0
|
6042 stream_.state = STREAM_RUNNING;
|
f@0
|
6043
|
f@0
|
6044 unlock:
|
f@0
|
6045 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6046 }
|
f@0
|
6047
|
f@0
|
// Stop a running DirectSound stream.  For playback, first waits on
// handle->condition for the callback thread to drain pending output
// (unless a drain is already in progress), then stops and zeros each
// active DirectSound buffer so a later restart does not replay stale
// audio.  Reverts the timer-resolution boost taken by startStream()
// and emits SYSTEM_ERROR through error() if any DirectSound call fails.
void RtApiDs :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  HRESULT result = 0;
  LPVOID audioPtr;
  DWORD dataLen;
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter == 0 means no drain was requested yet: ask the
    // callback to drain (2 buffers of silence) and block until it
    // signals completion via the condition event.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
    }

    stream_.state = STREAM_STOPPED;

    MUTEX_LOCK( &stream_.mutex );

    // Stop the buffer and clear memory
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start playing again, we must begin at beginning of buffer.
    handle->bufferPointer[0] = 0;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    audioPtr = NULL;
    dataLen = 0;

    stream_.state = STREAM_STOPPED;

    // In DUPLEX mode the mutex was already acquired in the output
    // branch above; lock it here only for INPUT-only streams.
    if ( stream_.mode != DUPLEX )
      MUTEX_LOCK( &stream_.mutex );

    result = buffer->Stop();
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Lock the buffer and clear it so that if we start to play again,
    // we won't have old data playing.
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // Zero the DS buffer
    ZeroMemory( audioPtr, dataLen );

    // Unlock the DS buffer
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }

    // If we start recording again, we must begin at beginning of buffer.
    handle->bufferPointer[1] = 0;
  }

 unlock:
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
  MUTEX_UNLOCK( &stream_.mutex );

  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
6151
|
f@0
|
6152 void RtApiDs :: abortStream()
|
f@0
|
6153 {
|
f@0
|
6154 verifyStream();
|
f@0
|
6155 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
6156 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
|
f@0
|
6157 error( RtAudioError::WARNING );
|
f@0
|
6158 return;
|
f@0
|
6159 }
|
f@0
|
6160
|
f@0
|
6161 DsHandle *handle = (DsHandle *) stream_.apiHandle;
|
f@0
|
6162 handle->drainCounter = 2;
|
f@0
|
6163
|
f@0
|
6164 stopStream();
|
f@0
|
6165 }
|
f@0
|
6166
|
f@0
|
6167 void RtApiDs :: callbackEvent()
|
f@0
|
6168 {
|
f@0
|
6169 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
|
f@0
|
6170 Sleep( 50 ); // sleep 50 milliseconds
|
f@0
|
6171 return;
|
f@0
|
6172 }
|
f@0
|
6173
|
f@0
|
6174 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
6175 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
|
f@0
|
6176 error( RtAudioError::WARNING );
|
f@0
|
6177 return;
|
f@0
|
6178 }
|
f@0
|
6179
|
f@0
|
6180 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
|
f@0
|
6181 DsHandle *handle = (DsHandle *) stream_.apiHandle;
|
f@0
|
6182
|
f@0
|
6183 // Check if we were draining the stream and signal is finished.
|
f@0
|
6184 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
|
f@0
|
6185
|
f@0
|
6186 stream_.state = STREAM_STOPPING;
|
f@0
|
6187 if ( handle->internalDrain == false )
|
f@0
|
6188 SetEvent( handle->condition );
|
f@0
|
6189 else
|
f@0
|
6190 stopStream();
|
f@0
|
6191 return;
|
f@0
|
6192 }
|
f@0
|
6193
|
f@0
|
6194 // Invoke user callback to get fresh output data UNLESS we are
|
f@0
|
6195 // draining stream.
|
f@0
|
6196 if ( handle->drainCounter == 0 ) {
|
f@0
|
6197 RtAudioCallback callback = (RtAudioCallback) info->callback;
|
f@0
|
6198 double streamTime = getStreamTime();
|
f@0
|
6199 RtAudioStreamStatus status = 0;
|
f@0
|
6200 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
|
f@0
|
6201 status |= RTAUDIO_OUTPUT_UNDERFLOW;
|
f@0
|
6202 handle->xrun[0] = false;
|
f@0
|
6203 }
|
f@0
|
6204 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
|
f@0
|
6205 status |= RTAUDIO_INPUT_OVERFLOW;
|
f@0
|
6206 handle->xrun[1] = false;
|
f@0
|
6207 }
|
f@0
|
6208 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
|
f@0
|
6209 stream_.bufferSize, streamTime, status, info->userData );
|
f@0
|
6210 if ( cbReturnValue == 2 ) {
|
f@0
|
6211 stream_.state = STREAM_STOPPING;
|
f@0
|
6212 handle->drainCounter = 2;
|
f@0
|
6213 abortStream();
|
f@0
|
6214 return;
|
f@0
|
6215 }
|
f@0
|
6216 else if ( cbReturnValue == 1 ) {
|
f@0
|
6217 handle->drainCounter = 1;
|
f@0
|
6218 handle->internalDrain = true;
|
f@0
|
6219 }
|
f@0
|
6220 }
|
f@0
|
6221
|
f@0
|
6222 HRESULT result;
|
f@0
|
6223 DWORD currentWritePointer, safeWritePointer;
|
f@0
|
6224 DWORD currentReadPointer, safeReadPointer;
|
f@0
|
6225 UINT nextWritePointer;
|
f@0
|
6226
|
f@0
|
6227 LPVOID buffer1 = NULL;
|
f@0
|
6228 LPVOID buffer2 = NULL;
|
f@0
|
6229 DWORD bufferSize1 = 0;
|
f@0
|
6230 DWORD bufferSize2 = 0;
|
f@0
|
6231
|
f@0
|
6232 char *buffer;
|
f@0
|
6233 long bufferBytes;
|
f@0
|
6234
|
f@0
|
6235 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
6236 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
6237 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
6238 return;
|
f@0
|
6239 }
|
f@0
|
6240
|
f@0
|
6241 if ( buffersRolling == false ) {
|
f@0
|
6242 if ( stream_.mode == DUPLEX ) {
|
f@0
|
6243 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
|
f@0
|
6244
|
f@0
|
6245 // It takes a while for the devices to get rolling. As a result,
|
f@0
|
6246 // there's no guarantee that the capture and write device pointers
|
f@0
|
6247 // will move in lockstep. Wait here for both devices to start
|
f@0
|
6248 // rolling, and then set our buffer pointers accordingly.
|
f@0
|
6249 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
|
f@0
|
6250 // bytes later than the write buffer.
|
f@0
|
6251
|
f@0
|
6252 // Stub: a serious risk of having a pre-emptive scheduling round
|
f@0
|
6253 // take place between the two GetCurrentPosition calls... but I'm
|
f@0
|
6254 // really not sure how to solve the problem. Temporarily boost to
|
f@0
|
6255 // Realtime priority, maybe; but I'm not sure what priority the
|
f@0
|
6256 // DirectSound service threads run at. We *should* be roughly
|
f@0
|
6257 // within a ms or so of correct.
|
f@0
|
6258
|
f@0
|
6259 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
6260 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
|
f@0
|
6261
|
f@0
|
6262 DWORD startSafeWritePointer, startSafeReadPointer;
|
f@0
|
6263
|
f@0
|
6264 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
|
f@0
|
6265 if ( FAILED( result ) ) {
|
f@0
|
6266 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
|
f@0
|
6267 errorText_ = errorStream_.str();
|
f@0
|
6268 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6269 return;
|
f@0
|
6270 }
|
f@0
|
6271 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
|
f@0
|
6272 if ( FAILED( result ) ) {
|
f@0
|
6273 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
|
f@0
|
6274 errorText_ = errorStream_.str();
|
f@0
|
6275 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6276 return;
|
f@0
|
6277 }
|
f@0
|
6278 while ( true ) {
|
f@0
|
6279 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
|
f@0
|
6280 if ( FAILED( result ) ) {
|
f@0
|
6281 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
|
f@0
|
6282 errorText_ = errorStream_.str();
|
f@0
|
6283 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6284 return;
|
f@0
|
6285 }
|
f@0
|
6286 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
|
f@0
|
6287 if ( FAILED( result ) ) {
|
f@0
|
6288 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
|
f@0
|
6289 errorText_ = errorStream_.str();
|
f@0
|
6290 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6291 return;
|
f@0
|
6292 }
|
f@0
|
6293 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
|
f@0
|
6294 Sleep( 1 );
|
f@0
|
6295 }
|
f@0
|
6296
|
f@0
|
6297 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
|
f@0
|
6298
|
f@0
|
6299 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
|
f@0
|
6300 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
|
f@0
|
6301 handle->bufferPointer[1] = safeReadPointer;
|
f@0
|
6302 }
|
f@0
|
6303 else if ( stream_.mode == OUTPUT ) {
|
f@0
|
6304
|
f@0
|
6305 // Set the proper nextWritePosition after initial startup.
|
f@0
|
6306 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
6307 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
|
f@0
|
6308 if ( FAILED( result ) ) {
|
f@0
|
6309 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
|
f@0
|
6310 errorText_ = errorStream_.str();
|
f@0
|
6311 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6312 return;
|
f@0
|
6313 }
|
f@0
|
6314 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
|
f@0
|
6315 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
|
f@0
|
6316 }
|
f@0
|
6317
|
f@0
|
6318 buffersRolling = true;
|
f@0
|
6319 }
|
f@0
|
6320
|
f@0
|
6321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
6322
|
f@0
|
6323 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
|
f@0
|
6324
|
f@0
|
6325 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
|
f@0
|
6326 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
|
f@0
|
6327 bufferBytes *= formatBytes( stream_.userFormat );
|
f@0
|
6328 memset( stream_.userBuffer[0], 0, bufferBytes );
|
f@0
|
6329 }
|
f@0
|
6330
|
f@0
|
6331 // Setup parameters and do buffer conversion if necessary.
|
f@0
|
6332 if ( stream_.doConvertBuffer[0] ) {
|
f@0
|
6333 buffer = stream_.deviceBuffer;
|
f@0
|
6334 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
|
f@0
|
6335 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
|
f@0
|
6336 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
|
f@0
|
6337 }
|
f@0
|
6338 else {
|
f@0
|
6339 buffer = stream_.userBuffer[0];
|
f@0
|
6340 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
|
f@0
|
6341 bufferBytes *= formatBytes( stream_.userFormat );
|
f@0
|
6342 }
|
f@0
|
6343
|
f@0
|
6344 // No byte swapping necessary in DirectSound implementation.
|
f@0
|
6345
|
f@0
|
6346 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
|
f@0
|
6347 // unsigned. So, we need to convert our signed 8-bit data here to
|
f@0
|
6348 // unsigned.
|
f@0
|
6349 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
|
f@0
|
6350 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
|
f@0
|
6351
|
f@0
|
6352 DWORD dsBufferSize = handle->dsBufferSize[0];
|
f@0
|
6353 nextWritePointer = handle->bufferPointer[0];
|
f@0
|
6354
|
f@0
|
6355 DWORD endWrite, leadPointer;
|
f@0
|
6356 while ( true ) {
|
f@0
|
6357 // Find out where the read and "safe write" pointers are.
|
f@0
|
6358 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
|
f@0
|
6359 if ( FAILED( result ) ) {
|
f@0
|
6360 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
|
f@0
|
6361 errorText_ = errorStream_.str();
|
f@0
|
6362 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6363 return;
|
f@0
|
6364 }
|
f@0
|
6365
|
f@0
|
6366 // We will copy our output buffer into the region between
|
f@0
|
6367 // safeWritePointer and leadPointer. If leadPointer is not
|
f@0
|
6368 // beyond the next endWrite position, wait until it is.
|
f@0
|
6369 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
|
f@0
|
6370 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
|
f@0
|
6371 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
|
f@0
|
6372 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
|
f@0
|
6373 endWrite = nextWritePointer + bufferBytes;
|
f@0
|
6374
|
f@0
|
6375 // Check whether the entire write region is behind the play pointer.
|
f@0
|
6376 if ( leadPointer >= endWrite ) break;
|
f@0
|
6377
|
f@0
|
6378 // If we are here, then we must wait until the leadPointer advances
|
f@0
|
6379 // beyond the end of our next write region. We use the
|
f@0
|
6380 // Sleep() function to suspend operation until that happens.
|
f@0
|
6381 double millis = ( endWrite - leadPointer ) * 1000.0;
|
f@0
|
6382 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
|
f@0
|
6383 if ( millis < 1.0 ) millis = 1.0;
|
f@0
|
6384 Sleep( (DWORD) millis );
|
f@0
|
6385 }
|
f@0
|
6386
|
f@0
|
6387 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
|
f@0
|
6388 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
|
f@0
|
6389 // We've strayed into the forbidden zone ... resync the read pointer.
|
f@0
|
6390 handle->xrun[0] = true;
|
f@0
|
6391 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
|
f@0
|
6392 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
|
f@0
|
6393 handle->bufferPointer[0] = nextWritePointer;
|
f@0
|
6394 endWrite = nextWritePointer + bufferBytes;
|
f@0
|
6395 }
|
f@0
|
6396
|
f@0
|
6397 // Lock free space in the buffer
|
f@0
|
6398 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
|
f@0
|
6399 &bufferSize1, &buffer2, &bufferSize2, 0 );
|
f@0
|
6400 if ( FAILED( result ) ) {
|
f@0
|
6401 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
|
f@0
|
6402 errorText_ = errorStream_.str();
|
f@0
|
6403 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6404 return;
|
f@0
|
6405 }
|
f@0
|
6406
|
f@0
|
6407 // Copy our buffer into the DS buffer
|
f@0
|
6408 CopyMemory( buffer1, buffer, bufferSize1 );
|
f@0
|
6409 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
|
f@0
|
6410
|
f@0
|
6411 // Update our buffer offset and unlock sound buffer
|
f@0
|
6412 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
|
f@0
|
6413 if ( FAILED( result ) ) {
|
f@0
|
6414 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
|
f@0
|
6415 errorText_ = errorStream_.str();
|
f@0
|
6416 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6417 return;
|
f@0
|
6418 }
|
f@0
|
6419 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
|
f@0
|
6420 handle->bufferPointer[0] = nextWritePointer;
|
f@0
|
6421 }
|
f@0
|
6422
|
f@0
|
6423 // Don't bother draining input
|
f@0
|
6424 if ( handle->drainCounter ) {
|
f@0
|
6425 handle->drainCounter++;
|
f@0
|
6426 goto unlock;
|
f@0
|
6427 }
|
f@0
|
6428
|
f@0
|
6429 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
|
f@0
|
6430
|
f@0
|
6431 // Setup parameters.
|
f@0
|
6432 if ( stream_.doConvertBuffer[1] ) {
|
f@0
|
6433 buffer = stream_.deviceBuffer;
|
f@0
|
6434 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
|
f@0
|
6435 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
|
f@0
|
6436 }
|
f@0
|
6437 else {
|
f@0
|
6438 buffer = stream_.userBuffer[1];
|
f@0
|
6439 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
|
f@0
|
6440 bufferBytes *= formatBytes( stream_.userFormat );
|
f@0
|
6441 }
|
f@0
|
6442
|
f@0
|
6443 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
|
f@0
|
6444 long nextReadPointer = handle->bufferPointer[1];
|
f@0
|
6445 DWORD dsBufferSize = handle->dsBufferSize[1];
|
f@0
|
6446
|
f@0
|
6447 // Find out where the write and "safe read" pointers are.
|
f@0
|
6448     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
|
f@0
|
6449 if ( FAILED( result ) ) {
|
f@0
|
6450 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
|
f@0
|
6451 errorText_ = errorStream_.str();
|
f@0
|
6452 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6453 return;
|
f@0
|
6454 }
|
f@0
|
6455
|
f@0
|
6456 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
|
f@0
|
6457 DWORD endRead = nextReadPointer + bufferBytes;
|
f@0
|
6458
|
f@0
|
6459 // Handling depends on whether we are INPUT or DUPLEX.
|
f@0
|
6460 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
|
f@0
|
6461 // then a wait here will drag the write pointers into the forbidden zone.
|
f@0
|
6462 //
|
f@0
|
6463 // In DUPLEX mode, rather than wait, we will back off the read pointer until
|
f@0
|
6464 // it's in a safe position. This causes dropouts, but it seems to be the only
|
f@0
|
6465 // practical way to sync up the read and write pointers reliably, given the
|
f@0
|
6466 // the very complex relationship between phase and increment of the read and write
|
f@0
|
6467 // pointers.
|
f@0
|
6468 //
|
f@0
|
6469 // In order to minimize audible dropouts in DUPLEX mode, we will
|
f@0
|
6470 // provide a pre-roll period of 0.5 seconds in which we return
|
f@0
|
6471 // zeros from the read buffer while the pointers sync up.
|
f@0
|
6472
|
f@0
|
6473 if ( stream_.mode == DUPLEX ) {
|
f@0
|
6474 if ( safeReadPointer < endRead ) {
|
f@0
|
6475 if ( duplexPrerollBytes <= 0 ) {
|
f@0
|
6476 // Pre-roll time over. Be more agressive.
|
f@0
|
6477 int adjustment = endRead-safeReadPointer;
|
f@0
|
6478
|
f@0
|
6479 handle->xrun[1] = true;
|
f@0
|
6480 // Two cases:
|
f@0
|
6481 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
|
f@0
|
6482 // and perform fine adjustments later.
|
f@0
|
6483 // - small adjustments: back off by twice as much.
|
f@0
|
6484 if ( adjustment >= 2*bufferBytes )
|
f@0
|
6485 nextReadPointer = safeReadPointer-2*bufferBytes;
|
f@0
|
6486 else
|
f@0
|
6487 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
|
f@0
|
6488
|
f@0
|
6489 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
|
f@0
|
6490
|
f@0
|
6491 }
|
f@0
|
6492 else {
|
f@0
|
6493         // In pre-roll time. Just do it.
|
f@0
|
6494 nextReadPointer = safeReadPointer - bufferBytes;
|
f@0
|
6495 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
|
f@0
|
6496 }
|
f@0
|
6497 endRead = nextReadPointer + bufferBytes;
|
f@0
|
6498 }
|
f@0
|
6499 }
|
f@0
|
6500 else { // mode == INPUT
|
f@0
|
6501 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
|
f@0
|
6502 // See comments for playback.
|
f@0
|
6503 double millis = (endRead - safeReadPointer) * 1000.0;
|
f@0
|
6504 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
|
f@0
|
6505 if ( millis < 1.0 ) millis = 1.0;
|
f@0
|
6506 Sleep( (DWORD) millis );
|
f@0
|
6507
|
f@0
|
6508 // Wake up and find out where we are now.
|
f@0
|
6509         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
|
f@0
|
6510 if ( FAILED( result ) ) {
|
f@0
|
6511 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
|
f@0
|
6512 errorText_ = errorStream_.str();
|
f@0
|
6513 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6514 return;
|
f@0
|
6515 }
|
f@0
|
6516
|
f@0
|
6517 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
|
f@0
|
6518 }
|
f@0
|
6519 }
|
f@0
|
6520
|
f@0
|
6521 // Lock free space in the buffer
|
f@0
|
6522 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
|
f@0
|
6523 &bufferSize1, &buffer2, &bufferSize2, 0 );
|
f@0
|
6524 if ( FAILED( result ) ) {
|
f@0
|
6525 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
|
f@0
|
6526 errorText_ = errorStream_.str();
|
f@0
|
6527 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6528 return;
|
f@0
|
6529 }
|
f@0
|
6530
|
f@0
|
6531 if ( duplexPrerollBytes <= 0 ) {
|
f@0
|
6532 // Copy our buffer into the DS buffer
|
f@0
|
6533 CopyMemory( buffer, buffer1, bufferSize1 );
|
f@0
|
6534 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
|
f@0
|
6535 }
|
f@0
|
6536 else {
|
f@0
|
6537 memset( buffer, 0, bufferSize1 );
|
f@0
|
6538 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
|
f@0
|
6539 duplexPrerollBytes -= bufferSize1 + bufferSize2;
|
f@0
|
6540 }
|
f@0
|
6541
|
f@0
|
6542 // Update our buffer offset and unlock sound buffer
|
f@0
|
6543 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
|
f@0
|
6544 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
|
f@0
|
6545 if ( FAILED( result ) ) {
|
f@0
|
6546 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
|
f@0
|
6547 errorText_ = errorStream_.str();
|
f@0
|
6548 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
6549 return;
|
f@0
|
6550 }
|
f@0
|
6551 handle->bufferPointer[1] = nextReadPointer;
|
f@0
|
6552
|
f@0
|
6553 // No byte swapping necessary in DirectSound implementation.
|
f@0
|
6554
|
f@0
|
6555 // If necessary, convert 8-bit data from unsigned to signed.
|
f@0
|
6556 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
|
f@0
|
6557 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
|
f@0
|
6558
|
f@0
|
6559 // Do buffer conversion if necessary.
|
f@0
|
6560 if ( stream_.doConvertBuffer[1] )
|
f@0
|
6561 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
|
f@0
|
6562 }
|
f@0
|
6563
|
f@0
|
6564 unlock:
|
f@0
|
6565 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
6566 RtApi::tickStreamTime();
|
f@0
|
6567 }
|
f@0
|
6568
|
f@0
|
6569 // Definitions for utility functions and callbacks
|
f@0
|
6570 // specific to the DirectSound implementation.
|
f@0
|
6571
|
f@0
|
6572 static unsigned __stdcall callbackHandler( void *ptr )
|
f@0
|
6573 {
|
f@0
|
6574 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
6575 RtApiDs *object = (RtApiDs *) info->object;
|
f@0
|
6576 bool* isRunning = &info->isRunning;
|
f@0
|
6577
|
f@0
|
6578 while ( *isRunning == true ) {
|
f@0
|
6579 object->callbackEvent();
|
f@0
|
6580 }
|
f@0
|
6581
|
f@0
|
6582 _endthreadex( 0 );
|
f@0
|
6583 return 0;
|
f@0
|
6584 }
|
f@0
|
6585
|
f@0
|
6586 #include "tchar.h"
|
f@0
|
6587
|
f@0
|
6588 static std::string convertTChar( LPCTSTR name )
|
f@0
|
6589 {
|
f@0
|
6590 #if defined( UNICODE ) || defined( _UNICODE )
|
f@0
|
6591 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
|
f@0
|
6592 std::string s( length-1, '\0' );
|
f@0
|
6593 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
|
f@0
|
6594 #else
|
f@0
|
6595 std::string s( name );
|
f@0
|
6596 #endif
|
f@0
|
6597
|
f@0
|
6598 return s;
|
f@0
|
6599 }
|
f@0
|
6600
|
f@0
|
6601 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
|
f@0
|
6602 LPCTSTR description,
|
f@0
|
6603 LPCTSTR /*module*/,
|
f@0
|
6604 LPVOID lpContext )
|
f@0
|
6605 {
|
f@0
|
6606 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
|
f@0
|
6607 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
|
f@0
|
6608
|
f@0
|
6609 HRESULT hr;
|
f@0
|
6610 bool validDevice = false;
|
f@0
|
6611 if ( probeInfo.isInput == true ) {
|
f@0
|
6612 DSCCAPS caps;
|
f@0
|
6613 LPDIRECTSOUNDCAPTURE object;
|
f@0
|
6614
|
f@0
|
6615 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
|
f@0
|
6616 if ( hr != DS_OK ) return TRUE;
|
f@0
|
6617
|
f@0
|
6618 caps.dwSize = sizeof(caps);
|
f@0
|
6619 hr = object->GetCaps( &caps );
|
f@0
|
6620 if ( hr == DS_OK ) {
|
f@0
|
6621 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
|
f@0
|
6622 validDevice = true;
|
f@0
|
6623 }
|
f@0
|
6624 object->Release();
|
f@0
|
6625 }
|
f@0
|
6626 else {
|
f@0
|
6627 DSCAPS caps;
|
f@0
|
6628 LPDIRECTSOUND object;
|
f@0
|
6629 hr = DirectSoundCreate( lpguid, &object, NULL );
|
f@0
|
6630 if ( hr != DS_OK ) return TRUE;
|
f@0
|
6631
|
f@0
|
6632 caps.dwSize = sizeof(caps);
|
f@0
|
6633 hr = object->GetCaps( &caps );
|
f@0
|
6634 if ( hr == DS_OK ) {
|
f@0
|
6635 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
|
f@0
|
6636 validDevice = true;
|
f@0
|
6637 }
|
f@0
|
6638 object->Release();
|
f@0
|
6639 }
|
f@0
|
6640
|
f@0
|
6641 // If good device, then save its name and guid.
|
f@0
|
6642 std::string name = convertTChar( description );
|
f@0
|
6643 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
|
f@0
|
6644 if ( lpguid == NULL )
|
f@0
|
6645 name = "Default Device";
|
f@0
|
6646 if ( validDevice ) {
|
f@0
|
6647 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
|
f@0
|
6648 if ( dsDevices[i].name == name ) {
|
f@0
|
6649 dsDevices[i].found = true;
|
f@0
|
6650 if ( probeInfo.isInput ) {
|
f@0
|
6651 dsDevices[i].id[1] = lpguid;
|
f@0
|
6652 dsDevices[i].validId[1] = true;
|
f@0
|
6653 }
|
f@0
|
6654 else {
|
f@0
|
6655 dsDevices[i].id[0] = lpguid;
|
f@0
|
6656 dsDevices[i].validId[0] = true;
|
f@0
|
6657 }
|
f@0
|
6658 return TRUE;
|
f@0
|
6659 }
|
f@0
|
6660 }
|
f@0
|
6661
|
f@0
|
6662 DsDevice device;
|
f@0
|
6663 device.name = name;
|
f@0
|
6664 device.found = true;
|
f@0
|
6665 if ( probeInfo.isInput ) {
|
f@0
|
6666 device.id[1] = lpguid;
|
f@0
|
6667 device.validId[1] = true;
|
f@0
|
6668 }
|
f@0
|
6669 else {
|
f@0
|
6670 device.id[0] = lpguid;
|
f@0
|
6671 device.validId[0] = true;
|
f@0
|
6672 }
|
f@0
|
6673 dsDevices.push_back( device );
|
f@0
|
6674 }
|
f@0
|
6675
|
f@0
|
6676 return TRUE;
|
f@0
|
6677 }
|
f@0
|
6678
|
f@0
|
6679 static const char* getErrorString( int code )
|
f@0
|
6680 {
|
f@0
|
6681 switch ( code ) {
|
f@0
|
6682
|
f@0
|
6683 case DSERR_ALLOCATED:
|
f@0
|
6684 return "Already allocated";
|
f@0
|
6685
|
f@0
|
6686 case DSERR_CONTROLUNAVAIL:
|
f@0
|
6687 return "Control unavailable";
|
f@0
|
6688
|
f@0
|
6689 case DSERR_INVALIDPARAM:
|
f@0
|
6690 return "Invalid parameter";
|
f@0
|
6691
|
f@0
|
6692 case DSERR_INVALIDCALL:
|
f@0
|
6693 return "Invalid call";
|
f@0
|
6694
|
f@0
|
6695 case DSERR_GENERIC:
|
f@0
|
6696 return "Generic error";
|
f@0
|
6697
|
f@0
|
6698 case DSERR_PRIOLEVELNEEDED:
|
f@0
|
6699 return "Priority level needed";
|
f@0
|
6700
|
f@0
|
6701 case DSERR_OUTOFMEMORY:
|
f@0
|
6702 return "Out of memory";
|
f@0
|
6703
|
f@0
|
6704 case DSERR_BADFORMAT:
|
f@0
|
6705 return "The sample rate or the channel format is not supported";
|
f@0
|
6706
|
f@0
|
6707 case DSERR_UNSUPPORTED:
|
f@0
|
6708 return "Not supported";
|
f@0
|
6709
|
f@0
|
6710 case DSERR_NODRIVER:
|
f@0
|
6711 return "No driver";
|
f@0
|
6712
|
f@0
|
6713 case DSERR_ALREADYINITIALIZED:
|
f@0
|
6714 return "Already initialized";
|
f@0
|
6715
|
f@0
|
6716 case DSERR_NOAGGREGATION:
|
f@0
|
6717 return "No aggregation";
|
f@0
|
6718
|
f@0
|
6719 case DSERR_BUFFERLOST:
|
f@0
|
6720 return "Buffer lost";
|
f@0
|
6721
|
f@0
|
6722 case DSERR_OTHERAPPHASPRIO:
|
f@0
|
6723 return "Another application already has priority";
|
f@0
|
6724
|
f@0
|
6725 case DSERR_UNINITIALIZED:
|
f@0
|
6726 return "Uninitialized";
|
f@0
|
6727
|
f@0
|
6728 default:
|
f@0
|
6729 return "DirectSound unknown error";
|
f@0
|
6730 }
|
f@0
|
6731 }
|
f@0
|
6732 //******************** End of __WINDOWS_DS__ *********************//
|
f@0
|
6733 #endif
|
f@0
|
6734
|
f@0
|
6735
|
f@0
|
6736 #if defined(__LINUX_ALSA__)
|
f@0
|
6737
|
f@0
|
6738 #include <alsa/asoundlib.h>
|
f@0
|
6739 #include <unistd.h>
|
f@0
|
6740
|
f@0
|
6741 // A structure to hold various information related to the ALSA API
|
f@0
|
6742 // implementation.
|
f@0
|
6743 struct AlsaHandle {
|
f@0
|
6744 snd_pcm_t *handles[2];
|
f@0
|
6745 bool synchronized;
|
f@0
|
6746 bool xrun[2];
|
f@0
|
6747 pthread_cond_t runnable_cv;
|
f@0
|
6748 bool runnable;
|
f@0
|
6749
|
f@0
|
6750 AlsaHandle()
|
f@0
|
6751 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
|
f@0
|
6752 };
|
f@0
|
6753
|
f@0
|
6754 static void *alsaCallbackHandler( void * ptr );
|
f@0
|
6755
|
f@0
|
6756 RtApiAlsa :: RtApiAlsa()
|
f@0
|
6757 {
|
f@0
|
6758 // Nothing to do here.
|
f@0
|
6759 }
|
f@0
|
6760
|
f@0
|
6761 RtApiAlsa :: ~RtApiAlsa()
|
f@0
|
6762 {
|
f@0
|
6763 if ( stream_.state != STREAM_CLOSED ) closeStream();
|
f@0
|
6764 }
|
f@0
|
6765
|
f@0
|
6766 unsigned int RtApiAlsa :: getDeviceCount( void )
|
f@0
|
6767 {
|
f@0
|
6768 unsigned nDevices = 0;
|
f@0
|
6769 int result, subdevice, card;
|
f@0
|
6770 char name[64];
|
f@0
|
6771 snd_ctl_t *handle;
|
f@0
|
6772
|
f@0
|
6773 // Count cards and devices
|
f@0
|
6774 card = -1;
|
f@0
|
6775 snd_card_next( &card );
|
f@0
|
6776 while ( card >= 0 ) {
|
f@0
|
6777 sprintf( name, "hw:%d", card );
|
f@0
|
6778 result = snd_ctl_open( &handle, name, 0 );
|
f@0
|
6779 if ( result < 0 ) {
|
f@0
|
6780 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
|
f@0
|
6781 errorText_ = errorStream_.str();
|
f@0
|
6782 error( RtAudioError::WARNING );
|
f@0
|
6783 goto nextcard;
|
f@0
|
6784 }
|
f@0
|
6785 subdevice = -1;
|
f@0
|
6786 while( 1 ) {
|
f@0
|
6787 result = snd_ctl_pcm_next_device( handle, &subdevice );
|
f@0
|
6788 if ( result < 0 ) {
|
f@0
|
6789 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
|
f@0
|
6790 errorText_ = errorStream_.str();
|
f@0
|
6791 error( RtAudioError::WARNING );
|
f@0
|
6792 break;
|
f@0
|
6793 }
|
f@0
|
6794 if ( subdevice < 0 )
|
f@0
|
6795 break;
|
f@0
|
6796 nDevices++;
|
f@0
|
6797 }
|
f@0
|
6798 nextcard:
|
f@0
|
6799 snd_ctl_close( handle );
|
f@0
|
6800 snd_card_next( &card );
|
f@0
|
6801 }
|
f@0
|
6802
|
f@0
|
6803 result = snd_ctl_open( &handle, "default", 0 );
|
f@0
|
6804 if (result == 0) {
|
f@0
|
6805 nDevices++;
|
f@0
|
6806 snd_ctl_close( handle );
|
f@0
|
6807 }
|
f@0
|
6808
|
f@0
|
6809 return nDevices;
|
f@0
|
6810 }
|
f@0
|
6811
|
f@0
|
6812 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
|
f@0
|
6813 {
|
f@0
|
6814 RtAudio::DeviceInfo info;
|
f@0
|
6815 info.probed = false;
|
f@0
|
6816
|
f@0
|
6817 unsigned nDevices = 0;
|
f@0
|
6818 int result, subdevice, card;
|
f@0
|
6819 char name[64];
|
f@0
|
6820 snd_ctl_t *chandle;
|
f@0
|
6821
|
f@0
|
6822 // Count cards and devices
|
f@0
|
6823 card = -1;
|
f@0
|
6824 snd_card_next( &card );
|
f@0
|
6825 while ( card >= 0 ) {
|
f@0
|
6826 sprintf( name, "hw:%d", card );
|
f@0
|
6827 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
|
f@0
|
6828 if ( result < 0 ) {
|
f@0
|
6829 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
|
f@0
|
6830 errorText_ = errorStream_.str();
|
f@0
|
6831 error( RtAudioError::WARNING );
|
f@0
|
6832 goto nextcard;
|
f@0
|
6833 }
|
f@0
|
6834 subdevice = -1;
|
f@0
|
6835 while( 1 ) {
|
f@0
|
6836 result = snd_ctl_pcm_next_device( chandle, &subdevice );
|
f@0
|
6837 if ( result < 0 ) {
|
f@0
|
6838 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
|
f@0
|
6839 errorText_ = errorStream_.str();
|
f@0
|
6840 error( RtAudioError::WARNING );
|
f@0
|
6841 break;
|
f@0
|
6842 }
|
f@0
|
6843 if ( subdevice < 0 ) break;
|
f@0
|
6844 if ( nDevices == device ) {
|
f@0
|
6845 sprintf( name, "hw:%d,%d", card, subdevice );
|
f@0
|
6846 goto foundDevice;
|
f@0
|
6847 }
|
f@0
|
6848 nDevices++;
|
f@0
|
6849 }
|
f@0
|
6850 nextcard:
|
f@0
|
6851 snd_ctl_close( chandle );
|
f@0
|
6852 snd_card_next( &card );
|
f@0
|
6853 }
|
f@0
|
6854
|
f@0
|
6855 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
|
f@0
|
6856 if ( result == 0 ) {
|
f@0
|
6857 if ( nDevices == device ) {
|
f@0
|
6858 strcpy( name, "default" );
|
f@0
|
6859 goto foundDevice;
|
f@0
|
6860 }
|
f@0
|
6861 nDevices++;
|
f@0
|
6862 }
|
f@0
|
6863
|
f@0
|
6864 if ( nDevices == 0 ) {
|
f@0
|
6865 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
|
f@0
|
6866 error( RtAudioError::INVALID_USE );
|
f@0
|
6867 return info;
|
f@0
|
6868 }
|
f@0
|
6869
|
f@0
|
6870 if ( device >= nDevices ) {
|
f@0
|
6871 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
|
f@0
|
6872 error( RtAudioError::INVALID_USE );
|
f@0
|
6873 return info;
|
f@0
|
6874 }
|
f@0
|
6875
|
f@0
|
6876 foundDevice:
|
f@0
|
6877
|
f@0
|
6878 // If a stream is already open, we cannot probe the stream devices.
|
f@0
|
6879 // Thus, use the saved results.
|
f@0
|
6880 if ( stream_.state != STREAM_CLOSED &&
|
f@0
|
6881 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
|
f@0
|
6882 snd_ctl_close( chandle );
|
f@0
|
6883 if ( device >= devices_.size() ) {
|
f@0
|
6884 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
|
f@0
|
6885 error( RtAudioError::WARNING );
|
f@0
|
6886 return info;
|
f@0
|
6887 }
|
f@0
|
6888 return devices_[ device ];
|
f@0
|
6889 }
|
f@0
|
6890
|
f@0
|
6891 int openMode = SND_PCM_ASYNC;
|
f@0
|
6892 snd_pcm_stream_t stream;
|
f@0
|
6893 snd_pcm_info_t *pcminfo;
|
f@0
|
6894 snd_pcm_info_alloca( &pcminfo );
|
f@0
|
6895 snd_pcm_t *phandle;
|
f@0
|
6896 snd_pcm_hw_params_t *params;
|
f@0
|
6897 snd_pcm_hw_params_alloca( ¶ms );
|
f@0
|
6898
|
f@0
|
6899 // First try for playback unless default device (which has subdev -1)
|
f@0
|
6900 stream = SND_PCM_STREAM_PLAYBACK;
|
f@0
|
6901 snd_pcm_info_set_stream( pcminfo, stream );
|
f@0
|
6902 if ( subdevice != -1 ) {
|
f@0
|
6903 snd_pcm_info_set_device( pcminfo, subdevice );
|
f@0
|
6904 snd_pcm_info_set_subdevice( pcminfo, 0 );
|
f@0
|
6905
|
f@0
|
6906 result = snd_ctl_pcm_info( chandle, pcminfo );
|
f@0
|
6907 if ( result < 0 ) {
|
f@0
|
6908 // Device probably doesn't support playback.
|
f@0
|
6909 goto captureProbe;
|
f@0
|
6910 }
|
f@0
|
6911 }
|
f@0
|
6912
|
f@0
|
6913 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
|
f@0
|
6914 if ( result < 0 ) {
|
f@0
|
6915 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
6916 errorText_ = errorStream_.str();
|
f@0
|
6917 error( RtAudioError::WARNING );
|
f@0
|
6918 goto captureProbe;
|
f@0
|
6919 }
|
f@0
|
6920
|
f@0
|
6921 // The device is open ... fill the parameter structure.
|
f@0
|
6922 result = snd_pcm_hw_params_any( phandle, params );
|
f@0
|
6923 if ( result < 0 ) {
|
f@0
|
6924 snd_pcm_close( phandle );
|
f@0
|
6925 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
6926 errorText_ = errorStream_.str();
|
f@0
|
6927 error( RtAudioError::WARNING );
|
f@0
|
6928 goto captureProbe;
|
f@0
|
6929 }
|
f@0
|
6930
|
f@0
|
6931 // Get output channel information.
|
f@0
|
6932 unsigned int value;
|
f@0
|
6933 result = snd_pcm_hw_params_get_channels_max( params, &value );
|
f@0
|
6934 if ( result < 0 ) {
|
f@0
|
6935 snd_pcm_close( phandle );
|
f@0
|
6936 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
|
f@0
|
6937 errorText_ = errorStream_.str();
|
f@0
|
6938 error( RtAudioError::WARNING );
|
f@0
|
6939 goto captureProbe;
|
f@0
|
6940 }
|
f@0
|
6941 info.outputChannels = value;
|
f@0
|
6942 snd_pcm_close( phandle );
|
f@0
|
6943
|
f@0
|
6944 captureProbe:
|
f@0
|
6945 stream = SND_PCM_STREAM_CAPTURE;
|
f@0
|
6946 snd_pcm_info_set_stream( pcminfo, stream );
|
f@0
|
6947
|
f@0
|
6948 // Now try for capture unless default device (with subdev = -1)
|
f@0
|
6949 if ( subdevice != -1 ) {
|
f@0
|
6950 result = snd_ctl_pcm_info( chandle, pcminfo );
|
f@0
|
6951 snd_ctl_close( chandle );
|
f@0
|
6952 if ( result < 0 ) {
|
f@0
|
6953 // Device probably doesn't support capture.
|
f@0
|
6954 if ( info.outputChannels == 0 ) return info;
|
f@0
|
6955 goto probeParameters;
|
f@0
|
6956 }
|
f@0
|
6957 }
|
f@0
|
6958 else
|
f@0
|
6959 snd_ctl_close( chandle );
|
f@0
|
6960
|
f@0
|
6961 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
|
f@0
|
6962 if ( result < 0 ) {
|
f@0
|
6963 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
6964 errorText_ = errorStream_.str();
|
f@0
|
6965 error( RtAudioError::WARNING );
|
f@0
|
6966 if ( info.outputChannels == 0 ) return info;
|
f@0
|
6967 goto probeParameters;
|
f@0
|
6968 }
|
f@0
|
6969
|
f@0
|
6970 // The device is open ... fill the parameter structure.
|
f@0
|
6971 result = snd_pcm_hw_params_any( phandle, params );
|
f@0
|
6972 if ( result < 0 ) {
|
f@0
|
6973 snd_pcm_close( phandle );
|
f@0
|
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
6975 errorText_ = errorStream_.str();
|
f@0
|
6976 error( RtAudioError::WARNING );
|
f@0
|
6977 if ( info.outputChannels == 0 ) return info;
|
f@0
|
6978 goto probeParameters;
|
f@0
|
6979 }
|
f@0
|
6980
|
f@0
|
6981 result = snd_pcm_hw_params_get_channels_max( params, &value );
|
f@0
|
6982 if ( result < 0 ) {
|
f@0
|
6983 snd_pcm_close( phandle );
|
f@0
|
6984 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
|
f@0
|
6985 errorText_ = errorStream_.str();
|
f@0
|
6986 error( RtAudioError::WARNING );
|
f@0
|
6987 if ( info.outputChannels == 0 ) return info;
|
f@0
|
6988 goto probeParameters;
|
f@0
|
6989 }
|
f@0
|
6990 info.inputChannels = value;
|
f@0
|
6991 snd_pcm_close( phandle );
|
f@0
|
6992
|
f@0
|
6993 // If device opens for both playback and capture, we determine the channels.
|
f@0
|
6994 if ( info.outputChannels > 0 && info.inputChannels > 0 )
|
f@0
|
6995 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
|
f@0
|
6996
|
f@0
|
6997 // ALSA doesn't provide default devices so we'll use the first available one.
|
f@0
|
6998 if ( device == 0 && info.outputChannels > 0 )
|
f@0
|
6999 info.isDefaultOutput = true;
|
f@0
|
7000 if ( device == 0 && info.inputChannels > 0 )
|
f@0
|
7001 info.isDefaultInput = true;
|
f@0
|
7002
|
f@0
|
7003 probeParameters:
|
f@0
|
7004 // At this point, we just need to figure out the supported data
|
f@0
|
7005 // formats and sample rates. We'll proceed by opening the device in
|
f@0
|
7006 // the direction with the maximum number of channels, or playback if
|
f@0
|
7007 // they are equal. This might limit our sample rate options, but so
|
f@0
|
7008 // be it.
|
f@0
|
7009
|
f@0
|
7010 if ( info.outputChannels >= info.inputChannels )
|
f@0
|
7011 stream = SND_PCM_STREAM_PLAYBACK;
|
f@0
|
7012 else
|
f@0
|
7013 stream = SND_PCM_STREAM_CAPTURE;
|
f@0
|
7014 snd_pcm_info_set_stream( pcminfo, stream );
|
f@0
|
7015
|
f@0
|
7016 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
|
f@0
|
7017 if ( result < 0 ) {
|
f@0
|
7018 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7019 errorText_ = errorStream_.str();
|
f@0
|
7020 error( RtAudioError::WARNING );
|
f@0
|
7021 return info;
|
f@0
|
7022 }
|
f@0
|
7023
|
f@0
|
7024 // The device is open ... fill the parameter structure.
|
f@0
|
7025 result = snd_pcm_hw_params_any( phandle, params );
|
f@0
|
7026 if ( result < 0 ) {
|
f@0
|
7027 snd_pcm_close( phandle );
|
f@0
|
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7029 errorText_ = errorStream_.str();
|
f@0
|
7030 error( RtAudioError::WARNING );
|
f@0
|
7031 return info;
|
f@0
|
7032 }
|
f@0
|
7033
|
f@0
|
7034 // Test our discrete set of sample rate values.
|
f@0
|
7035 info.sampleRates.clear();
|
f@0
|
7036 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
|
f@0
|
7037 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
|
f@0
|
7038 info.sampleRates.push_back( SAMPLE_RATES[i] );
|
f@0
|
7039 }
|
f@0
|
7040 if ( info.sampleRates.size() == 0 ) {
|
f@0
|
7041 snd_pcm_close( phandle );
|
f@0
|
7042 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
|
f@0
|
7043 errorText_ = errorStream_.str();
|
f@0
|
7044 error( RtAudioError::WARNING );
|
f@0
|
7045 return info;
|
f@0
|
7046 }
|
f@0
|
7047
|
f@0
|
7048 // Probe the supported data formats ... we don't care about endian-ness just yet
|
f@0
|
7049 snd_pcm_format_t format;
|
f@0
|
7050 info.nativeFormats = 0;
|
f@0
|
7051 format = SND_PCM_FORMAT_S8;
|
f@0
|
7052 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7053 info.nativeFormats |= RTAUDIO_SINT8;
|
f@0
|
7054 format = SND_PCM_FORMAT_S16;
|
f@0
|
7055 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7056 info.nativeFormats |= RTAUDIO_SINT16;
|
f@0
|
7057 format = SND_PCM_FORMAT_S24;
|
f@0
|
7058 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7059 info.nativeFormats |= RTAUDIO_SINT24;
|
f@0
|
7060 format = SND_PCM_FORMAT_S32;
|
f@0
|
7061 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7062 info.nativeFormats |= RTAUDIO_SINT32;
|
f@0
|
7063 format = SND_PCM_FORMAT_FLOAT;
|
f@0
|
7064 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7065 info.nativeFormats |= RTAUDIO_FLOAT32;
|
f@0
|
7066 format = SND_PCM_FORMAT_FLOAT64;
|
f@0
|
7067 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
|
f@0
|
7068 info.nativeFormats |= RTAUDIO_FLOAT64;
|
f@0
|
7069
|
f@0
|
7070 // Check that we have at least one supported format
|
f@0
|
7071 if ( info.nativeFormats == 0 ) {
|
f@0
|
7072 snd_pcm_close( phandle );
|
f@0
|
7073 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
|
f@0
|
7074 errorText_ = errorStream_.str();
|
f@0
|
7075 error( RtAudioError::WARNING );
|
f@0
|
7076 return info;
|
f@0
|
7077 }
|
f@0
|
7078
|
f@0
|
7079 // Get the device name
|
f@0
|
7080 char *cardname;
|
f@0
|
7081 result = snd_card_get_name( card, &cardname );
|
f@0
|
7082 if ( result >= 0 ) {
|
f@0
|
7083 sprintf( name, "hw:%s,%d", cardname, subdevice );
|
f@0
|
7084 free( cardname );
|
f@0
|
7085 }
|
f@0
|
7086 info.name = name;
|
f@0
|
7087
|
f@0
|
7088 // That's all ... close the device and return
|
f@0
|
7089 snd_pcm_close( phandle );
|
f@0
|
7090 info.probed = true;
|
f@0
|
7091 return info;
|
f@0
|
7092 }
|
f@0
|
7093
|
f@0
|
7094 void RtApiAlsa :: saveDeviceInfo( void )
|
f@0
|
7095 {
|
f@0
|
7096 devices_.clear();
|
f@0
|
7097
|
f@0
|
7098 unsigned int nDevices = getDeviceCount();
|
f@0
|
7099 devices_.resize( nDevices );
|
f@0
|
7100 for ( unsigned int i=0; i<nDevices; i++ )
|
f@0
|
7101 devices_[i] = getDeviceInfo( i );
|
f@0
|
7102 }
|
f@0
|
7103
|
f@0
|
7104 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
7105 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
7106 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
7107 RtAudio::StreamOptions *options )
|
f@0
|
7108
|
f@0
|
7109 {
|
f@0
|
7110 #if defined(__RTAUDIO_DEBUG__)
|
f@0
|
7111 snd_output_t *out;
|
f@0
|
7112 snd_output_stdio_attach(&out, stderr, 0);
|
f@0
|
7113 #endif
|
f@0
|
7114
|
f@0
|
7115 // I'm not using the "plug" interface ... too much inconsistent behavior.
|
f@0
|
7116
|
f@0
|
7117 unsigned nDevices = 0;
|
f@0
|
7118 int result, subdevice, card;
|
f@0
|
7119 char name[64];
|
f@0
|
7120 snd_ctl_t *chandle;
|
f@0
|
7121
|
f@0
|
7122 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
|
f@0
|
7123 snprintf(name, sizeof(name), "%s", "default");
|
f@0
|
7124 else {
|
f@0
|
7125 // Count cards and devices
|
f@0
|
7126 card = -1;
|
f@0
|
7127 snd_card_next( &card );
|
f@0
|
7128 while ( card >= 0 ) {
|
f@0
|
7129 sprintf( name, "hw:%d", card );
|
f@0
|
7130 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
|
f@0
|
7131 if ( result < 0 ) {
|
f@0
|
7132 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
|
f@0
|
7133 errorText_ = errorStream_.str();
|
f@0
|
7134 return FAILURE;
|
f@0
|
7135 }
|
f@0
|
7136 subdevice = -1;
|
f@0
|
7137 while( 1 ) {
|
f@0
|
7138 result = snd_ctl_pcm_next_device( chandle, &subdevice );
|
f@0
|
7139 if ( result < 0 ) break;
|
f@0
|
7140 if ( subdevice < 0 ) break;
|
f@0
|
7141 if ( nDevices == device ) {
|
f@0
|
7142 sprintf( name, "hw:%d,%d", card, subdevice );
|
f@0
|
7143 snd_ctl_close( chandle );
|
f@0
|
7144 goto foundDevice;
|
f@0
|
7145 }
|
f@0
|
7146 nDevices++;
|
f@0
|
7147 }
|
f@0
|
7148 snd_ctl_close( chandle );
|
f@0
|
7149 snd_card_next( &card );
|
f@0
|
7150 }
|
f@0
|
7151
|
f@0
|
7152 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
|
f@0
|
7153 if ( result == 0 ) {
|
f@0
|
7154 if ( nDevices == device ) {
|
f@0
|
7155 strcpy( name, "default" );
|
f@0
|
7156 goto foundDevice;
|
f@0
|
7157 }
|
f@0
|
7158 nDevices++;
|
f@0
|
7159 }
|
f@0
|
7160
|
f@0
|
7161 if ( nDevices == 0 ) {
|
f@0
|
7162 // This should not happen because a check is made before this function is called.
|
f@0
|
7163 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
|
f@0
|
7164 return FAILURE;
|
f@0
|
7165 }
|
f@0
|
7166
|
f@0
|
7167 if ( device >= nDevices ) {
|
f@0
|
7168 // This should not happen because a check is made before this function is called.
|
f@0
|
7169 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
|
f@0
|
7170 return FAILURE;
|
f@0
|
7171 }
|
f@0
|
7172 }
|
f@0
|
7173
|
f@0
|
7174 foundDevice:
|
f@0
|
7175
|
f@0
|
7176 // The getDeviceInfo() function will not work for a device that is
|
f@0
|
7177 // already open. Thus, we'll probe the system before opening a
|
f@0
|
7178 // stream and save the results for use by getDeviceInfo().
|
f@0
|
7179 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
|
f@0
|
7180 this->saveDeviceInfo();
|
f@0
|
7181
|
f@0
|
7182 snd_pcm_stream_t stream;
|
f@0
|
7183 if ( mode == OUTPUT )
|
f@0
|
7184 stream = SND_PCM_STREAM_PLAYBACK;
|
f@0
|
7185 else
|
f@0
|
7186 stream = SND_PCM_STREAM_CAPTURE;
|
f@0
|
7187
|
f@0
|
7188 snd_pcm_t *phandle;
|
f@0
|
7189 int openMode = SND_PCM_ASYNC;
|
f@0
|
7190 result = snd_pcm_open( &phandle, name, stream, openMode );
|
f@0
|
7191 if ( result < 0 ) {
|
f@0
|
7192 if ( mode == OUTPUT )
|
f@0
|
7193 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
|
f@0
|
7194 else
|
f@0
|
7195 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
|
f@0
|
7196 errorText_ = errorStream_.str();
|
f@0
|
7197 return FAILURE;
|
f@0
|
7198 }
|
f@0
|
7199
|
f@0
|
7200 // Fill the parameter structure.
|
f@0
|
7201 snd_pcm_hw_params_t *hw_params;
|
f@0
|
7202 snd_pcm_hw_params_alloca( &hw_params );
|
f@0
|
7203 result = snd_pcm_hw_params_any( phandle, hw_params );
|
f@0
|
7204 if ( result < 0 ) {
|
f@0
|
7205 snd_pcm_close( phandle );
|
f@0
|
7206 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
|
f@0
|
7207 errorText_ = errorStream_.str();
|
f@0
|
7208 return FAILURE;
|
f@0
|
7209 }
|
f@0
|
7210
|
f@0
|
7211 #if defined(__RTAUDIO_DEBUG__)
|
f@0
|
7212 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
|
f@0
|
7213 snd_pcm_hw_params_dump( hw_params, out );
|
f@0
|
7214 #endif
|
f@0
|
7215
|
f@0
|
7216 // Set access ... check user preference.
|
f@0
|
7217 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
|
f@0
|
7218 stream_.userInterleaved = false;
|
f@0
|
7219 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
|
f@0
|
7220 if ( result < 0 ) {
|
f@0
|
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
|
f@0
|
7222 stream_.deviceInterleaved[mode] = true;
|
f@0
|
7223 }
|
f@0
|
7224 else
|
f@0
|
7225 stream_.deviceInterleaved[mode] = false;
|
f@0
|
7226 }
|
f@0
|
7227 else {
|
f@0
|
7228 stream_.userInterleaved = true;
|
f@0
|
7229 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
|
f@0
|
7230 if ( result < 0 ) {
|
f@0
|
7231 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
|
f@0
|
7232 stream_.deviceInterleaved[mode] = false;
|
f@0
|
7233 }
|
f@0
|
7234 else
|
f@0
|
7235 stream_.deviceInterleaved[mode] = true;
|
f@0
|
7236 }
|
f@0
|
7237
|
f@0
|
7238 if ( result < 0 ) {
|
f@0
|
7239 snd_pcm_close( phandle );
|
f@0
|
7240 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
|
f@0
|
7241 errorText_ = errorStream_.str();
|
f@0
|
7242 return FAILURE;
|
f@0
|
7243 }
|
f@0
|
7244
|
f@0
|
7245 // Determine how to set the device format.
|
f@0
|
7246 stream_.userFormat = format;
|
f@0
|
7247 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
|
f@0
|
7248
|
f@0
|
7249 if ( format == RTAUDIO_SINT8 )
|
f@0
|
7250 deviceFormat = SND_PCM_FORMAT_S8;
|
f@0
|
7251 else if ( format == RTAUDIO_SINT16 )
|
f@0
|
7252 deviceFormat = SND_PCM_FORMAT_S16;
|
f@0
|
7253 else if ( format == RTAUDIO_SINT24 )
|
f@0
|
7254 deviceFormat = SND_PCM_FORMAT_S24;
|
f@0
|
7255 else if ( format == RTAUDIO_SINT32 )
|
f@0
|
7256 deviceFormat = SND_PCM_FORMAT_S32;
|
f@0
|
7257 else if ( format == RTAUDIO_FLOAT32 )
|
f@0
|
7258 deviceFormat = SND_PCM_FORMAT_FLOAT;
|
f@0
|
7259 else if ( format == RTAUDIO_FLOAT64 )
|
f@0
|
7260 deviceFormat = SND_PCM_FORMAT_FLOAT64;
|
f@0
|
7261
|
f@0
|
7262 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
|
f@0
|
7263 stream_.deviceFormat[mode] = format;
|
f@0
|
7264 goto setFormat;
|
f@0
|
7265 }
|
f@0
|
7266
|
f@0
|
7267 // The user requested format is not natively supported by the device.
|
f@0
|
7268 deviceFormat = SND_PCM_FORMAT_FLOAT64;
|
f@0
|
7269 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7270 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
|
f@0
|
7271 goto setFormat;
|
f@0
|
7272 }
|
f@0
|
7273
|
f@0
|
7274 deviceFormat = SND_PCM_FORMAT_FLOAT;
|
f@0
|
7275 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7276 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
|
f@0
|
7277 goto setFormat;
|
f@0
|
7278 }
|
f@0
|
7279
|
f@0
|
7280 deviceFormat = SND_PCM_FORMAT_S32;
|
f@0
|
7281 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7282 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
7283 goto setFormat;
|
f@0
|
7284 }
|
f@0
|
7285
|
f@0
|
7286 deviceFormat = SND_PCM_FORMAT_S24;
|
f@0
|
7287 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
7289 goto setFormat;
|
f@0
|
7290 }
|
f@0
|
7291
|
f@0
|
7292 deviceFormat = SND_PCM_FORMAT_S16;
|
f@0
|
7293 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7294 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
7295 goto setFormat;
|
f@0
|
7296 }
|
f@0
|
7297
|
f@0
|
7298 deviceFormat = SND_PCM_FORMAT_S8;
|
f@0
|
7299 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
|
f@0
|
7300 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
7301 goto setFormat;
|
f@0
|
7302 }
|
f@0
|
7303
|
f@0
|
7304 // If we get here, no supported format was found.
|
f@0
|
7305 snd_pcm_close( phandle );
|
f@0
|
7306 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
|
f@0
|
7307 errorText_ = errorStream_.str();
|
f@0
|
7308 return FAILURE;
|
f@0
|
7309
|
f@0
|
7310 setFormat:
|
f@0
|
7311 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
|
f@0
|
7312 if ( result < 0 ) {
|
f@0
|
7313 snd_pcm_close( phandle );
|
f@0
|
7314 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
|
f@0
|
7315 errorText_ = errorStream_.str();
|
f@0
|
7316 return FAILURE;
|
f@0
|
7317 }
|
f@0
|
7318
|
f@0
|
7319 // Determine whether byte-swaping is necessary.
|
f@0
|
7320 stream_.doByteSwap[mode] = false;
|
f@0
|
7321 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
|
f@0
|
7322 result = snd_pcm_format_cpu_endian( deviceFormat );
|
f@0
|
7323 if ( result == 0 )
|
f@0
|
7324 stream_.doByteSwap[mode] = true;
|
f@0
|
7325 else if (result < 0) {
|
f@0
|
7326 snd_pcm_close( phandle );
|
f@0
|
7327 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
|
f@0
|
7328 errorText_ = errorStream_.str();
|
f@0
|
7329 return FAILURE;
|
f@0
|
7330 }
|
f@0
|
7331 }
|
f@0
|
7332
|
f@0
|
7333 // Set the sample rate.
|
f@0
|
7334 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
|
f@0
|
7335 if ( result < 0 ) {
|
f@0
|
7336 snd_pcm_close( phandle );
|
f@0
|
7337 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7338 errorText_ = errorStream_.str();
|
f@0
|
7339 return FAILURE;
|
f@0
|
7340 }
|
f@0
|
7341
|
f@0
|
7342 // Determine the number of channels for this device. We support a possible
|
f@0
|
7343 // minimum device channel number > than the value requested by the user.
|
f@0
|
7344 stream_.nUserChannels[mode] = channels;
|
f@0
|
7345 unsigned int value;
|
f@0
|
7346 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
|
f@0
|
7347 unsigned int deviceChannels = value;
|
f@0
|
7348 if ( result < 0 || deviceChannels < channels + firstChannel ) {
|
f@0
|
7349 snd_pcm_close( phandle );
|
f@0
|
7350 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7351 errorText_ = errorStream_.str();
|
f@0
|
7352 return FAILURE;
|
f@0
|
7353 }
|
f@0
|
7354
|
f@0
|
7355 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
|
f@0
|
7356 if ( result < 0 ) {
|
f@0
|
7357 snd_pcm_close( phandle );
|
f@0
|
7358 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7359 errorText_ = errorStream_.str();
|
f@0
|
7360 return FAILURE;
|
f@0
|
7361 }
|
f@0
|
7362 deviceChannels = value;
|
f@0
|
7363 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
|
f@0
|
7364 stream_.nDeviceChannels[mode] = deviceChannels;
|
f@0
|
7365
|
f@0
|
7366 // Set the device channels.
|
f@0
|
7367 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
|
f@0
|
7368 if ( result < 0 ) {
|
f@0
|
7369 snd_pcm_close( phandle );
|
f@0
|
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7371 errorText_ = errorStream_.str();
|
f@0
|
7372 return FAILURE;
|
f@0
|
7373 }
|
f@0
|
7374
|
f@0
|
7375 // Set the buffer (or period) size.
|
f@0
|
7376 int dir = 0;
|
f@0
|
7377 snd_pcm_uframes_t periodSize = *bufferSize;
|
f@0
|
7378 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
|
f@0
|
7379 if ( result < 0 ) {
|
f@0
|
7380 snd_pcm_close( phandle );
|
f@0
|
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7382 errorText_ = errorStream_.str();
|
f@0
|
7383 return FAILURE;
|
f@0
|
7384 }
|
f@0
|
7385 *bufferSize = periodSize;
|
f@0
|
7386
|
f@0
|
7387 // Set the buffer number, which in ALSA is referred to as the "period".
|
f@0
|
7388 unsigned int periods = 0;
|
f@0
|
7389 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
|
f@0
|
7390 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
|
f@0
|
7391 if ( periods < 2 ) periods = 4; // a fairly safe default value
|
f@0
|
7392 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
|
f@0
|
7393 if ( result < 0 ) {
|
f@0
|
7394 snd_pcm_close( phandle );
|
f@0
|
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7396 errorText_ = errorStream_.str();
|
f@0
|
7397 return FAILURE;
|
f@0
|
7398 }
|
f@0
|
7399
|
f@0
|
7400 // If attempting to setup a duplex stream, the bufferSize parameter
|
f@0
|
7401 // MUST be the same in both directions!
|
f@0
|
7402 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
|
f@0
|
7403 snd_pcm_close( phandle );
|
f@0
|
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
|
f@0
|
7405 errorText_ = errorStream_.str();
|
f@0
|
7406 return FAILURE;
|
f@0
|
7407 }
|
f@0
|
7408
|
f@0
|
7409 stream_.bufferSize = *bufferSize;
|
f@0
|
7410
|
f@0
|
7411 // Install the hardware configuration
|
f@0
|
7412 result = snd_pcm_hw_params( phandle, hw_params );
|
f@0
|
7413 if ( result < 0 ) {
|
f@0
|
7414 snd_pcm_close( phandle );
|
f@0
|
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7416 errorText_ = errorStream_.str();
|
f@0
|
7417 return FAILURE;
|
f@0
|
7418 }
|
f@0
|
7419
|
f@0
|
7420 #if defined(__RTAUDIO_DEBUG__)
|
f@0
|
7421 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
|
f@0
|
7422 snd_pcm_hw_params_dump( hw_params, out );
|
f@0
|
7423 #endif
|
f@0
|
7424
|
f@0
|
7425 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
|
f@0
|
7426 snd_pcm_sw_params_t *sw_params = NULL;
|
f@0
|
7427 snd_pcm_sw_params_alloca( &sw_params );
|
f@0
|
7428 snd_pcm_sw_params_current( phandle, sw_params );
|
f@0
|
7429 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
|
f@0
|
7430 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
|
f@0
|
7431 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
|
f@0
|
7432
|
f@0
|
7433 // The following two settings were suggested by Theo Veenker
|
f@0
|
7434 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
|
f@0
|
7435 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
|
f@0
|
7436
|
f@0
|
7437 // here are two options for a fix
|
f@0
|
7438 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
|
f@0
|
7439 snd_pcm_uframes_t val;
|
f@0
|
7440 snd_pcm_sw_params_get_boundary( sw_params, &val );
|
f@0
|
7441 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
|
f@0
|
7442
|
f@0
|
7443 result = snd_pcm_sw_params( phandle, sw_params );
|
f@0
|
7444 if ( result < 0 ) {
|
f@0
|
7445 snd_pcm_close( phandle );
|
f@0
|
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
|
f@0
|
7447 errorText_ = errorStream_.str();
|
f@0
|
7448 return FAILURE;
|
f@0
|
7449 }
|
f@0
|
7450
|
f@0
|
7451 #if defined(__RTAUDIO_DEBUG__)
|
f@0
|
7452 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
|
f@0
|
7453 snd_pcm_sw_params_dump( sw_params, out );
|
f@0
|
7454 #endif
|
f@0
|
7455
|
f@0
|
7456 // Set flags for buffer conversion
|
f@0
|
7457 stream_.doConvertBuffer[mode] = false;
|
f@0
|
7458 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
7459 stream_.doConvertBuffer[mode] = true;
|
f@0
|
7460 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
|
f@0
|
7461 stream_.doConvertBuffer[mode] = true;
|
f@0
|
7462 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
7463 stream_.nUserChannels[mode] > 1 )
|
f@0
|
7464 stream_.doConvertBuffer[mode] = true;
|
f@0
|
7465
|
f@0
|
7466 // Allocate the ApiHandle if necessary and then save.
|
f@0
|
7467 AlsaHandle *apiInfo = 0;
|
f@0
|
7468 if ( stream_.apiHandle == 0 ) {
|
f@0
|
7469 try {
|
f@0
|
7470 apiInfo = (AlsaHandle *) new AlsaHandle;
|
f@0
|
7471 }
|
f@0
|
7472 catch ( std::bad_alloc& ) {
|
f@0
|
7473 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
|
f@0
|
7474 goto error;
|
f@0
|
7475 }
|
f@0
|
7476
|
f@0
|
7477 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
|
f@0
|
7478 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
|
f@0
|
7479 goto error;
|
f@0
|
7480 }
|
f@0
|
7481
|
f@0
|
7482 stream_.apiHandle = (void *) apiInfo;
|
f@0
|
7483 apiInfo->handles[0] = 0;
|
f@0
|
7484 apiInfo->handles[1] = 0;
|
f@0
|
7485 }
|
f@0
|
7486 else {
|
f@0
|
7487 apiInfo = (AlsaHandle *) stream_.apiHandle;
|
f@0
|
7488 }
|
f@0
|
7489 apiInfo->handles[mode] = phandle;
|
f@0
|
7490 phandle = 0;
|
f@0
|
7491
|
f@0
|
7492 // Allocate necessary internal buffers.
|
f@0
|
7493 unsigned long bufferBytes;
|
f@0
|
7494 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
7495 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
7496 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
7497 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
7498 goto error;
|
f@0
|
7499 }
|
f@0
|
7500
|
f@0
|
7501 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
7502
|
f@0
|
7503 bool makeBuffer = true;
|
f@0
|
7504 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
7505 if ( mode == INPUT ) {
|
f@0
|
7506 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
7507 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
7508 if ( bufferBytes <= bytesOut ) makeBuffer = false;
|
f@0
|
7509 }
|
f@0
|
7510 }
|
f@0
|
7511
|
f@0
|
7512 if ( makeBuffer ) {
|
f@0
|
7513 bufferBytes *= *bufferSize;
|
f@0
|
7514 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
7515 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
7516 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
7517 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
7518 goto error;
|
f@0
|
7519 }
|
f@0
|
7520 }
|
f@0
|
7521 }
|
f@0
|
7522
|
f@0
|
7523 stream_.sampleRate = sampleRate;
|
f@0
|
7524 stream_.nBuffers = periods;
|
f@0
|
7525 stream_.device[mode] = device;
|
f@0
|
7526 stream_.state = STREAM_STOPPED;
|
f@0
|
7527
|
f@0
|
7528 // Setup the buffer conversion information structure.
|
f@0
|
7529 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
|
f@0
|
7530
|
f@0
|
7531 // Setup thread if necessary.
|
f@0
|
7532 if ( stream_.mode == OUTPUT && mode == INPUT ) {
|
f@0
|
7533 // We had already set up an output stream.
|
f@0
|
7534 stream_.mode = DUPLEX;
|
f@0
|
7535 // Link the streams if possible.
|
f@0
|
7536 apiInfo->synchronized = false;
|
f@0
|
7537 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
|
f@0
|
7538 apiInfo->synchronized = true;
|
f@0
|
7539 else {
|
f@0
|
7540 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
|
f@0
|
7541 error( RtAudioError::WARNING );
|
f@0
|
7542 }
|
f@0
|
7543 }
|
f@0
|
7544 else {
|
f@0
|
7545 stream_.mode = mode;
|
f@0
|
7546
|
f@0
|
7547 // Setup callback thread.
|
f@0
|
7548 stream_.callbackInfo.object = (void *) this;
|
f@0
|
7549
|
f@0
|
7550 // Set the thread attributes for joinable and realtime scheduling
|
f@0
|
7551 // priority (optional). The higher priority will only take affect
|
f@0
|
7552 // if the program is run as root or suid. Note, under Linux
|
f@0
|
7553 // processes with CAP_SYS_NICE privilege, a user can change
|
f@0
|
7554 // scheduling policy and priority (thus need not be root). See
|
f@0
|
7555 // POSIX "capabilities".
|
f@0
|
7556 pthread_attr_t attr;
|
f@0
|
7557 pthread_attr_init( &attr );
|
f@0
|
7558 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
|
f@0
|
7559
|
f@0
|
7560 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
|
f@0
|
7561 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
|
f@0
|
7562 // We previously attempted to increase the audio callback priority
|
f@0
|
7563 // to SCHED_RR here via the attributes. However, while no errors
|
f@0
|
7564 // were reported in doing so, it did not work. So, now this is
|
f@0
|
7565 // done in the alsaCallbackHandler function.
|
f@0
|
7566 stream_.callbackInfo.doRealtime = true;
|
f@0
|
7567 int priority = options->priority;
|
f@0
|
7568 int min = sched_get_priority_min( SCHED_RR );
|
f@0
|
7569 int max = sched_get_priority_max( SCHED_RR );
|
f@0
|
7570 if ( priority < min ) priority = min;
|
f@0
|
7571 else if ( priority > max ) priority = max;
|
f@0
|
7572 stream_.callbackInfo.priority = priority;
|
f@0
|
7573 }
|
f@0
|
7574 #endif
|
f@0
|
7575
|
f@0
|
7576 stream_.callbackInfo.isRunning = true;
|
f@0
|
7577 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
|
f@0
|
7578 pthread_attr_destroy( &attr );
|
f@0
|
7579 if ( result ) {
|
f@0
|
7580 stream_.callbackInfo.isRunning = false;
|
f@0
|
7581 errorText_ = "RtApiAlsa::error creating callback thread!";
|
f@0
|
7582 goto error;
|
f@0
|
7583 }
|
f@0
|
7584 }
|
f@0
|
7585
|
f@0
|
7586 return SUCCESS;
|
f@0
|
7587
|
f@0
|
7588 error:
|
f@0
|
7589 if ( apiInfo ) {
|
f@0
|
7590 pthread_cond_destroy( &apiInfo->runnable_cv );
|
f@0
|
7591 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
|
f@0
|
7592 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
|
f@0
|
7593 delete apiInfo;
|
f@0
|
7594 stream_.apiHandle = 0;
|
f@0
|
7595 }
|
f@0
|
7596
|
f@0
|
7597 if ( phandle) snd_pcm_close( phandle );
|
f@0
|
7598
|
f@0
|
7599 for ( int i=0; i<2; i++ ) {
|
f@0
|
7600 if ( stream_.userBuffer[i] ) {
|
f@0
|
7601 free( stream_.userBuffer[i] );
|
f@0
|
7602 stream_.userBuffer[i] = 0;
|
f@0
|
7603 }
|
f@0
|
7604 }
|
f@0
|
7605
|
f@0
|
7606 if ( stream_.deviceBuffer ) {
|
f@0
|
7607 free( stream_.deviceBuffer );
|
f@0
|
7608 stream_.deviceBuffer = 0;
|
f@0
|
7609 }
|
f@0
|
7610
|
f@0
|
7611 stream_.state = STREAM_CLOSED;
|
f@0
|
7612 return FAILURE;
|
f@0
|
7613 }
|
f@0
|
7614
|
f@0
|
// Close an open stream: stop the callback thread, stop the pcm devices,
// release ALSA handles and internal buffers, and reset the stream state.
// Issues a WARNING (not an error) if no stream is open.
void RtApiAlsa :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  // Tell the callback thread to exit its loop, then — if the stream is
  // stopped — wake it from its condition-variable wait so it can observe
  // the flag.  The join below must not happen before this wake-up.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED ) {
    apiInfo->runnable = true;
    pthread_cond_signal( &apiInfo->runnable_cv );
  }
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // If audio was still flowing, drop (discard) pending samples on the
  // relevant pcm handle(s): handles[0] = playback, handles[1] = capture.
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[0] );
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
      snd_pcm_drop( apiInfo->handles[1] );
  }

  // Release the per-API handle (condition variable + pcm handles).
  if ( apiInfo ) {
    pthread_cond_destroy( &apiInfo->runnable_cv );
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
    delete apiInfo;
    stream_.apiHandle = 0;
  }

  // Free the user-format buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  // Free the (possibly shared) device-format conversion buffer.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
|
f@0
|
7664
|
f@0
|
// Start a stopped stream: prepare the pcm device(s) if needed, mark the
// stream RUNNING, and wake the blocked callback thread.
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  // Playback side: handle[0].
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // Capture side: handle[1].  Skipped when the handles are hardware-linked
  // (synchronized), since starting playback then starts capture too.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Wake the callback thread even on the error path; it re-checks the
  // stream state after waking and returns if we are not RUNNING.
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
7717
|
f@0
|
7718 void RtApiAlsa :: stopStream()
|
f@0
|
7719 {
|
f@0
|
7720 verifyStream();
|
f@0
|
7721 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
7722 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
|
f@0
|
7723 error( RtAudioError::WARNING );
|
f@0
|
7724 return;
|
f@0
|
7725 }
|
f@0
|
7726
|
f@0
|
7727 stream_.state = STREAM_STOPPED;
|
f@0
|
7728 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
7729
|
f@0
|
7730 int result = 0;
|
f@0
|
7731 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
|
f@0
|
7732 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
|
f@0
|
7733 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
7734 if ( apiInfo->synchronized )
|
f@0
|
7735 result = snd_pcm_drop( handle[0] );
|
f@0
|
7736 else
|
f@0
|
7737 result = snd_pcm_drain( handle[0] );
|
f@0
|
7738 if ( result < 0 ) {
|
f@0
|
7739 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
|
f@0
|
7740 errorText_ = errorStream_.str();
|
f@0
|
7741 goto unlock;
|
f@0
|
7742 }
|
f@0
|
7743 }
|
f@0
|
7744
|
f@0
|
7745 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
|
f@0
|
7746 result = snd_pcm_drop( handle[1] );
|
f@0
|
7747 if ( result < 0 ) {
|
f@0
|
7748 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
|
f@0
|
7749 errorText_ = errorStream_.str();
|
f@0
|
7750 goto unlock;
|
f@0
|
7751 }
|
f@0
|
7752 }
|
f@0
|
7753
|
f@0
|
7754 unlock:
|
f@0
|
7755 apiInfo->runnable = false; // fixes high CPU usage when stopped
|
f@0
|
7756 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
7757
|
f@0
|
7758 if ( result >= 0 ) return;
|
f@0
|
7759 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
7760 }
|
f@0
|
7761
|
f@0
|
7762 void RtApiAlsa :: abortStream()
|
f@0
|
7763 {
|
f@0
|
7764 verifyStream();
|
f@0
|
7765 if ( stream_.state == STREAM_STOPPED ) {
|
f@0
|
7766 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
|
f@0
|
7767 error( RtAudioError::WARNING );
|
f@0
|
7768 return;
|
f@0
|
7769 }
|
f@0
|
7770
|
f@0
|
7771 stream_.state = STREAM_STOPPED;
|
f@0
|
7772 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
7773
|
f@0
|
7774 int result = 0;
|
f@0
|
7775 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
|
f@0
|
7776 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
|
f@0
|
7777 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
|
f@0
|
7778 result = snd_pcm_drop( handle[0] );
|
f@0
|
7779 if ( result < 0 ) {
|
f@0
|
7780 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
|
f@0
|
7781 errorText_ = errorStream_.str();
|
f@0
|
7782 goto unlock;
|
f@0
|
7783 }
|
f@0
|
7784 }
|
f@0
|
7785
|
f@0
|
7786 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
|
f@0
|
7787 result = snd_pcm_drop( handle[1] );
|
f@0
|
7788 if ( result < 0 ) {
|
f@0
|
7789 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
|
f@0
|
7790 errorText_ = errorStream_.str();
|
f@0
|
7791 goto unlock;
|
f@0
|
7792 }
|
f@0
|
7793 }
|
f@0
|
7794
|
f@0
|
7795 unlock:
|
f@0
|
7796 apiInfo->runnable = false; // fixes high CPU usage when stopped
|
f@0
|
7797 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
7798
|
f@0
|
7799 if ( result >= 0 ) return;
|
f@0
|
7800 error( RtAudioError::SYSTEM_ERROR );
|
f@0
|
7801 }
|
f@0
|
7802
|
f@0
|
// Core ALSA processing routine, driven repeatedly by alsaCallbackHandler().
// One invocation performs a single buffer cycle: park while the stream is
// stopped, invoke the user callback, then read from and/or write to the PCM
// device(s). Index convention throughout: [0] = OUTPUT/playback,
// [1] = INPUT/capture.
void RtApiAlsa :: callbackEvent()
{
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;

  // While stopped, sleep on the condition variable instead of spinning;
  // startStream() (or closeStream()) sets 'runnable' and signals us.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    // Woken for a reason other than a restart (e.g. stream being closed).
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any xrun flags recorded by a previous cycle so the
  // user callback sees under/overflow exactly once.
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  // User callback runs outside the mutex; it fills userBuffer[0] and/or
  // consumes userBuffer[1] from the previous read.
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // Return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when a format/channel
    // conversion is needed, otherwise straight into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build per-channel pointers into the single
      // contiguous buffer (each channel occupies bufferSize frames).
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Flag the overrun for the next callback and recover the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Skip post-processing of the (incomplete) input, but still attempt
      // the output half of a duplex cycle.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Per-channel pointer setup, mirroring the input path above.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Flag the underrun for the next callback and recover the device.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream clock; tick happens even on xrun-recovered cycles.
  RtApi::tickStreamTime();
  // Return value 1 requests a clean stop (with drain) after this cycle.
  if ( doStopStream == 1 ) this->stopStream();
}
|
f@0
|
7988
|
f@0
|
7989 static void *alsaCallbackHandler( void *ptr )
|
f@0
|
7990 {
|
f@0
|
7991 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
7992 RtApiAlsa *object = (RtApiAlsa *) info->object;
|
f@0
|
7993 bool *isRunning = &info->isRunning;
|
f@0
|
7994
|
f@0
|
7995 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
|
f@0
|
7996 if ( &info->doRealtime ) {
|
f@0
|
7997 pthread_t tID = pthread_self(); // ID of this thread
|
f@0
|
7998 sched_param prio = { info->priority }; // scheduling priority of thread
|
f@0
|
7999 pthread_setschedparam( tID, SCHED_RR, &prio );
|
f@0
|
8000 }
|
f@0
|
8001 #endif
|
f@0
|
8002
|
f@0
|
8003 while ( *isRunning == true ) {
|
f@0
|
8004 pthread_testcancel();
|
f@0
|
8005 object->callbackEvent();
|
f@0
|
8006 }
|
f@0
|
8007
|
f@0
|
8008 pthread_exit( NULL );
|
f@0
|
8009 }
|
f@0
|
8010
|
f@0
|
8011 //******************** End of __LINUX_ALSA__ *********************//
|
f@0
|
8012 #endif
|
f@0
|
8013
|
f@0
|
8014 #if defined(__LINUX_PULSE__)
|
f@0
|
8015
|
f@0
|
8016 // Code written by Peter Meerwald, pmeerw@pmeerw.net
|
f@0
|
8017 // and Tristan Matthews.
|
f@0
|
8018
|
f@0
|
8019 #include <pulse/error.h>
|
f@0
|
8020 #include <pulse/simple.h>
|
f@0
|
8021 #include <cstdio>
|
f@0
|
8022
|
f@0
|
// Sample rates advertised for the single PulseAudio "device".
// The list is zero-terminated so it can be walked with a pointer loop.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Pairs an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats passed through to PulseAudio natively; anything else is converted
// to FLOAT32 internally (see RtApiPulse::probeDeviceOpen). The {0, INVALID}
// entry terminates the table.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};

// Per-stream state for the PulseAudio backend, stored in stream_.apiHandle.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (NULL if output not open)
  pa_simple *s_rec;           // record connection (NULL if input not open)
  pthread_t thread;           // callback thread running pulseaudio_callback()
  pthread_cond_t runnable_cv; // signalled to wake a stopped callback thread
  bool runnable;              // guard predicate for runnable_cv
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
|
f@0
|
8045
|
f@0
|
8046 RtApiPulse::~RtApiPulse()
|
f@0
|
8047 {
|
f@0
|
8048 if ( stream_.state != STREAM_CLOSED )
|
f@0
|
8049 closeStream();
|
f@0
|
8050 }
|
f@0
|
8051
|
f@0
|
8052 unsigned int RtApiPulse::getDeviceCount( void )
|
f@0
|
8053 {
|
f@0
|
8054 return 1;
|
f@0
|
8055 }
|
f@0
|
8056
|
f@0
|
8057 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
|
f@0
|
8058 {
|
f@0
|
8059 RtAudio::DeviceInfo info;
|
f@0
|
8060 info.probed = true;
|
f@0
|
8061 info.name = "PulseAudio";
|
f@0
|
8062 info.outputChannels = 2;
|
f@0
|
8063 info.inputChannels = 2;
|
f@0
|
8064 info.duplexChannels = 2;
|
f@0
|
8065 info.isDefaultOutput = true;
|
f@0
|
8066 info.isDefaultInput = true;
|
f@0
|
8067
|
f@0
|
8068 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
|
f@0
|
8069 info.sampleRates.push_back( *sr );
|
f@0
|
8070
|
f@0
|
8071 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
|
f@0
|
8072
|
f@0
|
8073 return info;
|
f@0
|
8074 }
|
f@0
|
8075
|
f@0
|
8076 static void *pulseaudio_callback( void * user )
|
f@0
|
8077 {
|
f@0
|
8078 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
|
f@0
|
8079 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
|
f@0
|
8080 volatile bool *isRunning = &cbi->isRunning;
|
f@0
|
8081
|
f@0
|
8082 while ( *isRunning ) {
|
f@0
|
8083 pthread_testcancel();
|
f@0
|
8084 context->callbackEvent();
|
f@0
|
8085 }
|
f@0
|
8086
|
f@0
|
8087 pthread_exit( NULL );
|
f@0
|
8088 }
|
f@0
|
8089
|
f@0
|
// Tear down the PulseAudio stream: stop the callback loop, wake and join the
// callback thread, release both pa_simple connections and all sample buffers.
void RtApiPulse::closeStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // Tell pulseaudio_callback() to exit its loop.
  stream_.callbackInfo.isRunning = false;
  if ( pah ) {
    MUTEX_LOCK( &stream_.mutex );
    // If the callback thread is parked on runnable_cv (stream stopped), wake
    // it so the pthread_join() below cannot deadlock.
    if ( stream_.state == STREAM_STOPPED ) {
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    // Flush unplayed data before dropping the playback connection.
    if ( pah->s_play ) {
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  // Free the user-side buffers for both directions (0 = output, 1 = input).
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
|
f@0
|
8128
|
f@0
|
// One buffer cycle of the PulseAudio stream, driven by pulseaudio_callback():
// park while stopped, run the user callback, then write playback data and/or
// read capture data through the blocking pa_simple API.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // While stopped, sleep on the condition variable until startStream() (or
  // closeStream()) sets 'runnable'; bail out unless we were restarted.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke the user callback outside the mutex. Note: 'status' is always 0
  // here -- the simple API gives no xrun notification.
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // Return value 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // Device-side buffers: use the conversion buffer when a format/channel
  // conversion is configured, otherwise exchange user buffers directly.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // State may have changed while the callback ran / while acquiring the lock.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write of one buffer to the playback connection.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer from the record connection.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    // Convert captured data into the user's format/layout after the read.
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // Return value 1 requests a clean stop (with drain) after this cycle.
  if ( doStopStream == 1 )
    stopStream();
}
|
f@0
|
8220
|
f@0
|
8221 void RtApiPulse::startStream( void )
|
f@0
|
8222 {
|
f@0
|
8223 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
|
f@0
|
8224
|
f@0
|
8225 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
8226 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
|
f@0
|
8227 error( RtAudioError::INVALID_USE );
|
f@0
|
8228 return;
|
f@0
|
8229 }
|
f@0
|
8230 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
8231 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
|
f@0
|
8232 error( RtAudioError::WARNING );
|
f@0
|
8233 return;
|
f@0
|
8234 }
|
f@0
|
8235
|
f@0
|
8236 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
8237
|
f@0
|
8238 stream_.state = STREAM_RUNNING;
|
f@0
|
8239
|
f@0
|
8240 pah->runnable = true;
|
f@0
|
8241 pthread_cond_signal( &pah->runnable_cv );
|
f@0
|
8242 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
8243 }
|
f@0
|
8244
|
f@0
|
// Stop the stream cleanly: mark it stopped, then drain any queued playback
// data so the user hears everything that was written.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): state is set before taking the mutex -- presumably so the
  // callback thread observes STOPPED as soon as possible; confirm intent.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Drain (block until played) rather than flush; cf. abortStream().
    if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
|
f@0
|
8278
|
f@0
|
// Stop the stream immediately: mark it stopped and discard any queued
// playback data (flush) instead of waiting for it to play out (drain).
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // NOTE(review): state is set before taking the mutex -- presumably so the
  // callback thread observes STOPPED as soon as possible; confirm intent.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // Flush discards buffered samples; this is the "abort" half of the
    // stop/abort pair (stopStream() drains instead).
    if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
|
f@0
|
8312
|
f@0
|
8313 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
|
f@0
|
8314 unsigned int channels, unsigned int firstChannel,
|
f@0
|
8315 unsigned int sampleRate, RtAudioFormat format,
|
f@0
|
8316 unsigned int *bufferSize, RtAudio::StreamOptions *options )
|
f@0
|
8317 {
|
f@0
|
8318 PulseAudioHandle *pah = 0;
|
f@0
|
8319 unsigned long bufferBytes = 0;
|
f@0
|
8320 pa_sample_spec ss;
|
f@0
|
8321
|
f@0
|
8322 if ( device != 0 ) return false;
|
f@0
|
8323 if ( mode != INPUT && mode != OUTPUT ) return false;
|
f@0
|
8324 if ( channels != 1 && channels != 2 ) {
|
f@0
|
8325 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
|
f@0
|
8326 return false;
|
f@0
|
8327 }
|
f@0
|
8328 ss.channels = channels;
|
f@0
|
8329
|
f@0
|
8330 if ( firstChannel != 0 ) return false;
|
f@0
|
8331
|
f@0
|
8332 bool sr_found = false;
|
f@0
|
8333 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
|
f@0
|
8334 if ( sampleRate == *sr ) {
|
f@0
|
8335 sr_found = true;
|
f@0
|
8336 stream_.sampleRate = sampleRate;
|
f@0
|
8337 ss.rate = sampleRate;
|
f@0
|
8338 break;
|
f@0
|
8339 }
|
f@0
|
8340 }
|
f@0
|
8341 if ( !sr_found ) {
|
f@0
|
8342 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
|
f@0
|
8343 return false;
|
f@0
|
8344 }
|
f@0
|
8345
|
f@0
|
8346 bool sf_found = 0;
|
f@0
|
8347 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
|
f@0
|
8348 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
|
f@0
|
8349 if ( format == sf->rtaudio_format ) {
|
f@0
|
8350 sf_found = true;
|
f@0
|
8351 stream_.userFormat = sf->rtaudio_format;
|
f@0
|
8352 stream_.deviceFormat[mode] = stream_.userFormat;
|
f@0
|
8353 ss.format = sf->pa_format;
|
f@0
|
8354 break;
|
f@0
|
8355 }
|
f@0
|
8356 }
|
f@0
|
8357 if ( !sf_found ) { // Use internal data format conversion.
|
f@0
|
8358 stream_.userFormat = format;
|
f@0
|
8359 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
|
f@0
|
8360 ss.format = PA_SAMPLE_FLOAT32LE;
|
f@0
|
8361 }
|
f@0
|
8362
|
f@0
|
8363 // Set other stream parameters.
|
f@0
|
8364 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
|
f@0
|
8365 else stream_.userInterleaved = true;
|
f@0
|
8366 stream_.deviceInterleaved[mode] = true;
|
f@0
|
8367 stream_.nBuffers = 1;
|
f@0
|
8368 stream_.doByteSwap[mode] = false;
|
f@0
|
8369 stream_.nUserChannels[mode] = channels;
|
f@0
|
8370 stream_.nDeviceChannels[mode] = channels + firstChannel;
|
f@0
|
8371 stream_.channelOffset[mode] = 0;
|
f@0
|
8372 std::string streamName = "RtAudio";
|
f@0
|
8373
|
f@0
|
8374 // Set flags for buffer conversion.
|
f@0
|
8375 stream_.doConvertBuffer[mode] = false;
|
f@0
|
8376 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
8377 stream_.doConvertBuffer[mode] = true;
|
f@0
|
8378 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
|
f@0
|
8379 stream_.doConvertBuffer[mode] = true;
|
f@0
|
8380
|
f@0
|
8381 // Allocate necessary internal buffers.
|
f@0
|
8382 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
8383 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
8384 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
8385 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
8386 goto error;
|
f@0
|
8387 }
|
f@0
|
8388 stream_.bufferSize = *bufferSize;
|
f@0
|
8389
|
f@0
|
8390 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
8391
|
f@0
|
8392 bool makeBuffer = true;
|
f@0
|
8393 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
8394 if ( mode == INPUT ) {
|
f@0
|
8395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
8396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
8397 if ( bufferBytes <= bytesOut ) makeBuffer = false;
|
f@0
|
8398 }
|
f@0
|
8399 }
|
f@0
|
8400
|
f@0
|
8401 if ( makeBuffer ) {
|
f@0
|
8402 bufferBytes *= *bufferSize;
|
f@0
|
8403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
8404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
8405 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
8406 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
8407 goto error;
|
f@0
|
8408 }
|
f@0
|
8409 }
|
f@0
|
8410 }
|
f@0
|
8411
|
f@0
|
8412 stream_.device[mode] = device;
|
f@0
|
8413
|
f@0
|
8414 // Setup the buffer conversion information structure.
|
f@0
|
8415 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
|
f@0
|
8416
|
f@0
|
8417 if ( !stream_.apiHandle ) {
|
f@0
|
8418 PulseAudioHandle *pah = new PulseAudioHandle;
|
f@0
|
8419 if ( !pah ) {
|
f@0
|
8420 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
|
f@0
|
8421 goto error;
|
f@0
|
8422 }
|
f@0
|
8423
|
f@0
|
8424 stream_.apiHandle = pah;
|
f@0
|
8425 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
|
f@0
|
8426 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
|
f@0
|
8427 goto error;
|
f@0
|
8428 }
|
f@0
|
8429 }
|
f@0
|
8430 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
|
f@0
|
8431
|
f@0
|
8432 int error;
|
f@0
|
8433 if ( !options->streamName.empty() ) streamName = options->streamName;
|
f@0
|
8434 switch ( mode ) {
|
f@0
|
8435 case INPUT:
|
f@0
|
8436 pa_buffer_attr buffer_attr;
|
f@0
|
8437 buffer_attr.fragsize = bufferBytes;
|
f@0
|
8438 buffer_attr.maxlength = -1;
|
f@0
|
8439
|
f@0
|
8440 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
|
f@0
|
8441 if ( !pah->s_rec ) {
|
f@0
|
8442 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
|
f@0
|
8443 goto error;
|
f@0
|
8444 }
|
f@0
|
8445 break;
|
f@0
|
8446 case OUTPUT:
|
f@0
|
8447 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
|
f@0
|
8448 if ( !pah->s_play ) {
|
f@0
|
8449 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
|
f@0
|
8450 goto error;
|
f@0
|
8451 }
|
f@0
|
8452 break;
|
f@0
|
8453 default:
|
f@0
|
8454 goto error;
|
f@0
|
8455 }
|
f@0
|
8456
|
f@0
|
8457 if ( stream_.mode == UNINITIALIZED )
|
f@0
|
8458 stream_.mode = mode;
|
f@0
|
8459 else if ( stream_.mode == mode )
|
f@0
|
8460 goto error;
|
f@0
|
8461 else
|
f@0
|
8462 stream_.mode = DUPLEX;
|
f@0
|
8463
|
f@0
|
8464 if ( !stream_.callbackInfo.isRunning ) {
|
f@0
|
8465 stream_.callbackInfo.object = this;
|
f@0
|
8466 stream_.callbackInfo.isRunning = true;
|
f@0
|
8467 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
|
f@0
|
8468 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
|
f@0
|
8469 goto error;
|
f@0
|
8470 }
|
f@0
|
8471 }
|
f@0
|
8472
|
f@0
|
8473 stream_.state = STREAM_STOPPED;
|
f@0
|
8474 return true;
|
f@0
|
8475
|
f@0
|
8476 error:
|
f@0
|
8477 if ( pah && stream_.callbackInfo.isRunning ) {
|
f@0
|
8478 pthread_cond_destroy( &pah->runnable_cv );
|
f@0
|
8479 delete pah;
|
f@0
|
8480 stream_.apiHandle = 0;
|
f@0
|
8481 }
|
f@0
|
8482
|
f@0
|
8483 for ( int i=0; i<2; i++ ) {
|
f@0
|
8484 if ( stream_.userBuffer[i] ) {
|
f@0
|
8485 free( stream_.userBuffer[i] );
|
f@0
|
8486 stream_.userBuffer[i] = 0;
|
f@0
|
8487 }
|
f@0
|
8488 }
|
f@0
|
8489
|
f@0
|
8490 if ( stream_.deviceBuffer ) {
|
f@0
|
8491 free( stream_.deviceBuffer );
|
f@0
|
8492 stream_.deviceBuffer = 0;
|
f@0
|
8493 }
|
f@0
|
8494
|
f@0
|
8495 return FAILURE;
|
f@0
|
8496 }
|
f@0
|
8497
|
f@0
|
8498 //******************** End of __LINUX_PULSE__ *********************//
|
f@0
|
8499 #endif
|
f@0
|
8500
|
f@0
|
8501 #if defined(__LINUX_OSS__)
|
f@0
|
8502
|
f@0
|
8503 #include <unistd.h>
|
f@0
|
8504 #include <sys/ioctl.h>
|
f@0
|
8505 #include <unistd.h>
|
f@0
|
8506 #include <fcntl.h>
|
f@0
|
8507 #include <sys/soundcard.h>
|
f@0
|
8508 #include <errno.h>
|
f@0
|
8509 #include <math.h>
|
f@0
|
8510
|
f@0
|
8511 static void *ossCallbackHandler(void * ptr);
|
f@0
|
8512
|
f@0
|
8513 // A structure to hold various information related to the OSS API
|
f@0
|
8514 // implementation.
|
f@0
|
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];    // device ids ([0] = output, [1] = input)
  bool xrun[2]; // sticky under/overflow flags, reported to the user callback
  bool triggered;           // set once the devices have been trigger-started
  pthread_cond_t runnable;  // wakes a callback thread parked while stopped

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
|
f@0
|
8524
|
f@0
|
8525 RtApiOss :: RtApiOss()
|
f@0
|
8526 {
|
f@0
|
8527 // Nothing to do here.
|
f@0
|
8528 }
|
f@0
|
8529
|
f@0
|
8530 RtApiOss :: ~RtApiOss()
|
f@0
|
8531 {
|
f@0
|
8532 if ( stream_.state != STREAM_CLOSED ) closeStream();
|
f@0
|
8533 }
|
f@0
|
8534
|
f@0
|
8535 unsigned int RtApiOss :: getDeviceCount( void )
|
f@0
|
8536 {
|
f@0
|
8537 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
|
f@0
|
8538 if ( mixerfd == -1 ) {
|
f@0
|
8539 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
|
f@0
|
8540 error( RtAudioError::WARNING );
|
f@0
|
8541 return 0;
|
f@0
|
8542 }
|
f@0
|
8543
|
f@0
|
8544 oss_sysinfo sysinfo;
|
f@0
|
8545 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
|
f@0
|
8546 close( mixerfd );
|
f@0
|
8547 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
|
f@0
|
8548 error( RtAudioError::WARNING );
|
f@0
|
8549 return 0;
|
f@0
|
8550 }
|
f@0
|
8551
|
f@0
|
8552 close( mixerfd );
|
f@0
|
8553 return sysinfo.numaudios;
|
f@0
|
8554 }
|
f@0
|
8555
|
f@0
|
// Probe one OSS device via the mixer's SNDCTL_SYSINFO / SNDCTL_AUDIOINFO
// ioctls (OSS >= 4.0). On any failure, 'info.probed' stays false and a
// WARNING (or INVALID_USE for bad device ids) is raised.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
    return info;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
    return info;
  }

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe channels
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // Duplex channel count is the smaller of the two directions.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  }

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported sample rates: either the device's explicit rate
  // list, or (if none) its min/max range, intersected with SAMPLE_RATES.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          break;
        }
      }
    }
  }
  else {
    // Check min and max rate values;
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
        info.sampleRates.push_back( SAMPLE_RATES[k] );
    }
  }

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }
  else {
    info.probed = true;
    info.name = ainfo.name;
  }

  return info;
}
|
f@0
|
8664
|
f@0
|
8665
|
f@0
|
8666 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
|
f@0
|
8667 unsigned int firstChannel, unsigned int sampleRate,
|
f@0
|
8668 RtAudioFormat format, unsigned int *bufferSize,
|
f@0
|
8669 RtAudio::StreamOptions *options )
|
f@0
|
8670 {
|
f@0
|
8671 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
|
f@0
|
8672 if ( mixerfd == -1 ) {
|
f@0
|
8673 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
|
f@0
|
8674 return FAILURE;
|
f@0
|
8675 }
|
f@0
|
8676
|
f@0
|
8677 oss_sysinfo sysinfo;
|
f@0
|
8678 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
|
f@0
|
8679 if ( result == -1 ) {
|
f@0
|
8680 close( mixerfd );
|
f@0
|
8681 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
|
f@0
|
8682 return FAILURE;
|
f@0
|
8683 }
|
f@0
|
8684
|
f@0
|
8685 unsigned nDevices = sysinfo.numaudios;
|
f@0
|
8686 if ( nDevices == 0 ) {
|
f@0
|
8687 // This should not happen because a check is made before this function is called.
|
f@0
|
8688 close( mixerfd );
|
f@0
|
8689 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
|
f@0
|
8690 return FAILURE;
|
f@0
|
8691 }
|
f@0
|
8692
|
f@0
|
8693 if ( device >= nDevices ) {
|
f@0
|
8694 // This should not happen because a check is made before this function is called.
|
f@0
|
8695 close( mixerfd );
|
f@0
|
8696 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
|
f@0
|
8697 return FAILURE;
|
f@0
|
8698 }
|
f@0
|
8699
|
f@0
|
8700 oss_audioinfo ainfo;
|
f@0
|
8701 ainfo.dev = device;
|
f@0
|
8702 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
|
f@0
|
8703 close( mixerfd );
|
f@0
|
8704 if ( result == -1 ) {
|
f@0
|
8705 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
|
f@0
|
8706 errorText_ = errorStream_.str();
|
f@0
|
8707 return FAILURE;
|
f@0
|
8708 }
|
f@0
|
8709
|
f@0
|
8710 // Check if device supports input or output
|
f@0
|
8711 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
|
f@0
|
8712 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
|
f@0
|
8713 if ( mode == OUTPUT )
|
f@0
|
8714 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
|
f@0
|
8715 else
|
f@0
|
8716 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
|
f@0
|
8717 errorText_ = errorStream_.str();
|
f@0
|
8718 return FAILURE;
|
f@0
|
8719 }
|
f@0
|
8720
|
f@0
|
8721 int flags = 0;
|
f@0
|
8722 OssHandle *handle = (OssHandle *) stream_.apiHandle;
|
f@0
|
8723 if ( mode == OUTPUT )
|
f@0
|
8724 flags |= O_WRONLY;
|
f@0
|
8725 else { // mode == INPUT
|
f@0
|
8726 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
|
f@0
|
8727 // We just set the same device for playback ... close and reopen for duplex (OSS only).
|
f@0
|
8728 close( handle->id[0] );
|
f@0
|
8729 handle->id[0] = 0;
|
f@0
|
8730 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
|
f@0
|
8731 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
|
f@0
|
8732 errorText_ = errorStream_.str();
|
f@0
|
8733 return FAILURE;
|
f@0
|
8734 }
|
f@0
|
8735 // Check that the number previously set channels is the same.
|
f@0
|
8736 if ( stream_.nUserChannels[0] != channels ) {
|
f@0
|
8737 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
|
f@0
|
8738 errorText_ = errorStream_.str();
|
f@0
|
8739 return FAILURE;
|
f@0
|
8740 }
|
f@0
|
8741 flags |= O_RDWR;
|
f@0
|
8742 }
|
f@0
|
8743 else
|
f@0
|
8744 flags |= O_RDONLY;
|
f@0
|
8745 }
|
f@0
|
8746
|
f@0
|
8747 // Set exclusive access if specified.
|
f@0
|
8748 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
|
f@0
|
8749
|
f@0
|
8750 // Try to open the device.
|
f@0
|
8751 int fd;
|
f@0
|
8752 fd = open( ainfo.devnode, flags, 0 );
|
f@0
|
8753 if ( fd == -1 ) {
|
f@0
|
8754 if ( errno == EBUSY )
|
f@0
|
8755 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
|
f@0
|
8756 else
|
f@0
|
8757 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
|
f@0
|
8758 errorText_ = errorStream_.str();
|
f@0
|
8759 return FAILURE;
|
f@0
|
8760 }
|
f@0
|
8761
|
f@0
|
8762 // For duplex operation, specifically set this mode (this doesn't seem to work).
|
f@0
|
8763 /*
|
f@0
|
8764 if ( flags | O_RDWR ) {
|
f@0
|
8765 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
|
f@0
|
8766 if ( result == -1) {
|
f@0
|
8767 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
|
f@0
|
8768 errorText_ = errorStream_.str();
|
f@0
|
8769 return FAILURE;
|
f@0
|
8770 }
|
f@0
|
8771 }
|
f@0
|
8772 */
|
f@0
|
8773
|
f@0
|
8774 // Check the device channel support.
|
f@0
|
8775 stream_.nUserChannels[mode] = channels;
|
f@0
|
8776 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
|
f@0
|
8777 close( fd );
|
f@0
|
8778 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
|
f@0
|
8779 errorText_ = errorStream_.str();
|
f@0
|
8780 return FAILURE;
|
f@0
|
8781 }
|
f@0
|
8782
|
f@0
|
8783 // Set the number of channels.
|
f@0
|
8784 int deviceChannels = channels + firstChannel;
|
f@0
|
8785 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
|
f@0
|
8786 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
|
f@0
|
8787 close( fd );
|
f@0
|
8788 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
|
f@0
|
8789 errorText_ = errorStream_.str();
|
f@0
|
8790 return FAILURE;
|
f@0
|
8791 }
|
f@0
|
8792 stream_.nDeviceChannels[mode] = deviceChannels;
|
f@0
|
8793
|
f@0
|
8794 // Get the data format mask
|
f@0
|
8795 int mask;
|
f@0
|
8796 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
|
f@0
|
8797 if ( result == -1 ) {
|
f@0
|
8798 close( fd );
|
f@0
|
8799 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
|
f@0
|
8800 errorText_ = errorStream_.str();
|
f@0
|
8801 return FAILURE;
|
f@0
|
8802 }
|
f@0
|
8803
|
f@0
|
8804 // Determine how to set the device format.
|
f@0
|
8805 stream_.userFormat = format;
|
f@0
|
8806 int deviceFormat = -1;
|
f@0
|
8807 stream_.doByteSwap[mode] = false;
|
f@0
|
8808 if ( format == RTAUDIO_SINT8 ) {
|
f@0
|
8809 if ( mask & AFMT_S8 ) {
|
f@0
|
8810 deviceFormat = AFMT_S8;
|
f@0
|
8811 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
8812 }
|
f@0
|
8813 }
|
f@0
|
8814 else if ( format == RTAUDIO_SINT16 ) {
|
f@0
|
8815 if ( mask & AFMT_S16_NE ) {
|
f@0
|
8816 deviceFormat = AFMT_S16_NE;
|
f@0
|
8817 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
8818 }
|
f@0
|
8819 else if ( mask & AFMT_S16_OE ) {
|
f@0
|
8820 deviceFormat = AFMT_S16_OE;
|
f@0
|
8821 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
8822 stream_.doByteSwap[mode] = true;
|
f@0
|
8823 }
|
f@0
|
8824 }
|
f@0
|
8825 else if ( format == RTAUDIO_SINT24 ) {
|
f@0
|
8826 if ( mask & AFMT_S24_NE ) {
|
f@0
|
8827 deviceFormat = AFMT_S24_NE;
|
f@0
|
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
8829 }
|
f@0
|
8830 else if ( mask & AFMT_S24_OE ) {
|
f@0
|
8831 deviceFormat = AFMT_S24_OE;
|
f@0
|
8832 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
8833 stream_.doByteSwap[mode] = true;
|
f@0
|
8834 }
|
f@0
|
8835 }
|
f@0
|
8836 else if ( format == RTAUDIO_SINT32 ) {
|
f@0
|
8837 if ( mask & AFMT_S32_NE ) {
|
f@0
|
8838 deviceFormat = AFMT_S32_NE;
|
f@0
|
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
8840 }
|
f@0
|
8841 else if ( mask & AFMT_S32_OE ) {
|
f@0
|
8842 deviceFormat = AFMT_S32_OE;
|
f@0
|
8843 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
8844 stream_.doByteSwap[mode] = true;
|
f@0
|
8845 }
|
f@0
|
8846 }
|
f@0
|
8847
|
f@0
|
8848 if ( deviceFormat == -1 ) {
|
f@0
|
8849 // The user requested format is not natively supported by the device.
|
f@0
|
8850 if ( mask & AFMT_S16_NE ) {
|
f@0
|
8851 deviceFormat = AFMT_S16_NE;
|
f@0
|
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
8853 }
|
f@0
|
8854 else if ( mask & AFMT_S32_NE ) {
|
f@0
|
8855 deviceFormat = AFMT_S32_NE;
|
f@0
|
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
8857 }
|
f@0
|
8858 else if ( mask & AFMT_S24_NE ) {
|
f@0
|
8859 deviceFormat = AFMT_S24_NE;
|
f@0
|
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
8861 }
|
f@0
|
8862 else if ( mask & AFMT_S16_OE ) {
|
f@0
|
8863 deviceFormat = AFMT_S16_OE;
|
f@0
|
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
|
f@0
|
8865 stream_.doByteSwap[mode] = true;
|
f@0
|
8866 }
|
f@0
|
8867 else if ( mask & AFMT_S32_OE ) {
|
f@0
|
8868 deviceFormat = AFMT_S32_OE;
|
f@0
|
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
|
f@0
|
8870 stream_.doByteSwap[mode] = true;
|
f@0
|
8871 }
|
f@0
|
8872 else if ( mask & AFMT_S24_OE ) {
|
f@0
|
8873 deviceFormat = AFMT_S24_OE;
|
f@0
|
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
|
f@0
|
8875 stream_.doByteSwap[mode] = true;
|
f@0
|
8876 }
|
f@0
|
8877 else if ( mask & AFMT_S8) {
|
f@0
|
8878 deviceFormat = AFMT_S8;
|
f@0
|
8879 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
|
f@0
|
8880 }
|
f@0
|
8881 }
|
f@0
|
8882
|
f@0
|
8883 if ( stream_.deviceFormat[mode] == 0 ) {
|
f@0
|
8884 // This really shouldn't happen ...
|
f@0
|
8885 close( fd );
|
f@0
|
8886 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
|
f@0
|
8887 errorText_ = errorStream_.str();
|
f@0
|
8888 return FAILURE;
|
f@0
|
8889 }
|
f@0
|
8890
|
f@0
|
8891 // Set the data format.
|
f@0
|
8892 int temp = deviceFormat;
|
f@0
|
8893 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
|
f@0
|
8894 if ( result == -1 || deviceFormat != temp ) {
|
f@0
|
8895 close( fd );
|
f@0
|
8896 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
|
f@0
|
8897 errorText_ = errorStream_.str();
|
f@0
|
8898 return FAILURE;
|
f@0
|
8899 }
|
f@0
|
8900
|
f@0
|
8901 // Attempt to set the buffer size. According to OSS, the minimum
|
f@0
|
8902 // number of buffers is two. The supposed minimum buffer size is 16
|
f@0
|
8903 // bytes, so that will be our lower bound. The argument to this
|
f@0
|
8904 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
|
f@0
|
8905 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
|
f@0
|
8906 // We'll check the actual value used near the end of the setup
|
f@0
|
8907 // procedure.
|
f@0
|
8908 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
|
f@0
|
8909 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
|
f@0
|
8910 int buffers = 0;
|
f@0
|
8911 if ( options ) buffers = options->numberOfBuffers;
|
f@0
|
8912 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
|
f@0
|
8913 if ( buffers < 2 ) buffers = 3;
|
f@0
|
8914 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
|
f@0
|
8915 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
|
f@0
|
8916 if ( result == -1 ) {
|
f@0
|
8917 close( fd );
|
f@0
|
8918 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
|
f@0
|
8919 errorText_ = errorStream_.str();
|
f@0
|
8920 return FAILURE;
|
f@0
|
8921 }
|
f@0
|
8922 stream_.nBuffers = buffers;
|
f@0
|
8923
|
f@0
|
8924 // Save buffer size (in sample frames).
|
f@0
|
8925 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
|
f@0
|
8926 stream_.bufferSize = *bufferSize;
|
f@0
|
8927
|
f@0
|
8928 // Set the sample rate.
|
f@0
|
8929 int srate = sampleRate;
|
f@0
|
8930 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
|
f@0
|
8931 if ( result == -1 ) {
|
f@0
|
8932 close( fd );
|
f@0
|
8933 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
|
f@0
|
8934 errorText_ = errorStream_.str();
|
f@0
|
8935 return FAILURE;
|
f@0
|
8936 }
|
f@0
|
8937
|
f@0
|
8938 // Verify the sample rate setup worked.
|
f@0
|
8939 if ( abs( srate - sampleRate ) > 100 ) {
|
f@0
|
8940 close( fd );
|
f@0
|
8941 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
|
f@0
|
8942 errorText_ = errorStream_.str();
|
f@0
|
8943 return FAILURE;
|
f@0
|
8944 }
|
f@0
|
8945 stream_.sampleRate = sampleRate;
|
f@0
|
8946
|
f@0
|
8947 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
|
f@0
|
8948 // We're doing duplex setup here.
|
f@0
|
8949 stream_.deviceFormat[0] = stream_.deviceFormat[1];
|
f@0
|
8950 stream_.nDeviceChannels[0] = deviceChannels;
|
f@0
|
8951 }
|
f@0
|
8952
|
f@0
|
8953 // Set interleaving parameters.
|
f@0
|
8954 stream_.userInterleaved = true;
|
f@0
|
8955 stream_.deviceInterleaved[mode] = true;
|
f@0
|
8956 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
|
f@0
|
8957 stream_.userInterleaved = false;
|
f@0
|
8958
|
f@0
|
8959 // Set flags for buffer conversion
|
f@0
|
8960 stream_.doConvertBuffer[mode] = false;
|
f@0
|
8961 if ( stream_.userFormat != stream_.deviceFormat[mode] )
|
f@0
|
8962 stream_.doConvertBuffer[mode] = true;
|
f@0
|
8963 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
|
f@0
|
8964 stream_.doConvertBuffer[mode] = true;
|
f@0
|
8965 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
|
f@0
|
8966 stream_.nUserChannels[mode] > 1 )
|
f@0
|
8967 stream_.doConvertBuffer[mode] = true;
|
f@0
|
8968
|
f@0
|
8969 // Allocate the stream handles if necessary and then save.
|
f@0
|
8970 if ( stream_.apiHandle == 0 ) {
|
f@0
|
8971 try {
|
f@0
|
8972 handle = new OssHandle;
|
f@0
|
8973 }
|
f@0
|
8974 catch ( std::bad_alloc& ) {
|
f@0
|
8975 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
|
f@0
|
8976 goto error;
|
f@0
|
8977 }
|
f@0
|
8978
|
f@0
|
8979 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
|
f@0
|
8980 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
|
f@0
|
8981 goto error;
|
f@0
|
8982 }
|
f@0
|
8983
|
f@0
|
8984 stream_.apiHandle = (void *) handle;
|
f@0
|
8985 }
|
f@0
|
8986 else {
|
f@0
|
8987 handle = (OssHandle *) stream_.apiHandle;
|
f@0
|
8988 }
|
f@0
|
8989 handle->id[mode] = fd;
|
f@0
|
8990
|
f@0
|
8991 // Allocate necessary internal buffers.
|
f@0
|
8992 unsigned long bufferBytes;
|
f@0
|
8993 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
|
f@0
|
8994 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
|
f@0
|
8995 if ( stream_.userBuffer[mode] == NULL ) {
|
f@0
|
8996 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
|
f@0
|
8997 goto error;
|
f@0
|
8998 }
|
f@0
|
8999
|
f@0
|
9000 if ( stream_.doConvertBuffer[mode] ) {
|
f@0
|
9001
|
f@0
|
9002 bool makeBuffer = true;
|
f@0
|
9003 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
|
f@0
|
9004 if ( mode == INPUT ) {
|
f@0
|
9005 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
|
f@0
|
9006 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
|
f@0
|
9007 if ( bufferBytes <= bytesOut ) makeBuffer = false;
|
f@0
|
9008 }
|
f@0
|
9009 }
|
f@0
|
9010
|
f@0
|
9011 if ( makeBuffer ) {
|
f@0
|
9012 bufferBytes *= *bufferSize;
|
f@0
|
9013 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
|
f@0
|
9014 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
|
f@0
|
9015 if ( stream_.deviceBuffer == NULL ) {
|
f@0
|
9016 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
|
f@0
|
9017 goto error;
|
f@0
|
9018 }
|
f@0
|
9019 }
|
f@0
|
9020 }
|
f@0
|
9021
|
f@0
|
9022 stream_.device[mode] = device;
|
f@0
|
9023 stream_.state = STREAM_STOPPED;
|
f@0
|
9024
|
f@0
|
9025 // Setup the buffer conversion information structure.
|
f@0
|
9026 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
|
f@0
|
9027
|
f@0
|
9028 // Setup thread if necessary.
|
f@0
|
9029 if ( stream_.mode == OUTPUT && mode == INPUT ) {
|
f@0
|
9030 // We had already set up an output stream.
|
f@0
|
9031 stream_.mode = DUPLEX;
|
f@0
|
9032 if ( stream_.device[0] == device ) handle->id[0] = fd;
|
f@0
|
9033 }
|
f@0
|
9034 else {
|
f@0
|
9035 stream_.mode = mode;
|
f@0
|
9036
|
f@0
|
9037 // Setup callback thread.
|
f@0
|
9038 stream_.callbackInfo.object = (void *) this;
|
f@0
|
9039
|
f@0
|
9040 // Set the thread attributes for joinable and realtime scheduling
|
f@0
|
9041 // priority. The higher priority will only take affect if the
|
f@0
|
9042 // program is run as root or suid.
|
f@0
|
9043 pthread_attr_t attr;
|
f@0
|
9044 pthread_attr_init( &attr );
|
f@0
|
9045 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
|
f@0
|
9046 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
|
f@0
|
9047 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
|
f@0
|
9048 struct sched_param param;
|
f@0
|
9049 int priority = options->priority;
|
f@0
|
9050 int min = sched_get_priority_min( SCHED_RR );
|
f@0
|
9051 int max = sched_get_priority_max( SCHED_RR );
|
f@0
|
9052 if ( priority < min ) priority = min;
|
f@0
|
9053 else if ( priority > max ) priority = max;
|
f@0
|
9054 param.sched_priority = priority;
|
f@0
|
9055 pthread_attr_setschedparam( &attr, ¶m );
|
f@0
|
9056 pthread_attr_setschedpolicy( &attr, SCHED_RR );
|
f@0
|
9057 }
|
f@0
|
9058 else
|
f@0
|
9059 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
|
f@0
|
9060 #else
|
f@0
|
9061 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
|
f@0
|
9062 #endif
|
f@0
|
9063
|
f@0
|
9064 stream_.callbackInfo.isRunning = true;
|
f@0
|
9065 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
|
f@0
|
9066 pthread_attr_destroy( &attr );
|
f@0
|
9067 if ( result ) {
|
f@0
|
9068 stream_.callbackInfo.isRunning = false;
|
f@0
|
9069 errorText_ = "RtApiOss::error creating callback thread!";
|
f@0
|
9070 goto error;
|
f@0
|
9071 }
|
f@0
|
9072 }
|
f@0
|
9073
|
f@0
|
9074 return SUCCESS;
|
f@0
|
9075
|
f@0
|
9076 error:
|
f@0
|
9077 if ( handle ) {
|
f@0
|
9078 pthread_cond_destroy( &handle->runnable );
|
f@0
|
9079 if ( handle->id[0] ) close( handle->id[0] );
|
f@0
|
9080 if ( handle->id[1] ) close( handle->id[1] );
|
f@0
|
9081 delete handle;
|
f@0
|
9082 stream_.apiHandle = 0;
|
f@0
|
9083 }
|
f@0
|
9084
|
f@0
|
9085 for ( int i=0; i<2; i++ ) {
|
f@0
|
9086 if ( stream_.userBuffer[i] ) {
|
f@0
|
9087 free( stream_.userBuffer[i] );
|
f@0
|
9088 stream_.userBuffer[i] = 0;
|
f@0
|
9089 }
|
f@0
|
9090 }
|
f@0
|
9091
|
f@0
|
9092 if ( stream_.deviceBuffer ) {
|
f@0
|
9093 free( stream_.deviceBuffer );
|
f@0
|
9094 stream_.deviceBuffer = 0;
|
f@0
|
9095 }
|
f@0
|
9096
|
f@0
|
9097 return FAILURE;
|
f@0
|
9098 }
|
f@0
|
9099
|
f@0
|
9100 void RtApiOss :: closeStream()
|
f@0
|
9101 {
|
f@0
|
9102 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
9103 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
|
f@0
|
9104 error( RtAudioError::WARNING );
|
f@0
|
9105 return;
|
f@0
|
9106 }
|
f@0
|
9107
|
f@0
|
9108 OssHandle *handle = (OssHandle *) stream_.apiHandle;
|
f@0
|
9109 stream_.callbackInfo.isRunning = false;
|
f@0
|
9110 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
9111 if ( stream_.state == STREAM_STOPPED )
|
f@0
|
9112 pthread_cond_signal( &handle->runnable );
|
f@0
|
9113 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
9114 pthread_join( stream_.callbackInfo.thread, NULL );
|
f@0
|
9115
|
f@0
|
9116 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
9117 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
|
f@0
|
9118 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
|
f@0
|
9119 else
|
f@0
|
9120 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
|
f@0
|
9121 stream_.state = STREAM_STOPPED;
|
f@0
|
9122 }
|
f@0
|
9123
|
f@0
|
9124 if ( handle ) {
|
f@0
|
9125 pthread_cond_destroy( &handle->runnable );
|
f@0
|
9126 if ( handle->id[0] ) close( handle->id[0] );
|
f@0
|
9127 if ( handle->id[1] ) close( handle->id[1] );
|
f@0
|
9128 delete handle;
|
f@0
|
9129 stream_.apiHandle = 0;
|
f@0
|
9130 }
|
f@0
|
9131
|
f@0
|
9132 for ( int i=0; i<2; i++ ) {
|
f@0
|
9133 if ( stream_.userBuffer[i] ) {
|
f@0
|
9134 free( stream_.userBuffer[i] );
|
f@0
|
9135 stream_.userBuffer[i] = 0;
|
f@0
|
9136 }
|
f@0
|
9137 }
|
f@0
|
9138
|
f@0
|
9139 if ( stream_.deviceBuffer ) {
|
f@0
|
9140 free( stream_.deviceBuffer );
|
f@0
|
9141 stream_.deviceBuffer = 0;
|
f@0
|
9142 }
|
f@0
|
9143
|
f@0
|
9144 stream_.mode = UNINITIALIZED;
|
f@0
|
9145 stream_.state = STREAM_CLOSED;
|
f@0
|
9146 }
|
f@0
|
9147
|
f@0
|
9148 void RtApiOss :: startStream()
|
f@0
|
9149 {
|
f@0
|
9150 verifyStream();
|
f@0
|
9151 if ( stream_.state == STREAM_RUNNING ) {
|
f@0
|
9152 errorText_ = "RtApiOss::startStream(): the stream is already running!";
|
f@0
|
9153 error( RtAudioError::WARNING );
|
f@0
|
9154 return;
|
f@0
|
9155 }
|
f@0
|
9156
|
f@0
|
9157 MUTEX_LOCK( &stream_.mutex );
|
f@0
|
9158
|
f@0
|
9159 stream_.state = STREAM_RUNNING;
|
f@0
|
9160
|
f@0
|
9161 // No need to do anything else here ... OSS automatically starts
|
f@0
|
9162 // when fed samples.
|
f@0
|
9163
|
f@0
|
9164 MUTEX_UNLOCK( &stream_.mutex );
|
f@0
|
9165
|
f@0
|
9166 OssHandle *handle = (OssHandle *) stream_.apiHandle;
|
f@0
|
9167 pthread_cond_signal( &handle->runnable );
|
f@0
|
9168 }
|
f@0
|
9169
|
f@0
|
// Stop a running stream gracefully.  For output/duplex streams the device
// is first "drained" by writing a few buffers of silence (OSS has no
// reliable drain ioctl here), then halted with SNDCTL_DSP_HALT.  Issues a
// warning if already stopped; raises SYSTEM_ERROR if a halt ioctl fails.
void RtApiOss :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Choose whichever buffer is actually fed to the device: the
    // conversion buffer when format/channel conversion is active,
    // otherwise the user buffer.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Write nBuffers+1 buffers of silence so queued audio plays out
    // before the halt, avoiding an audible truncation/click.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    // Duplex streams will need to re-trigger on the next start.
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex shares a single descriptor
  // (in which case the halt above already covered it).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  // Always mark the stream stopped and release the mutex, even on error.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
9241
|
f@0
|
// Stop a running stream immediately (no silence flush, unlike stopStream):
// pending output is discarded via SNDCTL_DSP_HALT.  Issues a warning if
// already stopped; raises SYSTEM_ERROR if a halt ioctl fails.
void RtApiOss :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    // Duplex streams will need to re-trigger on the next start.
    handle->triggered = false;
  }

  // Halt the input side too, unless duplex shares a single descriptor
  // (in which case the halt above already covered it).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  // Always mark the stream stopped and release the mutex, even on error.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
|
f@0
|
9287
|
f@0
|
// One iteration of the audio callback loop (invoked repeatedly by
// ossCallbackHandler): wait while stopped, run the user callback, then
// write output to and/or read input from the device, performing byte
// swapping and format/channel conversion as configured at open time.
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream()/closeStream()
    // signals.  If we were woken for anything other than RUNNING
    // (e.g. shutdown), bail out without processing audio.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any under/overflow flags recorded by earlier
  // read/write failures.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return value of 2 means abort immediately (discard pending audio).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  // Declared without initializers so the goto above may jump over them.
  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First buffer of a duplex stream: prime the output while the
      // device triggers are disabled, then enable input and output
      // together so both directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream time; honor a callback return of 1 (graceful stop).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
|
f@0
|
9419
|
f@0
|
9420 static void *ossCallbackHandler( void *ptr )
|
f@0
|
9421 {
|
f@0
|
9422 CallbackInfo *info = (CallbackInfo *) ptr;
|
f@0
|
9423 RtApiOss *object = (RtApiOss *) info->object;
|
f@0
|
9424 bool *isRunning = &info->isRunning;
|
f@0
|
9425
|
f@0
|
9426 while ( *isRunning == true ) {
|
f@0
|
9427 pthread_testcancel();
|
f@0
|
9428 object->callbackEvent();
|
f@0
|
9429 }
|
f@0
|
9430
|
f@0
|
9431 pthread_exit( NULL );
|
f@0
|
9432 }
|
f@0
|
9433
|
f@0
|
9434 //******************** End of __LINUX_OSS__ *********************//
|
f@0
|
9435 #endif
|
f@0
|
9436
|
f@0
|
9437
|
f@0
|
9438 // *************************************************** //
|
f@0
|
9439 //
|
f@0
|
9440 // Protected common (OS-independent) RtAudio methods.
|
f@0
|
9441 //
|
f@0
|
9442 // *************************************************** //
|
f@0
|
9443
|
f@0
|
9444 // This method can be modified to control the behavior of error
|
f@0
|
9445 // message printing.
|
f@0
|
9446 void RtApi :: error( RtAudioError::Type type )
|
f@0
|
9447 {
|
f@0
|
9448 errorStream_.str(""); // clear the ostringstream
|
f@0
|
9449
|
f@0
|
9450 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
|
f@0
|
9451 if ( errorCallback ) {
|
f@0
|
9452 // abortStream() can generate new error messages. Ignore them. Just keep original one.
|
f@0
|
9453
|
f@0
|
9454 if ( firstErrorOccurred_ )
|
f@0
|
9455 return;
|
f@0
|
9456
|
f@0
|
9457 firstErrorOccurred_ = true;
|
f@0
|
9458 const std::string errorMessage = errorText_;
|
f@0
|
9459
|
f@0
|
9460 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
|
f@0
|
9461 stream_.callbackInfo.isRunning = false; // exit from the thread
|
f@0
|
9462 abortStream();
|
f@0
|
9463 }
|
f@0
|
9464
|
f@0
|
9465 errorCallback( type, errorMessage );
|
f@0
|
9466 firstErrorOccurred_ = false;
|
f@0
|
9467 return;
|
f@0
|
9468 }
|
f@0
|
9469
|
f@0
|
9470 if ( type == RtAudioError::WARNING && showWarnings_ == true )
|
f@0
|
9471 std::cerr << '\n' << errorText_ << "\n\n";
|
f@0
|
9472 else if ( type != RtAudioError::WARNING )
|
f@0
|
9473 throw( RtAudioError( errorText_, type ) );
|
f@0
|
9474 }
|
f@0
|
9475
|
f@0
|
9476 void RtApi :: verifyStream()
|
f@0
|
9477 {
|
f@0
|
9478 if ( stream_.state == STREAM_CLOSED ) {
|
f@0
|
9479 errorText_ = "RtApi:: a stream is not open!";
|
f@0
|
9480 error( RtAudioError::INVALID_USE );
|
f@0
|
9481 }
|
f@0
|
9482 }
|
f@0
|
9483
|
f@0
|
9484 void RtApi :: clearStreamInfo()
|
f@0
|
9485 {
|
f@0
|
9486 stream_.mode = UNINITIALIZED;
|
f@0
|
9487 stream_.state = STREAM_CLOSED;
|
f@0
|
9488 stream_.sampleRate = 0;
|
f@0
|
9489 stream_.bufferSize = 0;
|
f@0
|
9490 stream_.nBuffers = 0;
|
f@0
|
9491 stream_.userFormat = 0;
|
f@0
|
9492 stream_.userInterleaved = true;
|
f@0
|
9493 stream_.streamTime = 0.0;
|
f@0
|
9494 stream_.apiHandle = 0;
|
f@0
|
9495 stream_.deviceBuffer = 0;
|
f@0
|
9496 stream_.callbackInfo.callback = 0;
|
f@0
|
9497 stream_.callbackInfo.userData = 0;
|
f@0
|
9498 stream_.callbackInfo.isRunning = false;
|
f@0
|
9499 stream_.callbackInfo.errorCallback = 0;
|
f@0
|
9500 for ( int i=0; i<2; i++ ) {
|
f@0
|
9501 stream_.device[i] = 11111;
|
f@0
|
9502 stream_.doConvertBuffer[i] = false;
|
f@0
|
9503 stream_.deviceInterleaved[i] = true;
|
f@0
|
9504 stream_.doByteSwap[i] = false;
|
f@0
|
9505 stream_.nUserChannels[i] = 0;
|
f@0
|
9506 stream_.nDeviceChannels[i] = 0;
|
f@0
|
9507 stream_.channelOffset[i] = 0;
|
f@0
|
9508 stream_.deviceFormat[i] = 0;
|
f@0
|
9509 stream_.latency[i] = 0;
|
f@0
|
9510 stream_.userBuffer[i] = 0;
|
f@0
|
9511 stream_.convertInfo[i].channels = 0;
|
f@0
|
9512 stream_.convertInfo[i].inJump = 0;
|
f@0
|
9513 stream_.convertInfo[i].outJump = 0;
|
f@0
|
9514 stream_.convertInfo[i].inFormat = 0;
|
f@0
|
9515 stream_.convertInfo[i].outFormat = 0;
|
f@0
|
9516 stream_.convertInfo[i].inOffset.clear();
|
f@0
|
9517 stream_.convertInfo[i].outOffset.clear();
|
f@0
|
9518 }
|
f@0
|
9519 }
|
f@0
|
9520
|
f@0
|
9521 unsigned int RtApi :: formatBytes( RtAudioFormat format )
|
f@0
|
9522 {
|
f@0
|
9523 if ( format == RTAUDIO_SINT16 )
|
f@0
|
9524 return 2;
|
f@0
|
9525 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
|
f@0
|
9526 return 4;
|
f@0
|
9527 else if ( format == RTAUDIO_FLOAT64 )
|
f@0
|
9528 return 8;
|
f@0
|
9529 else if ( format == RTAUDIO_SINT24 )
|
f@0
|
9530 return 3;
|
f@0
|
9531 else if ( format == RTAUDIO_SINT8 )
|
f@0
|
9532 return 1;
|
f@0
|
9533
|
f@0
|
9534 errorText_ = "RtApi::formatBytes: undefined format.";
|
f@0
|
9535 error( RtAudioError::WARNING );
|
f@0
|
9536
|
f@0
|
9537 return 0;
|
f@0
|
9538 }
|
f@0
|
9539
|
f@0
|
9540 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
|
f@0
|
9541 {
|
f@0
|
9542 if ( mode == INPUT ) { // convert device to user buffer
|
f@0
|
9543 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
|
f@0
|
9544 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
|
f@0
|
9545 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
|
f@0
|
9546 stream_.convertInfo[mode].outFormat = stream_.userFormat;
|
f@0
|
9547 }
|
f@0
|
9548 else { // convert user to device buffer
|
f@0
|
9549 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
|
f@0
|
9550 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
|
f@0
|
9551 stream_.convertInfo[mode].inFormat = stream_.userFormat;
|
f@0
|
9552 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
|
f@0
|
9553 }
|
f@0
|
9554
|
f@0
|
9555 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
|
f@0
|
9556 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
|
f@0
|
9557 else
|
f@0
|
9558 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
|
f@0
|
9559
|
f@0
|
9560 // Set up the interleave/deinterleave offsets.
|
f@0
|
9561 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
|
f@0
|
9562 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
|
f@0
|
9563 ( mode == INPUT && stream_.userInterleaved ) ) {
|
f@0
|
9564 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
|
f@0
|
9565 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
|
f@0
|
9566 stream_.convertInfo[mode].outOffset.push_back( k );
|
f@0
|
9567 stream_.convertInfo[mode].inJump = 1;
|
f@0
|
9568 }
|
f@0
|
9569 }
|
f@0
|
9570 else {
|
f@0
|
9571 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
|
f@0
|
9572 stream_.convertInfo[mode].inOffset.push_back( k );
|
f@0
|
9573 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
|
f@0
|
9574 stream_.convertInfo[mode].outJump = 1;
|
f@0
|
9575 }
|
f@0
|
9576 }
|
f@0
|
9577 }
|
f@0
|
9578 else { // no (de)interleaving
|
f@0
|
9579 if ( stream_.userInterleaved ) {
|
f@0
|
9580 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
|
f@0
|
9581 stream_.convertInfo[mode].inOffset.push_back( k );
|
f@0
|
9582 stream_.convertInfo[mode].outOffset.push_back( k );
|
f@0
|
9583 }
|
f@0
|
9584 }
|
f@0
|
9585 else {
|
f@0
|
9586 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
|
f@0
|
9587 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
|
f@0
|
9588 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
|
f@0
|
9589 stream_.convertInfo[mode].inJump = 1;
|
f@0
|
9590 stream_.convertInfo[mode].outJump = 1;
|
f@0
|
9591 }
|
f@0
|
9592 }
|
f@0
|
9593 }
|
f@0
|
9594
|
f@0
|
9595 // Add channel offset.
|
f@0
|
9596 if ( firstChannel > 0 ) {
|
f@0
|
9597 if ( stream_.deviceInterleaved[mode] ) {
|
f@0
|
9598 if ( mode == OUTPUT ) {
|
f@0
|
9599 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
|
f@0
|
9600 stream_.convertInfo[mode].outOffset[k] += firstChannel;
|
f@0
|
9601 }
|
f@0
|
9602 else {
|
f@0
|
9603 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
|
f@0
|
9604 stream_.convertInfo[mode].inOffset[k] += firstChannel;
|
f@0
|
9605 }
|
f@0
|
9606 }
|
f@0
|
9607 else {
|
f@0
|
9608 if ( mode == OUTPUT ) {
|
f@0
|
9609 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
|
f@0
|
9610 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
|
f@0
|
9611 }
|
f@0
|
9612 else {
|
f@0
|
9613 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
|
f@0
|
9614 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
|
f@0
|
9615 }
|
f@0
|
9616 }
|
f@0
|
9617 }
|
f@0
|
9618 }
|
f@0
|
9619
|
f@0
|
9620 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
|
f@0
|
9621 {
|
f@0
|
9622 // This function does format conversion, input/output channel compensation, and
|
f@0
|
9623 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
|
f@0
|
9624 // the lower three bytes of a 32-bit integer.
|
f@0
|
9625
|
f@0
|
9626 // Clear our device buffer when in/out duplex device channels are different
|
f@0
|
9627 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
|
f@0
|
9628 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
|
f@0
|
9629 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
|
f@0
|
9630
|
f@0
|
9631 int j;
|
f@0
|
9632 if (info.outFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9633 Float64 scale;
|
f@0
|
9634 Float64 *out = (Float64 *)outBuffer;
|
f@0
|
9635
|
f@0
|
9636 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9637 signed char *in = (signed char *)inBuffer;
|
f@0
|
9638 scale = 1.0 / 127.5;
|
f@0
|
9639 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9640 for (j=0; j<info.channels; j++) {
|
f@0
|
9641 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
|
f@0
|
9642 out[info.outOffset[j]] += 0.5;
|
f@0
|
9643 out[info.outOffset[j]] *= scale;
|
f@0
|
9644 }
|
f@0
|
9645 in += info.inJump;
|
f@0
|
9646 out += info.outJump;
|
f@0
|
9647 }
|
f@0
|
9648 }
|
f@0
|
9649 else if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
9650 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
9651 scale = 1.0 / 32767.5;
|
f@0
|
9652 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9653 for (j=0; j<info.channels; j++) {
|
f@0
|
9654 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
|
f@0
|
9655 out[info.outOffset[j]] += 0.5;
|
f@0
|
9656 out[info.outOffset[j]] *= scale;
|
f@0
|
9657 }
|
f@0
|
9658 in += info.inJump;
|
f@0
|
9659 out += info.outJump;
|
f@0
|
9660 }
|
f@0
|
9661 }
|
f@0
|
9662 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
9663 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
9664 scale = 1.0 / 8388607.5;
|
f@0
|
9665 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9666 for (j=0; j<info.channels; j++) {
|
f@0
|
9667 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
|
f@0
|
9668 out[info.outOffset[j]] += 0.5;
|
f@0
|
9669 out[info.outOffset[j]] *= scale;
|
f@0
|
9670 }
|
f@0
|
9671 in += info.inJump;
|
f@0
|
9672 out += info.outJump;
|
f@0
|
9673 }
|
f@0
|
9674 }
|
f@0
|
9675 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
9676 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
9677 scale = 1.0 / 2147483647.5;
|
f@0
|
9678 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9679 for (j=0; j<info.channels; j++) {
|
f@0
|
9680 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
|
f@0
|
9681 out[info.outOffset[j]] += 0.5;
|
f@0
|
9682 out[info.outOffset[j]] *= scale;
|
f@0
|
9683 }
|
f@0
|
9684 in += info.inJump;
|
f@0
|
9685 out += info.outJump;
|
f@0
|
9686 }
|
f@0
|
9687 }
|
f@0
|
9688 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9689 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9691 for (j=0; j<info.channels; j++) {
|
f@0
|
9692 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
|
f@0
|
9693 }
|
f@0
|
9694 in += info.inJump;
|
f@0
|
9695 out += info.outJump;
|
f@0
|
9696 }
|
f@0
|
9697 }
|
f@0
|
9698 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9699 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9700 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
9701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9702 for (j=0; j<info.channels; j++) {
|
f@0
|
9703 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9704 }
|
f@0
|
9705 in += info.inJump;
|
f@0
|
9706 out += info.outJump;
|
f@0
|
9707 }
|
f@0
|
9708 }
|
f@0
|
9709 }
|
f@0
|
9710 else if (info.outFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9711 Float32 scale;
|
f@0
|
9712 Float32 *out = (Float32 *)outBuffer;
|
f@0
|
9713
|
f@0
|
9714 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9715 signed char *in = (signed char *)inBuffer;
|
f@0
|
9716 scale = (Float32) ( 1.0 / 127.5 );
|
f@0
|
9717 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9718 for (j=0; j<info.channels; j++) {
|
f@0
|
9719 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
|
f@0
|
9720 out[info.outOffset[j]] += 0.5;
|
f@0
|
9721 out[info.outOffset[j]] *= scale;
|
f@0
|
9722 }
|
f@0
|
9723 in += info.inJump;
|
f@0
|
9724 out += info.outJump;
|
f@0
|
9725 }
|
f@0
|
9726 }
|
f@0
|
9727 else if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
9728 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
9729 scale = (Float32) ( 1.0 / 32767.5 );
|
f@0
|
9730 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9731 for (j=0; j<info.channels; j++) {
|
f@0
|
9732 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
|
f@0
|
9733 out[info.outOffset[j]] += 0.5;
|
f@0
|
9734 out[info.outOffset[j]] *= scale;
|
f@0
|
9735 }
|
f@0
|
9736 in += info.inJump;
|
f@0
|
9737 out += info.outJump;
|
f@0
|
9738 }
|
f@0
|
9739 }
|
f@0
|
9740 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
9741 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
9742 scale = (Float32) ( 1.0 / 8388607.5 );
|
f@0
|
9743 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9744 for (j=0; j<info.channels; j++) {
|
f@0
|
9745 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
|
f@0
|
9746 out[info.outOffset[j]] += 0.5;
|
f@0
|
9747 out[info.outOffset[j]] *= scale;
|
f@0
|
9748 }
|
f@0
|
9749 in += info.inJump;
|
f@0
|
9750 out += info.outJump;
|
f@0
|
9751 }
|
f@0
|
9752 }
|
f@0
|
9753 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
9754 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
9755 scale = (Float32) ( 1.0 / 2147483647.5 );
|
f@0
|
9756 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9757 for (j=0; j<info.channels; j++) {
|
f@0
|
9758 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
|
f@0
|
9759 out[info.outOffset[j]] += 0.5;
|
f@0
|
9760 out[info.outOffset[j]] *= scale;
|
f@0
|
9761 }
|
f@0
|
9762 in += info.inJump;
|
f@0
|
9763 out += info.outJump;
|
f@0
|
9764 }
|
f@0
|
9765 }
|
f@0
|
9766 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9767 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9768 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
9769 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9770 for (j=0; j<info.channels; j++) {
|
f@0
|
9771 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9772 }
|
f@0
|
9773 in += info.inJump;
|
f@0
|
9774 out += info.outJump;
|
f@0
|
9775 }
|
f@0
|
9776 }
|
f@0
|
9777 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9778 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9780 for (j=0; j<info.channels; j++) {
|
f@0
|
9781 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
|
f@0
|
9782 }
|
f@0
|
9783 in += info.inJump;
|
f@0
|
9784 out += info.outJump;
|
f@0
|
9785 }
|
f@0
|
9786 }
|
f@0
|
9787 }
|
f@0
|
9788 else if (info.outFormat == RTAUDIO_SINT32) {
|
f@0
|
9789 Int32 *out = (Int32 *)outBuffer;
|
f@0
|
9790 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9791 signed char *in = (signed char *)inBuffer;
|
f@0
|
9792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9793 for (j=0; j<info.channels; j++) {
|
f@0
|
9794 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
|
f@0
|
9795 out[info.outOffset[j]] <<= 24;
|
f@0
|
9796 }
|
f@0
|
9797 in += info.inJump;
|
f@0
|
9798 out += info.outJump;
|
f@0
|
9799 }
|
f@0
|
9800 }
|
f@0
|
9801 else if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
9802 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9804 for (j=0; j<info.channels; j++) {
|
f@0
|
9805 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
|
f@0
|
9806 out[info.outOffset[j]] <<= 16;
|
f@0
|
9807 }
|
f@0
|
9808 in += info.inJump;
|
f@0
|
9809 out += info.outJump;
|
f@0
|
9810 }
|
f@0
|
9811 }
|
f@0
|
9812 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
9813 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
9814 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9815 for (j=0; j<info.channels; j++) {
|
f@0
|
9816 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
|
f@0
|
9817 out[info.outOffset[j]] <<= 8;
|
f@0
|
9818 }
|
f@0
|
9819 in += info.inJump;
|
f@0
|
9820 out += info.outJump;
|
f@0
|
9821 }
|
f@0
|
9822 }
|
f@0
|
9823 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
9824 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9825 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9827 for (j=0; j<info.channels; j++) {
|
f@0
|
9828 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9829 }
|
f@0
|
9830 in += info.inJump;
|
f@0
|
9831 out += info.outJump;
|
f@0
|
9832 }
|
f@0
|
9833 }
|
f@0
|
9834 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9835 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
9836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9837 for (j=0; j<info.channels; j++) {
|
f@0
|
9838 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
|
f@0
|
9839 }
|
f@0
|
9840 in += info.inJump;
|
f@0
|
9841 out += info.outJump;
|
f@0
|
9842 }
|
f@0
|
9843 }
|
f@0
|
9844 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9845 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9847 for (j=0; j<info.channels; j++) {
|
f@0
|
9848 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
|
f@0
|
9849 }
|
f@0
|
9850 in += info.inJump;
|
f@0
|
9851 out += info.outJump;
|
f@0
|
9852 }
|
f@0
|
9853 }
|
f@0
|
9854 }
|
f@0
|
9855 else if (info.outFormat == RTAUDIO_SINT24) {
|
f@0
|
9856 Int24 *out = (Int24 *)outBuffer;
|
f@0
|
9857 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9858 signed char *in = (signed char *)inBuffer;
|
f@0
|
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9860 for (j=0; j<info.channels; j++) {
|
f@0
|
9861 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
|
f@0
|
9862 //out[info.outOffset[j]] <<= 16;
|
f@0
|
9863 }
|
f@0
|
9864 in += info.inJump;
|
f@0
|
9865 out += info.outJump;
|
f@0
|
9866 }
|
f@0
|
9867 }
|
f@0
|
9868 else if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
9869 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
9870 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9871 for (j=0; j<info.channels; j++) {
|
f@0
|
9872 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
|
f@0
|
9873 //out[info.outOffset[j]] <<= 8;
|
f@0
|
9874 }
|
f@0
|
9875 in += info.inJump;
|
f@0
|
9876 out += info.outJump;
|
f@0
|
9877 }
|
f@0
|
9878 }
|
f@0
|
9879 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
9880 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9881 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9883 for (j=0; j<info.channels; j++) {
|
f@0
|
9884 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9885 }
|
f@0
|
9886 in += info.inJump;
|
f@0
|
9887 out += info.outJump;
|
f@0
|
9888 }
|
f@0
|
9889 }
|
f@0
|
9890 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
9891 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9893 for (j=0; j<info.channels; j++) {
|
f@0
|
9894 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
|
f@0
|
9895 //out[info.outOffset[j]] >>= 8;
|
f@0
|
9896 }
|
f@0
|
9897 in += info.inJump;
|
f@0
|
9898 out += info.outJump;
|
f@0
|
9899 }
|
f@0
|
9900 }
|
f@0
|
9901 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9902 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9904 for (j=0; j<info.channels; j++) {
|
f@0
|
9905 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
|
f@0
|
9906 }
|
f@0
|
9907 in += info.inJump;
|
f@0
|
9908 out += info.outJump;
|
f@0
|
9909 }
|
f@0
|
9910 }
|
f@0
|
9911 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9912 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9914 for (j=0; j<info.channels; j++) {
|
f@0
|
9915 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
|
f@0
|
9916 }
|
f@0
|
9917 in += info.inJump;
|
f@0
|
9918 out += info.outJump;
|
f@0
|
9919 }
|
f@0
|
9920 }
|
f@0
|
9921 }
|
f@0
|
9922 else if (info.outFormat == RTAUDIO_SINT16) {
|
f@0
|
9923 Int16 *out = (Int16 *)outBuffer;
|
f@0
|
9924 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9925 signed char *in = (signed char *)inBuffer;
|
f@0
|
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9927 for (j=0; j<info.channels; j++) {
|
f@0
|
9928 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
|
f@0
|
9929 out[info.outOffset[j]] <<= 8;
|
f@0
|
9930 }
|
f@0
|
9931 in += info.inJump;
|
f@0
|
9932 out += info.outJump;
|
f@0
|
9933 }
|
f@0
|
9934 }
|
f@0
|
9935 else if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
9936 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9937 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
9938 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9939 for (j=0; j<info.channels; j++) {
|
f@0
|
9940 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9941 }
|
f@0
|
9942 in += info.inJump;
|
f@0
|
9943 out += info.outJump;
|
f@0
|
9944 }
|
f@0
|
9945 }
|
f@0
|
9946 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
9947 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
9948 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9949 for (j=0; j<info.channels; j++) {
|
f@0
|
9950 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
|
f@0
|
9951 }
|
f@0
|
9952 in += info.inJump;
|
f@0
|
9953 out += info.outJump;
|
f@0
|
9954 }
|
f@0
|
9955 }
|
f@0
|
9956 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
9957 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9959 for (j=0; j<info.channels; j++) {
|
f@0
|
9960 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
|
f@0
|
9961 }
|
f@0
|
9962 in += info.inJump;
|
f@0
|
9963 out += info.outJump;
|
f@0
|
9964 }
|
f@0
|
9965 }
|
f@0
|
9966 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
9967 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9969 for (j=0; j<info.channels; j++) {
|
f@0
|
9970 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
|
f@0
|
9971 }
|
f@0
|
9972 in += info.inJump;
|
f@0
|
9973 out += info.outJump;
|
f@0
|
9974 }
|
f@0
|
9975 }
|
f@0
|
9976 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
9977 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9979 for (j=0; j<info.channels; j++) {
|
f@0
|
9980 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
|
f@0
|
9981 }
|
f@0
|
9982 in += info.inJump;
|
f@0
|
9983 out += info.outJump;
|
f@0
|
9984 }
|
f@0
|
9985 }
|
f@0
|
9986 }
|
f@0
|
9987 else if (info.outFormat == RTAUDIO_SINT8) {
|
f@0
|
9988 signed char *out = (signed char *)outBuffer;
|
f@0
|
9989 if (info.inFormat == RTAUDIO_SINT8) {
|
f@0
|
9990 // Channel compensation and/or (de)interleaving only.
|
f@0
|
9991 signed char *in = (signed char *)inBuffer;
|
f@0
|
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
9993 for (j=0; j<info.channels; j++) {
|
f@0
|
9994 out[info.outOffset[j]] = in[info.inOffset[j]];
|
f@0
|
9995 }
|
f@0
|
9996 in += info.inJump;
|
f@0
|
9997 out += info.outJump;
|
f@0
|
9998 }
|
f@0
|
9999 }
|
f@0
|
10000 if (info.inFormat == RTAUDIO_SINT16) {
|
f@0
|
10001 Int16 *in = (Int16 *)inBuffer;
|
f@0
|
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
10003 for (j=0; j<info.channels; j++) {
|
f@0
|
10004 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
|
f@0
|
10005 }
|
f@0
|
10006 in += info.inJump;
|
f@0
|
10007 out += info.outJump;
|
f@0
|
10008 }
|
f@0
|
10009 }
|
f@0
|
10010 else if (info.inFormat == RTAUDIO_SINT24) {
|
f@0
|
10011 Int24 *in = (Int24 *)inBuffer;
|
f@0
|
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
10013 for (j=0; j<info.channels; j++) {
|
f@0
|
10014 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
|
f@0
|
10015 }
|
f@0
|
10016 in += info.inJump;
|
f@0
|
10017 out += info.outJump;
|
f@0
|
10018 }
|
f@0
|
10019 }
|
f@0
|
10020 else if (info.inFormat == RTAUDIO_SINT32) {
|
f@0
|
10021 Int32 *in = (Int32 *)inBuffer;
|
f@0
|
10022 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
10023 for (j=0; j<info.channels; j++) {
|
f@0
|
10024 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
|
f@0
|
10025 }
|
f@0
|
10026 in += info.inJump;
|
f@0
|
10027 out += info.outJump;
|
f@0
|
10028 }
|
f@0
|
10029 }
|
f@0
|
10030 else if (info.inFormat == RTAUDIO_FLOAT32) {
|
f@0
|
10031 Float32 *in = (Float32 *)inBuffer;
|
f@0
|
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
10033 for (j=0; j<info.channels; j++) {
|
f@0
|
10034 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
|
f@0
|
10035 }
|
f@0
|
10036 in += info.inJump;
|
f@0
|
10037 out += info.outJump;
|
f@0
|
10038 }
|
f@0
|
10039 }
|
f@0
|
10040 else if (info.inFormat == RTAUDIO_FLOAT64) {
|
f@0
|
10041 Float64 *in = (Float64 *)inBuffer;
|
f@0
|
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
|
f@0
|
10043 for (j=0; j<info.channels; j++) {
|
f@0
|
10044 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
|
f@0
|
10045 }
|
f@0
|
10046 in += info.inJump;
|
f@0
|
10047 out += info.outJump;
|
f@0
|
10048 }
|
f@0
|
10049 }
|
f@0
|
10050 }
|
f@0
|
10051 }
|
f@0
|
10052
|
f@0
|
10053 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
|
f@0
|
10054 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
|
f@0
|
10055 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
|
f@0
|
10056
|
f@0
|
10057 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
|
f@0
|
10058 {
|
f@0
|
10059 register char val;
|
f@0
|
10060 register char *ptr;
|
f@0
|
10061
|
f@0
|
10062 ptr = buffer;
|
f@0
|
10063 if ( format == RTAUDIO_SINT16 ) {
|
f@0
|
10064 for ( unsigned int i=0; i<samples; i++ ) {
|
f@0
|
10065 // Swap 1st and 2nd bytes.
|
f@0
|
10066 val = *(ptr);
|
f@0
|
10067 *(ptr) = *(ptr+1);
|
f@0
|
10068 *(ptr+1) = val;
|
f@0
|
10069
|
f@0
|
10070 // Increment 2 bytes.
|
f@0
|
10071 ptr += 2;
|
f@0
|
10072 }
|
f@0
|
10073 }
|
f@0
|
10074 else if ( format == RTAUDIO_SINT32 ||
|
f@0
|
10075 format == RTAUDIO_FLOAT32 ) {
|
f@0
|
10076 for ( unsigned int i=0; i<samples; i++ ) {
|
f@0
|
10077 // Swap 1st and 4th bytes.
|
f@0
|
10078 val = *(ptr);
|
f@0
|
10079 *(ptr) = *(ptr+3);
|
f@0
|
10080 *(ptr+3) = val;
|
f@0
|
10081
|
f@0
|
10082 // Swap 2nd and 3rd bytes.
|
f@0
|
10083 ptr += 1;
|
f@0
|
10084 val = *(ptr);
|
f@0
|
10085 *(ptr) = *(ptr+1);
|
f@0
|
10086 *(ptr+1) = val;
|
f@0
|
10087
|
f@0
|
10088 // Increment 3 more bytes.
|
f@0
|
10089 ptr += 3;
|
f@0
|
10090 }
|
f@0
|
10091 }
|
f@0
|
10092 else if ( format == RTAUDIO_SINT24 ) {
|
f@0
|
10093 for ( unsigned int i=0; i<samples; i++ ) {
|
f@0
|
10094 // Swap 1st and 3rd bytes.
|
f@0
|
10095 val = *(ptr);
|
f@0
|
10096 *(ptr) = *(ptr+2);
|
f@0
|
10097 *(ptr+2) = val;
|
f@0
|
10098
|
f@0
|
10099 // Increment 2 more bytes.
|
f@0
|
10100 ptr += 2;
|
f@0
|
10101 }
|
f@0
|
10102 }
|
f@0
|
10103 else if ( format == RTAUDIO_FLOAT64 ) {
|
f@0
|
10104 for ( unsigned int i=0; i<samples; i++ ) {
|
f@0
|
10105 // Swap 1st and 8th bytes
|
f@0
|
10106 val = *(ptr);
|
f@0
|
10107 *(ptr) = *(ptr+7);
|
f@0
|
10108 *(ptr+7) = val;
|
f@0
|
10109
|
f@0
|
10110 // Swap 2nd and 7th bytes
|
f@0
|
10111 ptr += 1;
|
f@0
|
10112 val = *(ptr);
|
f@0
|
10113 *(ptr) = *(ptr+5);
|
f@0
|
10114 *(ptr+5) = val;
|
f@0
|
10115
|
f@0
|
10116 // Swap 3rd and 6th bytes
|
f@0
|
10117 ptr += 1;
|
f@0
|
10118 val = *(ptr);
|
f@0
|
10119 *(ptr) = *(ptr+3);
|
f@0
|
10120 *(ptr+3) = val;
|
f@0
|
10121
|
f@0
|
10122 // Swap 4th and 5th bytes
|
f@0
|
10123 ptr += 1;
|
f@0
|
10124 val = *(ptr);
|
f@0
|
10125 *(ptr) = *(ptr+1);
|
f@0
|
10126 *(ptr+1) = val;
|
f@0
|
10127
|
f@0
|
10128 // Increment 5 more bytes.
|
f@0
|
10129 ptr += 5;
|
f@0
|
10130 }
|
f@0
|
10131 }
|
f@0
|
10132 }
|
f@0
|
10133
|
f@0
|
10134 // Indentation settings for Vim and Emacs
|
f@0
|
10135 //
|
f@0
|
10136 // Local Variables:
|
f@0
|
10137 // c-basic-offset: 2
|
f@0
|
10138 // indent-tabs-mode: nil
|
f@0
|
10139 // End:
|
f@0
|
10140 //
|
f@0
|
10141 // vim: et sts=2 sw=2
|
f@0
|
10142
|