comparison stk/src/RtAudio.cpp @ 0:4606bd505630 tip

first import
author Fiore Martin <f.martin@qmul.ac.uk>
date Sat, 13 Jun 2015 15:08:10 +0100
parents
children
comparison
equal deleted inserted replaced
-1:000000000000 0:4606bd505630
1 /************************************************************************/
2 /*! \class RtAudio
3 \brief Realtime audio i/o C++ classes.
4
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
9
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
11
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2014 Gary P. Scavone
14
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
22
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
25
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
30
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 */
39 /************************************************************************/
40
41 // RtAudio: Version 4.1.1
42 #pragma once
43
44
45 #include "../include/RtAudio.h"
46 #include <iostream>
47 #include <cstdlib>
48 #include <cstring>
49 #include <climits>
50
// Return the larger of two values; when the values compare equal the
// first argument is returned (same tie-breaking as std::max).
template <class T>
const T& max( const T& first, const T& second )
{
  if ( first < second )
    return second;
  return first;
}
54
// Static variable definitions.
// Table of common discrete sample rates that are probed when a device
// reports a continuous range of supported rates rather than a discrete
// list.  MAX_SAMPLE_RATES must equal the number of entries below.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
61
// Platform mutex abstraction used to guard the stream structure:
// Windows builds use critical sections, the POSIX-style backends use
// pthread mutexes.  When no backend is selected, dummy definitions
// keep the code compiling (LOCK/UNLOCK are only referenced from
// backend-specific code, so only INITIALIZE/DESTROY need stubs).
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
77
78 // *************************************************** //
79 //
80 // RtAudio definitions.
81 //
82 // *************************************************** //
83
84 std::string RtAudio :: getVersion( void ) throw()
85 {
86 return RTAUDIO_VERSION;
87 }
88
// Fill 'apis' with the audio backends compiled into this build.
// Any previous contents of the vector are discarded.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
{
  apis.clear();

  // The order here will control the order of RtAudio's API search in
  // the constructor.
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#endif
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#endif
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#endif
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#endif
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#endif
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#endif
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#endif
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#endif
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
#endif
}
123
// Replace the current backend instance (if any) with a new one for the
// requested API.  If the requested API was not compiled in, rtapi_ is
// left null; callers must check it after this returns.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
167
168 RtAudio :: RtAudio( RtAudio::Api api )
169 {
170 rtapi_ = 0;
171
172 if ( api != UNSPECIFIED ) {
173 // Attempt to open the specified API.
174 openRtApi( api );
175 if ( rtapi_ ) return;
176
177 // No compiled support for specified API value. Issue a debug
178 // warning and continue as if no API was specified.
179 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
180 }
181
182 // Iterate through the compiled APIs and return as soon as we find
183 // one with at least one device or we reach the end of the list.
184 std::vector< RtAudio::Api > apis;
185 getCompiledApi( apis );
186 for ( unsigned int i=0; i<apis.size(); i++ ) {
187 openRtApi( apis[i] );
188 if ( rtapi_->getDeviceCount() ) break;
189 }
190
191 if ( rtapi_ ) return;
192
193 // It should not be possible to get here because the preprocessor
194 // definition __RTAUDIO_DUMMY__ is automatically defined if no
195 // API-specific definitions are passed to the compiler. But just in
196 // case something weird happens, we'll thow an error.
197 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
198 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
199 }
200
201 RtAudio :: ~RtAudio() throw()
202 {
203 if ( rtapi_ )
204 delete rtapi_;
205 }
206
207 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
208 RtAudio::StreamParameters *inputParameters,
209 RtAudioFormat format, unsigned int sampleRate,
210 unsigned int *bufferFrames,
211 RtAudioCallback callback, void *userData,
212 RtAudio::StreamOptions *options,
213 RtAudioErrorCallback errorCallback )
214 {
215 return rtapi_->openStream( outputParameters, inputParameters, format,
216 sampleRate, bufferFrames, callback,
217 userData, options, errorCallback );
218 }
219
220 // *************************************************** //
221 //
222 // Public RtApi definitions (see end of file for
223 // private or protected utility functions).
224 //
225 // *************************************************** //
226
// Base-class constructor: put the stream bookkeeping into a known
// "closed" state and create the mutex that guards it.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  // User-side conversion buffers (output = [0], input = [1]) are
  // allocated lazily by the backends.
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}
238
// Base-class destructor: tear down the stream mutex created in the
// constructor.  Subclasses are responsible for closing their streams.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
243
// Validate all user arguments, probe/open the requested device(s) via
// the backend's probeDeviceOpen(), and leave the stream STOPPED on
// success.  Failures are reported through error() with INVALID_USE for
// bad arguments and SYSTEM_ERROR for device-probe failures; on such
// failures the function returns without opening a stream.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // A zero byte count from formatBytes() means 'format' is not one of
  // the RTAUDIO_* format constants.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side; probeDeviceOpen()
  // is backend-specific and fills in the stream_ structure.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // The output half was already opened above; undo it so we do not
      // leave a half-open duplex stream behind.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual number of buffers chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
336
337 unsigned int RtApi :: getDefaultInputDevice( void )
338 {
339 // Should be implemented in subclasses if possible.
340 return 0;
341 }
342
343 unsigned int RtApi :: getDefaultOutputDevice( void )
344 {
345 // Should be implemented in subclasses if possible.
346 return 0;
347 }
348
349 void RtApi :: closeStream( void )
350 {
351 // MUST be implemented in subclasses!
352 return;
353 }
354
// MUST be implemented in subclasses!  Attempt to open 'device' for the
// given mode/channels/rate/format.  The base class cannot open any
// device, so it unconditionally reports FAILURE.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  return FAILURE;
}
363
364 void RtApi :: tickStreamTime( void )
365 {
366 // Subclasses that do not provide their own implementation of
367 // getStreamTime should call this function once per buffer I/O to
368 // provide basic stream time support.
369
370 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
371
372 #if defined( HAVE_GETTIMEOFDAY )
373 gettimeofday( &stream_.lastTickTimestamp, NULL );
374 #endif
375 }
376
377 long RtApi :: getStreamLatency( void )
378 {
379 verifyStream();
380
381 long totalLatency = 0;
382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
383 totalLatency = stream_.latency[0];
384 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
385 totalLatency += stream_.latency[1];
386
387 return totalLatency;
388 }
389
// Return the current stream time in seconds.  When gettimeofday() is
// available and the stream is running, the time is interpolated from
// the last tick; otherwise the tick-accumulated value is returned.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // No interpolation while stopped, or before any audio has been
  // processed (streamTime still exactly 0.0).
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  // Wall-clock delta since the last tick, converted to seconds.
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
412
413 void RtApi :: setStreamTime( double time )
414 {
415 verifyStream();
416
417 if ( time >= 0.0 )
418 stream_.streamTime = time;
419 }
420
421 unsigned int RtApi :: getStreamSampleRate( void )
422 {
423 verifyStream();
424
425 return stream_.sampleRate;
426 }
427
428
429 // *************************************************** //
430 //
431 // OS/API-specific methods.
432 //
433 // *************************************************** //
434
435 #if defined(__MACOSX_CORE__)
436
437 // The OS X CoreAudio API is designed to use a separate callback
438 // procedure for each of its audio devices. A single RtAudio duplex
439 // stream using two different devices is supported here, though it
440 // cannot be guaranteed to always behave correctly because we cannot
441 // synchronize these two callbacks.
442 //
443 // A property listener is installed for over/underrun information.
444 // However, no functionality is currently provided to allow property
445 // listeners to trigger user handlers because it is unclear what could
446 // be done if a critical stream parameter (buffer size, sample rate,
447 // device disconnect) notification arrived. The listeners entail
448 // quite a bit of extra code and most likely, a user program wouldn't
449 // be prepared for the result anyway. However, we do provide a flag
450 // to the client callback function to inform of an over/underrun.
451
452 // A structure to hold various information related to the CoreAudio API
453 // implementation.
// A structure to hold various information related to the CoreAudio API
// implementation.  Two-element arrays are indexed by direction:
// [0] = output device, [1] = input device.
struct CoreHandle {
  AudioDeviceID id[2];            // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];  // IOProc handles (10.5+ API)
#endif
  UInt32 iStream[2];              // device stream index (or first if using multiple)
  UInt32 nStreams[2];             // number of streams to use
  bool xrun[2];                   // over/underrun flags set by the xrun listener
  char *deviceBuffer;             // intermediate buffer for format/channel conversion
  pthread_cond_t condition;       // signaled by the callback thread (e.g. when draining)
  int drainCounter;               // Tracks callback counts when draining
  bool internalDrain;             // Indicates if stop is initiated from callback or not.

  // Default-construct with both directions zeroed and one stream each.
  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
470
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.  Setting the run loop to NULL tells CoreAudio to use
  // its own internal thread for property notifications.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    // Non-fatal: report as a warning and continue.
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
489
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.  closeStream() is a no-risk call here because
  // we only make it when a stream is actually open.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
497
498 unsigned int RtApiCore :: getDeviceCount( void )
499 {
500 // Find out how many audio devices there are, if any.
501 UInt32 dataSize;
502 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
503 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
504 if ( result != noErr ) {
505 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
506 error( RtAudioError::WARNING );
507 return 0;
508 }
509
510 return dataSize / sizeof( AudioDeviceID );
511 }
512
// Map CoreAudio's default input AudioDeviceID to RtAudio's 0-based
// device index by locating it in the system device list.  Returns 0
// (the first device) when there is at most one device or on error.
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list to search for the default id.
  dataSize *= nDevices;
  // NOTE(review): variable-length array — a compiler extension, not
  // standard C++; fine under clang on OS X but worth replacing with a
  // std::vector if portability matters.
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
545
// Map CoreAudio's default output AudioDeviceID to RtAudio's 0-based
// device index by locating it in the system device list.  Returns 0
// (the first device) when there is at most one device or on error.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // Fetch the full device list to search for the default id.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  // NOTE(review): variable-length array — a compiler extension, not
  // standard C++; fine under clang on OS X but worth replacing with a
  // std::vector if portability matters.
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
578
// Probe device 'device' (0-based RtAudio index) and fill in a
// DeviceInfo: name, channel counts, supported sample rates, native
// format, and default-device flags.  On any failure a WARNING is
// issued and the partially-filled info is returned with probed=false.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Translate the RtAudio device index into a CoreAudio AudioDeviceID.
  // NOTE(review): variable-length array — a compiler extension, not
  // standard C++.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.  The reported name is "<manufacturer>: <name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // length*3+1 bytes: presumably sized for multi-byte encodings of the
  // CFString's UTF-16 length — TODO confirm this bound is sufficient
  // for all encodings returned by CFStringGetSystemEncoding().
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information.  Total channels = sum over all of
  // the device's output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.  The scope is currently set
  // to input (from the stream-configuration query above); switch it
  // back to output for output-capable devices.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
      info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
    else {
      haveValueRange = true;
      // Narrow to the intersection of all reported ranges.
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // When a continuous range was reported, include the standard rates
  // from SAMPLE_RATES that fall inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
        info.sampleRates.push_back( SAMPLE_RATES[k] );
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
810
811 static OSStatus callbackHandler( AudioDeviceID inDevice,
812 const AudioTimeStamp* /*inNow*/,
813 const AudioBufferList* inInputData,
814 const AudioTimeStamp* /*inInputTime*/,
815 AudioBufferList* outOutputData,
816 const AudioTimeStamp* /*inOutputTime*/,
817 void* infoPointer )
818 {
819 CallbackInfo *info = (CallbackInfo *) infoPointer;
820
821 RtApiCore *object = (RtApiCore *) info->object;
822 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
823 return kAudioHardwareUnspecifiedError;
824 else
825 return kAudioHardwareNoError;
826 }
827
828 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
829 UInt32 nAddresses,
830 const AudioObjectPropertyAddress properties[],
831 void* handlePointer )
832 {
833 CoreHandle *handle = (CoreHandle *) handlePointer;
834 for ( UInt32 i=0; i<nAddresses; i++ ) {
835 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
836 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
837 handle->xrun[1] = true;
838 else
839 handle->xrun[0] = true;
840 }
841 }
842
843 return kAudioHardwareNoError;
844 }
845
846 static OSStatus rateListener( AudioObjectID inDevice,
847 UInt32 /*nAddresses*/,
848 const AudioObjectPropertyAddress /*properties*/[],
849 void* ratePointer )
850 {
851 Float64 *rate = (Float64 *) ratePointer;
852 UInt32 dataSize = sizeof( Float64 );
853 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
854 kAudioObjectPropertyScopeGlobal,
855 kAudioObjectPropertyElementMaster };
856 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
857 return kAudioHardwareNoError;
858 }
859
860 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
861 unsigned int firstChannel, unsigned int sampleRate,
862 RtAudioFormat format, unsigned int *bufferSize,
863 RtAudio::StreamOptions *options )
864 {
865 // Get device ID
866 unsigned int nDevices = getDeviceCount();
867 if ( nDevices == 0 ) {
868 // This should not happen because a check is made before this function is called.
869 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
870 return FAILURE;
871 }
872
873 if ( device >= nDevices ) {
874 // This should not happen because a check is made before this function is called.
875 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
876 return FAILURE;
877 }
878
879 AudioDeviceID deviceList[ nDevices ];
880 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
881 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
882 kAudioObjectPropertyScopeGlobal,
883 kAudioObjectPropertyElementMaster };
884 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
885 0, NULL, &dataSize, (void *) &deviceList );
886 if ( result != noErr ) {
887 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
888 return FAILURE;
889 }
890
891 AudioDeviceID id = deviceList[ device ];
892
893 // Setup for stream mode.
894 bool isInput = false;
895 if ( mode == INPUT ) {
896 isInput = true;
897 property.mScope = kAudioDevicePropertyScopeInput;
898 }
899 else
900 property.mScope = kAudioDevicePropertyScopeOutput;
901
902 // Get the stream "configuration".
903 AudioBufferList *bufferList = nil;
904 dataSize = 0;
905 property.mSelector = kAudioDevicePropertyStreamConfiguration;
906 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
907 if ( result != noErr || dataSize == 0 ) {
908 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
909 errorText_ = errorStream_.str();
910 return FAILURE;
911 }
912
913 // Allocate the AudioBufferList.
914 bufferList = (AudioBufferList *) malloc( dataSize );
915 if ( bufferList == NULL ) {
916 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
917 return FAILURE;
918 }
919
920 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
921 if (result != noErr || dataSize == 0) {
922 free( bufferList );
923 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
924 errorText_ = errorStream_.str();
925 return FAILURE;
926 }
927
928 // Search for one or more streams that contain the desired number of
929 // channels. CoreAudio devices can have an arbitrary number of
930 // streams and each stream can have an arbitrary number of channels.
931 // For each stream, a single buffer of interleaved samples is
932 // provided. RtAudio prefers the use of one stream of interleaved
933 // data or multiple consecutive single-channel streams. However, we
934 // now support multiple consecutive multi-channel streams of
935 // interleaved data as well.
936 UInt32 iStream, offsetCounter = firstChannel;
937 UInt32 nStreams = bufferList->mNumberBuffers;
938 bool monoMode = false;
939 bool foundStream = false;
940
941 // First check that the device supports the requested number of
942 // channels.
943 UInt32 deviceChannels = 0;
944 for ( iStream=0; iStream<nStreams; iStream++ )
945 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
946
947 if ( deviceChannels < ( channels + firstChannel ) ) {
948 free( bufferList );
949 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
950 errorText_ = errorStream_.str();
951 return FAILURE;
952 }
953
954 // Look for a single stream meeting our needs.
955 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
956 for ( iStream=0; iStream<nStreams; iStream++ ) {
957 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
958 if ( streamChannels >= channels + offsetCounter ) {
959 firstStream = iStream;
960 channelOffset = offsetCounter;
961 foundStream = true;
962 break;
963 }
964 if ( streamChannels > offsetCounter ) break;
965 offsetCounter -= streamChannels;
966 }
967
968 // If we didn't find a single stream above, then we should be able
969 // to meet the channel specification with multiple streams.
970 if ( foundStream == false ) {
971 monoMode = true;
972 offsetCounter = firstChannel;
973 for ( iStream=0; iStream<nStreams; iStream++ ) {
974 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
975 if ( streamChannels > offsetCounter ) break;
976 offsetCounter -= streamChannels;
977 }
978
979 firstStream = iStream;
980 channelOffset = offsetCounter;
981 Int32 channelCounter = channels + offsetCounter - streamChannels;
982
983 if ( streamChannels > 1 ) monoMode = false;
984 while ( channelCounter > 0 ) {
985 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
986 if ( streamChannels > 1 ) monoMode = false;
987 channelCounter -= streamChannels;
988 streamCount++;
989 }
990 }
991
992 free( bufferList );
993
994 // Determine the buffer size.
995 AudioValueRange bufferRange;
996 dataSize = sizeof( AudioValueRange );
997 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
998 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
999
1000 if ( result != noErr ) {
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1002 errorText_ = errorStream_.str();
1003 return FAILURE;
1004 }
1005
1006 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1007 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1008 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1009
1010 // Set the buffer size. For multiple streams, I'm assuming we only
1011 // need to make this setting for the master channel.
1012 UInt32 theSize = (UInt32) *bufferSize;
1013 dataSize = sizeof( UInt32 );
1014 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1015 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1016
1017 if ( result != noErr ) {
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1019 errorText_ = errorStream_.str();
1020 return FAILURE;
1021 }
1022
1023 // If attempting to setup a duplex stream, the bufferSize parameter
1024 // MUST be the same in both directions!
1025 *bufferSize = theSize;
1026 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1027 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1028 errorText_ = errorStream_.str();
1029 return FAILURE;
1030 }
1031
1032 stream_.bufferSize = *bufferSize;
1033 stream_.nBuffers = 1;
1034
1035 // Try to set "hog" mode ... it's not clear to me this is working.
1036 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1037 pid_t hog_pid;
1038 dataSize = sizeof( hog_pid );
1039 property.mSelector = kAudioDevicePropertyHogMode;
1040 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1043 errorText_ = errorStream_.str();
1044 return FAILURE;
1045 }
1046
1047 if ( hog_pid != getpid() ) {
1048 hog_pid = getpid();
1049 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1050 if ( result != noErr ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1052 errorText_ = errorStream_.str();
1053 return FAILURE;
1054 }
1055 }
1056 }
1057
1058 // Check and if necessary, change the sample rate for the device.
1059 Float64 nominalRate;
1060 dataSize = sizeof( Float64 );
1061 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1062 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1063 if ( result != noErr ) {
1064 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1065 errorText_ = errorStream_.str();
1066 return FAILURE;
1067 }
1068
1069 // Only change the sample rate if off by more than 1 Hz.
1070 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1071
1072 // Set a property listener for the sample rate change
1073 Float64 reportedRate = 0.0;
1074 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1075 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1076 if ( result != noErr ) {
1077 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1078 errorText_ = errorStream_.str();
1079 return FAILURE;
1080 }
1081
1082 nominalRate = (Float64) sampleRate;
1083 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1084 if ( result != noErr ) {
1085 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1086 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1087 errorText_ = errorStream_.str();
1088 return FAILURE;
1089 }
1090
1091 // Now wait until the reported nominal rate is what we just set.
1092 UInt32 microCounter = 0;
1093 while ( reportedRate != nominalRate ) {
1094 microCounter += 5000;
1095 if ( microCounter > 5000000 ) break;
1096 usleep( 5000 );
1097 }
1098
1099 // Remove the property listener.
1100 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1101
1102 if ( microCounter > 5000000 ) {
1103 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1104 errorText_ = errorStream_.str();
1105 return FAILURE;
1106 }
1107 }
1108
1109 // Now set the stream format for all streams. Also, check the
1110 // physical format of the device and change that if necessary.
1111 AudioStreamBasicDescription description;
1112 dataSize = sizeof( AudioStreamBasicDescription );
1113 property.mSelector = kAudioStreamPropertyVirtualFormat;
1114 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1115 if ( result != noErr ) {
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1117 errorText_ = errorStream_.str();
1118 return FAILURE;
1119 }
1120
1121 // Set the sample rate and data format id. However, only make the
1122 // change if the sample rate is not within 1.0 of the desired
1123 // rate and the format is not linear pcm.
1124 bool updateFormat = false;
1125 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1126 description.mSampleRate = (Float64) sampleRate;
1127 updateFormat = true;
1128 }
1129
1130 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1131 description.mFormatID = kAudioFormatLinearPCM;
1132 updateFormat = true;
1133 }
1134
1135 if ( updateFormat ) {
1136 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1137 if ( result != noErr ) {
1138 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1139 errorText_ = errorStream_.str();
1140 return FAILURE;
1141 }
1142 }
1143
1144 // Now check the physical format.
1145 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1146 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1147 if ( result != noErr ) {
1148 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1149 errorText_ = errorStream_.str();
1150 return FAILURE;
1151 }
1152
1153 //std::cout << "Current physical stream format:" << std::endl;
1154 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1155 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1156 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1157 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1158
1159 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1160 description.mFormatID = kAudioFormatLinearPCM;
1161 //description.mSampleRate = (Float64) sampleRate;
1162 AudioStreamBasicDescription testDescription = description;
1163 UInt32 formatFlags;
1164
1165 // We'll try higher bit rates first and then work our way down.
1166 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1167 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1168 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1169 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1170 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1171 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1172 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1173 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1174 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1175 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1176 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1177 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1178 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1179
1180 bool setPhysicalFormat = false;
1181 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1182 testDescription = description;
1183 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1184 testDescription.mFormatFlags = physicalFormats[i].second;
1185 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1186 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1187 else
1188 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1189 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1190 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1191 if ( result == noErr ) {
1192 setPhysicalFormat = true;
1193 //std::cout << "Updated physical stream format:" << std::endl;
1194 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1195 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1196 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1197 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1198 break;
1199 }
1200 }
1201
1202 if ( !setPhysicalFormat ) {
1203 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1204 errorText_ = errorStream_.str();
1205 return FAILURE;
1206 }
1207 } // done setting virtual/physical formats.
1208
1209 // Get the stream / device latency.
1210 UInt32 latency;
1211 dataSize = sizeof( UInt32 );
1212 property.mSelector = kAudioDevicePropertyLatency;
1213 if ( AudioObjectHasProperty( id, &property ) == true ) {
1214 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1215 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1216 else {
1217 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1218 errorText_ = errorStream_.str();
1219 error( RtAudioError::WARNING );
1220 }
1221 }
1222
1223 // Byte-swapping: According to AudioHardware.h, the stream data will
1224 // always be presented in native-endian format, so we should never
1225 // need to byte swap.
1226 stream_.doByteSwap[mode] = false;
1227
1228 // From the CoreAudio documentation, PCM data must be supplied as
1229 // 32-bit floats.
1230 stream_.userFormat = format;
1231 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1232
1233 if ( streamCount == 1 )
1234 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1235 else // multiple streams
1236 stream_.nDeviceChannels[mode] = channels;
1237 stream_.nUserChannels[mode] = channels;
1238 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1239 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1240 else stream_.userInterleaved = true;
1241 stream_.deviceInterleaved[mode] = true;
1242 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1243
1244 // Set flags for buffer conversion.
1245 stream_.doConvertBuffer[mode] = false;
1246 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1247 stream_.doConvertBuffer[mode] = true;
1248 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1249 stream_.doConvertBuffer[mode] = true;
1250 if ( streamCount == 1 ) {
1251 if ( stream_.nUserChannels[mode] > 1 &&
1252 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1253 stream_.doConvertBuffer[mode] = true;
1254 }
1255 else if ( monoMode && stream_.userInterleaved )
1256 stream_.doConvertBuffer[mode] = true;
1257
1258 // Allocate our CoreHandle structure for the stream.
1259 CoreHandle *handle = 0;
1260 if ( stream_.apiHandle == 0 ) {
1261 try {
1262 handle = new CoreHandle;
1263 }
1264 catch ( std::bad_alloc& ) {
1265 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1266 goto error;
1267 }
1268
1269 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1270 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1271 goto error;
1272 }
1273 stream_.apiHandle = (void *) handle;
1274 }
1275 else
1276 handle = (CoreHandle *) stream_.apiHandle;
1277 handle->iStream[mode] = firstStream;
1278 handle->nStreams[mode] = streamCount;
1279 handle->id[mode] = id;
1280
1281 // Allocate necessary internal buffers.
1282 unsigned long bufferBytes;
1283 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1284 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1285 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1286 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1287 if ( stream_.userBuffer[mode] == NULL ) {
1288 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1289 goto error;
1290 }
1291
1292 // If possible, we will make use of the CoreAudio stream buffers as
1293 // "device buffers". However, we can't do this if using multiple
1294 // streams.
1295 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1296
1297 bool makeBuffer = true;
1298 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1299 if ( mode == INPUT ) {
1300 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1301 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1302 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1303 }
1304 }
1305
1306 if ( makeBuffer ) {
1307 bufferBytes *= *bufferSize;
1308 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1309 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1310 if ( stream_.deviceBuffer == NULL ) {
1311 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1312 goto error;
1313 }
1314 }
1315 }
1316
1317 stream_.sampleRate = sampleRate;
1318 stream_.device[mode] = device;
1319 stream_.state = STREAM_STOPPED;
1320 stream_.callbackInfo.object = (void *) this;
1321
1322 // Setup the buffer conversion information structure.
1323 if ( stream_.doConvertBuffer[mode] ) {
1324 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1325 else setConvertInfo( mode, channelOffset );
1326 }
1327
1328 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1329 // Only one callback procedure per device.
1330 stream_.mode = DUPLEX;
1331 else {
1332 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1333 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1334 #else
1335 // deprecated in favor of AudioDeviceCreateIOProcID()
1336 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1337 #endif
1338 if ( result != noErr ) {
1339 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1340 errorText_ = errorStream_.str();
1341 goto error;
1342 }
1343 if ( stream_.mode == OUTPUT && mode == INPUT )
1344 stream_.mode = DUPLEX;
1345 else
1346 stream_.mode = mode;
1347 }
1348
1349 // Setup the device property listener for over/underload.
1350 property.mSelector = kAudioDeviceProcessorOverload;
1351 property.mScope = kAudioObjectPropertyScopeGlobal;
1352 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1353
1354 return SUCCESS;
1355
1356 error:
1357 if ( handle ) {
1358 pthread_cond_destroy( &handle->condition );
1359 delete handle;
1360 stream_.apiHandle = 0;
1361 }
1362
1363 for ( int i=0; i<2; i++ ) {
1364 if ( stream_.userBuffer[i] ) {
1365 free( stream_.userBuffer[i] );
1366 stream_.userBuffer[i] = 0;
1367 }
1368 }
1369
1370 if ( stream_.deviceBuffer ) {
1371 free( stream_.deviceBuffer );
1372 stream_.deviceBuffer = 0;
1373 }
1374
1375 stream_.state = STREAM_CLOSED;
1376 return FAILURE;
1377 }
1378
// Close the open stream: stop any running IO procs, unregister them from
// CoreAudio, free all internal buffers and the CoreHandle, and mark the
// stream CLOSED. Emits only a WARNING if no stream is open.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Tear down the output-side IO proc (handle index 0).
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // Tear down the input-side IO proc (index 1) — but only if it is a
  // different device; a same-device DUPLEX stream shares one IO proc.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  // Release the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
1430
1431 void RtApiCore :: startStream( void )
1432 {
1433 verifyStream();
1434 if ( stream_.state == STREAM_RUNNING ) {
1435 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1436 error( RtAudioError::WARNING );
1437 return;
1438 }
1439
1440 OSStatus result = noErr;
1441 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1442 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1443
1444 result = AudioDeviceStart( handle->id[0], callbackHandler );
1445 if ( result != noErr ) {
1446 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1447 errorText_ = errorStream_.str();
1448 goto unlock;
1449 }
1450 }
1451
1452 if ( stream_.mode == INPUT ||
1453 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1454
1455 result = AudioDeviceStart( handle->id[1], callbackHandler );
1456 if ( result != noErr ) {
1457 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1458 errorText_ = errorStream_.str();
1459 goto unlock;
1460 }
1461 }
1462
1463 handle->drainCounter = 0;
1464 handle->internalDrain = false;
1465 stream_.state = STREAM_RUNNING;
1466
1467 unlock:
1468 if ( result == noErr ) return;
1469 error( RtAudioError::SYSTEM_ERROR );
1470 }
1471
// Stop the running stream gracefully: let the output drain (the audio
// callback signals handle->condition once drainCounter exceeds its
// threshold), then stop the CoreAudio IO proc(s) and mark the stream
// STOPPED. Raises SYSTEM_ERROR if AudioDeviceStop() fails; only warns
// if the stream is already stopped.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means this is an external stop request: arm the
    // drain and wait for the callback to signal that output is flushed.
    // NOTE(review): pthread_cond_wait() is called on stream_.mutex, which
    // does not appear to be locked here — POSIX requires the mutex to be
    // held by the caller; confirm against the rest of the class.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input-side IO proc only when it is a separate device; a
  // same-device DUPLEX stream shares one IO proc with the output.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1514
1515 void RtApiCore :: abortStream( void )
1516 {
1517 verifyStream();
1518 if ( stream_.state == STREAM_STOPPED ) {
1519 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1520 error( RtAudioError::WARNING );
1521 return;
1522 }
1523
1524 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1525 handle->drainCounter = 2;
1526
1527 stopStream();
1528 }
1529
1530 // This function will be called by a spawned thread when the user
1531 // callback function signals that the stream should be stopped or
1532 // aborted. It is better to handle it this way because the
1533 // callbackEvent() function probably should return before the AudioDeviceStop()
1534 // function is called.
1535 static void *coreStopStream( void *ptr )
1536 {
1537 CallbackInfo *info = (CallbackInfo *) ptr;
1538 RtApiCore *object = (RtApiCore *) info->object;
1539
1540 object->stopStream();
1541 pthread_exit( NULL );
1542 }
1543
1544 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1545 const AudioBufferList *inBufferList,
1546 const AudioBufferList *outBufferList )
1547 {
1548 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1549 if ( stream_.state == STREAM_CLOSED ) {
1550 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1551 error( RtAudioError::WARNING );
1552 return FAILURE;
1553 }
1554
1555 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1556 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1557
1558 // Check if we were draining the stream and signal is finished.
1559 if ( handle->drainCounter > 3 ) {
1560 ThreadHandle threadId;
1561
1562 stream_.state = STREAM_STOPPING;
1563 if ( handle->internalDrain == true )
1564 pthread_create( &threadId, NULL, coreStopStream, info );
1565 else // external call to stopStream()
1566 pthread_cond_signal( &handle->condition );
1567 return SUCCESS;
1568 }
1569
1570 AudioDeviceID outputDevice = handle->id[0];
1571
1572 // Invoke user callback to get fresh output data UNLESS we are
1573 // draining stream or duplex mode AND the input/output devices are
1574 // different AND this function is called for the input device.
1575 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1576 RtAudioCallback callback = (RtAudioCallback) info->callback;
1577 double streamTime = getStreamTime();
1578 RtAudioStreamStatus status = 0;
1579 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1580 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1581 handle->xrun[0] = false;
1582 }
1583 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1584 status |= RTAUDIO_INPUT_OVERFLOW;
1585 handle->xrun[1] = false;
1586 }
1587
1588 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1589 stream_.bufferSize, streamTime, status, info->userData );
1590 if ( cbReturnValue == 2 ) {
1591 stream_.state = STREAM_STOPPING;
1592 handle->drainCounter = 2;
1593 abortStream();
1594 return SUCCESS;
1595 }
1596 else if ( cbReturnValue == 1 ) {
1597 handle->drainCounter = 1;
1598 handle->internalDrain = true;
1599 }
1600 }
1601
1602 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1603
1604 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1605
1606 if ( handle->nStreams[0] == 1 ) {
1607 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1608 0,
1609 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1610 }
1611 else { // fill multiple streams with zeros
1612 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1613 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1614 0,
1615 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1616 }
1617 }
1618 }
1619 else if ( handle->nStreams[0] == 1 ) {
1620 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1621 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1622 stream_.userBuffer[0], stream_.convertInfo[0] );
1623 }
1624 else { // copy from user buffer
1625 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1626 stream_.userBuffer[0],
1627 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1628 }
1629 }
1630 else { // fill multiple streams
1631 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1632 if ( stream_.doConvertBuffer[0] ) {
1633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1634 inBuffer = (Float32 *) stream_.deviceBuffer;
1635 }
1636
1637 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1638 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1639 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1640 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1641 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1642 }
1643 }
1644 else { // fill multiple multi-channel streams with interleaved data
1645 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1646 Float32 *out, *in;
1647
1648 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1649 UInt32 inChannels = stream_.nUserChannels[0];
1650 if ( stream_.doConvertBuffer[0] ) {
1651 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1652 inChannels = stream_.nDeviceChannels[0];
1653 }
1654
1655 if ( inInterleaved ) inOffset = 1;
1656 else inOffset = stream_.bufferSize;
1657
1658 channelsLeft = inChannels;
1659 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1660 in = inBuffer;
1661 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1662 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1663
1664 outJump = 0;
1665 // Account for possible channel offset in first stream
1666 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1667 streamChannels -= stream_.channelOffset[0];
1668 outJump = stream_.channelOffset[0];
1669 out += outJump;
1670 }
1671
1672 // Account for possible unfilled channels at end of the last stream
1673 if ( streamChannels > channelsLeft ) {
1674 outJump = streamChannels - channelsLeft;
1675 streamChannels = channelsLeft;
1676 }
1677
1678 // Determine input buffer offsets and skips
1679 if ( inInterleaved ) {
1680 inJump = inChannels;
1681 in += inChannels - channelsLeft;
1682 }
1683 else {
1684 inJump = 1;
1685 in += (inChannels - channelsLeft) * inOffset;
1686 }
1687
1688 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1689 for ( unsigned int j=0; j<streamChannels; j++ ) {
1690 *out++ = in[j*inOffset];
1691 }
1692 out += outJump;
1693 in += inJump;
1694 }
1695 channelsLeft -= streamChannels;
1696 }
1697 }
1698 }
1699 }
1700
1701 // Don't bother draining input
1702 if ( handle->drainCounter ) {
1703 handle->drainCounter++;
1704 goto unlock;
1705 }
1706
1707 AudioDeviceID inputDevice;
1708 inputDevice = handle->id[1];
1709 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1710
1711 if ( handle->nStreams[1] == 1 ) {
1712 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1713 convertBuffer( stream_.userBuffer[1],
1714 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1715 stream_.convertInfo[1] );
1716 }
1717 else { // copy to user buffer
1718 memcpy( stream_.userBuffer[1],
1719 inBufferList->mBuffers[handle->iStream[1]].mData,
1720 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1721 }
1722 }
1723 else { // read from multiple streams
1724 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1725 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1726
1727 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1728 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1729 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1730 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1731 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1732 }
1733 }
1734 else { // read from multiple multi-channel streams
1735 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1736 Float32 *out, *in;
1737
1738 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1739 UInt32 outChannels = stream_.nUserChannels[1];
1740 if ( stream_.doConvertBuffer[1] ) {
1741 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1742 outChannels = stream_.nDeviceChannels[1];
1743 }
1744
1745 if ( outInterleaved ) outOffset = 1;
1746 else outOffset = stream_.bufferSize;
1747
1748 channelsLeft = outChannels;
1749 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1750 out = outBuffer;
1751 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1752 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1753
1754 inJump = 0;
1755 // Account for possible channel offset in first stream
1756 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1757 streamChannels -= stream_.channelOffset[1];
1758 inJump = stream_.channelOffset[1];
1759 in += inJump;
1760 }
1761
1762 // Account for possible unread channels at end of the last stream
1763 if ( streamChannels > channelsLeft ) {
1764 inJump = streamChannels - channelsLeft;
1765 streamChannels = channelsLeft;
1766 }
1767
1768 // Determine output buffer offsets and skips
1769 if ( outInterleaved ) {
1770 outJump = outChannels;
1771 out += outChannels - channelsLeft;
1772 }
1773 else {
1774 outJump = 1;
1775 out += (outChannels - channelsLeft) * outOffset;
1776 }
1777
1778 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1779 for ( unsigned int j=0; j<streamChannels; j++ ) {
1780 out[j*outOffset] = *in++;
1781 }
1782 out += outJump;
1783 in += inJump;
1784 }
1785 channelsLeft -= streamChannels;
1786 }
1787 }
1788
1789 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1790 convertBuffer( stream_.userBuffer[1],
1791 stream_.deviceBuffer,
1792 stream_.convertInfo[1] );
1793 }
1794 }
1795 }
1796
1797 unlock:
1798 //MUTEX_UNLOCK( &stream_.mutex );
1799
1800 RtApi::tickStreamTime();
1801 return SUCCESS;
1802 }
1803
1804 const char* RtApiCore :: getErrorCode( OSStatus code )
1805 {
1806 switch( code ) {
1807
1808 case kAudioHardwareNotRunningError:
1809 return "kAudioHardwareNotRunningError";
1810
1811 case kAudioHardwareUnspecifiedError:
1812 return "kAudioHardwareUnspecifiedError";
1813
1814 case kAudioHardwareUnknownPropertyError:
1815 return "kAudioHardwareUnknownPropertyError";
1816
1817 case kAudioHardwareBadPropertySizeError:
1818 return "kAudioHardwareBadPropertySizeError";
1819
1820 case kAudioHardwareIllegalOperationError:
1821 return "kAudioHardwareIllegalOperationError";
1822
1823 case kAudioHardwareBadObjectError:
1824 return "kAudioHardwareBadObjectError";
1825
1826 case kAudioHardwareBadDeviceError:
1827 return "kAudioHardwareBadDeviceError";
1828
1829 case kAudioHardwareBadStreamError:
1830 return "kAudioHardwareBadStreamError";
1831
1832 case kAudioHardwareUnsupportedOperationError:
1833 return "kAudioHardwareUnsupportedOperationError";
1834
1835 case kAudioDeviceUnsupportedFormatError:
1836 return "kAudioDeviceUnsupportedFormatError";
1837
1838 case kAudioDevicePermissionsError:
1839 return "kAudioDevicePermissionsError";
1840
1841 default:
1842 return "CoreAudio unknown error";
1843 }
1844 }
1845
1846 //******************** End of __MACOSX_CORE__ *********************//
1847 #endif
1848
1849 #if defined(__UNIX_JACK__)
1850
1851 // JACK is a low-latency audio server, originally written for the
1852 // GNU/Linux operating system and now also ported to OS-X. It can
1853 // connect a number of different applications to an audio device, as
1854 // well as allowing them to share audio between themselves.
1855 //
1856 // When using JACK with RtAudio, "devices" refer to JACK clients that
1857 // have ports connected to the server. The JACK server is typically
1858 // started in a terminal as follows:
1859 //
1860 // .jackd -d alsa -d hw:0
1861 //
1862 // or through an interface program such as qjackctl. Many of the
1863 // parameters normally set for a stream are fixed by the JACK server
1864 // and can be specified when the JACK server is started. In
1865 // particular,
1866 //
1867 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1868 //
1869 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1870 // frames, and number of buffers = 4. Once the server is running, it
1871 // is not possible to override these values. If the values are not
1872 // specified in the command-line, the JACK server uses default values.
1873 //
1874 // The JACK server does not have to be running when an instance of
1875 // RtApiJack is created, though the function getDeviceCount() will
1876 // report 0 devices found until JACK has been started. When no
1877 // devices are available (i.e., the JACK server is not running), a
1878 // stream cannot be opened.
1879
1880 #include <jack/jack.h>
1881 #include <unistd.h>
1882 #include <cstdio>
1883
1884 // A structure to hold various information related to the Jack API
1885 // implementation.
1886 struct JackHandle {
1887 jack_client_t *client;
1888 jack_port_t **ports[2];
1889 std::string deviceName[2];
1890 bool xrun[2];
1891 pthread_cond_t condition;
1892 int drainCounter; // Tracks callback counts when draining
1893 bool internalDrain; // Indicates if stop is initiated from callback or not.
1894
1895 JackHandle()
1896 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1897 };
1898
1899 static void jackSilentError( const char * ) {};
1900
// Constructor.  There is no stream state to initialize here; unless the
// library is built with __RTAUDIO_DEBUG__ defined, Jack's global error
// logging is silenced via jackSilentError() so server chatter does not
// reach stderr.
RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
1909
1910 RtApiJack :: ~RtApiJack()
1911 {
1912 if ( stream_.state != STREAM_CLOSED ) closeStream();
1913 }
1914
1915 unsigned int RtApiJack :: getDeviceCount( void )
1916 {
1917 // See if we can become a jack client.
1918 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1919 jack_status_t *status = NULL;
1920 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1921 if ( client == 0 ) return 0;
1922
1923 const char **ports;
1924 std::string port, previousPort;
1925 unsigned int nChannels = 0, nDevices = 0;
1926 ports = jack_get_ports( client, NULL, NULL, 0 );
1927 if ( ports ) {
1928 // Parse the port names up to the first colon (:).
1929 size_t iColon = 0;
1930 do {
1931 port = (char *) ports[ nChannels ];
1932 iColon = port.find(":");
1933 if ( iColon != std::string::npos ) {
1934 port = port.substr( 0, iColon + 1 );
1935 if ( port != previousPort ) {
1936 nDevices++;
1937 previousPort = port;
1938 }
1939 }
1940 } while ( ports[++nChannels] );
1941 free( ports );
1942 }
1943
1944 jack_client_close( client );
1945 return nDevices;
1946 }
1947
// Return information (name, channel counts, sample rate, native format)
// for the Jack "device" (client-name prefix) with index `device`.  On
// any failure the returned structure has probed == false.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Connect to the Jack server as a throw-away probe client; never
  // auto-start a server just to query devices.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).  Each distinct
    // client-name prefix counts as one device; record the name of the
    // requested one.
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate (the only rate available).
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2038
2039 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2040 {
2041 CallbackInfo *info = (CallbackInfo *) infoPointer;
2042
2043 RtApiJack *object = (RtApiJack *) info->object;
2044 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2045
2046 return 0;
2047 }
2048
2049 // This function will be called by a spawned thread when the Jack
2050 // server signals that it is shutting down. It is necessary to handle
2051 // it this way because the jackShutdown() function must return before
2052 // the jack_deactivate() function (in closeStream()) will return.
2053 static void *jackCloseStream( void *ptr )
2054 {
2055 CallbackInfo *info = (CallbackInfo *) ptr;
2056 RtApiJack *object = (RtApiJack *) info->object;
2057
2058 object->closeStream();
2059
2060 pthread_exit( NULL );
2061 }
2062 static void jackShutdown( void *infoPointer )
2063 {
2064 CallbackInfo *info = (CallbackInfo *) infoPointer;
2065 RtApiJack *object = (RtApiJack *) info->object;
2066
2067 // Check current stream state. If stopped, then we'll assume this
2068 // was called as a result of a call to RtApiJack::stopStream (the
2069 // deactivation of a client handle causes this function to be called).
2070 // If not, we'll assume the Jack server is shutting down or some
2071 // other problem occurred and we should close the stream.
2072 if ( object->isStreamRunning() == false ) return;
2073
2074 ThreadHandle threadId;
2075 pthread_create( &threadId, NULL, jackCloseStream, info );
2076 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2077 }
2078
2079 static int jackXrun( void *infoPointer )
2080 {
2081 JackHandle *handle = (JackHandle *) infoPointer;
2082
2083 if ( handle->ports[0] ) handle->xrun[0] = true;
2084 if ( handle->ports[1] ) handle->xrun[1] = true;
2085
2086 return 0;
2087 }
2088
2089 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2090 unsigned int firstChannel, unsigned int sampleRate,
2091 RtAudioFormat format, unsigned int *bufferSize,
2092 RtAudio::StreamOptions *options )
2093 {
2094 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2095
2096 // Look for jack server and try to become a client (only do once per stream).
2097 jack_client_t *client = 0;
2098 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2099 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2100 jack_status_t *status = NULL;
2101 if ( options && !options->streamName.empty() )
2102 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2103 else
2104 client = jack_client_open( "RtApiJack", jackoptions, status );
2105 if ( client == 0 ) {
2106 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2107 error( RtAudioError::WARNING );
2108 return FAILURE;
2109 }
2110 }
2111 else {
2112 // The handle must have been created on an earlier pass.
2113 client = handle->client;
2114 }
2115
2116 const char **ports;
2117 std::string port, previousPort, deviceName;
2118 unsigned int nPorts = 0, nDevices = 0;
2119 ports = jack_get_ports( client, NULL, NULL, 0 );
2120 if ( ports ) {
2121 // Parse the port names up to the first colon (:).
2122 size_t iColon = 0;
2123 do {
2124 port = (char *) ports[ nPorts ];
2125 iColon = port.find(":");
2126 if ( iColon != std::string::npos ) {
2127 port = port.substr( 0, iColon );
2128 if ( port != previousPort ) {
2129 if ( nDevices == device ) deviceName = port;
2130 nDevices++;
2131 previousPort = port;
2132 }
2133 }
2134 } while ( ports[++nPorts] );
2135 free( ports );
2136 }
2137
2138 if ( device >= nDevices ) {
2139 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2140 return FAILURE;
2141 }
2142
2143 // Count the available ports containing the client name as device
2144 // channels. Jack "input ports" equal RtAudio output channels.
2145 unsigned int nChannels = 0;
2146 unsigned long flag = JackPortIsInput;
2147 if ( mode == INPUT ) flag = JackPortIsOutput;
2148 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2149 if ( ports ) {
2150 while ( ports[ nChannels ] ) nChannels++;
2151 free( ports );
2152 }
2153
2154 // Compare the jack ports for specified client to the requested number of channels.
2155 if ( nChannels < (channels + firstChannel) ) {
2156 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2157 errorText_ = errorStream_.str();
2158 return FAILURE;
2159 }
2160
2161 // Check the jack server sample rate.
2162 unsigned int jackRate = jack_get_sample_rate( client );
2163 if ( sampleRate != jackRate ) {
2164 jack_client_close( client );
2165 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2166 errorText_ = errorStream_.str();
2167 return FAILURE;
2168 }
2169 stream_.sampleRate = jackRate;
2170
2171 // Get the latency of the JACK port.
2172 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2173 if ( ports[ firstChannel ] ) {
2174 // Added by Ge Wang
2175 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2176 // the range (usually the min and max are equal)
2177 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2178 // get the latency range
2179 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2180 // be optimistic, use the min!
2181 stream_.latency[mode] = latrange.min;
2182 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2183 }
2184 free( ports );
2185
2186 // The jack server always uses 32-bit floating-point data.
2187 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2188 stream_.userFormat = format;
2189
2190 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2191 else stream_.userInterleaved = true;
2192
2193 // Jack always uses non-interleaved buffers.
2194 stream_.deviceInterleaved[mode] = false;
2195
2196 // Jack always provides host byte-ordered data.
2197 stream_.doByteSwap[mode] = false;
2198
2199 // Get the buffer size. The buffer size and number of buffers
2200 // (periods) is set when the jack server is started.
2201 stream_.bufferSize = (int) jack_get_buffer_size( client );
2202 *bufferSize = stream_.bufferSize;
2203
2204 stream_.nDeviceChannels[mode] = channels;
2205 stream_.nUserChannels[mode] = channels;
2206
2207 // Set flags for buffer conversion.
2208 stream_.doConvertBuffer[mode] = false;
2209 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2210 stream_.doConvertBuffer[mode] = true;
2211 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2212 stream_.nUserChannels[mode] > 1 )
2213 stream_.doConvertBuffer[mode] = true;
2214
2215 // Allocate our JackHandle structure for the stream.
2216 if ( handle == 0 ) {
2217 try {
2218 handle = new JackHandle;
2219 }
2220 catch ( std::bad_alloc& ) {
2221 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2222 goto error;
2223 }
2224
2225 if ( pthread_cond_init(&handle->condition, NULL) ) {
2226 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2227 goto error;
2228 }
2229 stream_.apiHandle = (void *) handle;
2230 handle->client = client;
2231 }
2232 handle->deviceName[mode] = deviceName;
2233
2234 // Allocate necessary internal buffers.
2235 unsigned long bufferBytes;
2236 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2237 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2238 if ( stream_.userBuffer[mode] == NULL ) {
2239 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2240 goto error;
2241 }
2242
2243 if ( stream_.doConvertBuffer[mode] ) {
2244
2245 bool makeBuffer = true;
2246 if ( mode == OUTPUT )
2247 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2248 else { // mode == INPUT
2249 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2250 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2251 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2252 if ( bufferBytes < bytesOut ) makeBuffer = false;
2253 }
2254 }
2255
2256 if ( makeBuffer ) {
2257 bufferBytes *= *bufferSize;
2258 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2259 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2260 if ( stream_.deviceBuffer == NULL ) {
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2262 goto error;
2263 }
2264 }
2265 }
2266
2267 // Allocate memory for the Jack ports (channels) identifiers.
2268 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2269 if ( handle->ports[mode] == NULL ) {
2270 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2271 goto error;
2272 }
2273
2274 stream_.device[mode] = device;
2275 stream_.channelOffset[mode] = firstChannel;
2276 stream_.state = STREAM_STOPPED;
2277 stream_.callbackInfo.object = (void *) this;
2278
2279 if ( stream_.mode == OUTPUT && mode == INPUT )
2280 // We had already set up the stream for output.
2281 stream_.mode = DUPLEX;
2282 else {
2283 stream_.mode = mode;
2284 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2285 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2286 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2287 }
2288
2289 // Register our ports.
2290 char label[64];
2291 if ( mode == OUTPUT ) {
2292 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2293 snprintf( label, 64, "outport %d", i );
2294 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2295 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2296 }
2297 }
2298 else {
2299 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2300 snprintf( label, 64, "inport %d", i );
2301 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2302 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2303 }
2304 }
2305
2306 // Setup the buffer conversion information structure. We don't use
2307 // buffers to do channel offsets, so we override that parameter
2308 // here.
2309 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2310
2311 return SUCCESS;
2312
2313 error:
2314 if ( handle ) {
2315 pthread_cond_destroy( &handle->condition );
2316 jack_client_close( handle->client );
2317
2318 if ( handle->ports[0] ) free( handle->ports[0] );
2319 if ( handle->ports[1] ) free( handle->ports[1] );
2320
2321 delete handle;
2322 stream_.apiHandle = 0;
2323 }
2324
2325 for ( int i=0; i<2; i++ ) {
2326 if ( stream_.userBuffer[i] ) {
2327 free( stream_.userBuffer[i] );
2328 stream_.userBuffer[i] = 0;
2329 }
2330 }
2331
2332 if ( stream_.deviceBuffer ) {
2333 free( stream_.deviceBuffer );
2334 stream_.deviceBuffer = 0;
2335 }
2336
2337 return FAILURE;
2338 }
2339
2340 void RtApiJack :: closeStream( void )
2341 {
2342 if ( stream_.state == STREAM_CLOSED ) {
2343 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2344 error( RtAudioError::WARNING );
2345 return;
2346 }
2347
2348 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2349 if ( handle ) {
2350
2351 if ( stream_.state == STREAM_RUNNING )
2352 jack_deactivate( handle->client );
2353
2354 jack_client_close( handle->client );
2355 }
2356
2357 if ( handle ) {
2358 if ( handle->ports[0] ) free( handle->ports[0] );
2359 if ( handle->ports[1] ) free( handle->ports[1] );
2360 pthread_cond_destroy( &handle->condition );
2361 delete handle;
2362 stream_.apiHandle = 0;
2363 }
2364
2365 for ( int i=0; i<2; i++ ) {
2366 if ( stream_.userBuffer[i] ) {
2367 free( stream_.userBuffer[i] );
2368 stream_.userBuffer[i] = 0;
2369 }
2370 }
2371
2372 if ( stream_.deviceBuffer ) {
2373 free( stream_.deviceBuffer );
2374 stream_.deviceBuffer = 0;
2375 }
2376
2377 stream_.mode = UNINITIALIZED;
2378 stream_.state = STREAM_CLOSED;
2379 }
2380
// Activate the Jack client and connect our registered ports to the
// device's ports, then mark the stream running.  Any failure jumps to
// the unlock label and is reported via error( SYSTEM_ERROR ).
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = 1; // presume failure so the unlock path reports it if ports == NULL
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    result = 1; // presume failure so the unlock path reports it if ports == NULL
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // All connections succeeded: reset drain state and go live.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2454
// Stop the stream, first letting queued output drain: callbackEvent()
// writes zeros once drainCounter > 1 and signals the condition when
// draining is complete, at which point the client is deactivated.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // If a drain was not already requested (e.g. by abortStream() or a
    // callback return value), request one and wait for callbackEvent()
    // to signal that the final buffers have been played out.
    // NOTE(review): POSIX requires the mutex passed to
    // pthread_cond_wait() to be locked by the caller, but stream_.mutex
    // is not acquired here — confirm against upstream RtAudio.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2476
2477 void RtApiJack :: abortStream( void )
2478 {
2479 verifyStream();
2480 if ( stream_.state == STREAM_STOPPED ) {
2481 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2482 error( RtAudioError::WARNING );
2483 return;
2484 }
2485
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2487 handle->drainCounter = 2;
2488
2489 stopStream();
2490 }
2491
2492 // This function will be called by a spawned thread when the user
2493 // callback function signals that the stream should be stopped or
2494 // aborted. It is necessary to handle it this way because the
2495 // callbackEvent() function must return before the jack_deactivate()
2496 // function will return.
2497 static void *jackStopStream( void *ptr )
2498 {
2499 CallbackInfo *info = (CallbackInfo *) ptr;
2500 RtApiJack *object = (RtApiJack *) info->object;
2501
2502 object->stopStream();
2503 pthread_exit( NULL );
2504 }
2505
2506 bool RtApiJack :: callbackEvent( unsigned long nframes )
2507 {
2508 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2509 if ( stream_.state == STREAM_CLOSED ) {
2510 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2511 error( RtAudioError::WARNING );
2512 return FAILURE;
2513 }
2514 if ( stream_.bufferSize != nframes ) {
2515 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2516 error( RtAudioError::WARNING );
2517 return FAILURE;
2518 }
2519
2520 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2521 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2522
2523 // Check if we were draining the stream and signal is finished.
2524 if ( handle->drainCounter > 3 ) {
2525 ThreadHandle threadId;
2526
2527 stream_.state = STREAM_STOPPING;
2528 if ( handle->internalDrain == true )
2529 pthread_create( &threadId, NULL, jackStopStream, info );
2530 else
2531 pthread_cond_signal( &handle->condition );
2532 return SUCCESS;
2533 }
2534
2535 // Invoke user callback first, to get fresh output data.
2536 if ( handle->drainCounter == 0 ) {
2537 RtAudioCallback callback = (RtAudioCallback) info->callback;
2538 double streamTime = getStreamTime();
2539 RtAudioStreamStatus status = 0;
2540 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2541 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2542 handle->xrun[0] = false;
2543 }
2544 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2545 status |= RTAUDIO_INPUT_OVERFLOW;
2546 handle->xrun[1] = false;
2547 }
2548 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2549 stream_.bufferSize, streamTime, status, info->userData );
2550 if ( cbReturnValue == 2 ) {
2551 stream_.state = STREAM_STOPPING;
2552 handle->drainCounter = 2;
2553 ThreadHandle id;
2554 pthread_create( &id, NULL, jackStopStream, info );
2555 return SUCCESS;
2556 }
2557 else if ( cbReturnValue == 1 ) {
2558 handle->drainCounter = 1;
2559 handle->internalDrain = true;
2560 }
2561 }
2562
2563 jack_default_audio_sample_t *jackbuffer;
2564 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2565 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2566
2567 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2568
2569 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2570 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2571 memset( jackbuffer, 0, bufferBytes );
2572 }
2573
2574 }
2575 else if ( stream_.doConvertBuffer[0] ) {
2576
2577 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2578
2579 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2581 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2582 }
2583 }
2584 else { // no buffer conversion
2585 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2586 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2587 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2588 }
2589 }
2590 }
2591
2592 // Don't bother draining input
2593 if ( handle->drainCounter ) {
2594 handle->drainCounter++;
2595 goto unlock;
2596 }
2597
2598 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2599
2600 if ( stream_.doConvertBuffer[1] ) {
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2603 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2604 }
2605 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2606 }
2607 else { // no buffer conversion
2608 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2609 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2610 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2611 }
2612 }
2613 }
2614
2615 unlock:
2616 RtApi::tickStreamTime();
2617 return SUCCESS;
2618 }
2619 //******************** End of __UNIX_JACK__ *********************//
2620 #endif
2621
2622 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2623
2624 // The ASIO API is designed around a callback scheme, so this
2625 // implementation is similar to that used for OS-X CoreAudio and Linux
2626 // Jack. The primary constraint with ASIO is that it only allows
2627 // access to a single driver at a time. Thus, it is not possible to
2628 // have more than one simultaneous RtAudio stream.
2629 //
2630 // This implementation also requires a number of external ASIO files
2631 // and a few global variables. The ASIO callback scheme does not
2632 // allow for the passing of user data, so we must create a global
2633 // pointer to our callbackInfo structure.
2634 //
2635 // On unix systems, we make use of a pthread condition variable.
2636 // Since there is no equivalent in Windows, I hacked something based
2637 // on information found in
2638 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2639
2640 #include "asiosys.h"
2641 #include "asio.h"
2642 #include "iasiothiscallresolver.h"
2643 #include "asiodrivers.h"
2644 #include <cmath>
2645
// File-scope ASIO state.  The ASIO callback scheme cannot carry user
// data, so these globals connect the driver callbacks back to the one
// active RtApiAsio instance (ASIO permits only a single loaded driver
// at a time, which makes file-scope state workable here).
static AsioDrivers drivers;            // driver enumeration / load / unload helper
static ASIOCallbacks asioCallbacks;    // callback table handed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;      // filled in by ASIOInit()
static CallbackInfo *asioCallbackInfo; // lets bufferSwitch() reach the stream's callback info
static bool asioXRun;                  // xrun flag; cleared in callbackEvent() -- presumably set by asioMessages() (body not in view)
2651
2652 struct AsioHandle {
2653 int drainCounter; // Tracks callback counts when draining
2654 bool internalDrain; // Indicates if stop is initiated from callback or not.
2655 ASIOBufferInfo *bufferInfos;
2656 HANDLE condition;
2657
2658 AsioHandle()
2659 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2660 };
2661
2662 // Function declarations (definitions at end of section)
2663 static const char* getAsioErrorString( ASIOError result );
2664 static void sampleRateChanged( ASIOSampleRate sRate );
2665 static long asioMessages( long selector, long value, void* message, double* opt );
2666
2667 RtApiAsio :: RtApiAsio()
2668 {
2669 // ASIO cannot run on a multi-threaded appartment. You can call
2670 // CoInitialize beforehand, but it must be for appartment threading
2671 // (in which case, CoInitilialize will return S_FALSE here).
2672 coInitialized_ = false;
2673 HRESULT hr = CoInitialize( NULL );
2674 if ( FAILED(hr) ) {
2675 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2676 error( RtAudioError::WARNING );
2677 }
2678 coInitialized_ = true;
2679
2680 drivers.removeCurrentDriver();
2681 driverInfo.asioVersion = 2;
2682
2683 // See note in DirectSound implementation about GetDesktopWindow().
2684 driverInfo.sysRef = GetForegroundWindow();
2685 }
2686
RtApiAsio :: ~RtApiAsio()
{
  // Close any open stream (stops the driver, frees buffers) BEFORE
  // tearing down COM -- the order matters here.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  // Balance the CoInitialize() performed in the constructor.
  if ( coInitialized_ ) CoUninitialize();
}
2692
2693 unsigned int RtApiAsio :: getDeviceCount( void )
2694 {
2695 return (unsigned int) drivers.asioGetNumDev();
2696 }
2697
2698 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2699 {
2700 RtAudio::DeviceInfo info;
2701 info.probed = false;
2702
2703 // Get device ID
2704 unsigned int nDevices = getDeviceCount();
2705 if ( nDevices == 0 ) {
2706 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2707 error( RtAudioError::INVALID_USE );
2708 return info;
2709 }
2710
2711 if ( device >= nDevices ) {
2712 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2713 error( RtAudioError::INVALID_USE );
2714 return info;
2715 }
2716
2717 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2718 if ( stream_.state != STREAM_CLOSED ) {
2719 if ( device >= devices_.size() ) {
2720 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2721 error( RtAudioError::WARNING );
2722 return info;
2723 }
2724 return devices_[ device ];
2725 }
2726
2727 char driverName[32];
2728 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2729 if ( result != ASE_OK ) {
2730 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2731 errorText_ = errorStream_.str();
2732 error( RtAudioError::WARNING );
2733 return info;
2734 }
2735
2736 info.name = driverName;
2737
2738 if ( !drivers.loadDriver( driverName ) ) {
2739 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2740 errorText_ = errorStream_.str();
2741 error( RtAudioError::WARNING );
2742 return info;
2743 }
2744
2745 result = ASIOInit( &driverInfo );
2746 if ( result != ASE_OK ) {
2747 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2748 errorText_ = errorStream_.str();
2749 error( RtAudioError::WARNING );
2750 return info;
2751 }
2752
2753 // Determine the device channel information.
2754 long inputChannels, outputChannels;
2755 result = ASIOGetChannels( &inputChannels, &outputChannels );
2756 if ( result != ASE_OK ) {
2757 drivers.removeCurrentDriver();
2758 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2759 errorText_ = errorStream_.str();
2760 error( RtAudioError::WARNING );
2761 return info;
2762 }
2763
2764 info.outputChannels = outputChannels;
2765 info.inputChannels = inputChannels;
2766 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2767 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2768
2769 // Determine the supported sample rates.
2770 info.sampleRates.clear();
2771 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2772 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2773 if ( result == ASE_OK )
2774 info.sampleRates.push_back( SAMPLE_RATES[i] );
2775 }
2776
2777 // Determine supported data types ... just check first channel and assume rest are the same.
2778 ASIOChannelInfo channelInfo;
2779 channelInfo.channel = 0;
2780 channelInfo.isInput = true;
2781 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2782 result = ASIOGetChannelInfo( &channelInfo );
2783 if ( result != ASE_OK ) {
2784 drivers.removeCurrentDriver();
2785 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2786 errorText_ = errorStream_.str();
2787 error( RtAudioError::WARNING );
2788 return info;
2789 }
2790
2791 info.nativeFormats = 0;
2792 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2793 info.nativeFormats |= RTAUDIO_SINT16;
2794 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2795 info.nativeFormats |= RTAUDIO_SINT32;
2796 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2797 info.nativeFormats |= RTAUDIO_FLOAT32;
2798 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2799 info.nativeFormats |= RTAUDIO_FLOAT64;
2800 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2801 info.nativeFormats |= RTAUDIO_SINT24;
2802
2803 if ( info.outputChannels > 0 )
2804 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2805 if ( info.inputChannels > 0 )
2806 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2807
2808 info.probed = true;
2809 drivers.removeCurrentDriver();
2810 return info;
2811 }
2812
2813 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2814 {
2815 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2816 object->callbackEvent( index );
2817 }
2818
2819 void RtApiAsio :: saveDeviceInfo( void )
2820 {
2821 devices_.clear();
2822
2823 unsigned int nDevices = getDeviceCount();
2824 devices_.resize( nDevices );
2825 for ( unsigned int i=0; i<nDevices; i++ )
2826 devices_[i] = getDeviceInfo( i );
2827 }
2828
//! Open and configure ASIO device 'device' for the given mode.
//
// Called once for OUTPUT and, for a duplex stream, a second time for
// INPUT.  Because ASIO allows only one loaded driver, a duplex stream
// must use the same device for both calls and the driver is loaded /
// initialized only on the first call.  On success the stream state is
// set to STREAM_STOPPED and SUCCESS is returned; on failure before
// buffer creation the driver is unloaded directly, afterwards the
// "error:" label performs full cleanup and FAILURE is returned.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // For ASIO, a duplex stream MUST use the same driver.
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
    return FAILURE;
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Only load the driver once for duplex stream.
  if ( mode != INPUT || stream_.mode != OUTPUT ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time.  Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Get the current sample rate
  ASIOSampleRate currentRate;
  result = ASIOGetSampleRate( &currentRate );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      drivers.removeCurrentDriver();
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Determine the driver data type.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Assuming WINDOWS host is always little-endian.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
  }

  // No recognized sample type: fail rather than guess.
  if ( stream_.deviceFormat[mode] == 0 ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the buffer size.  For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  // should be ok.
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Clamp to the driver's [minSize, maxSize] range; a granularity of
  // -1 means only power-of-two sizes are allowed, so snap to the
  // nearest power of two within range.
  if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
  else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  else if ( granularity == -1 ) {
    // Make sure bufferSize is a power of two.
    int log2_of_min_size = 0;
    int log2_of_max_size = 0;

    // Find the positions of the highest set bits of minSize/maxSize.
    for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
      if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
      if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
    }

    // Pick the power of two closest to the requested size.
    long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
    int min_delta_num = log2_of_min_size;

    for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
      long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
      if (current_delta < min_delta) {
        min_delta = current_delta;
        min_delta_num = i;
      }
    }

    *bufferSize = ( (unsigned int)1 << min_delta_num );
    if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  }
  else if ( granularity != 0 ) {
    // Set to an even multiple of granularity, rounding up.
    *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
  }

  // The second (input) call of a duplex open must agree with the
  // buffer size already chosen for output.
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
    drivers.removeCurrentDriver();
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
    return FAILURE;
  }

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( handle == 0 ) {
    try {
      handle = new AsioHandle;
    }
    catch ( std::bad_alloc& ) {
      //if ( handle == NULL ) {
      drivers.removeCurrentDriver();
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
      return FAILURE;
    }
    handle->bufferInfos = 0;

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL, // no security
                                     TRUE, // manual-reset
                                     FALSE, // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
  }

  // Create the ASIO internal buffers.  Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  long inputLatency, outputLatency;
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );
  }

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  bool buffersAllocated = false;
  unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Output channel descriptors first, then input, matching the order
  // assumed by callbackEvent().
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;
  }
  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;
  }

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();
    goto error;
  }
  buffersAllocated = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse the output-side device buffer if it is already big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
  else
    stream_.mode = mode;

  // Determine device latencies
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail
  }
  else {
    stream_.latency[0] = outputLatency;
    stream_.latency[1] = inputLatency;
  }

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  return SUCCESS;

  // Common cleanup for failures after the handle/bufferInfos exist.
 error:
  if ( buffersAllocated )
    ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  if ( handle ) {
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}
3188
3189 void RtApiAsio :: closeStream()
3190 {
3191 if ( stream_.state == STREAM_CLOSED ) {
3192 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3193 error( RtAudioError::WARNING );
3194 return;
3195 }
3196
3197 if ( stream_.state == STREAM_RUNNING ) {
3198 stream_.state = STREAM_STOPPED;
3199 ASIOStop();
3200 }
3201 ASIODisposeBuffers();
3202 drivers.removeCurrentDriver();
3203
3204 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3205 if ( handle ) {
3206 CloseHandle( handle->condition );
3207 if ( handle->bufferInfos )
3208 free( handle->bufferInfos );
3209 delete handle;
3210 stream_.apiHandle = 0;
3211 }
3212
3213 for ( int i=0; i<2; i++ ) {
3214 if ( stream_.userBuffer[i] ) {
3215 free( stream_.userBuffer[i] );
3216 stream_.userBuffer[i] = 0;
3217 }
3218 }
3219
3220 if ( stream_.deviceBuffer ) {
3221 free( stream_.deviceBuffer );
3222 stream_.deviceBuffer = 0;
3223 }
3224
3225 stream_.mode = UNINITIALIZED;
3226 stream_.state = STREAM_CLOSED;
3227 }
3228
3229 bool stopThreadCalled = false;
3230
3231 void RtApiAsio :: startStream()
3232 {
3233 verifyStream();
3234 if ( stream_.state == STREAM_RUNNING ) {
3235 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3236 error( RtAudioError::WARNING );
3237 return;
3238 }
3239
3240 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3241 ASIOError result = ASIOStart();
3242 if ( result != ASE_OK ) {
3243 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3244 errorText_ = errorStream_.str();
3245 goto unlock;
3246 }
3247
3248 handle->drainCounter = 0;
3249 handle->internalDrain = false;
3250 ResetEvent( handle->condition );
3251 stream_.state = STREAM_RUNNING;
3252 asioXRun = false;
3253
3254 unlock:
3255 stopThreadCalled = false;
3256
3257 if ( result == ASE_OK ) return;
3258 error( RtAudioError::SYSTEM_ERROR );
3259 }
3260
3261 void RtApiAsio :: stopStream()
3262 {
3263 verifyStream();
3264 if ( stream_.state == STREAM_STOPPED ) {
3265 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3266 error( RtAudioError::WARNING );
3267 return;
3268 }
3269
3270 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3271 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3272 if ( handle->drainCounter == 0 ) {
3273 handle->drainCounter = 2;
3274 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3275 }
3276 }
3277
3278 stream_.state = STREAM_STOPPED;
3279
3280 ASIOError result = ASIOStop();
3281 if ( result != ASE_OK ) {
3282 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3283 errorText_ = errorStream_.str();
3284 }
3285
3286 if ( result == ASE_OK ) return;
3287 error( RtAudioError::SYSTEM_ERROR );
3288 }
3289
3290 void RtApiAsio :: abortStream()
3291 {
3292 verifyStream();
3293 if ( stream_.state == STREAM_STOPPED ) {
3294 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3295 error( RtAudioError::WARNING );
3296 return;
3297 }
3298
3299 // The following lines were commented-out because some behavior was
3300 // noted where the device buffers need to be zeroed to avoid
3301 // continuing sound, even when the device buffers are completely
3302 // disposed. So now, calling abort is the same as calling stop.
3303 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3304 // handle->drainCounter = 2;
3305 stopStream();
3306 }
3307
3308 // This function will be called by a spawned thread when the user
3309 // callback function signals that the stream should be stopped or
3310 // aborted. It is necessary to handle it this way because the
3311 // callbackEvent() function must return before the ASIOStop()
3312 // function will return.
3313 static unsigned __stdcall asioStopStream( void *ptr )
3314 {
3315 CallbackInfo *info = (CallbackInfo *) ptr;
3316 RtApiAsio *object = (RtApiAsio *) info->object;
3317
3318 object->stopStream();
3319 _endthreadex( 0 );
3320 return 0;
3321 }
3322
//! Per-period processing driven by the ASIO bufferSwitch callback.
//
// 'bufferIndex' selects which half of the double-buffer to fill/read.
// Runs the user callback, converts/byte-swaps between user and device
// formats, and manages the drain handshake used by stopStream().
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  // (drainCounter passes 3 once the zero-filled buffers have played.)
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // NOTE(review): a single asioXRun flag serves both directions, so
    // in DUPLEX mode the first test below clears it and the second can
    // never also fire in the same cycle.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort: stop from a helper
      // thread since ASIOStop() cannot complete inside this callback.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a drain-then-stop.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user format/interleaving to device layout, then swap
      // bytes in place if the driver is big-endian.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      // Scatter the (non-interleaved) device buffer channel-by-channel
      // into the ASIO output buffers.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: user buffer already matches the device
      // layout, apart from possible byte order.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // Gather ASIO input channels directly into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3476
3477 static void sampleRateChanged( ASIOSampleRate sRate )
3478 {
3479 // The ASIO documentation says that this usually only happens during
3480 // external sync. Audio processing is not stopped by the driver,
3481 // actual sample rate might not have even changed, maybe only the
3482 // sample rate status of an AES/EBU or S/PDIF digital input at the
3483 // audio device.
3484
3485 RtApi *object = (RtApi *) asioCallbackInfo->object;
3486 try {
3487 object->stopStream();
3488 }
3489 catch ( RtAudioError &exception ) {
3490 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3491 return;
3492 }
3493
3494 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3495 }
3496
3497 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3498 {
3499 long ret = 0;
3500
3501 switch( selector ) {
3502 case kAsioSelectorSupported:
3503 if ( value == kAsioResetRequest
3504 || value == kAsioEngineVersion
3505 || value == kAsioResyncRequest
3506 || value == kAsioLatenciesChanged
3507 // The following three were added for ASIO 2.0, you don't
3508 // necessarily have to support them.
3509 || value == kAsioSupportsTimeInfo
3510 || value == kAsioSupportsTimeCode
3511 || value == kAsioSupportsInputMonitor)
3512 ret = 1L;
3513 break;
3514 case kAsioResetRequest:
3515 // Defer the task and perform the reset of the driver during the
3516 // next "safe" situation. You cannot reset the driver right now,
3517 // as this code is called from the driver. Reset the driver is
3518 // done by completely destruct is. I.e. ASIOStop(),
3519 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3520 // driver again.
3521 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3522 ret = 1L;
3523 break;
3524 case kAsioResyncRequest:
3525 // This informs the application that the driver encountered some
3526 // non-fatal data loss. It is used for synchronization purposes
3527 // of different media. Added mainly to work around the Win16Mutex
3528 // problems in Windows 95/98 with the Windows Multimedia system,
3529 // which could lose data because the Mutex was held too long by
3530 // another thread. However a driver can issue it in other
3531 // situations, too.
3532 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3533 asioXRun = true;
3534 ret = 1L;
3535 break;
3536 case kAsioLatenciesChanged:
3537 // This will inform the host application that the drivers were
3538 // latencies changed. Beware, it this does not mean that the
3539 // buffer sizes have changed! You might need to update internal
3540 // delay data.
3541 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3542 ret = 1L;
3543 break;
3544 case kAsioEngineVersion:
3545 // Return the supported ASIO version of the host application. If
3546 // a host application does not implement this selector, ASIO 1.0
3547 // is assumed by the driver.
3548 ret = 2L;
3549 break;
3550 case kAsioSupportsTimeInfo:
3551 // Informs the driver whether the
3552 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3553 // For compatibility with ASIO 1.0 drivers the host application
3554 // should always support the "old" bufferSwitch method, too.
3555 ret = 0;
3556 break;
3557 case kAsioSupportsTimeCode:
3558 // Informs the driver whether application is interested in time
3559 // code info. If an application does not need to know about time
3560 // code, the driver has less work to do.
3561 ret = 0;
3562 break;
3563 }
3564 return ret;
3565 }
3566
3567 static const char* getAsioErrorString( ASIOError result )
3568 {
3569 struct Messages
3570 {
3571 ASIOError value;
3572 const char*message;
3573 };
3574
3575 static const Messages m[] =
3576 {
3577 { ASE_NotPresent, "Hardware input or output is not present or available." },
3578 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3579 { ASE_InvalidParameter, "Invalid input parameter." },
3580 { ASE_InvalidMode, "Invalid mode." },
3581 { ASE_SPNotAdvancing, "Sample position not advancing." },
3582 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3583 { ASE_NoMemory, "Not enough memory to complete the request." }
3584 };
3585
3586 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3587 if ( m[i].value == result ) return m[i].message;
3588
3589 return "Unknown error.";
3590 }
3591
3592 //******************** End of __WINDOWS_ASIO__ *********************//
3593 #endif
3594
3595
3596 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3597
3598 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3599 // - Introduces support for the Windows WASAPI API
3600 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3601 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3602 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3603
3604 #ifndef INITGUID
3605 #define INITGUID
3606 #endif
3607 #include <audioclient.h>
3608 #include <avrt.h>
3609 #include <mmdeviceapi.h>
3610 #include <functiondiscoverykeys_devpkey.h>
3611
3612 //=============================================================================
3613
// Release a COM interface pointer and null it out.
// NOTE(review): deliberately a bare if-block with NO do/while(0) wrapper --
// several call sites in this file (e.g. RtApiWasapi::closeStream) invoke
// SAFE_RELEASE( x ) without a trailing semicolon and depend on this exact
// expansion; do not "fix" the macro hygiene without updating them.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}

// Function-pointer type matching AvSetMmThreadCharacteristics (avrt.dll);
// presumably used to resolve the symbol dynamically at runtime -- confirm
// at the call site (outside this chunk).
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3622
3623 //-----------------------------------------------------------------------------
3624
3625 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3626 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3627 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3628 // provide intermediate storage for read / write synchronization.
3629 class WasapiBuffer
3630 {
3631 public:
3632 WasapiBuffer()
3633 : buffer_( NULL ),
3634 bufferSize_( 0 ),
3635 inIndex_( 0 ),
3636 outIndex_( 0 ) {}
3637
3638 ~WasapiBuffer() {
3639 delete buffer_;
3640 }
3641
3642 // sets the length of the internal ring buffer
3643 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3644 delete buffer_;
3645
3646 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3647
3648 bufferSize_ = bufferSize;
3649 inIndex_ = 0;
3650 outIndex_ = 0;
3651 }
3652
3653 // attempt to push a buffer into the ring buffer at the current "in" index
3654 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3655 {
3656 if ( !buffer || // incoming buffer is NULL
3657 bufferSize == 0 || // incoming buffer has no data
3658 bufferSize > bufferSize_ ) // incoming buffer too large
3659 {
3660 return false;
3661 }
3662
3663 unsigned int relOutIndex = outIndex_;
3664 unsigned int inIndexEnd = inIndex_ + bufferSize;
3665 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3666 relOutIndex += bufferSize_;
3667 }
3668
3669 // "in" index can end on the "out" index but cannot begin at it
3670 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3671 return false; // not enough space between "in" index and "out" index
3672 }
3673
3674 // copy buffer from external to internal
3675 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3676 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3677 int fromInSize = bufferSize - fromZeroSize;
3678
3679 switch( format )
3680 {
3681 case RTAUDIO_SINT8:
3682 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3683 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3684 break;
3685 case RTAUDIO_SINT16:
3686 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3687 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3688 break;
3689 case RTAUDIO_SINT24:
3690 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3691 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3692 break;
3693 case RTAUDIO_SINT32:
3694 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3695 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3696 break;
3697 case RTAUDIO_FLOAT32:
3698 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3699 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3700 break;
3701 case RTAUDIO_FLOAT64:
3702 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3703 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3704 break;
3705 }
3706
3707 // update "in" index
3708 inIndex_ += bufferSize;
3709 inIndex_ %= bufferSize_;
3710
3711 return true;
3712 }
3713
3714 // attempt to pull a buffer from the ring buffer from the current "out" index
3715 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3716 {
3717 if ( !buffer || // incoming buffer is NULL
3718 bufferSize == 0 || // incoming buffer has no data
3719 bufferSize > bufferSize_ ) // incoming buffer too large
3720 {
3721 return false;
3722 }
3723
3724 unsigned int relInIndex = inIndex_;
3725 unsigned int outIndexEnd = outIndex_ + bufferSize;
3726 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3727 relInIndex += bufferSize_;
3728 }
3729
3730 // "out" index can begin at and end on the "in" index
3731 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3732 return false; // not enough space between "out" index and "in" index
3733 }
3734
3735 // copy buffer from internal to external
3736 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3737 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3738 int fromOutSize = bufferSize - fromZeroSize;
3739
3740 switch( format )
3741 {
3742 case RTAUDIO_SINT8:
3743 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3744 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3745 break;
3746 case RTAUDIO_SINT16:
3747 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3748 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3749 break;
3750 case RTAUDIO_SINT24:
3751 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3752 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3753 break;
3754 case RTAUDIO_SINT32:
3755 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3756 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3757 break;
3758 case RTAUDIO_FLOAT32:
3759 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3760 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3761 break;
3762 case RTAUDIO_FLOAT64:
3763 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3764 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3765 break;
3766 }
3767
3768 // update "out" index
3769 outIndex_ += bufferSize;
3770 outIndex_ %= bufferSize_;
3771
3772 return true;
3773 }
3774
3775 private:
3776 char* buffer_;
3777 unsigned int bufferSize_;
3778 unsigned int inIndex_;
3779 unsigned int outIndex_;
3780 };
3781
3782 //-----------------------------------------------------------------------------
3783
3784 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3785 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3786 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3787 // This sample rate converter favors speed over quality, and works best with conversions between
3788 // one rate and its multiple.
3789 void convertBufferWasapi( char* outBuffer,
3790 const char* inBuffer,
3791 const unsigned int& channelCount,
3792 const unsigned int& inSampleRate,
3793 const unsigned int& outSampleRate,
3794 const unsigned int& inSampleCount,
3795 unsigned int& outSampleCount,
3796 const RtAudioFormat& format )
3797 {
3798 // calculate the new outSampleCount and relative sampleStep
3799 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3800 float sampleStep = 1.0f / sampleRatio;
3801 float inSampleFraction = 0.0f;
3802
3803 outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
3804
3805 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3806 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3807 {
3808 unsigned int inSample = ( unsigned int ) inSampleFraction;
3809
3810 switch ( format )
3811 {
3812 case RTAUDIO_SINT8:
3813 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3814 break;
3815 case RTAUDIO_SINT16:
3816 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3817 break;
3818 case RTAUDIO_SINT24:
3819 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3820 break;
3821 case RTAUDIO_SINT32:
3822 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3823 break;
3824 case RTAUDIO_FLOAT32:
3825 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3826 break;
3827 case RTAUDIO_FLOAT64:
3828 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3829 break;
3830 }
3831
3832 // jump to next in sample
3833 inSampleFraction += sampleStep;
3834 }
3835 }
3836
3837 //-----------------------------------------------------------------------------
3838
3839 // A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  // COM interfaces for the capture and render endpoints; members are NULL
  // until set up (the stop/close paths test them for NULL before use).
  IAudioClient* captureAudioClient;
  IAudioClient* renderAudioClient;
  IAudioCaptureClient* captureClient;
  IAudioRenderClient* renderClient;
  // Win32 events -- presumably signaled by WASAPI when a buffer is ready;
  // confirm against the stream-thread code (outside this chunk).
  HANDLE captureEvent;
  HANDLE renderEvent;

  // All members start NULL so cleanup can run safely on a partially
  // initialized handle.
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
3857
3858 //=============================================================================
3859
3860 RtApiWasapi::RtApiWasapi()
3861 : coInitialized_( false ), deviceEnumerator_( NULL )
3862 {
3863 // WASAPI can run either apartment or multi-threaded
3864 HRESULT hr = CoInitialize( NULL );
3865 if ( !FAILED( hr ) )
3866 coInitialized_ = true;
3867
3868 // Instantiate device enumerator
3869 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3870 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3871 ( void** ) &deviceEnumerator_ );
3872
3873 if ( FAILED( hr ) ) {
3874 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3875 error( RtAudioError::DRIVER_ERROR );
3876 }
3877 }
3878
3879 //-----------------------------------------------------------------------------
3880
RtApiWasapi::~RtApiWasapi()
{
  // Tear down any open stream before releasing COM resources.
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // Balance CoInitialize() only if this object's constructor succeeded in
  // initializing COM (it may have been initialized elsewhere).
  if ( coInitialized_ )
    CoUninitialize();
}
3892
3893 //=============================================================================
3894
3895 unsigned int RtApiWasapi::getDeviceCount( void )
3896 {
3897 unsigned int captureDeviceCount = 0;
3898 unsigned int renderDeviceCount = 0;
3899
3900 IMMDeviceCollection* captureDevices = NULL;
3901 IMMDeviceCollection* renderDevices = NULL;
3902
3903 // Count capture devices
3904 errorText_.clear();
3905 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3906 if ( FAILED( hr ) ) {
3907 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3908 goto Exit;
3909 }
3910
3911 hr = captureDevices->GetCount( &captureDeviceCount );
3912 if ( FAILED( hr ) ) {
3913 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3914 goto Exit;
3915 }
3916
3917 // Count render devices
3918 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3919 if ( FAILED( hr ) ) {
3920 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3921 goto Exit;
3922 }
3923
3924 hr = renderDevices->GetCount( &renderDeviceCount );
3925 if ( FAILED( hr ) ) {
3926 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3927 goto Exit;
3928 }
3929
3930 Exit:
3931 // release all references
3932 SAFE_RELEASE( captureDevices );
3933 SAFE_RELEASE( renderDevices );
3934
3935 if ( errorText_.empty() )
3936 return captureDeviceCount + renderDeviceCount;
3937
3938 error( RtAudioError::DRIVER_ERROR );
3939 return 0;
3940 }
3941
3942 //-----------------------------------------------------------------------------
3943
3944 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3945 {
3946 RtAudio::DeviceInfo info;
3947 unsigned int captureDeviceCount = 0;
3948 unsigned int renderDeviceCount = 0;
3949 std::wstring deviceName;
3950 std::string defaultDeviceName;
3951 bool isCaptureDevice = false;
3952
3953 PROPVARIANT deviceNameProp;
3954 PROPVARIANT defaultDeviceNameProp;
3955
3956 IMMDeviceCollection* captureDevices = NULL;
3957 IMMDeviceCollection* renderDevices = NULL;
3958 IMMDevice* devicePtr = NULL;
3959 IMMDevice* defaultDevicePtr = NULL;
3960 IAudioClient* audioClient = NULL;
3961 IPropertyStore* devicePropStore = NULL;
3962 IPropertyStore* defaultDevicePropStore = NULL;
3963
3964 WAVEFORMATEX* deviceFormat = NULL;
3965 WAVEFORMATEX* closestMatchFormat = NULL;
3966
3967 // probed
3968 info.probed = false;
3969
3970 // Count capture devices
3971 errorText_.clear();
3972 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
3973 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3974 if ( FAILED( hr ) ) {
3975 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
3976 goto Exit;
3977 }
3978
3979 hr = captureDevices->GetCount( &captureDeviceCount );
3980 if ( FAILED( hr ) ) {
3981 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
3982 goto Exit;
3983 }
3984
3985 // Count render devices
3986 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3987 if ( FAILED( hr ) ) {
3988 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
3989 goto Exit;
3990 }
3991
3992 hr = renderDevices->GetCount( &renderDeviceCount );
3993 if ( FAILED( hr ) ) {
3994 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
3995 goto Exit;
3996 }
3997
3998 // validate device index
3999 if ( device >= captureDeviceCount + renderDeviceCount ) {
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4001 errorType = RtAudioError::INVALID_USE;
4002 goto Exit;
4003 }
4004
4005 // determine whether index falls within capture or render devices
4006 if ( device >= renderDeviceCount ) {
4007 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4008 if ( FAILED( hr ) ) {
4009 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4010 goto Exit;
4011 }
4012 isCaptureDevice = true;
4013 }
4014 else {
4015 hr = renderDevices->Item( device, &devicePtr );
4016 if ( FAILED( hr ) ) {
4017 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4018 goto Exit;
4019 }
4020 isCaptureDevice = false;
4021 }
4022
4023 // get default device name
4024 if ( isCaptureDevice ) {
4025 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4026 if ( FAILED( hr ) ) {
4027 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4028 goto Exit;
4029 }
4030 }
4031 else {
4032 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4033 if ( FAILED( hr ) ) {
4034 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4035 goto Exit;
4036 }
4037 }
4038
4039 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4040 if ( FAILED( hr ) ) {
4041 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4042 goto Exit;
4043 }
4044 PropVariantInit( &defaultDeviceNameProp );
4045
4046 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4047 if ( FAILED( hr ) ) {
4048 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4049 goto Exit;
4050 }
4051
4052 deviceName = defaultDeviceNameProp.pwszVal;
4053 defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );
4054
4055 // name
4056 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4057 if ( FAILED( hr ) ) {
4058 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4059 goto Exit;
4060 }
4061
4062 PropVariantInit( &deviceNameProp );
4063
4064 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4065 if ( FAILED( hr ) ) {
4066 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4067 goto Exit;
4068 }
4069
4070 deviceName = deviceNameProp.pwszVal;
4071 info.name = std::string( deviceName.begin(), deviceName.end() );
4072
4073 // is default
4074 if ( isCaptureDevice ) {
4075 info.isDefaultInput = info.name == defaultDeviceName;
4076 info.isDefaultOutput = false;
4077 }
4078 else {
4079 info.isDefaultInput = false;
4080 info.isDefaultOutput = info.name == defaultDeviceName;
4081 }
4082
4083 // channel count
4084 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4085 if ( FAILED( hr ) ) {
4086 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4087 goto Exit;
4088 }
4089
4090 hr = audioClient->GetMixFormat( &deviceFormat );
4091 if ( FAILED( hr ) ) {
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4093 goto Exit;
4094 }
4095
4096 if ( isCaptureDevice ) {
4097 info.inputChannels = deviceFormat->nChannels;
4098 info.outputChannels = 0;
4099 info.duplexChannels = 0;
4100 }
4101 else {
4102 info.inputChannels = 0;
4103 info.outputChannels = deviceFormat->nChannels;
4104 info.duplexChannels = 0;
4105 }
4106
4107 // sample rates
4108 info.sampleRates.clear();
4109
4110 // allow support for all sample rates as we have a built-in sample rate converter
4111 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4112 info.sampleRates.push_back( SAMPLE_RATES[i] );
4113 }
4114
4115 // native format
4116 info.nativeFormats = 0;
4117
4118 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4119 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4120 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4121 {
4122 if ( deviceFormat->wBitsPerSample == 32 ) {
4123 info.nativeFormats |= RTAUDIO_FLOAT32;
4124 }
4125 else if ( deviceFormat->wBitsPerSample == 64 ) {
4126 info.nativeFormats |= RTAUDIO_FLOAT64;
4127 }
4128 }
4129 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4130 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4131 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4132 {
4133 if ( deviceFormat->wBitsPerSample == 8 ) {
4134 info.nativeFormats |= RTAUDIO_SINT8;
4135 }
4136 else if ( deviceFormat->wBitsPerSample == 16 ) {
4137 info.nativeFormats |= RTAUDIO_SINT16;
4138 }
4139 else if ( deviceFormat->wBitsPerSample == 24 ) {
4140 info.nativeFormats |= RTAUDIO_SINT24;
4141 }
4142 else if ( deviceFormat->wBitsPerSample == 32 ) {
4143 info.nativeFormats |= RTAUDIO_SINT32;
4144 }
4145 }
4146
4147 // probed
4148 info.probed = true;
4149
4150 Exit:
4151 // release all references
4152 PropVariantClear( &deviceNameProp );
4153 PropVariantClear( &defaultDeviceNameProp );
4154
4155 SAFE_RELEASE( captureDevices );
4156 SAFE_RELEASE( renderDevices );
4157 SAFE_RELEASE( devicePtr );
4158 SAFE_RELEASE( defaultDevicePtr );
4159 SAFE_RELEASE( audioClient );
4160 SAFE_RELEASE( devicePropStore );
4161 SAFE_RELEASE( defaultDevicePropStore );
4162
4163 CoTaskMemFree( deviceFormat );
4164 CoTaskMemFree( closestMatchFormat );
4165
4166 if ( !errorText_.empty() )
4167 error( errorType );
4168 return info;
4169 }
4170
4171 //-----------------------------------------------------------------------------
4172
4173 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4174 {
4175 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4176 if ( getDeviceInfo( i ).isDefaultOutput ) {
4177 return i;
4178 }
4179 }
4180
4181 return 0;
4182 }
4183
4184 //-----------------------------------------------------------------------------
4185
4186 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4187 {
4188 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4189 if ( getDeviceInfo( i ).isDefaultInput ) {
4190 return i;
4191 }
4192 }
4193
4194 return 0;
4195 }
4196
4197 //-----------------------------------------------------------------------------
4198
// Stop (if needed) and fully tear down the open stream: release all COM
// clients, close the Win32 events, and free the per-stream buffers.
void RtApiWasapi::closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
    error( RtAudioError::WARNING );
    return;
  }

  if ( stream_.state != STREAM_STOPPED )
    stopStream();

  // clean up stream memory
  // NOTE: the SAFE_RELEASE invocations below deliberately have no trailing
  // semicolons -- the macro expands to a complete if-block.
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )

  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )

  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );

  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );

  delete ( WasapiHandle* ) stream_.apiHandle;
  stream_.apiHandle = NULL;

  // user buffers are malloc/calloc-allocated elsewhere, hence free()
  for ( int i = 0; i < 2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // update stream state
  stream_.state = STREAM_CLOSED;
}
4241
4242 //-----------------------------------------------------------------------------
4243
4244 void RtApiWasapi::startStream( void )
4245 {
4246 verifyStream();
4247
4248 if ( stream_.state == STREAM_RUNNING ) {
4249 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4250 error( RtAudioError::WARNING );
4251 return;
4252 }
4253
4254 // update stream state
4255 stream_.state = STREAM_RUNNING;
4256
4257 // create WASAPI stream thread
4258 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4259
4260 if ( !stream_.callbackInfo.thread ) {
4261 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4262 error( RtAudioError::THREAD_ERROR );
4263 }
4264 else {
4265 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4266 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4267 }
4268 }
4269
4270 //-----------------------------------------------------------------------------
4271
// Stop the stream gracefully: signal the stream thread via the state flag,
// wait for it to exit, let the final buffer drain, then stop the WASAPI
// clients and close the thread handle.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (the thread is expected to set the state to STREAM_STOPPED on exit)
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle (the thread has already exited at this point)
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4322
4323 //-----------------------------------------------------------------------------
4324
// Abort the WASAPI stream.  Identical to stopStream() except that it does
// not sleep to let the final buffer play out before stopping the clients.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): busy-wait poll on the flag written by wasapiThread().
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4372
4373 //-----------------------------------------------------------------------------
4374
// Probe and partially open a WASAPI device for the given stream direction.
//
// `device` indexes a combined list: render devices first, then capture
// devices (so a capture device index is `device - renderDeviceCount`).
// Activates an IAudioClient for the device, records channel count and
// latency, fills in the stream_ structure, and allocates the user buffer.
// Returns SUCCESS or FAILURE; on failure the stream is closed and error()
// is invoked with the accumulated errorText_.  Cleanup is centralized at
// the Exit label, which is why locals are declared before the first goto.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
{
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
    goto Exit;
  }

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
    goto Exit;
  }

  // determine whether index falls within capture or render devices
  if ( device >= renderDeviceCount ) {
    // capture-device half of the combined index space
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
      goto Exit;
    }

    // retrieve captureAudioClient from devicePtr
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    // NOTE(review): latency return value is not checked here.
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }
  else {
    // render-device half of the combined index space
    if ( mode != OUTPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
      goto Exit;
    }

    // retrieve renderAudioClient from devicePtr
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
      goto Exit;
    }

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // fill stream data
  // Opening the second direction of an already half-open stream promotes
  // the stream mode to DUPLEX.
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
  }
  else {
    stream_.mode = mode;
  }

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  else
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels != stream_.nDeviceChannels )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, 0 );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
    goto Exit;
  }

  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
  else
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

  methodResult = SUCCESS;

Exit:
  //clean up
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream
  if ( methodResult == FAILURE )
    closeStream();

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
}
4568
4569 //=============================================================================
4570
4571 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4572 {
4573 if ( wasapiPtr )
4574 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4575
4576 return 0;
4577 }
4578
4579 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4580 {
4581 if ( wasapiPtr )
4582 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4583
4584 return 0;
4585 }
4586
4587 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4588 {
4589 if ( wasapiPtr )
4590 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4591
4592 return 0;
4593 }
4594
4595 //-----------------------------------------------------------------------------
4596
// Main WASAPI stream processing loop, run on the thread created by
// startStream().  Lazily initializes the capture/render clients and events,
// then loops until stream_.state becomes STREAM_STOPPING:
//   1. pull a converted input buffer and invoke the user callback,
//   2. convert/push the callback's output buffer,
//   3. service the WASAPI capture and render endpoints (event-driven).
// All failure paths jump to Exit, which frees local resources, sets
// STREAM_STOPPED, and reports any accumulated error.
void RtApiWasapi::wasapiThread()
{
  // as this is a new thread, we must CoInitialize it
  CoInitialize( NULL );

  HRESULT hr;

  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

  WAVEFORMATEX* captureFormat = NULL;
  WAVEFORMATEX* renderFormat = NULL;
  // device-rate / user-rate ratios used to size the conversion buffers
  float captureSrRatio = 0.0f;
  float renderSrRatio = 0.0f;
  WasapiBuffer captureBuffer;
  WasapiBuffer renderBuffer;

  // declare local stream variables
  RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
  BYTE* streamBuffer = NULL;
  unsigned long captureFlags = 0;
  unsigned int bufferFrameCount = 0;
  unsigned int numFramesPadding = 0;
  unsigned int convBufferSize = 0;
  bool callbackPushed = false;   // output delivered to renderBuffer this cycle
  bool callbackPulled = false;   // input pulled and callback executed this cycle
  bool callbackStopped = false;  // callback requested stop/abort; don't call again
  int callbackResult = 0;

  // convBuffer is used to store converted buffers between WASAPI and the user
  char* convBuffer = NULL;
  unsigned int convBuffSize = 0;
  unsigned int deviceBuffSize = 0;

  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

  // Attempt to assign "Pro Audio" characteristic to thread
  // NOTE(review): the (LPCTSTR) cast of a narrow literal is wrong when
  // UNICODE is defined, and the GetProcAddress result is not NULL-checked
  // before the call — confirm against target build configurations.
  HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
  if ( AvrtDll ) {
    DWORD taskIndex = 0;
    TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
    AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
    FreeLibrary( AvrtDll );
  }

  // start capture stream if applicable
  if ( captureAudioClient ) {
    hr = captureAudioClient->GetMixFormat( &captureFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize capture stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * captureSrRatio;
    // 10000000 converts seconds to REFERENCE_TIME units of 100 ns
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

    // first pass through only: create the capture client and its event
    if ( !captureClient ) {
      hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                           desiredBufferPeriod,
                                           desiredBufferPeriod,
                                           captureFormat,
                                           NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
        goto Exit;
      }

      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
                                           ( void** ) &captureClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
        goto Exit;
      }

      // configure captureEvent to trigger on every available capture buffer
      captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !captureEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
        goto Exit;
      }

      hr = captureAudioClient->SetEventHandle( captureEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
        goto Exit;
      }

      // stash in the shared handle so closeStream() can release them
      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
    }

    unsigned int inBufferSize = 0;
    hr = captureAudioClient->GetBufferSize( &inBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
      goto Exit;
    }

    // scale outBufferSize according to stream->user sample rate ratio
    unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
    inBufferSize *= stream_.nDeviceChannels[INPUT];

    // set captureBuffer size
    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

    // reset the capture stream
    hr = captureAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
      goto Exit;
    }

    // start the capture stream
    hr = captureAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
      goto Exit;
    }
  }

  // start render stream if applicable
  if ( renderAudioClient ) {
    hr = renderAudioClient->GetMixFormat( &renderFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
      goto Exit;
    }

    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

    // initialize render stream according to desire buffer size
    float desiredBufferSize = stream_.bufferSize * renderSrRatio;
    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

    // first pass through only: create the render client and its event
    if ( !renderClient ) {
      hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
                                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                          desiredBufferPeriod,
                                          desiredBufferPeriod,
                                          renderFormat,
                                          NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
        goto Exit;
      }

      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
                                          ( void** ) &renderClient );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
        goto Exit;
      }

      // configure renderEvent to trigger on every available render buffer
      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
      if ( !renderEvent ) {
        errorType = RtAudioError::SYSTEM_ERROR;
        errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
        goto Exit;
      }

      hr = renderAudioClient->SetEventHandle( renderEvent );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
        goto Exit;
      }

      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
    }

    unsigned int outBufferSize = 0;
    hr = renderAudioClient->GetBufferSize( &outBufferSize );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
      goto Exit;
    }

    // scale inBufferSize according to user->stream sample rate ratio
    unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
    outBufferSize *= stream_.nDeviceChannels[OUTPUT];

    // set renderBuffer size
    renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

    // reset the render stream
    hr = renderAudioClient->Reset();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
      goto Exit;
    }

    // start the render stream
    hr = renderAudioClient->Start();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
      goto Exit;
    }
  }

  // size the conversion and device buffers for the active direction(s);
  // DUPLEX takes the max so one buffer serves both directions
  if ( stream_.mode == INPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
  }
  else if ( stream_.mode == OUTPUT ) {
    convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
  }
  else if ( stream_.mode == DUPLEX ) {
    convBuffSize = max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                        ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
    deviceBuffSize = max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                          stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
  }

  // NOTE(review): convBuffer is freed at Exit; stream_.deviceBuffer is
  // presumably released by closeStream() — confirm.
  convBuffer = ( char* ) malloc( convBuffSize );
  stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
  if ( !convBuffer || !stream_.deviceBuffer ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
    goto Exit;
  }

  // stream process loop
  while ( stream_.state != STREAM_STOPPING ) {
    if ( !callbackPulled ) {
      // Callback Input
      // ==============
      // 1. Pull callback buffer from inputBuffer
      // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
      //                          Convert callback buffer to user format

      if ( captureAudioClient ) {
        // Pull callback buffer from inputBuffer
        callbackPulled = captureBuffer.pullBuffer( convBuffer,
                                                   ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
                                                   stream_.deviceFormat[INPUT] );

        if ( callbackPulled ) {
          // Convert callback buffer to user sample rate
          convertBufferWasapi( stream_.deviceBuffer,
                               convBuffer,
                               stream_.nDeviceChannels[INPUT],
                               captureFormat->nSamplesPerSec,
                               stream_.sampleRate,
                               ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
                               convBufferSize,
                               stream_.deviceFormat[INPUT] );

          if ( stream_.doConvertBuffer[INPUT] ) {
            // Convert callback buffer to user format
            convertBuffer( stream_.userBuffer[INPUT],
                           stream_.deviceBuffer,
                           stream_.convertInfo[INPUT] );
          }
          else {
            // no further conversion, simple copy deviceBuffer to userBuffer
            memcpy( stream_.userBuffer[INPUT],
                    stream_.deviceBuffer,
                    stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
          }
        }
      }
      else {
        // if there is no capture stream, set callbackPulled flag
        callbackPulled = true;
      }

      // Execute Callback
      // ================
      // 1. Execute user callback method
      // 2. Handle return value from callback

      // if callback has not requested the stream to stop
      if ( callbackPulled && !callbackStopped ) {
        // Execute user callback method
        callbackResult = callback( stream_.userBuffer[OUTPUT],
                                   stream_.userBuffer[INPUT],
                                   stream_.bufferSize,
                                   getStreamTime(),
                                   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
                                   stream_.callbackInfo.userData );

        // Handle return value from callback
        if ( callbackResult == 1 ) {
          // instantiate a thread to stop this thread
          // (a separate thread is required: this thread cannot join itself)
          HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
        else if ( callbackResult == 2 ) {
          // instantiate a thread to stop this thread
          HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
          if ( !threadHandle ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
            goto Exit;
          }
          else if ( !CloseHandle( threadHandle ) ) {
            errorType = RtAudioError::THREAD_ERROR;
            errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
            goto Exit;
          }

          callbackStopped = true;
        }
      }
    }

    // Callback Output
    // ===============
    // 1. Convert callback buffer to stream format
    // 2. Convert callback buffer to stream sample rate and channel count
    // 3. Push callback buffer into outputBuffer

    if ( renderAudioClient && callbackPulled ) {
      if ( stream_.doConvertBuffer[OUTPUT] ) {
        // Convert callback buffer to stream format
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );

      }

      // Convert callback buffer to stream sample rate
      convertBufferWasapi( convBuffer,
                           stream_.deviceBuffer,
                           stream_.nDeviceChannels[OUTPUT],
                           stream_.sampleRate,
                           renderFormat->nSamplesPerSec,
                           stream_.bufferSize,
                           convBufferSize,
                           stream_.deviceFormat[OUTPUT] );

      // Push callback buffer into outputBuffer
      callbackPushed = renderBuffer.pushBuffer( convBuffer,
                                                convBufferSize * stream_.nDeviceChannels[OUTPUT],
                                                stream_.deviceFormat[OUTPUT] );
    }
    else {
      // if there is no render stream, set callbackPushed flag
      callbackPushed = true;
    }

    // Stream Capture
    // ==============
    // 1. Get capture buffer from stream
    // 2. Push capture buffer into inputBuffer
    // 3. If 2. was successful: Release capture buffer

    if ( captureAudioClient ) {
      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
      if ( !callbackPulled ) {
        WaitForSingleObject( captureEvent, INFINITE );
      }

      // Get capture buffer from stream
      hr = captureClient->GetBuffer( &streamBuffer,
                                     &bufferFrameCount,
                                     &captureFlags, NULL, NULL );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
        goto Exit;
      }

      if ( bufferFrameCount != 0 ) {
        // Push capture buffer into inputBuffer
        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
                                       stream_.deviceFormat[INPUT] ) )
        {
          // Release capture buffer
          hr = captureClient->ReleaseBuffer( bufferFrameCount );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that capture was unsuccessful
          hr = captureClient->ReleaseBuffer( 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that capture was unsuccessful
        hr = captureClient->ReleaseBuffer( 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
          goto Exit;
        }
      }
    }

    // Stream Render
    // =============
    // 1. Get render buffer from stream
    // 2. Pull next buffer from outputBuffer
    // 3. If 2. was successful: Fill render buffer with next buffer
    //                          Release render buffer

    if ( renderAudioClient ) {
      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
      if ( callbackPulled && !callbackPushed ) {
        WaitForSingleObject( renderEvent, INFINITE );
      }

      // Get render buffer from stream
      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
        goto Exit;
      }

      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
      if ( FAILED( hr ) ) {
        errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
        goto Exit;
      }

      // only the unpadded portion of the endpoint buffer is writable
      bufferFrameCount -= numFramesPadding;

      if ( bufferFrameCount != 0 ) {
        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
          goto Exit;
        }

        // Pull next buffer from outputBuffer
        // Fill render buffer with next buffer
        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
                                      stream_.deviceFormat[OUTPUT] ) )
        {
          // Release render buffer
          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
        else
        {
          // Inform WASAPI that render was unsuccessful
          hr = renderClient->ReleaseBuffer( 0, 0 );
          if ( FAILED( hr ) ) {
            errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
            goto Exit;
          }
        }
      }
      else
      {
        // Inform WASAPI that render was unsuccessful
        hr = renderClient->ReleaseBuffer( 0, 0 );
        if ( FAILED( hr ) ) {
          errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
          goto Exit;
        }
      }
    }

    // if the callback buffer was pushed renderBuffer reset callbackPulled flag
    if ( callbackPushed ) {
      callbackPulled = false;
    }

    // tick stream time
    RtApi::tickStreamTime();
  }

Exit:
  // clean up
  CoTaskMemFree( captureFormat );
  CoTaskMemFree( renderFormat );

  free ( convBuffer );

  CoUninitialize();

  // update stream state
  // stopStream()/abortStream() busy-wait on this flag to detect thread exit
  stream_.state = STREAM_STOPPED;

  if ( errorText_.empty() )
    return;
  else
    error( errorType );
}
5111
5112 //******************** End of __WINDOWS_WASAPI__ *********************//
5113 #endif
5114
5115
5116 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5117
5118 // Modified by Robin Davies, October 2005
5119 // - Improvements to DirectX pointer chasing.
5120 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5121 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5122 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5123 // Changed device query structure for RtAudio 4.0.7, January 2010
5124
5125 #include <dsound.h>
5126 #include <assert.h>
5127 #include <algorithm>
5128
5129 #if defined(__MINGW32__)
5130 // missing from latest mingw winapi
5131 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5132 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5133 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5134 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5135 #endif
5136
5137 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5138
5139 #ifdef _MSC_VER // if Microsoft Visual C++
5140 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5141 #endif
5142
5143 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5144 {
5145 if ( pointer > bufferSize ) pointer -= bufferSize;
5146 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5147 if ( pointer < earlierPointer ) pointer += bufferSize;
5148 return pointer >= earlierPointer && pointer < laterPointer;
5149 }
5150
5151 // A structure to hold various information related to the DirectSound
5152 // API implementation.
// A structure to hold various information related to the DirectSound
// API implementation.  Index 0 of the paired arrays refers to output
// (playback) and index 1 to input (capture), matching the convention
// used by the rest of this implementation — TODO confirm against callers.
struct DsHandle {
  unsigned int drainCounter; // Tracks callback counts when draining
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
  void *id[2];               // DirectSound object pointers (output, input)
  void *buffer[2];           // DirectSound buffer pointers (output, input)
  bool xrun[2];              // Over/underflow flags (output, input)
  UINT bufferPointer[2];     // Current byte positions within the buffers
  DWORD dsBufferSize[2];     // DirectSound buffer sizes in bytes
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
  HANDLE condition;          // Event used to signal the callback thread

  // Zero-initialize everything except `condition`, which is set up elsewhere.
  DsHandle()
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
};
5167
5168 // Declarations for utility functions, callbacks, and structures
5169 // specific to the DirectSound implementation.
5170 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5171 LPCTSTR description,
5172 LPCTSTR module,
5173 LPVOID lpContext );
5174
5175 static const char* getErrorString( int code );
5176
5177 static unsigned __stdcall callbackHandler( void *ptr );
5178
// Cached description of one enumerated DirectSound device.  The id/validId
// pairs hold separate GUIDs for the output [0] and input [1] roles; `found`
// is cleared before each re-enumeration so vanished devices can be pruned.
struct DsDevice {
  LPGUID id[2];      // device GUIDs (output, input)
  bool validId[2];   // whether the corresponding GUID has been filled in
  bool found;        // seen during the most recent enumeration pass
  std::string name;  // device description string

  DsDevice()
  : found(false) { validId[0] = false; validId[1] = false; }
};
5188
// Context passed to deviceQueryCallback() during device enumeration:
// tells the callback which direction is being probed and where to
// accumulate the discovered devices.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // shared device list to update
};
5193
5194 RtApiDs :: RtApiDs()
5195 {
5196 // Dsound will run both-threaded. If CoInitialize fails, then just
5197 // accept whatever the mainline chose for a threading model.
5198 coInitialized_ = false;
5199 HRESULT hr = CoInitialize( NULL );
5200 if ( !FAILED( hr ) ) coInitialized_ = true;
5201 }
5202
5203 RtApiDs :: ~RtApiDs()
5204 {
5205 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5206 if ( stream_.state != STREAM_CLOSED ) closeStream();
5207 }
5208
5209 // The DirectSound default output is always the first device.
// The DirectSound default output is always the first device.
unsigned int RtApiDs :: getDefaultOutputDevice( void )
{
  return 0;
}
5214
5215 // The DirectSound default input is always the first input device,
5216 // which is the first capture device enumerated.
5217 unsigned int RtApiDs :: getDefaultInputDevice( void )
5218 {
5219 return 0;
5220 }
5221
5222 unsigned int RtApiDs :: getDeviceCount( void )
5223 {
5224 // Set query flag for previously found devices to false, so that we
5225 // can check for any devices that have disappeared.
5226 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5227 dsDevices[i].found = false;
5228
5229 // Query DirectSound devices.
5230 struct DsProbeData probeInfo;
5231 probeInfo.isInput = false;
5232 probeInfo.dsDevices = &dsDevices;
5233 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5234 if ( FAILED( result ) ) {
5235 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5236 errorText_ = errorStream_.str();
5237 error( RtAudioError::WARNING );
5238 }
5239
5240 // Query DirectSoundCapture devices.
5241 probeInfo.isInput = true;
5242 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5243 if ( FAILED( result ) ) {
5244 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5245 errorText_ = errorStream_.str();
5246 error( RtAudioError::WARNING );
5247 }
5248
5249 // Clean out any devices that may have disappeared.
5250 std::vector< int > indices;
5251 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5252 if ( dsDevices[i].found == false ) indices.push_back( i );
5253 //unsigned int nErased = 0;
5254 for ( unsigned int i=0; i<indices.size(); i++ )
5255 dsDevices.erase( dsDevices.begin()+indices[i] );
5256 //dsDevices.erase( dsDevices.begin()-nErased++ );
5257
5258 return static_cast<unsigned int>(dsDevices.size());
5259 }
5260
// Probes the capabilities (channels, sample rates, native formats) of
// the device at the given index.  A device entry may hold an output
// GUID, an input GUID, or both; the output side is probed first and the
// code falls through (via goto) to the input probe.  On failure a
// warning is issued and info is returned with info.probed still false.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // No render GUID for this entry: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
      info.sampleRates.push_back( SAMPLE_RATES[k] );
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  if ( dsDevices[ device ].validId[1] == false ) {
    // Output-only device: nothing more to probe.
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.  The WAVE_FORMAT_* bits in
  // dwFormats each encode one (rate, channel count, bit depth) combo;
  // stereo flags are checked when the device reports >= 2 channels and
  // mono flags when it reports exactly 1.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5435
5436 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5437 unsigned int firstChannel, unsigned int sampleRate,
5438 RtAudioFormat format, unsigned int *bufferSize,
5439 RtAudio::StreamOptions *options )
5440 {
5441 if ( channels + firstChannel > 2 ) {
5442 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5443 return FAILURE;
5444 }
5445
5446 size_t nDevices = dsDevices.size();
5447 if ( nDevices == 0 ) {
5448 // This should not happen because a check is made before this function is called.
5449 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5450 return FAILURE;
5451 }
5452
5453 if ( device >= nDevices ) {
5454 // This should not happen because a check is made before this function is called.
5455 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5456 return FAILURE;
5457 }
5458
5459 if ( mode == OUTPUT ) {
5460 if ( dsDevices[ device ].validId[0] == false ) {
5461 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5462 errorText_ = errorStream_.str();
5463 return FAILURE;
5464 }
5465 }
5466 else { // mode == INPUT
5467 if ( dsDevices[ device ].validId[1] == false ) {
5468 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5469 errorText_ = errorStream_.str();
5470 return FAILURE;
5471 }
5472 }
5473
5474 // According to a note in PortAudio, using GetDesktopWindow()
5475 // instead of GetForegroundWindow() is supposed to avoid problems
5476 // that occur when the application's window is not the foreground
5477 // window. Also, if the application window closes before the
5478 // DirectSound buffer, DirectSound can crash. In the past, I had
5479 // problems when using GetDesktopWindow() but it seems fine now
5480 // (January 2010). I'll leave it commented here.
5481 // HWND hWnd = GetForegroundWindow();
5482 HWND hWnd = GetDesktopWindow();
5483
5484 // Check the numberOfBuffers parameter and limit the lowest value to
5485 // two. This is a judgement call and a value of two is probably too
5486 // low for capture, but it should work for playback.
5487 int nBuffers = 0;
5488 if ( options ) nBuffers = options->numberOfBuffers;
5489 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5490 if ( nBuffers < 2 ) nBuffers = 3;
5491
5492 // Check the lower range of the user-specified buffer size and set
5493 // (arbitrarily) to a lower bound of 32.
5494 if ( *bufferSize < 32 ) *bufferSize = 32;
5495
5496 // Create the wave format structure. The data format setting will
5497 // be determined later.
5498 WAVEFORMATEX waveFormat;
5499 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5500 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5501 waveFormat.nChannels = channels + firstChannel;
5502 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5503
5504 // Determine the device buffer size. By default, we'll use the value
5505 // defined above (32K), but we will grow it to make allowances for
5506 // very large software buffer sizes.
5507 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5508 DWORD dsPointerLeadTime = 0;
5509
5510 void *ohandle = 0, *bhandle = 0;
5511 HRESULT result;
5512 if ( mode == OUTPUT ) {
5513
5514 LPDIRECTSOUND output;
5515 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5516 if ( FAILED( result ) ) {
5517 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5518 errorText_ = errorStream_.str();
5519 return FAILURE;
5520 }
5521
5522 DSCAPS outCaps;
5523 outCaps.dwSize = sizeof( outCaps );
5524 result = output->GetCaps( &outCaps );
5525 if ( FAILED( result ) ) {
5526 output->Release();
5527 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5528 errorText_ = errorStream_.str();
5529 return FAILURE;
5530 }
5531
5532 // Check channel information.
5533 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5534 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5535 errorText_ = errorStream_.str();
5536 return FAILURE;
5537 }
5538
5539 // Check format information. Use 16-bit format unless not
5540 // supported or user requests 8-bit.
5541 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5542 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5543 waveFormat.wBitsPerSample = 16;
5544 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5545 }
5546 else {
5547 waveFormat.wBitsPerSample = 8;
5548 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5549 }
5550 stream_.userFormat = format;
5551
5552 // Update wave format structure and buffer information.
5553 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5554 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5555 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5556
5557 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5558 while ( dsPointerLeadTime * 2U > dsBufferSize )
5559 dsBufferSize *= 2;
5560
5561 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5562 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5563 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5564 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5565 if ( FAILED( result ) ) {
5566 output->Release();
5567 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5568 errorText_ = errorStream_.str();
5569 return FAILURE;
5570 }
5571
5572 // Even though we will write to the secondary buffer, we need to
5573 // access the primary buffer to set the correct output format
5574 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5575 // buffer description.
5576 DSBUFFERDESC bufferDescription;
5577 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5578 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5579 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5580
5581 // Obtain the primary buffer
5582 LPDIRECTSOUNDBUFFER buffer;
5583 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5584 if ( FAILED( result ) ) {
5585 output->Release();
5586 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5587 errorText_ = errorStream_.str();
5588 return FAILURE;
5589 }
5590
5591 // Set the primary DS buffer sound format.
5592 result = buffer->SetFormat( &waveFormat );
5593 if ( FAILED( result ) ) {
5594 output->Release();
5595 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5596 errorText_ = errorStream_.str();
5597 return FAILURE;
5598 }
5599
5600 // Setup the secondary DS buffer description.
5601 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5602 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5603 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5604 DSBCAPS_GLOBALFOCUS |
5605 DSBCAPS_GETCURRENTPOSITION2 |
5606 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5607 bufferDescription.dwBufferBytes = dsBufferSize;
5608 bufferDescription.lpwfxFormat = &waveFormat;
5609
5610 // Try to create the secondary DS buffer. If that doesn't work,
5611 // try to use software mixing. Otherwise, there's a problem.
5612 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5613 if ( FAILED( result ) ) {
5614 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5615 DSBCAPS_GLOBALFOCUS |
5616 DSBCAPS_GETCURRENTPOSITION2 |
5617 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5618 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5619 if ( FAILED( result ) ) {
5620 output->Release();
5621 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5622 errorText_ = errorStream_.str();
5623 return FAILURE;
5624 }
5625 }
5626
5627 // Get the buffer size ... might be different from what we specified.
5628 DSBCAPS dsbcaps;
5629 dsbcaps.dwSize = sizeof( DSBCAPS );
5630 result = buffer->GetCaps( &dsbcaps );
5631 if ( FAILED( result ) ) {
5632 output->Release();
5633 buffer->Release();
5634 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5635 errorText_ = errorStream_.str();
5636 return FAILURE;
5637 }
5638
5639 dsBufferSize = dsbcaps.dwBufferBytes;
5640
5641 // Lock the DS buffer
5642 LPVOID audioPtr;
5643 DWORD dataLen;
5644 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5645 if ( FAILED( result ) ) {
5646 output->Release();
5647 buffer->Release();
5648 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5649 errorText_ = errorStream_.str();
5650 return FAILURE;
5651 }
5652
5653 // Zero the DS buffer
5654 ZeroMemory( audioPtr, dataLen );
5655
5656 // Unlock the DS buffer
5657 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5658 if ( FAILED( result ) ) {
5659 output->Release();
5660 buffer->Release();
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5662 errorText_ = errorStream_.str();
5663 return FAILURE;
5664 }
5665
5666 ohandle = (void *) output;
5667 bhandle = (void *) buffer;
5668 }
5669
5670 if ( mode == INPUT ) {
5671
5672 LPDIRECTSOUNDCAPTURE input;
5673 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5674 if ( FAILED( result ) ) {
5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5676 errorText_ = errorStream_.str();
5677 return FAILURE;
5678 }
5679
5680 DSCCAPS inCaps;
5681 inCaps.dwSize = sizeof( inCaps );
5682 result = input->GetCaps( &inCaps );
5683 if ( FAILED( result ) ) {
5684 input->Release();
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5686 errorText_ = errorStream_.str();
5687 return FAILURE;
5688 }
5689
5690 // Check channel information.
5691 if ( inCaps.dwChannels < channels + firstChannel ) {
5692 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5693 return FAILURE;
5694 }
5695
5696 // Check format information. Use 16-bit format unless user
5697 // requests 8-bit.
5698 DWORD deviceFormats;
5699 if ( channels + firstChannel == 2 ) {
5700 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5701 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5702 waveFormat.wBitsPerSample = 8;
5703 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5704 }
5705 else { // assume 16-bit is supported
5706 waveFormat.wBitsPerSample = 16;
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5708 }
5709 }
5710 else { // channel == 1
5711 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5712 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5713 waveFormat.wBitsPerSample = 8;
5714 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5715 }
5716 else { // assume 16-bit is supported
5717 waveFormat.wBitsPerSample = 16;
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5719 }
5720 }
5721 stream_.userFormat = format;
5722
5723 // Update wave format structure and buffer information.
5724 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5725 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5726 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5727
5728 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5729 while ( dsPointerLeadTime * 2U > dsBufferSize )
5730 dsBufferSize *= 2;
5731
5732 // Setup the secondary DS buffer description.
5733 DSCBUFFERDESC bufferDescription;
5734 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5735 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5736 bufferDescription.dwFlags = 0;
5737 bufferDescription.dwReserved = 0;
5738 bufferDescription.dwBufferBytes = dsBufferSize;
5739 bufferDescription.lpwfxFormat = &waveFormat;
5740
5741 // Create the capture buffer.
5742 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5743 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5744 if ( FAILED( result ) ) {
5745 input->Release();
5746 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5747 errorText_ = errorStream_.str();
5748 return FAILURE;
5749 }
5750
5751 // Get the buffer size ... might be different from what we specified.
5752 DSCBCAPS dscbcaps;
5753 dscbcaps.dwSize = sizeof( DSCBCAPS );
5754 result = buffer->GetCaps( &dscbcaps );
5755 if ( FAILED( result ) ) {
5756 input->Release();
5757 buffer->Release();
5758 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5759 errorText_ = errorStream_.str();
5760 return FAILURE;
5761 }
5762
5763 dsBufferSize = dscbcaps.dwBufferBytes;
5764
5765 // NOTE: We could have a problem here if this is a duplex stream
5766 // and the play and capture hardware buffer sizes are different
5767 // (I'm actually not sure if that is a problem or not).
5768 // Currently, we are not verifying that.
5769
5770 // Lock the capture buffer
5771 LPVOID audioPtr;
5772 DWORD dataLen;
5773 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5774 if ( FAILED( result ) ) {
5775 input->Release();
5776 buffer->Release();
5777 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5778 errorText_ = errorStream_.str();
5779 return FAILURE;
5780 }
5781
5782 // Zero the buffer
5783 ZeroMemory( audioPtr, dataLen );
5784
5785 // Unlock the buffer
5786 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5787 if ( FAILED( result ) ) {
5788 input->Release();
5789 buffer->Release();
5790 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5791 errorText_ = errorStream_.str();
5792 return FAILURE;
5793 }
5794
5795 ohandle = (void *) input;
5796 bhandle = (void *) buffer;
5797 }
5798
5799 // Set various stream parameters
5800 DsHandle *handle = 0;
5801 stream_.nDeviceChannels[mode] = channels + firstChannel;
5802 stream_.nUserChannels[mode] = channels;
5803 stream_.bufferSize = *bufferSize;
5804 stream_.channelOffset[mode] = firstChannel;
5805 stream_.deviceInterleaved[mode] = true;
5806 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5807 else stream_.userInterleaved = true;
5808
5809 // Set flag for buffer conversion
5810 stream_.doConvertBuffer[mode] = false;
5811 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5812 stream_.doConvertBuffer[mode] = true;
5813 if (stream_.userFormat != stream_.deviceFormat[mode])
5814 stream_.doConvertBuffer[mode] = true;
5815 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5816 stream_.nUserChannels[mode] > 1 )
5817 stream_.doConvertBuffer[mode] = true;
5818
5819 // Allocate necessary internal buffers
5820 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5821 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5822 if ( stream_.userBuffer[mode] == NULL ) {
5823 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5824 goto error;
5825 }
5826
5827 if ( stream_.doConvertBuffer[mode] ) {
5828
5829 bool makeBuffer = true;
5830 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5831 if ( mode == INPUT ) {
5832 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5833 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5834 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5835 }
5836 }
5837
5838 if ( makeBuffer ) {
5839 bufferBytes *= *bufferSize;
5840 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5841 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5842 if ( stream_.deviceBuffer == NULL ) {
5843 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5844 goto error;
5845 }
5846 }
5847 }
5848
5849 // Allocate our DsHandle structures for the stream.
5850 if ( stream_.apiHandle == 0 ) {
5851 try {
5852 handle = new DsHandle;
5853 }
5854 catch ( std::bad_alloc& ) {
5855 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5856 goto error;
5857 }
5858
5859 // Create a manual-reset event.
5860 handle->condition = CreateEvent( NULL, // no security
5861 TRUE, // manual-reset
5862 FALSE, // non-signaled initially
5863 NULL ); // unnamed
5864 stream_.apiHandle = (void *) handle;
5865 }
5866 else
5867 handle = (DsHandle *) stream_.apiHandle;
5868 handle->id[mode] = ohandle;
5869 handle->buffer[mode] = bhandle;
5870 handle->dsBufferSize[mode] = dsBufferSize;
5871 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5872
5873 stream_.device[mode] = device;
5874 stream_.state = STREAM_STOPPED;
5875 if ( stream_.mode == OUTPUT && mode == INPUT )
5876 // We had already set up an output stream.
5877 stream_.mode = DUPLEX;
5878 else
5879 stream_.mode = mode;
5880 stream_.nBuffers = nBuffers;
5881 stream_.sampleRate = sampleRate;
5882
5883 // Setup the buffer conversion information structure.
5884 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5885
5886 // Setup the callback thread.
5887 if ( stream_.callbackInfo.isRunning == false ) {
5888 unsigned threadId;
5889 stream_.callbackInfo.isRunning = true;
5890 stream_.callbackInfo.object = (void *) this;
5891 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5892 &stream_.callbackInfo, 0, &threadId );
5893 if ( stream_.callbackInfo.thread == 0 ) {
5894 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5895 goto error;
5896 }
5897
5898 // Boost DS thread priority
5899 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5900 }
5901 return SUCCESS;
5902
5903 error:
5904 if ( handle ) {
5905 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5906 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5907 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5908 if ( buffer ) buffer->Release();
5909 object->Release();
5910 }
5911 if ( handle->buffer[1] ) {
5912 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5913 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5914 if ( buffer ) buffer->Release();
5915 object->Release();
5916 }
5917 CloseHandle( handle->condition );
5918 delete handle;
5919 stream_.apiHandle = 0;
5920 }
5921
5922 for ( int i=0; i<2; i++ ) {
5923 if ( stream_.userBuffer[i] ) {
5924 free( stream_.userBuffer[i] );
5925 stream_.userBuffer[i] = 0;
5926 }
5927 }
5928
5929 if ( stream_.deviceBuffer ) {
5930 free( stream_.deviceBuffer );
5931 stream_.deviceBuffer = 0;
5932 }
5933
5934 stream_.state = STREAM_CLOSED;
5935 return FAILURE;
5936 }
5937
5938 void RtApiDs :: closeStream()
5939 {
5940 if ( stream_.state == STREAM_CLOSED ) {
5941 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5942 error( RtAudioError::WARNING );
5943 return;
5944 }
5945
5946 // Stop the callback thread.
5947 stream_.callbackInfo.isRunning = false;
5948 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5949 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5950
5951 DsHandle *handle = (DsHandle *) stream_.apiHandle;
5952 if ( handle ) {
5953 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5954 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5955 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5956 if ( buffer ) {
5957 buffer->Stop();
5958 buffer->Release();
5959 }
5960 object->Release();
5961 }
5962 if ( handle->buffer[1] ) {
5963 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5964 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5965 if ( buffer ) {
5966 buffer->Stop();
5967 buffer->Release();
5968 }
5969 object->Release();
5970 }
5971 CloseHandle( handle->condition );
5972 delete handle;
5973 stream_.apiHandle = 0;
5974 }
5975
5976 for ( int i=0; i<2; i++ ) {
5977 if ( stream_.userBuffer[i] ) {
5978 free( stream_.userBuffer[i] );
5979 stream_.userBuffer[i] = 0;
5980 }
5981 }
5982
5983 if ( stream_.deviceBuffer ) {
5984 free( stream_.deviceBuffer );
5985 stream_.deviceBuffer = 0;
5986 }
5987
5988 stream_.mode = UNINITIALIZED;
5989 stream_.state = STREAM_CLOSED;
5990 }
5991
// Starts looping playback and/or capture on the stream's DS buffers and
// marks the stream running.  On a DS failure the unlock label converts
// the stored error text into a SYSTEM_ERROR.
void RtApiDs :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiDs::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  DsHandle *handle = (DsHandle *) stream_.apiHandle;

  // Increase scheduler frequency on lesser windows (a side-effect of
  // increasing timer accuracy). On greater windows (Win2K or later),
  // this is already in effect.
  timeBeginPeriod( 1 );

  buffersRolling = false;
  duplexPrerollBytes = 0;

  if ( stream_.mode == DUPLEX ) {
    // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
    duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
  }

  HRESULT result = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Begin looping playback of the output buffer.
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
    result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Begin looping capture into the input buffer.
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
    result = buffer->Start( DSCBSTART_LOOPING );
    if ( FAILED( result ) ) {
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Reset the drain state and the stop-synchronization event before
  // declaring the stream running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
}
6047
6048 void RtApiDs :: stopStream()
6049 {
6050 verifyStream();
6051 if ( stream_.state == STREAM_STOPPED ) {
6052 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6053 error( RtAudioError::WARNING );
6054 return;
6055 }
6056
6057 HRESULT result = 0;
6058 LPVOID audioPtr;
6059 DWORD dataLen;
6060 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6061 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6062 if ( handle->drainCounter == 0 ) {
6063 handle->drainCounter = 2;
6064 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6065 }
6066
6067 stream_.state = STREAM_STOPPED;
6068
6069 MUTEX_LOCK( &stream_.mutex );
6070
6071 // Stop the buffer and clear memory
6072 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6073 result = buffer->Stop();
6074 if ( FAILED( result ) ) {
6075 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6076 errorText_ = errorStream_.str();
6077 goto unlock;
6078 }
6079
6080 // Lock the buffer and clear it so that if we start to play again,
6081 // we won't have old data playing.
6082 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6083 if ( FAILED( result ) ) {
6084 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6085 errorText_ = errorStream_.str();
6086 goto unlock;
6087 }
6088
6089 // Zero the DS buffer
6090 ZeroMemory( audioPtr, dataLen );
6091
6092 // Unlock the DS buffer
6093 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6094 if ( FAILED( result ) ) {
6095 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6096 errorText_ = errorStream_.str();
6097 goto unlock;
6098 }
6099
6100 // If we start playing again, we must begin at beginning of buffer.
6101 handle->bufferPointer[0] = 0;
6102 }
6103
6104 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6105 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6106 audioPtr = NULL;
6107 dataLen = 0;
6108
6109 stream_.state = STREAM_STOPPED;
6110
6111 if ( stream_.mode != DUPLEX )
6112 MUTEX_LOCK( &stream_.mutex );
6113
6114 result = buffer->Stop();
6115 if ( FAILED( result ) ) {
6116 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6117 errorText_ = errorStream_.str();
6118 goto unlock;
6119 }
6120
6121 // Lock the buffer and clear it so that if we start to play again,
6122 // we won't have old data playing.
6123 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6124 if ( FAILED( result ) ) {
6125 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6126 errorText_ = errorStream_.str();
6127 goto unlock;
6128 }
6129
6130 // Zero the DS buffer
6131 ZeroMemory( audioPtr, dataLen );
6132
6133 // Unlock the DS buffer
6134 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6135 if ( FAILED( result ) ) {
6136 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6137 errorText_ = errorStream_.str();
6138 goto unlock;
6139 }
6140
6141 // If we start recording again, we must begin at beginning of buffer.
6142 handle->bufferPointer[1] = 0;
6143 }
6144
6145 unlock:
6146 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6147 MUTEX_UNLOCK( &stream_.mutex );
6148
6149 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6150 }
6151
6152 void RtApiDs :: abortStream()
6153 {
6154 verifyStream();
6155 if ( stream_.state == STREAM_STOPPED ) {
6156 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6157 error( RtAudioError::WARNING );
6158 return;
6159 }
6160
6161 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6162 handle->drainCounter = 2;
6163
6164 stopStream();
6165 }
6166
6167 void RtApiDs :: callbackEvent()
6168 {
6169 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6170 Sleep( 50 ); // sleep 50 milliseconds
6171 return;
6172 }
6173
6174 if ( stream_.state == STREAM_CLOSED ) {
6175 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6176 error( RtAudioError::WARNING );
6177 return;
6178 }
6179
6180 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6181 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6182
6183 // Check if we were draining the stream and signal is finished.
6184 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6185
6186 stream_.state = STREAM_STOPPING;
6187 if ( handle->internalDrain == false )
6188 SetEvent( handle->condition );
6189 else
6190 stopStream();
6191 return;
6192 }
6193
6194 // Invoke user callback to get fresh output data UNLESS we are
6195 // draining stream.
6196 if ( handle->drainCounter == 0 ) {
6197 RtAudioCallback callback = (RtAudioCallback) info->callback;
6198 double streamTime = getStreamTime();
6199 RtAudioStreamStatus status = 0;
6200 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6201 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6202 handle->xrun[0] = false;
6203 }
6204 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6205 status |= RTAUDIO_INPUT_OVERFLOW;
6206 handle->xrun[1] = false;
6207 }
6208 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6209 stream_.bufferSize, streamTime, status, info->userData );
6210 if ( cbReturnValue == 2 ) {
6211 stream_.state = STREAM_STOPPING;
6212 handle->drainCounter = 2;
6213 abortStream();
6214 return;
6215 }
6216 else if ( cbReturnValue == 1 ) {
6217 handle->drainCounter = 1;
6218 handle->internalDrain = true;
6219 }
6220 }
6221
6222 HRESULT result;
6223 DWORD currentWritePointer, safeWritePointer;
6224 DWORD currentReadPointer, safeReadPointer;
6225 UINT nextWritePointer;
6226
6227 LPVOID buffer1 = NULL;
6228 LPVOID buffer2 = NULL;
6229 DWORD bufferSize1 = 0;
6230 DWORD bufferSize2 = 0;
6231
6232 char *buffer;
6233 long bufferBytes;
6234
6235 MUTEX_LOCK( &stream_.mutex );
6236 if ( stream_.state == STREAM_STOPPED ) {
6237 MUTEX_UNLOCK( &stream_.mutex );
6238 return;
6239 }
6240
6241 if ( buffersRolling == false ) {
6242 if ( stream_.mode == DUPLEX ) {
6243 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6244
6245 // It takes a while for the devices to get rolling. As a result,
6246 // there's no guarantee that the capture and write device pointers
6247 // will move in lockstep. Wait here for both devices to start
6248 // rolling, and then set our buffer pointers accordingly.
6249 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6250 // bytes later than the write buffer.
6251
6252 // Stub: a serious risk of having a pre-emptive scheduling round
6253 // take place between the two GetCurrentPosition calls... but I'm
6254 // really not sure how to solve the problem. Temporarily boost to
6255 // Realtime priority, maybe; but I'm not sure what priority the
6256 // DirectSound service threads run at. We *should* be roughly
6257 // within a ms or so of correct.
6258
6259 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6260 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6261
6262 DWORD startSafeWritePointer, startSafeReadPointer;
6263
6264 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6265 if ( FAILED( result ) ) {
6266 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6267 errorText_ = errorStream_.str();
6268 error( RtAudioError::SYSTEM_ERROR );
6269 return;
6270 }
6271 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6272 if ( FAILED( result ) ) {
6273 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6274 errorText_ = errorStream_.str();
6275 error( RtAudioError::SYSTEM_ERROR );
6276 return;
6277 }
6278 while ( true ) {
6279 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6280 if ( FAILED( result ) ) {
6281 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6282 errorText_ = errorStream_.str();
6283 error( RtAudioError::SYSTEM_ERROR );
6284 return;
6285 }
6286 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6287 if ( FAILED( result ) ) {
6288 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6289 errorText_ = errorStream_.str();
6290 error( RtAudioError::SYSTEM_ERROR );
6291 return;
6292 }
6293 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6294 Sleep( 1 );
6295 }
6296
6297 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6298
6299 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6300 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6301 handle->bufferPointer[1] = safeReadPointer;
6302 }
6303 else if ( stream_.mode == OUTPUT ) {
6304
6305 // Set the proper nextWritePosition after initial startup.
6306 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6307 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6308 if ( FAILED( result ) ) {
6309 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6310 errorText_ = errorStream_.str();
6311 error( RtAudioError::SYSTEM_ERROR );
6312 return;
6313 }
6314 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6315 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6316 }
6317
6318 buffersRolling = true;
6319 }
6320
6321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6322
6323 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6324
6325 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6326 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6327 bufferBytes *= formatBytes( stream_.userFormat );
6328 memset( stream_.userBuffer[0], 0, bufferBytes );
6329 }
6330
6331 // Setup parameters and do buffer conversion if necessary.
6332 if ( stream_.doConvertBuffer[0] ) {
6333 buffer = stream_.deviceBuffer;
6334 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6335 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6336 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6337 }
6338 else {
6339 buffer = stream_.userBuffer[0];
6340 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6341 bufferBytes *= formatBytes( stream_.userFormat );
6342 }
6343
6344 // No byte swapping necessary in DirectSound implementation.
6345
6346 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6347 // unsigned. So, we need to convert our signed 8-bit data here to
6348 // unsigned.
6349 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6350 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6351
6352 DWORD dsBufferSize = handle->dsBufferSize[0];
6353 nextWritePointer = handle->bufferPointer[0];
6354
6355 DWORD endWrite, leadPointer;
6356 while ( true ) {
6357 // Find out where the read and "safe write" pointers are.
6358 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6359 if ( FAILED( result ) ) {
6360 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6361 errorText_ = errorStream_.str();
6362 error( RtAudioError::SYSTEM_ERROR );
6363 return;
6364 }
6365
6366 // We will copy our output buffer into the region between
6367 // safeWritePointer and leadPointer. If leadPointer is not
6368 // beyond the next endWrite position, wait until it is.
6369 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6370 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6371 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6372 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6373 endWrite = nextWritePointer + bufferBytes;
6374
6375 // Check whether the entire write region is behind the play pointer.
6376 if ( leadPointer >= endWrite ) break;
6377
6378 // If we are here, then we must wait until the leadPointer advances
6379 // beyond the end of our next write region. We use the
6380 // Sleep() function to suspend operation until that happens.
6381 double millis = ( endWrite - leadPointer ) * 1000.0;
6382 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6383 if ( millis < 1.0 ) millis = 1.0;
6384 Sleep( (DWORD) millis );
6385 }
6386
6387 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6388 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6389 // We've strayed into the forbidden zone ... resync the read pointer.
6390 handle->xrun[0] = true;
6391 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6392 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6393 handle->bufferPointer[0] = nextWritePointer;
6394 endWrite = nextWritePointer + bufferBytes;
6395 }
6396
6397 // Lock free space in the buffer
6398 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6399 &bufferSize1, &buffer2, &bufferSize2, 0 );
6400 if ( FAILED( result ) ) {
6401 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6402 errorText_ = errorStream_.str();
6403 error( RtAudioError::SYSTEM_ERROR );
6404 return;
6405 }
6406
6407 // Copy our buffer into the DS buffer
6408 CopyMemory( buffer1, buffer, bufferSize1 );
6409 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6410
6411 // Update our buffer offset and unlock sound buffer
6412 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6413 if ( FAILED( result ) ) {
6414 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6415 errorText_ = errorStream_.str();
6416 error( RtAudioError::SYSTEM_ERROR );
6417 return;
6418 }
6419 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6420 handle->bufferPointer[0] = nextWritePointer;
6421 }
6422
6423 // Don't bother draining input
6424 if ( handle->drainCounter ) {
6425 handle->drainCounter++;
6426 goto unlock;
6427 }
6428
6429 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6430
6431 // Setup parameters.
6432 if ( stream_.doConvertBuffer[1] ) {
6433 buffer = stream_.deviceBuffer;
6434 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6435 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6436 }
6437 else {
6438 buffer = stream_.userBuffer[1];
6439 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6440 bufferBytes *= formatBytes( stream_.userFormat );
6441 }
6442
6443 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6444 long nextReadPointer = handle->bufferPointer[1];
6445 DWORD dsBufferSize = handle->dsBufferSize[1];
6446
6447 // Find out where the write and "safe read" pointers are.
6448 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6449 if ( FAILED( result ) ) {
6450 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6451 errorText_ = errorStream_.str();
6452 error( RtAudioError::SYSTEM_ERROR );
6453 return;
6454 }
6455
6456 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6457 DWORD endRead = nextReadPointer + bufferBytes;
6458
6459 // Handling depends on whether we are INPUT or DUPLEX.
6460 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6461 // then a wait here will drag the write pointers into the forbidden zone.
6462 //
6463 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6464 // it's in a safe position. This causes dropouts, but it seems to be the only
6465 // practical way to sync up the read and write pointers reliably, given the
6466 // the very complex relationship between phase and increment of the read and write
6467 // pointers.
6468 //
6469 // In order to minimize audible dropouts in DUPLEX mode, we will
6470 // provide a pre-roll period of 0.5 seconds in which we return
6471 // zeros from the read buffer while the pointers sync up.
6472
6473 if ( stream_.mode == DUPLEX ) {
6474 if ( safeReadPointer < endRead ) {
6475 if ( duplexPrerollBytes <= 0 ) {
6476 // Pre-roll time over. Be more agressive.
6477 int adjustment = endRead-safeReadPointer;
6478
6479 handle->xrun[1] = true;
6480 // Two cases:
6481 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6482 // and perform fine adjustments later.
6483 // - small adjustments: back off by twice as much.
6484 if ( adjustment >= 2*bufferBytes )
6485 nextReadPointer = safeReadPointer-2*bufferBytes;
6486 else
6487 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6488
6489 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6490
6491 }
6492 else {
6493 // In pre=roll time. Just do it.
6494 nextReadPointer = safeReadPointer - bufferBytes;
6495 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6496 }
6497 endRead = nextReadPointer + bufferBytes;
6498 }
6499 }
6500 else { // mode == INPUT
6501 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6502 // See comments for playback.
6503 double millis = (endRead - safeReadPointer) * 1000.0;
6504 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6505 if ( millis < 1.0 ) millis = 1.0;
6506 Sleep( (DWORD) millis );
6507
6508 // Wake up and find out where we are now.
6509 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6510 if ( FAILED( result ) ) {
6511 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6512 errorText_ = errorStream_.str();
6513 error( RtAudioError::SYSTEM_ERROR );
6514 return;
6515 }
6516
6517 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6518 }
6519 }
6520
6521 // Lock free space in the buffer
6522 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6523 &bufferSize1, &buffer2, &bufferSize2, 0 );
6524 if ( FAILED( result ) ) {
6525 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6526 errorText_ = errorStream_.str();
6527 error( RtAudioError::SYSTEM_ERROR );
6528 return;
6529 }
6530
6531 if ( duplexPrerollBytes <= 0 ) {
6532 // Copy our buffer into the DS buffer
6533 CopyMemory( buffer, buffer1, bufferSize1 );
6534 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6535 }
6536 else {
6537 memset( buffer, 0, bufferSize1 );
6538 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6539 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6540 }
6541
6542 // Update our buffer offset and unlock sound buffer
6543 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6544 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6545 if ( FAILED( result ) ) {
6546 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6547 errorText_ = errorStream_.str();
6548 error( RtAudioError::SYSTEM_ERROR );
6549 return;
6550 }
6551 handle->bufferPointer[1] = nextReadPointer;
6552
6553 // No byte swapping necessary in DirectSound implementation.
6554
6555 // If necessary, convert 8-bit data from unsigned to signed.
6556 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6557 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6558
6559 // Do buffer conversion if necessary.
6560 if ( stream_.doConvertBuffer[1] )
6561 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6562 }
6563
6564 unlock:
6565 MUTEX_UNLOCK( &stream_.mutex );
6566 RtApi::tickStreamTime();
6567 }
6568
6569 // Definitions for utility functions and callbacks
6570 // specific to the DirectSound implementation.
6571
6572 static unsigned __stdcall callbackHandler( void *ptr )
6573 {
6574 CallbackInfo *info = (CallbackInfo *) ptr;
6575 RtApiDs *object = (RtApiDs *) info->object;
6576 bool* isRunning = &info->isRunning;
6577
6578 while ( *isRunning == true ) {
6579 object->callbackEvent();
6580 }
6581
6582 _endthreadex( 0 );
6583 return 0;
6584 }
6585
6586 #include "tchar.h"
6587
6588 static std::string convertTChar( LPCTSTR name )
6589 {
6590 #if defined( UNICODE ) || defined( _UNICODE )
6591 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
6592 std::string s( length-1, '\0' );
6593 WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);
6594 #else
6595 std::string s( name );
6596 #endif
6597
6598 return s;
6599 }
6600
6601 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6602 LPCTSTR description,
6603 LPCTSTR /*module*/,
6604 LPVOID lpContext )
6605 {
6606 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6607 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6608
6609 HRESULT hr;
6610 bool validDevice = false;
6611 if ( probeInfo.isInput == true ) {
6612 DSCCAPS caps;
6613 LPDIRECTSOUNDCAPTURE object;
6614
6615 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6616 if ( hr != DS_OK ) return TRUE;
6617
6618 caps.dwSize = sizeof(caps);
6619 hr = object->GetCaps( &caps );
6620 if ( hr == DS_OK ) {
6621 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6622 validDevice = true;
6623 }
6624 object->Release();
6625 }
6626 else {
6627 DSCAPS caps;
6628 LPDIRECTSOUND object;
6629 hr = DirectSoundCreate( lpguid, &object, NULL );
6630 if ( hr != DS_OK ) return TRUE;
6631
6632 caps.dwSize = sizeof(caps);
6633 hr = object->GetCaps( &caps );
6634 if ( hr == DS_OK ) {
6635 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6636 validDevice = true;
6637 }
6638 object->Release();
6639 }
6640
6641 // If good device, then save its name and guid.
6642 std::string name = convertTChar( description );
6643 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6644 if ( lpguid == NULL )
6645 name = "Default Device";
6646 if ( validDevice ) {
6647 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6648 if ( dsDevices[i].name == name ) {
6649 dsDevices[i].found = true;
6650 if ( probeInfo.isInput ) {
6651 dsDevices[i].id[1] = lpguid;
6652 dsDevices[i].validId[1] = true;
6653 }
6654 else {
6655 dsDevices[i].id[0] = lpguid;
6656 dsDevices[i].validId[0] = true;
6657 }
6658 return TRUE;
6659 }
6660 }
6661
6662 DsDevice device;
6663 device.name = name;
6664 device.found = true;
6665 if ( probeInfo.isInput ) {
6666 device.id[1] = lpguid;
6667 device.validId[1] = true;
6668 }
6669 else {
6670 device.id[0] = lpguid;
6671 device.validId[0] = true;
6672 }
6673 dsDevices.push_back( device );
6674 }
6675
6676 return TRUE;
6677 }
6678
6679 static const char* getErrorString( int code )
6680 {
6681 switch ( code ) {
6682
6683 case DSERR_ALLOCATED:
6684 return "Already allocated";
6685
6686 case DSERR_CONTROLUNAVAIL:
6687 return "Control unavailable";
6688
6689 case DSERR_INVALIDPARAM:
6690 return "Invalid parameter";
6691
6692 case DSERR_INVALIDCALL:
6693 return "Invalid call";
6694
6695 case DSERR_GENERIC:
6696 return "Generic error";
6697
6698 case DSERR_PRIOLEVELNEEDED:
6699 return "Priority level needed";
6700
6701 case DSERR_OUTOFMEMORY:
6702 return "Out of memory";
6703
6704 case DSERR_BADFORMAT:
6705 return "The sample rate or the channel format is not supported";
6706
6707 case DSERR_UNSUPPORTED:
6708 return "Not supported";
6709
6710 case DSERR_NODRIVER:
6711 return "No driver";
6712
6713 case DSERR_ALREADYINITIALIZED:
6714 return "Already initialized";
6715
6716 case DSERR_NOAGGREGATION:
6717 return "No aggregation";
6718
6719 case DSERR_BUFFERLOST:
6720 return "Buffer lost";
6721
6722 case DSERR_OTHERAPPHASPRIO:
6723 return "Another application already has priority";
6724
6725 case DSERR_UNINITIALIZED:
6726 return "Uninitialized";
6727
6728 default:
6729 return "DirectSound unknown error";
6730 }
6731 }
6732 //******************** End of __WINDOWS_DS__ *********************//
6733 #endif
6734
6735
6736 #if defined(__LINUX_ALSA__)
6737
6738 #include <alsa/asoundlib.h>
6739 #include <unistd.h>
6740
6741 // A structure to hold various information related to the ALSA API
6742 // implementation.
6743 struct AlsaHandle {
6744 snd_pcm_t *handles[2];
6745 bool synchronized;
6746 bool xrun[2];
6747 pthread_cond_t runnable_cv;
6748 bool runnable;
6749
6750 AlsaHandle()
6751 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6752 };
6753
6754 static void *alsaCallbackHandler( void * ptr );
6755
6756 RtApiAlsa :: RtApiAlsa()
6757 {
6758 // Nothing to do here.
6759 }
6760
6761 RtApiAlsa :: ~RtApiAlsa()
6762 {
6763 if ( stream_.state != STREAM_CLOSED ) closeStream();
6764 }
6765
6766 unsigned int RtApiAlsa :: getDeviceCount( void )
6767 {
6768 unsigned nDevices = 0;
6769 int result, subdevice, card;
6770 char name[64];
6771 snd_ctl_t *handle;
6772
6773 // Count cards and devices
6774 card = -1;
6775 snd_card_next( &card );
6776 while ( card >= 0 ) {
6777 sprintf( name, "hw:%d", card );
6778 result = snd_ctl_open( &handle, name, 0 );
6779 if ( result < 0 ) {
6780 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6781 errorText_ = errorStream_.str();
6782 error( RtAudioError::WARNING );
6783 goto nextcard;
6784 }
6785 subdevice = -1;
6786 while( 1 ) {
6787 result = snd_ctl_pcm_next_device( handle, &subdevice );
6788 if ( result < 0 ) {
6789 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6790 errorText_ = errorStream_.str();
6791 error( RtAudioError::WARNING );
6792 break;
6793 }
6794 if ( subdevice < 0 )
6795 break;
6796 nDevices++;
6797 }
6798 nextcard:
6799 snd_ctl_close( handle );
6800 snd_card_next( &card );
6801 }
6802
6803 result = snd_ctl_open( &handle, "default", 0 );
6804 if (result == 0) {
6805 nDevices++;
6806 snd_ctl_close( handle );
6807 }
6808
6809 return nDevices;
6810 }
6811
6812 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6813 {
6814 RtAudio::DeviceInfo info;
6815 info.probed = false;
6816
6817 unsigned nDevices = 0;
6818 int result, subdevice, card;
6819 char name[64];
6820 snd_ctl_t *chandle;
6821
6822 // Count cards and devices
6823 card = -1;
6824 snd_card_next( &card );
6825 while ( card >= 0 ) {
6826 sprintf( name, "hw:%d", card );
6827 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6828 if ( result < 0 ) {
6829 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6830 errorText_ = errorStream_.str();
6831 error( RtAudioError::WARNING );
6832 goto nextcard;
6833 }
6834 subdevice = -1;
6835 while( 1 ) {
6836 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6837 if ( result < 0 ) {
6838 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6839 errorText_ = errorStream_.str();
6840 error( RtAudioError::WARNING );
6841 break;
6842 }
6843 if ( subdevice < 0 ) break;
6844 if ( nDevices == device ) {
6845 sprintf( name, "hw:%d,%d", card, subdevice );
6846 goto foundDevice;
6847 }
6848 nDevices++;
6849 }
6850 nextcard:
6851 snd_ctl_close( chandle );
6852 snd_card_next( &card );
6853 }
6854
6855 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6856 if ( result == 0 ) {
6857 if ( nDevices == device ) {
6858 strcpy( name, "default" );
6859 goto foundDevice;
6860 }
6861 nDevices++;
6862 }
6863
6864 if ( nDevices == 0 ) {
6865 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6866 error( RtAudioError::INVALID_USE );
6867 return info;
6868 }
6869
6870 if ( device >= nDevices ) {
6871 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6872 error( RtAudioError::INVALID_USE );
6873 return info;
6874 }
6875
6876 foundDevice:
6877
6878 // If a stream is already open, we cannot probe the stream devices.
6879 // Thus, use the saved results.
6880 if ( stream_.state != STREAM_CLOSED &&
6881 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6882 snd_ctl_close( chandle );
6883 if ( device >= devices_.size() ) {
6884 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6885 error( RtAudioError::WARNING );
6886 return info;
6887 }
6888 return devices_[ device ];
6889 }
6890
6891 int openMode = SND_PCM_ASYNC;
6892 snd_pcm_stream_t stream;
6893 snd_pcm_info_t *pcminfo;
6894 snd_pcm_info_alloca( &pcminfo );
6895 snd_pcm_t *phandle;
6896 snd_pcm_hw_params_t *params;
6897 snd_pcm_hw_params_alloca( &params );
6898
6899 // First try for playback unless default device (which has subdev -1)
6900 stream = SND_PCM_STREAM_PLAYBACK;
6901 snd_pcm_info_set_stream( pcminfo, stream );
6902 if ( subdevice != -1 ) {
6903 snd_pcm_info_set_device( pcminfo, subdevice );
6904 snd_pcm_info_set_subdevice( pcminfo, 0 );
6905
6906 result = snd_ctl_pcm_info( chandle, pcminfo );
6907 if ( result < 0 ) {
6908 // Device probably doesn't support playback.
6909 goto captureProbe;
6910 }
6911 }
6912
6913 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6914 if ( result < 0 ) {
6915 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6916 errorText_ = errorStream_.str();
6917 error( RtAudioError::WARNING );
6918 goto captureProbe;
6919 }
6920
6921 // The device is open ... fill the parameter structure.
6922 result = snd_pcm_hw_params_any( phandle, params );
6923 if ( result < 0 ) {
6924 snd_pcm_close( phandle );
6925 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6926 errorText_ = errorStream_.str();
6927 error( RtAudioError::WARNING );
6928 goto captureProbe;
6929 }
6930
6931 // Get output channel information.
6932 unsigned int value;
6933 result = snd_pcm_hw_params_get_channels_max( params, &value );
6934 if ( result < 0 ) {
6935 snd_pcm_close( phandle );
6936 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6937 errorText_ = errorStream_.str();
6938 error( RtAudioError::WARNING );
6939 goto captureProbe;
6940 }
6941 info.outputChannels = value;
6942 snd_pcm_close( phandle );
6943
6944 captureProbe:
6945 stream = SND_PCM_STREAM_CAPTURE;
6946 snd_pcm_info_set_stream( pcminfo, stream );
6947
6948 // Now try for capture unless default device (with subdev = -1)
6949 if ( subdevice != -1 ) {
6950 result = snd_ctl_pcm_info( chandle, pcminfo );
6951 snd_ctl_close( chandle );
6952 if ( result < 0 ) {
6953 // Device probably doesn't support capture.
6954 if ( info.outputChannels == 0 ) return info;
6955 goto probeParameters;
6956 }
6957 }
6958 else
6959 snd_ctl_close( chandle );
6960
6961 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6962 if ( result < 0 ) {
6963 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6964 errorText_ = errorStream_.str();
6965 error( RtAudioError::WARNING );
6966 if ( info.outputChannels == 0 ) return info;
6967 goto probeParameters;
6968 }
6969
6970 // The device is open ... fill the parameter structure.
6971 result = snd_pcm_hw_params_any( phandle, params );
6972 if ( result < 0 ) {
6973 snd_pcm_close( phandle );
6974 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6975 errorText_ = errorStream_.str();
6976 error( RtAudioError::WARNING );
6977 if ( info.outputChannels == 0 ) return info;
6978 goto probeParameters;
6979 }
6980
6981 result = snd_pcm_hw_params_get_channels_max( params, &value );
6982 if ( result < 0 ) {
6983 snd_pcm_close( phandle );
6984 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6985 errorText_ = errorStream_.str();
6986 error( RtAudioError::WARNING );
6987 if ( info.outputChannels == 0 ) return info;
6988 goto probeParameters;
6989 }
6990 info.inputChannels = value;
6991 snd_pcm_close( phandle );
6992
6993 // If device opens for both playback and capture, we determine the channels.
6994 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6995 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
6996
6997 // ALSA doesn't provide default devices so we'll use the first available one.
6998 if ( device == 0 && info.outputChannels > 0 )
6999 info.isDefaultOutput = true;
7000 if ( device == 0 && info.inputChannels > 0 )
7001 info.isDefaultInput = true;
7002
7003 probeParameters:
7004 // At this point, we just need to figure out the supported data
7005 // formats and sample rates. We'll proceed by opening the device in
7006 // the direction with the maximum number of channels, or playback if
7007 // they are equal. This might limit our sample rate options, but so
7008 // be it.
7009
7010 if ( info.outputChannels >= info.inputChannels )
7011 stream = SND_PCM_STREAM_PLAYBACK;
7012 else
7013 stream = SND_PCM_STREAM_CAPTURE;
7014 snd_pcm_info_set_stream( pcminfo, stream );
7015
7016 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7017 if ( result < 0 ) {
7018 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7019 errorText_ = errorStream_.str();
7020 error( RtAudioError::WARNING );
7021 return info;
7022 }
7023
7024 // The device is open ... fill the parameter structure.
7025 result = snd_pcm_hw_params_any( phandle, params );
7026 if ( result < 0 ) {
7027 snd_pcm_close( phandle );
7028 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7029 errorText_ = errorStream_.str();
7030 error( RtAudioError::WARNING );
7031 return info;
7032 }
7033
7034 // Test our discrete set of sample rate values.
7035 info.sampleRates.clear();
7036 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7037 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
7038 info.sampleRates.push_back( SAMPLE_RATES[i] );
7039 }
7040 if ( info.sampleRates.size() == 0 ) {
7041 snd_pcm_close( phandle );
7042 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7043 errorText_ = errorStream_.str();
7044 error( RtAudioError::WARNING );
7045 return info;
7046 }
7047
7048 // Probe the supported data formats ... we don't care about endian-ness just yet
7049 snd_pcm_format_t format;
7050 info.nativeFormats = 0;
7051 format = SND_PCM_FORMAT_S8;
7052 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7053 info.nativeFormats |= RTAUDIO_SINT8;
7054 format = SND_PCM_FORMAT_S16;
7055 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7056 info.nativeFormats |= RTAUDIO_SINT16;
7057 format = SND_PCM_FORMAT_S24;
7058 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7059 info.nativeFormats |= RTAUDIO_SINT24;
7060 format = SND_PCM_FORMAT_S32;
7061 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7062 info.nativeFormats |= RTAUDIO_SINT32;
7063 format = SND_PCM_FORMAT_FLOAT;
7064 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7065 info.nativeFormats |= RTAUDIO_FLOAT32;
7066 format = SND_PCM_FORMAT_FLOAT64;
7067 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7068 info.nativeFormats |= RTAUDIO_FLOAT64;
7069
7070 // Check that we have at least one supported format
7071 if ( info.nativeFormats == 0 ) {
7072 snd_pcm_close( phandle );
7073 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7074 errorText_ = errorStream_.str();
7075 error( RtAudioError::WARNING );
7076 return info;
7077 }
7078
7079 // Get the device name
7080 char *cardname;
7081 result = snd_card_get_name( card, &cardname );
7082 if ( result >= 0 ) {
7083 sprintf( name, "hw:%s,%d", cardname, subdevice );
7084 free( cardname );
7085 }
7086 info.name = name;
7087
7088 // That's all ... close the device and return
7089 snd_pcm_close( phandle );
7090 info.probed = true;
7091 return info;
7092 }
7093
7094 void RtApiAlsa :: saveDeviceInfo( void )
7095 {
7096 devices_.clear();
7097
7098 unsigned int nDevices = getDeviceCount();
7099 devices_.resize( nDevices );
7100 for ( unsigned int i=0; i<nDevices; i++ )
7101 devices_[i] = getDeviceInfo( i );
7102 }
7103
7104 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7105 unsigned int firstChannel, unsigned int sampleRate,
7106 RtAudioFormat format, unsigned int *bufferSize,
7107 RtAudio::StreamOptions *options )
7108
7109 {
7110 #if defined(__RTAUDIO_DEBUG__)
7111 snd_output_t *out;
7112 snd_output_stdio_attach(&out, stderr, 0);
7113 #endif
7114
7115 // I'm not using the "plug" interface ... too much inconsistent behavior.
7116
7117 unsigned nDevices = 0;
7118 int result, subdevice, card;
7119 char name[64];
7120 snd_ctl_t *chandle;
7121
7122 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7123 snprintf(name, sizeof(name), "%s", "default");
7124 else {
7125 // Count cards and devices
7126 card = -1;
7127 snd_card_next( &card );
7128 while ( card >= 0 ) {
7129 sprintf( name, "hw:%d", card );
7130 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7131 if ( result < 0 ) {
7132 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7133 errorText_ = errorStream_.str();
7134 return FAILURE;
7135 }
7136 subdevice = -1;
7137 while( 1 ) {
7138 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7139 if ( result < 0 ) break;
7140 if ( subdevice < 0 ) break;
7141 if ( nDevices == device ) {
7142 sprintf( name, "hw:%d,%d", card, subdevice );
7143 snd_ctl_close( chandle );
7144 goto foundDevice;
7145 }
7146 nDevices++;
7147 }
7148 snd_ctl_close( chandle );
7149 snd_card_next( &card );
7150 }
7151
7152 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7153 if ( result == 0 ) {
7154 if ( nDevices == device ) {
7155 strcpy( name, "default" );
7156 goto foundDevice;
7157 }
7158 nDevices++;
7159 }
7160
7161 if ( nDevices == 0 ) {
7162 // This should not happen because a check is made before this function is called.
7163 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7164 return FAILURE;
7165 }
7166
7167 if ( device >= nDevices ) {
7168 // This should not happen because a check is made before this function is called.
7169 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7170 return FAILURE;
7171 }
7172 }
7173
7174 foundDevice:
7175
7176 // The getDeviceInfo() function will not work for a device that is
7177 // already open. Thus, we'll probe the system before opening a
7178 // stream and save the results for use by getDeviceInfo().
7179 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7180 this->saveDeviceInfo();
7181
7182 snd_pcm_stream_t stream;
7183 if ( mode == OUTPUT )
7184 stream = SND_PCM_STREAM_PLAYBACK;
7185 else
7186 stream = SND_PCM_STREAM_CAPTURE;
7187
7188 snd_pcm_t *phandle;
7189 int openMode = SND_PCM_ASYNC;
7190 result = snd_pcm_open( &phandle, name, stream, openMode );
7191 if ( result < 0 ) {
7192 if ( mode == OUTPUT )
7193 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7194 else
7195 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7196 errorText_ = errorStream_.str();
7197 return FAILURE;
7198 }
7199
7200 // Fill the parameter structure.
7201 snd_pcm_hw_params_t *hw_params;
7202 snd_pcm_hw_params_alloca( &hw_params );
7203 result = snd_pcm_hw_params_any( phandle, hw_params );
7204 if ( result < 0 ) {
7205 snd_pcm_close( phandle );
7206 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7207 errorText_ = errorStream_.str();
7208 return FAILURE;
7209 }
7210
7211 #if defined(__RTAUDIO_DEBUG__)
7212 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7213 snd_pcm_hw_params_dump( hw_params, out );
7214 #endif
7215
7216 // Set access ... check user preference.
7217 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7218 stream_.userInterleaved = false;
7219 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7220 if ( result < 0 ) {
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7222 stream_.deviceInterleaved[mode] = true;
7223 }
7224 else
7225 stream_.deviceInterleaved[mode] = false;
7226 }
7227 else {
7228 stream_.userInterleaved = true;
7229 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7230 if ( result < 0 ) {
7231 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7232 stream_.deviceInterleaved[mode] = false;
7233 }
7234 else
7235 stream_.deviceInterleaved[mode] = true;
7236 }
7237
7238 if ( result < 0 ) {
7239 snd_pcm_close( phandle );
7240 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7241 errorText_ = errorStream_.str();
7242 return FAILURE;
7243 }
7244
7245 // Determine how to set the device format.
7246 stream_.userFormat = format;
7247 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7248
7249 if ( format == RTAUDIO_SINT8 )
7250 deviceFormat = SND_PCM_FORMAT_S8;
7251 else if ( format == RTAUDIO_SINT16 )
7252 deviceFormat = SND_PCM_FORMAT_S16;
7253 else if ( format == RTAUDIO_SINT24 )
7254 deviceFormat = SND_PCM_FORMAT_S24;
7255 else if ( format == RTAUDIO_SINT32 )
7256 deviceFormat = SND_PCM_FORMAT_S32;
7257 else if ( format == RTAUDIO_FLOAT32 )
7258 deviceFormat = SND_PCM_FORMAT_FLOAT;
7259 else if ( format == RTAUDIO_FLOAT64 )
7260 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7261
7262 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7263 stream_.deviceFormat[mode] = format;
7264 goto setFormat;
7265 }
7266
7267 // The user requested format is not natively supported by the device.
7268 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7269 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7270 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7271 goto setFormat;
7272 }
7273
7274 deviceFormat = SND_PCM_FORMAT_FLOAT;
7275 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7276 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7277 goto setFormat;
7278 }
7279
7280 deviceFormat = SND_PCM_FORMAT_S32;
7281 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7282 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7283 goto setFormat;
7284 }
7285
7286 deviceFormat = SND_PCM_FORMAT_S24;
7287 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7289 goto setFormat;
7290 }
7291
7292 deviceFormat = SND_PCM_FORMAT_S16;
7293 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7294 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7295 goto setFormat;
7296 }
7297
7298 deviceFormat = SND_PCM_FORMAT_S8;
7299 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7300 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7301 goto setFormat;
7302 }
7303
7304 // If we get here, no supported format was found.
7305 snd_pcm_close( phandle );
7306 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7307 errorText_ = errorStream_.str();
7308 return FAILURE;
7309
7310 setFormat:
7311 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7312 if ( result < 0 ) {
7313 snd_pcm_close( phandle );
7314 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7315 errorText_ = errorStream_.str();
7316 return FAILURE;
7317 }
7318
7319 // Determine whether byte-swaping is necessary.
7320 stream_.doByteSwap[mode] = false;
7321 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7322 result = snd_pcm_format_cpu_endian( deviceFormat );
7323 if ( result == 0 )
7324 stream_.doByteSwap[mode] = true;
7325 else if (result < 0) {
7326 snd_pcm_close( phandle );
7327 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7328 errorText_ = errorStream_.str();
7329 return FAILURE;
7330 }
7331 }
7332
7333 // Set the sample rate.
7334 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7335 if ( result < 0 ) {
7336 snd_pcm_close( phandle );
7337 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7338 errorText_ = errorStream_.str();
7339 return FAILURE;
7340 }
7341
7342 // Determine the number of channels for this device. We support a possible
7343 // minimum device channel number > than the value requested by the user.
7344 stream_.nUserChannels[mode] = channels;
7345 unsigned int value;
7346 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7347 unsigned int deviceChannels = value;
7348 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7349 snd_pcm_close( phandle );
7350 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7351 errorText_ = errorStream_.str();
7352 return FAILURE;
7353 }
7354
7355 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7356 if ( result < 0 ) {
7357 snd_pcm_close( phandle );
7358 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7359 errorText_ = errorStream_.str();
7360 return FAILURE;
7361 }
7362 deviceChannels = value;
7363 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7364 stream_.nDeviceChannels[mode] = deviceChannels;
7365
7366 // Set the device channels.
7367 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7368 if ( result < 0 ) {
7369 snd_pcm_close( phandle );
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7371 errorText_ = errorStream_.str();
7372 return FAILURE;
7373 }
7374
7375 // Set the buffer (or period) size.
7376 int dir = 0;
7377 snd_pcm_uframes_t periodSize = *bufferSize;
7378 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7379 if ( result < 0 ) {
7380 snd_pcm_close( phandle );
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7382 errorText_ = errorStream_.str();
7383 return FAILURE;
7384 }
7385 *bufferSize = periodSize;
7386
7387 // Set the buffer number, which in ALSA is referred to as the "period".
7388 unsigned int periods = 0;
7389 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7390 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7391 if ( periods < 2 ) periods = 4; // a fairly safe default value
7392 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7393 if ( result < 0 ) {
7394 snd_pcm_close( phandle );
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7396 errorText_ = errorStream_.str();
7397 return FAILURE;
7398 }
7399
7400 // If attempting to setup a duplex stream, the bufferSize parameter
7401 // MUST be the same in both directions!
7402 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7403 snd_pcm_close( phandle );
7404 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7405 errorText_ = errorStream_.str();
7406 return FAILURE;
7407 }
7408
7409 stream_.bufferSize = *bufferSize;
7410
7411 // Install the hardware configuration
7412 result = snd_pcm_hw_params( phandle, hw_params );
7413 if ( result < 0 ) {
7414 snd_pcm_close( phandle );
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7416 errorText_ = errorStream_.str();
7417 return FAILURE;
7418 }
7419
7420 #if defined(__RTAUDIO_DEBUG__)
7421 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7422 snd_pcm_hw_params_dump( hw_params, out );
7423 #endif
7424
7425 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7426 snd_pcm_sw_params_t *sw_params = NULL;
7427 snd_pcm_sw_params_alloca( &sw_params );
7428 snd_pcm_sw_params_current( phandle, sw_params );
7429 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7430 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7431 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7432
7433 // The following two settings were suggested by Theo Veenker
7434 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7435 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7436
7437 // here are two options for a fix
7438 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7439 snd_pcm_uframes_t val;
7440 snd_pcm_sw_params_get_boundary( sw_params, &val );
7441 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7442
7443 result = snd_pcm_sw_params( phandle, sw_params );
7444 if ( result < 0 ) {
7445 snd_pcm_close( phandle );
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7447 errorText_ = errorStream_.str();
7448 return FAILURE;
7449 }
7450
7451 #if defined(__RTAUDIO_DEBUG__)
7452 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7453 snd_pcm_sw_params_dump( sw_params, out );
7454 #endif
7455
7456 // Set flags for buffer conversion
7457 stream_.doConvertBuffer[mode] = false;
7458 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7459 stream_.doConvertBuffer[mode] = true;
7460 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7461 stream_.doConvertBuffer[mode] = true;
7462 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7463 stream_.nUserChannels[mode] > 1 )
7464 stream_.doConvertBuffer[mode] = true;
7465
7466 // Allocate the ApiHandle if necessary and then save.
7467 AlsaHandle *apiInfo = 0;
7468 if ( stream_.apiHandle == 0 ) {
7469 try {
7470 apiInfo = (AlsaHandle *) new AlsaHandle;
7471 }
7472 catch ( std::bad_alloc& ) {
7473 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7474 goto error;
7475 }
7476
7477 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7478 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7479 goto error;
7480 }
7481
7482 stream_.apiHandle = (void *) apiInfo;
7483 apiInfo->handles[0] = 0;
7484 apiInfo->handles[1] = 0;
7485 }
7486 else {
7487 apiInfo = (AlsaHandle *) stream_.apiHandle;
7488 }
7489 apiInfo->handles[mode] = phandle;
7490 phandle = 0;
7491
7492 // Allocate necessary internal buffers.
7493 unsigned long bufferBytes;
7494 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7495 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7496 if ( stream_.userBuffer[mode] == NULL ) {
7497 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7498 goto error;
7499 }
7500
7501 if ( stream_.doConvertBuffer[mode] ) {
7502
7503 bool makeBuffer = true;
7504 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7505 if ( mode == INPUT ) {
7506 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7507 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7508 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7509 }
7510 }
7511
7512 if ( makeBuffer ) {
7513 bufferBytes *= *bufferSize;
7514 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7515 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7516 if ( stream_.deviceBuffer == NULL ) {
7517 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7518 goto error;
7519 }
7520 }
7521 }
7522
7523 stream_.sampleRate = sampleRate;
7524 stream_.nBuffers = periods;
7525 stream_.device[mode] = device;
7526 stream_.state = STREAM_STOPPED;
7527
7528 // Setup the buffer conversion information structure.
7529 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7530
7531 // Setup thread if necessary.
7532 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7533 // We had already set up an output stream.
7534 stream_.mode = DUPLEX;
7535 // Link the streams if possible.
7536 apiInfo->synchronized = false;
7537 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7538 apiInfo->synchronized = true;
7539 else {
7540 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7541 error( RtAudioError::WARNING );
7542 }
7543 }
7544 else {
7545 stream_.mode = mode;
7546
7547 // Setup callback thread.
7548 stream_.callbackInfo.object = (void *) this;
7549
7550 // Set the thread attributes for joinable and realtime scheduling
7551 // priority (optional). The higher priority will only take affect
7552 // if the program is run as root or suid. Note, under Linux
7553 // processes with CAP_SYS_NICE privilege, a user can change
7554 // scheduling policy and priority (thus need not be root). See
7555 // POSIX "capabilities".
7556 pthread_attr_t attr;
7557 pthread_attr_init( &attr );
7558 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7559
7560 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7561 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7562 // We previously attempted to increase the audio callback priority
7563 // to SCHED_RR here via the attributes. However, while no errors
7564 // were reported in doing so, it did not work. So, now this is
7565 // done in the alsaCallbackHandler function.
7566 stream_.callbackInfo.doRealtime = true;
7567 int priority = options->priority;
7568 int min = sched_get_priority_min( SCHED_RR );
7569 int max = sched_get_priority_max( SCHED_RR );
7570 if ( priority < min ) priority = min;
7571 else if ( priority > max ) priority = max;
7572 stream_.callbackInfo.priority = priority;
7573 }
7574 #endif
7575
7576 stream_.callbackInfo.isRunning = true;
7577 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7578 pthread_attr_destroy( &attr );
7579 if ( result ) {
7580 stream_.callbackInfo.isRunning = false;
7581 errorText_ = "RtApiAlsa::error creating callback thread!";
7582 goto error;
7583 }
7584 }
7585
7586 return SUCCESS;
7587
7588 error:
7589 if ( apiInfo ) {
7590 pthread_cond_destroy( &apiInfo->runnable_cv );
7591 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7592 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7593 delete apiInfo;
7594 stream_.apiHandle = 0;
7595 }
7596
7597 if ( phandle) snd_pcm_close( phandle );
7598
7599 for ( int i=0; i<2; i++ ) {
7600 if ( stream_.userBuffer[i] ) {
7601 free( stream_.userBuffer[i] );
7602 stream_.userBuffer[i] = 0;
7603 }
7604 }
7605
7606 if ( stream_.deviceBuffer ) {
7607 free( stream_.deviceBuffer );
7608 stream_.deviceBuffer = 0;
7609 }
7610
7611 stream_.state = STREAM_CLOSED;
7612 return FAILURE;
7613 }
7614
7615 void RtApiAlsa :: closeStream()
7616 {
7617 if ( stream_.state == STREAM_CLOSED ) {
7618 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7619 error( RtAudioError::WARNING );
7620 return;
7621 }
7622
7623 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7624 stream_.callbackInfo.isRunning = false;
7625 MUTEX_LOCK( &stream_.mutex );
7626 if ( stream_.state == STREAM_STOPPED ) {
7627 apiInfo->runnable = true;
7628 pthread_cond_signal( &apiInfo->runnable_cv );
7629 }
7630 MUTEX_UNLOCK( &stream_.mutex );
7631 pthread_join( stream_.callbackInfo.thread, NULL );
7632
7633 if ( stream_.state == STREAM_RUNNING ) {
7634 stream_.state = STREAM_STOPPED;
7635 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7636 snd_pcm_drop( apiInfo->handles[0] );
7637 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7638 snd_pcm_drop( apiInfo->handles[1] );
7639 }
7640
7641 if ( apiInfo ) {
7642 pthread_cond_destroy( &apiInfo->runnable_cv );
7643 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7644 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7645 delete apiInfo;
7646 stream_.apiHandle = 0;
7647 }
7648
7649 for ( int i=0; i<2; i++ ) {
7650 if ( stream_.userBuffer[i] ) {
7651 free( stream_.userBuffer[i] );
7652 stream_.userBuffer[i] = 0;
7653 }
7654 }
7655
7656 if ( stream_.deviceBuffer ) {
7657 free( stream_.deviceBuffer );
7658 stream_.deviceBuffer = 0;
7659 }
7660
7661 stream_.mode = UNINITIALIZED;
7662 stream_.state = STREAM_CLOSED;
7663 }
7664
// Start a stopped stream: prepare the pcm device(s) if needed, mark the
// stream running and wake the callback thread.
// Emits a WARNING (and returns) if the stream is already running.
void RtApiAlsa :: startStream()
{
  // This method calls snd_pcm_prepare if the device isn't already in that state.

  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  snd_pcm_state_t state;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  // Prepare the playback device (handle[0]) if it is not already prepared.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    state = snd_pcm_state( handle[0] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[0] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  // Prepare the capture device (handle[1]) unless the two directions are
  // linked (synchronized), in which case preparing the output suffices.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
    state = snd_pcm_state( handle[1] );
    if ( state != SND_PCM_STATE_PREPARED ) {
      result = snd_pcm_prepare( handle[1] );
      if ( result < 0 ) {
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        goto unlock;
      }
    }
  }

  stream_.state = STREAM_RUNNING;

 unlock:
  // Wake the callback thread waiting on runnable_cv (it re-checks
  // stream_.state after waking, so signaling on the error path is safe).
  apiInfo->runnable = true;
  pthread_cond_signal( &apiInfo->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7717
// Stop a running stream, letting any buffered output play out (drain).
// Emits a WARNING (and returns) if the stream is already stopped.
void RtApiAlsa :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flag the state change before taking the mutex so the callback thread
  // stops processing as soon as possible.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Linked (synchronized) streams are dropped together; otherwise drain
    // the output so already-written frames finish playing.
    if ( apiInfo->synchronized )
      result = snd_pcm_drop( handle[0] );
    else
      result = snd_pcm_drain( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Unlinked capture is simply dropped; captured frames are discarded.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7761
// Stop a running stream immediately, discarding any buffered audio (drop) —
// unlike stopStream(), which drains pending output first.
// Emits a WARNING (and returns) if the stream is already stopped.
void RtApiAlsa :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Flag the state change before taking the mutex so the callback thread
  // stops processing as soon as possible.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // Drop pending output frames without playing them.
    result = snd_pcm_drop( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Drop the capture side too, unless it is linked to the output
  // (synchronized), in which case the drop above covers both.
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
7802
void RtApiAlsa :: callbackEvent()
{
  // One iteration of the audio processing loop: park while stopped,
  // invoke the user callback, then read/write one buffer from/to the
  // ALSA device(s), handling xruns (overrun/underrun) along the way.
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Block on the condition variable until the stream is made runnable
    // again (startStream) or shut down.
    MUTEX_LOCK( &stream_.mutex );
    while ( !apiInfo->runnable )
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Report any xrun seen since the last callback to the user, then clear
  // the flag: xrun[0] = output underflow, xrun[1] = input overflow.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    apiInfo->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    apiInfo->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int channels;
  snd_pcm_t **handle;
  snd_pcm_sframes_t frames;
  RtAudioFormat format;
  handle = (snd_pcm_t **) apiInfo->handles;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when a format/channel
    // conversion is needed, otherwise straight into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      channels = stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      channels = stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[1] )
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: build one pointer per channel into the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or overrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Overrun: flag for the next callback and re-prepare the device.
          apiInfo->xrun[1] = true;
          result = snd_pcm_prepare( handle[1] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      // Input failed, but still attempt the output half of a duplex stream.
      goto tryOutput;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    // Check stream latency
    result = snd_pcm_delay( handle[1], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
  }

 tryOutput:

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      channels = stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      channels = stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);

    // Write samples to device in interleaved/non-interleaved format.
    if ( stream_.deviceInterleaved[0] )
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
    else {
      // Non-interleaved: one pointer per channel, channels stored
      // contiguously one after the other in the buffer.
      void *bufs[channels];
      size_t offset = stream_.bufferSize * formatBytes( format );
      for ( int i=0; i<channels; i++ )
        bufs[i] = (void *) (buffer + (i * offset));
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
    }

    if ( result < (int) stream_.bufferSize ) {
      // Either an error or underrun occured.
      if ( result == -EPIPE ) {
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
        if ( state == SND_PCM_STATE_XRUN ) {
          // Underrun: flag it and re-prepare the device for the next cycle.
          apiInfo->xrun[0] = true;
          result = snd_pcm_prepare( handle[0] );
          if ( result < 0 ) {
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
            errorText_ = errorStream_.str();
          }
        }
        else {
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
          errorText_ = errorStream_.str();
        }
      }
      else {
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
      }
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Check stream latency
    result = snd_pcm_delay( handle[0], &frames );
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // A callback return value of 1 requests a graceful (draining) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
7988
7989 static void *alsaCallbackHandler( void *ptr )
7990 {
7991 CallbackInfo *info = (CallbackInfo *) ptr;
7992 RtApiAlsa *object = (RtApiAlsa *) info->object;
7993 bool *isRunning = &info->isRunning;
7994
7995 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7996 if ( &info->doRealtime ) {
7997 pthread_t tID = pthread_self(); // ID of this thread
7998 sched_param prio = { info->priority }; // scheduling priority of thread
7999 pthread_setschedparam( tID, SCHED_RR, &prio );
8000 }
8001 #endif
8002
8003 while ( *isRunning == true ) {
8004 pthread_testcancel();
8005 object->callbackEvent();
8006 }
8007
8008 pthread_exit( NULL );
8009 }
8010
8011 //******************** End of __LINUX_ALSA__ *********************//
8012 #endif
8013
8014 #if defined(__LINUX_PULSE__)
8015
8016 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8017 // and Tristan Matthews.
8018
8019 #include <pulse/error.h>
8020 #include <pulse/simple.h>
8021 #include <cstdio>
8022
// Sample rates offered by the PulseAudio backend, terminated by a 0 sentinel.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};

// Pairing of an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};

// Formats passed through natively (little-endian only); the
// {0, PA_SAMPLE_INVALID} entry terminates the table.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8036
// Per-stream state for the PulseAudio backend.
struct PulseAudioHandle {
  pa_simple *s_play;           // playback connection to the server (0 if none)
  pa_simple *s_rec;            // record connection to the server (0 if none)
  pthread_t thread;            // callback thread running pulseaudio_callback()
  pthread_cond_t runnable_cv;  // signaled to wake the parked callback thread
  bool runnable;               // true while the callback loop may process audio
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8045
8046 RtApiPulse::~RtApiPulse()
8047 {
8048 if ( stream_.state != STREAM_CLOSED )
8049 closeStream();
8050 }
8051
unsigned int RtApiPulse::getDeviceCount( void )
{
  // The PulseAudio backend always reports exactly one (virtual) device.
  return 1;
}
8056
8057 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8058 {
8059 RtAudio::DeviceInfo info;
8060 info.probed = true;
8061 info.name = "PulseAudio";
8062 info.outputChannels = 2;
8063 info.inputChannels = 2;
8064 info.duplexChannels = 2;
8065 info.isDefaultOutput = true;
8066 info.isDefaultInput = true;
8067
8068 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8069 info.sampleRates.push_back( *sr );
8070
8071 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8072
8073 return info;
8074 }
8075
8076 static void *pulseaudio_callback( void * user )
8077 {
8078 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8079 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8080 volatile bool *isRunning = &cbi->isRunning;
8081
8082 while ( *isRunning ) {
8083 pthread_testcancel();
8084 context->callbackEvent();
8085 }
8086
8087 pthread_exit( NULL );
8088 }
8089
void RtApiPulse::closeStream( void )
{
  // Tear down the stream: stop and join the callback thread, disconnect
  // from the PulseAudio server, and release all buffers and the handle.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  stream_.callbackInfo.isRunning = false;
  if ( pah ) {
    MUTEX_LOCK( &stream_.mutex );
    // If the callback thread is parked in callbackEvent() on the condition
    // variable, wake it so it can observe isRunning == false and exit.
    if ( stream_.state == STREAM_STOPPED ) {
      pah->runnable = true;
      pthread_cond_signal( &pah->runnable_cv );
    }
    MUTEX_UNLOCK( &stream_.mutex );

    pthread_join( pah->thread, 0 );
    if ( pah->s_play ) {
      // Discard any still-queued playback audio before freeing the connection.
      pa_simple_flush( pah->s_play, NULL );
      pa_simple_free( pah->s_play );
    }
    if ( pah->s_rec )
      pa_simple_free( pah->s_rec );

    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  // Release the user-side sample buffers (one per direction).
  if ( stream_.userBuffer[0] ) {
    free( stream_.userBuffer[0] );
    stream_.userBuffer[0] = 0;
  }
  if ( stream_.userBuffer[1] ) {
    free( stream_.userBuffer[1] );
    stream_.userBuffer[1] = 0;
  }

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
}
8128
void RtApiPulse::callbackEvent( void )
{
  // One cycle of the PulseAudio processing loop: park while stopped, run
  // the user callback, then push/pull one buffer to/from the server via
  // the blocking simple API.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Block on the condition variable until startStream() wakes us.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  // A callback return value of 2 requests an immediate abort.
  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // When converting, the server-facing buffer is the shared device buffer;
  // otherwise samples move directly through the user buffers.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  // The state might have changed while waiting on the mutex.
  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
      convertBuffer( stream_.deviceBuffer,
                     stream_.userBuffer[OUTPUT],
                     stream_.convertInfo[OUTPUT] );
      bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
      bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking write of one buffer to the playback connection.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one buffer from the record connection.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  // A callback return value of 1 requests a graceful (draining) stop.
  if ( doStopStream == 1 )
    stopStream();
}
8220
void RtApiPulse::startStream( void )
{
  // Transition the stream to RUNNING and wake the callback thread that
  // is parked in callbackEvent() on the runnable condition variable.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::startStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiPulse::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  stream_.state = STREAM_RUNNING;

  // Both the flag and the signal happen under the mutex so the waiting
  // thread cannot miss the wakeup.
  pah->runnable = true;
  pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );
}
8244
void RtApiPulse::stopStream( void )
{
  // Stop the stream gracefully: drain (finish playing) any audio still
  // queued on the playback connection before returning.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Mark stopped before taking the lock so the callback thread parks.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // pa_simple_drain blocks until all queued playback audio is played.
    if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8278
void RtApiPulse::abortStream( void )
{
  // Stop the stream immediately: flush (discard) any audio still queued
  // on the playback connection, unlike stopStream() which drains it.
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // Mark stopped before taking the lock so the callback thread parks.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah && pah->s_play ) {
    int pa_error;
    // pa_simple_flush discards queued playback audio without playing it.
    if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      MUTEX_UNLOCK( &stream_.mutex );
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
8312
8313 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8314 unsigned int channels, unsigned int firstChannel,
8315 unsigned int sampleRate, RtAudioFormat format,
8316 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8317 {
8318 PulseAudioHandle *pah = 0;
8319 unsigned long bufferBytes = 0;
8320 pa_sample_spec ss;
8321
8322 if ( device != 0 ) return false;
8323 if ( mode != INPUT && mode != OUTPUT ) return false;
8324 if ( channels != 1 && channels != 2 ) {
8325 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8326 return false;
8327 }
8328 ss.channels = channels;
8329
8330 if ( firstChannel != 0 ) return false;
8331
8332 bool sr_found = false;
8333 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8334 if ( sampleRate == *sr ) {
8335 sr_found = true;
8336 stream_.sampleRate = sampleRate;
8337 ss.rate = sampleRate;
8338 break;
8339 }
8340 }
8341 if ( !sr_found ) {
8342 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8343 return false;
8344 }
8345
8346 bool sf_found = 0;
8347 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8348 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8349 if ( format == sf->rtaudio_format ) {
8350 sf_found = true;
8351 stream_.userFormat = sf->rtaudio_format;
8352 stream_.deviceFormat[mode] = stream_.userFormat;
8353 ss.format = sf->pa_format;
8354 break;
8355 }
8356 }
8357 if ( !sf_found ) { // Use internal data format conversion.
8358 stream_.userFormat = format;
8359 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8360 ss.format = PA_SAMPLE_FLOAT32LE;
8361 }
8362
8363 // Set other stream parameters.
8364 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8365 else stream_.userInterleaved = true;
8366 stream_.deviceInterleaved[mode] = true;
8367 stream_.nBuffers = 1;
8368 stream_.doByteSwap[mode] = false;
8369 stream_.nUserChannels[mode] = channels;
8370 stream_.nDeviceChannels[mode] = channels + firstChannel;
8371 stream_.channelOffset[mode] = 0;
8372 std::string streamName = "RtAudio";
8373
8374 // Set flags for buffer conversion.
8375 stream_.doConvertBuffer[mode] = false;
8376 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8377 stream_.doConvertBuffer[mode] = true;
8378 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8379 stream_.doConvertBuffer[mode] = true;
8380
8381 // Allocate necessary internal buffers.
8382 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8383 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8384 if ( stream_.userBuffer[mode] == NULL ) {
8385 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8386 goto error;
8387 }
8388 stream_.bufferSize = *bufferSize;
8389
8390 if ( stream_.doConvertBuffer[mode] ) {
8391
8392 bool makeBuffer = true;
8393 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8394 if ( mode == INPUT ) {
8395 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8396 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8397 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8398 }
8399 }
8400
8401 if ( makeBuffer ) {
8402 bufferBytes *= *bufferSize;
8403 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8404 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8405 if ( stream_.deviceBuffer == NULL ) {
8406 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8407 goto error;
8408 }
8409 }
8410 }
8411
8412 stream_.device[mode] = device;
8413
8414 // Setup the buffer conversion information structure.
8415 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8416
8417 if ( !stream_.apiHandle ) {
8418 PulseAudioHandle *pah = new PulseAudioHandle;
8419 if ( !pah ) {
8420 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8421 goto error;
8422 }
8423
8424 stream_.apiHandle = pah;
8425 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8426 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8427 goto error;
8428 }
8429 }
8430 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8431
8432 int error;
8433 if ( !options->streamName.empty() ) streamName = options->streamName;
8434 switch ( mode ) {
8435 case INPUT:
8436 pa_buffer_attr buffer_attr;
8437 buffer_attr.fragsize = bufferBytes;
8438 buffer_attr.maxlength = -1;
8439
8440 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8441 if ( !pah->s_rec ) {
8442 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8443 goto error;
8444 }
8445 break;
8446 case OUTPUT:
8447 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8448 if ( !pah->s_play ) {
8449 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8450 goto error;
8451 }
8452 break;
8453 default:
8454 goto error;
8455 }
8456
8457 if ( stream_.mode == UNINITIALIZED )
8458 stream_.mode = mode;
8459 else if ( stream_.mode == mode )
8460 goto error;
8461 else
8462 stream_.mode = DUPLEX;
8463
8464 if ( !stream_.callbackInfo.isRunning ) {
8465 stream_.callbackInfo.object = this;
8466 stream_.callbackInfo.isRunning = true;
8467 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8468 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8469 goto error;
8470 }
8471 }
8472
8473 stream_.state = STREAM_STOPPED;
8474 return true;
8475
8476 error:
8477 if ( pah && stream_.callbackInfo.isRunning ) {
8478 pthread_cond_destroy( &pah->runnable_cv );
8479 delete pah;
8480 stream_.apiHandle = 0;
8481 }
8482
8483 for ( int i=0; i<2; i++ ) {
8484 if ( stream_.userBuffer[i] ) {
8485 free( stream_.userBuffer[i] );
8486 stream_.userBuffer[i] = 0;
8487 }
8488 }
8489
8490 if ( stream_.deviceBuffer ) {
8491 free( stream_.deviceBuffer );
8492 stream_.deviceBuffer = 0;
8493 }
8494
8495 return FAILURE;
8496 }
8497
8498 //******************** End of __LINUX_PULSE__ *********************//
8499 #endif
8500
8501 #if defined(__LINUX_OSS__)
8502
8503 #include <unistd.h>
8504 #include <sys/ioctl.h>
8505 #include <unistd.h>
8506 #include <fcntl.h>
8507 #include <sys/soundcard.h>
8508 #include <errno.h>
8509 #include <math.h>
8510
8511 static void *ossCallbackHandler(void * ptr);
8512
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2]; // device ids (file descriptors); [0] = output, [1] = input
  bool xrun[2]; // over/underflow flags; [0] = output, [1] = input
  bool triggered; // presumably set once the device trigger has fired -- confirm in start/stop code
  pthread_cond_t runnable; // condition variable for waking the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8524
RtApiOss :: RtApiOss()
{
  // Nothing to do here.  No OSS-specific construction is required.
}
8529
8530 RtApiOss :: ~RtApiOss()
8531 {
8532 if ( stream_.state != STREAM_CLOSED ) closeStream();
8533 }
8534
8535 unsigned int RtApiOss :: getDeviceCount( void )
8536 {
8537 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8538 if ( mixerfd == -1 ) {
8539 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8540 error( RtAudioError::WARNING );
8541 return 0;
8542 }
8543
8544 oss_sysinfo sysinfo;
8545 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8546 close( mixerfd );
8547 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8548 error( RtAudioError::WARNING );
8549 return 0;
8550 }
8551
8552 close( mixerfd );
8553 return sysinfo.numaudios;
8554 }
8555
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
{
  // Probe OSS device `device` via the mixer (SNDCTL_AUDIOINFO) and report
  // its channel counts, supported formats and sample rates.
  RtAudio::DeviceInfo info;
  info.probed = false;  // stays false unless probing fully succeeds

  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
    return info;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
    return info;
  }

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );  // the mixer fd is no longer needed after this ioctl
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe channels
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // NOTE(review): PCM_CAP_DUPLEX is re-tested redundantly inside this block.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  }

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The driver reports an explicit rate list; keep the rates that
    // intersect with RtAudio's SAMPLE_RATES table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          break;
        }
      }
    }
  }
  else {
    // Check min and max rate values;
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
        info.sampleRates.push_back( SAMPLE_RATES[k] );
    }
  }

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }
  else {
    info.probed = true;
    info.name = ainfo.name;
  }

  return info;
}
8664
8665
8666 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8667 unsigned int firstChannel, unsigned int sampleRate,
8668 RtAudioFormat format, unsigned int *bufferSize,
8669 RtAudio::StreamOptions *options )
8670 {
8671 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8672 if ( mixerfd == -1 ) {
8673 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8674 return FAILURE;
8675 }
8676
8677 oss_sysinfo sysinfo;
8678 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8679 if ( result == -1 ) {
8680 close( mixerfd );
8681 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8682 return FAILURE;
8683 }
8684
8685 unsigned nDevices = sysinfo.numaudios;
8686 if ( nDevices == 0 ) {
8687 // This should not happen because a check is made before this function is called.
8688 close( mixerfd );
8689 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8690 return FAILURE;
8691 }
8692
8693 if ( device >= nDevices ) {
8694 // This should not happen because a check is made before this function is called.
8695 close( mixerfd );
8696 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8697 return FAILURE;
8698 }
8699
8700 oss_audioinfo ainfo;
8701 ainfo.dev = device;
8702 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8703 close( mixerfd );
8704 if ( result == -1 ) {
8705 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8706 errorText_ = errorStream_.str();
8707 return FAILURE;
8708 }
8709
8710 // Check if device supports input or output
8711 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8712 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8713 if ( mode == OUTPUT )
8714 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8715 else
8716 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8717 errorText_ = errorStream_.str();
8718 return FAILURE;
8719 }
8720
8721 int flags = 0;
8722 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8723 if ( mode == OUTPUT )
8724 flags |= O_WRONLY;
8725 else { // mode == INPUT
8726 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8727 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8728 close( handle->id[0] );
8729 handle->id[0] = 0;
8730 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8731 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8732 errorText_ = errorStream_.str();
8733 return FAILURE;
8734 }
8735 // Check that the number previously set channels is the same.
8736 if ( stream_.nUserChannels[0] != channels ) {
8737 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8738 errorText_ = errorStream_.str();
8739 return FAILURE;
8740 }
8741 flags |= O_RDWR;
8742 }
8743 else
8744 flags |= O_RDONLY;
8745 }
8746
8747 // Set exclusive access if specified.
8748 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8749
8750 // Try to open the device.
8751 int fd;
8752 fd = open( ainfo.devnode, flags, 0 );
8753 if ( fd == -1 ) {
8754 if ( errno == EBUSY )
8755 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8756 else
8757 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8758 errorText_ = errorStream_.str();
8759 return FAILURE;
8760 }
8761
8762 // For duplex operation, specifically set this mode (this doesn't seem to work).
8763 /*
8764 if ( flags | O_RDWR ) {
8765 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8766 if ( result == -1) {
8767 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8768 errorText_ = errorStream_.str();
8769 return FAILURE;
8770 }
8771 }
8772 */
8773
8774 // Check the device channel support.
8775 stream_.nUserChannels[mode] = channels;
8776 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8777 close( fd );
8778 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8779 errorText_ = errorStream_.str();
8780 return FAILURE;
8781 }
8782
8783 // Set the number of channels.
8784 int deviceChannels = channels + firstChannel;
8785 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8786 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8787 close( fd );
8788 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8789 errorText_ = errorStream_.str();
8790 return FAILURE;
8791 }
8792 stream_.nDeviceChannels[mode] = deviceChannels;
8793
8794 // Get the data format mask
8795 int mask;
8796 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8797 if ( result == -1 ) {
8798 close( fd );
8799 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8800 errorText_ = errorStream_.str();
8801 return FAILURE;
8802 }
8803
8804 // Determine how to set the device format.
8805 stream_.userFormat = format;
8806 int deviceFormat = -1;
8807 stream_.doByteSwap[mode] = false;
8808 if ( format == RTAUDIO_SINT8 ) {
8809 if ( mask & AFMT_S8 ) {
8810 deviceFormat = AFMT_S8;
8811 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8812 }
8813 }
8814 else if ( format == RTAUDIO_SINT16 ) {
8815 if ( mask & AFMT_S16_NE ) {
8816 deviceFormat = AFMT_S16_NE;
8817 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8818 }
8819 else if ( mask & AFMT_S16_OE ) {
8820 deviceFormat = AFMT_S16_OE;
8821 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8822 stream_.doByteSwap[mode] = true;
8823 }
8824 }
8825 else if ( format == RTAUDIO_SINT24 ) {
8826 if ( mask & AFMT_S24_NE ) {
8827 deviceFormat = AFMT_S24_NE;
8828 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8829 }
8830 else if ( mask & AFMT_S24_OE ) {
8831 deviceFormat = AFMT_S24_OE;
8832 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8833 stream_.doByteSwap[mode] = true;
8834 }
8835 }
8836 else if ( format == RTAUDIO_SINT32 ) {
8837 if ( mask & AFMT_S32_NE ) {
8838 deviceFormat = AFMT_S32_NE;
8839 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8840 }
8841 else if ( mask & AFMT_S32_OE ) {
8842 deviceFormat = AFMT_S32_OE;
8843 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8844 stream_.doByteSwap[mode] = true;
8845 }
8846 }
8847
8848 if ( deviceFormat == -1 ) {
8849 // The user requested format is not natively supported by the device.
8850 if ( mask & AFMT_S16_NE ) {
8851 deviceFormat = AFMT_S16_NE;
8852 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8853 }
8854 else if ( mask & AFMT_S32_NE ) {
8855 deviceFormat = AFMT_S32_NE;
8856 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8857 }
8858 else if ( mask & AFMT_S24_NE ) {
8859 deviceFormat = AFMT_S24_NE;
8860 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8861 }
8862 else if ( mask & AFMT_S16_OE ) {
8863 deviceFormat = AFMT_S16_OE;
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8865 stream_.doByteSwap[mode] = true;
8866 }
8867 else if ( mask & AFMT_S32_OE ) {
8868 deviceFormat = AFMT_S32_OE;
8869 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8870 stream_.doByteSwap[mode] = true;
8871 }
8872 else if ( mask & AFMT_S24_OE ) {
8873 deviceFormat = AFMT_S24_OE;
8874 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8875 stream_.doByteSwap[mode] = true;
8876 }
8877 else if ( mask & AFMT_S8) {
8878 deviceFormat = AFMT_S8;
8879 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8880 }
8881 }
8882
8883 if ( stream_.deviceFormat[mode] == 0 ) {
8884 // This really shouldn't happen ...
8885 close( fd );
8886 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8887 errorText_ = errorStream_.str();
8888 return FAILURE;
8889 }
8890
8891 // Set the data format.
8892 int temp = deviceFormat;
8893 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8894 if ( result == -1 || deviceFormat != temp ) {
8895 close( fd );
8896 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8897 errorText_ = errorStream_.str();
8898 return FAILURE;
8899 }
8900
8901 // Attempt to set the buffer size. According to OSS, the minimum
8902 // number of buffers is two. The supposed minimum buffer size is 16
8903 // bytes, so that will be our lower bound. The argument to this
8904 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8905 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8906 // We'll check the actual value used near the end of the setup
8907 // procedure.
8908 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8909 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8910 int buffers = 0;
8911 if ( options ) buffers = options->numberOfBuffers;
8912 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8913 if ( buffers < 2 ) buffers = 3;
8914 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8915 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8916 if ( result == -1 ) {
8917 close( fd );
8918 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8919 errorText_ = errorStream_.str();
8920 return FAILURE;
8921 }
8922 stream_.nBuffers = buffers;
8923
8924 // Save buffer size (in sample frames).
8925 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8926 stream_.bufferSize = *bufferSize;
8927
8928 // Set the sample rate.
8929 int srate = sampleRate;
8930 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8931 if ( result == -1 ) {
8932 close( fd );
8933 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8934 errorText_ = errorStream_.str();
8935 return FAILURE;
8936 }
8937
8938 // Verify the sample rate setup worked.
8939 if ( abs( srate - sampleRate ) > 100 ) {
8940 close( fd );
8941 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8942 errorText_ = errorStream_.str();
8943 return FAILURE;
8944 }
8945 stream_.sampleRate = sampleRate;
8946
8947 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8948 // We're doing duplex setup here.
8949 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8950 stream_.nDeviceChannels[0] = deviceChannels;
8951 }
8952
8953 // Set interleaving parameters.
8954 stream_.userInterleaved = true;
8955 stream_.deviceInterleaved[mode] = true;
8956 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8957 stream_.userInterleaved = false;
8958
8959 // Set flags for buffer conversion
8960 stream_.doConvertBuffer[mode] = false;
8961 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8962 stream_.doConvertBuffer[mode] = true;
8963 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8964 stream_.doConvertBuffer[mode] = true;
8965 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8966 stream_.nUserChannels[mode] > 1 )
8967 stream_.doConvertBuffer[mode] = true;
8968
8969 // Allocate the stream handles if necessary and then save.
8970 if ( stream_.apiHandle == 0 ) {
8971 try {
8972 handle = new OssHandle;
8973 }
8974 catch ( std::bad_alloc& ) {
8975 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8976 goto error;
8977 }
8978
8979 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8980 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8981 goto error;
8982 }
8983
8984 stream_.apiHandle = (void *) handle;
8985 }
8986 else {
8987 handle = (OssHandle *) stream_.apiHandle;
8988 }
8989 handle->id[mode] = fd;
8990
8991 // Allocate necessary internal buffers.
8992 unsigned long bufferBytes;
8993 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8994 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8995 if ( stream_.userBuffer[mode] == NULL ) {
8996 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
8997 goto error;
8998 }
8999
9000 if ( stream_.doConvertBuffer[mode] ) {
9001
9002 bool makeBuffer = true;
9003 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9004 if ( mode == INPUT ) {
9005 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9006 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9007 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9008 }
9009 }
9010
9011 if ( makeBuffer ) {
9012 bufferBytes *= *bufferSize;
9013 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9014 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9015 if ( stream_.deviceBuffer == NULL ) {
9016 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9017 goto error;
9018 }
9019 }
9020 }
9021
9022 stream_.device[mode] = device;
9023 stream_.state = STREAM_STOPPED;
9024
9025 // Setup the buffer conversion information structure.
9026 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9027
9028 // Setup thread if necessary.
9029 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9030 // We had already set up an output stream.
9031 stream_.mode = DUPLEX;
9032 if ( stream_.device[0] == device ) handle->id[0] = fd;
9033 }
9034 else {
9035 stream_.mode = mode;
9036
9037 // Setup callback thread.
9038 stream_.callbackInfo.object = (void *) this;
9039
9040 // Set the thread attributes for joinable and realtime scheduling
9041 // priority. The higher priority will only take affect if the
9042 // program is run as root or suid.
9043 pthread_attr_t attr;
9044 pthread_attr_init( &attr );
9045 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9046 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9047 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9048 struct sched_param param;
9049 int priority = options->priority;
9050 int min = sched_get_priority_min( SCHED_RR );
9051 int max = sched_get_priority_max( SCHED_RR );
9052 if ( priority < min ) priority = min;
9053 else if ( priority > max ) priority = max;
9054 param.sched_priority = priority;
9055 pthread_attr_setschedparam( &attr, &param );
9056 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9057 }
9058 else
9059 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9060 #else
9061 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9062 #endif
9063
9064 stream_.callbackInfo.isRunning = true;
9065 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9066 pthread_attr_destroy( &attr );
9067 if ( result ) {
9068 stream_.callbackInfo.isRunning = false;
9069 errorText_ = "RtApiOss::error creating callback thread!";
9070 goto error;
9071 }
9072 }
9073
9074 return SUCCESS;
9075
9076 error:
9077 if ( handle ) {
9078 pthread_cond_destroy( &handle->runnable );
9079 if ( handle->id[0] ) close( handle->id[0] );
9080 if ( handle->id[1] ) close( handle->id[1] );
9081 delete handle;
9082 stream_.apiHandle = 0;
9083 }
9084
9085 for ( int i=0; i<2; i++ ) {
9086 if ( stream_.userBuffer[i] ) {
9087 free( stream_.userBuffer[i] );
9088 stream_.userBuffer[i] = 0;
9089 }
9090 }
9091
9092 if ( stream_.deviceBuffer ) {
9093 free( stream_.deviceBuffer );
9094 stream_.deviceBuffer = 0;
9095 }
9096
9097 return FAILURE;
9098 }
9099
9100 void RtApiOss :: closeStream()
9101 {
9102 if ( stream_.state == STREAM_CLOSED ) {
9103 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9104 error( RtAudioError::WARNING );
9105 return;
9106 }
9107
9108 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9109 stream_.callbackInfo.isRunning = false;
9110 MUTEX_LOCK( &stream_.mutex );
9111 if ( stream_.state == STREAM_STOPPED )
9112 pthread_cond_signal( &handle->runnable );
9113 MUTEX_UNLOCK( &stream_.mutex );
9114 pthread_join( stream_.callbackInfo.thread, NULL );
9115
9116 if ( stream_.state == STREAM_RUNNING ) {
9117 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9118 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9119 else
9120 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9121 stream_.state = STREAM_STOPPED;
9122 }
9123
9124 if ( handle ) {
9125 pthread_cond_destroy( &handle->runnable );
9126 if ( handle->id[0] ) close( handle->id[0] );
9127 if ( handle->id[1] ) close( handle->id[1] );
9128 delete handle;
9129 stream_.apiHandle = 0;
9130 }
9131
9132 for ( int i=0; i<2; i++ ) {
9133 if ( stream_.userBuffer[i] ) {
9134 free( stream_.userBuffer[i] );
9135 stream_.userBuffer[i] = 0;
9136 }
9137 }
9138
9139 if ( stream_.deviceBuffer ) {
9140 free( stream_.deviceBuffer );
9141 stream_.deviceBuffer = 0;
9142 }
9143
9144 stream_.mode = UNINITIALIZED;
9145 stream_.state = STREAM_CLOSED;
9146 }
9147
9148 void RtApiOss :: startStream()
9149 {
9150 verifyStream();
9151 if ( stream_.state == STREAM_RUNNING ) {
9152 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9153 error( RtAudioError::WARNING );
9154 return;
9155 }
9156
9157 MUTEX_LOCK( &stream_.mutex );
9158
9159 stream_.state = STREAM_RUNNING;
9160
9161 // No need to do anything else here ... OSS automatically starts
9162 // when fed samples.
9163
9164 MUTEX_UNLOCK( &stream_.mutex );
9165
9166 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9167 pthread_cond_signal( &handle->runnable );
9168 }
9169
// Stop a running stream gracefully: drain the output by writing silence,
// then halt the device(s). Safe to call from the callback thread.
void RtApiOss :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times so queued audio plays out
    // as silence rather than being cut off or repeating stale data.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Use whichever buffer actually feeds the device: the conversion
    // (device) buffer when format/channel conversion is active, else
    // the user buffer directly.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Write nBuffers+1 buffers of silence to cover all device fragments.
    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    // Require a fresh duplex trigger sequence on the next start.
    handle->triggered = false;
  }

  // Halt the input fd too, unless duplex on a single shared fd (already halted).
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  // Always mark stopped and release the lock, even on ioctl failure.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
9241
9242 void RtApiOss :: abortStream()
9243 {
9244 verifyStream();
9245 if ( stream_.state == STREAM_STOPPED ) {
9246 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9247 error( RtAudioError::WARNING );
9248 return;
9249 }
9250
9251 MUTEX_LOCK( &stream_.mutex );
9252
9253 // The state might change while waiting on a mutex.
9254 if ( stream_.state == STREAM_STOPPED ) {
9255 MUTEX_UNLOCK( &stream_.mutex );
9256 return;
9257 }
9258
9259 int result = 0;
9260 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9261 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9262 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9263 if ( result == -1 ) {
9264 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9265 errorText_ = errorStream_.str();
9266 goto unlock;
9267 }
9268 handle->triggered = false;
9269 }
9270
9271 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9272 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9273 if ( result == -1 ) {
9274 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9275 errorText_ = errorStream_.str();
9276 goto unlock;
9277 }
9278 }
9279
9280 unlock:
9281 stream_.state = STREAM_STOPPED;
9282 MUTEX_UNLOCK( &stream_.mutex );
9283
9284 if ( result != -1 ) return;
9285 error( RtAudioError::SYSTEM_ERROR );
9286 }
9287
// One iteration of the audio callback loop: invoke the user callback,
// then write output samples to / read input samples from the device,
// performing format conversion and byte swapping as configured.
// Runs on the dedicated thread created in probeDeviceOpen().
void RtApiOss :: callbackEvent()
{
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream()/closeStream()
    // signals; if we were woken for anything but RUNNING, bail out.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any xrun flags set by earlier failed read/write calls.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Return value 2 requests an immediate abort (no output drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex pass: disable triggers, pre-load one output buffer,
      // then enable input+output together so both directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters: read into the device buffer when conversion is
    // needed, otherwise directly into the user buffer.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream time and honor a deferred stop request (return 1).
  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
}
9419
9420 static void *ossCallbackHandler( void *ptr )
9421 {
9422 CallbackInfo *info = (CallbackInfo *) ptr;
9423 RtApiOss *object = (RtApiOss *) info->object;
9424 bool *isRunning = &info->isRunning;
9425
9426 while ( *isRunning == true ) {
9427 pthread_testcancel();
9428 object->callbackEvent();
9429 }
9430
9431 pthread_exit( NULL );
9432 }
9433
9434 //******************** End of __LINUX_OSS__ *********************//
9435 #endif
9436
9437
9438 // *************************************************** //
9439 //
9440 // Protected common (OS-independent) RtAudio methods.
9441 //
9442 // *************************************************** //
9443
9444 // This method can be modified to control the behavior of error
9445 // message printing.
// This method can be modified to control the behavior of error
// message printing.
//
// Dispatch an error: route it to the user's error callback when one is
// registered (stopping the stream on non-warning errors), otherwise
// print warnings to stderr and throw RtAudioError for real errors.
void RtApi :: error( RtAudioError::Type type )
{
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.
    // firstErrorOccurred_ acts as a reentrancy guard for that case.

    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message before abortStream() can overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
9475
9476 void RtApi :: verifyStream()
9477 {
9478 if ( stream_.state == STREAM_CLOSED ) {
9479 errorText_ = "RtApi:: a stream is not open!";
9480 error( RtAudioError::INVALID_USE );
9481 }
9482 }
9483
9484 void RtApi :: clearStreamInfo()
9485 {
9486 stream_.mode = UNINITIALIZED;
9487 stream_.state = STREAM_CLOSED;
9488 stream_.sampleRate = 0;
9489 stream_.bufferSize = 0;
9490 stream_.nBuffers = 0;
9491 stream_.userFormat = 0;
9492 stream_.userInterleaved = true;
9493 stream_.streamTime = 0.0;
9494 stream_.apiHandle = 0;
9495 stream_.deviceBuffer = 0;
9496 stream_.callbackInfo.callback = 0;
9497 stream_.callbackInfo.userData = 0;
9498 stream_.callbackInfo.isRunning = false;
9499 stream_.callbackInfo.errorCallback = 0;
9500 for ( int i=0; i<2; i++ ) {
9501 stream_.device[i] = 11111;
9502 stream_.doConvertBuffer[i] = false;
9503 stream_.deviceInterleaved[i] = true;
9504 stream_.doByteSwap[i] = false;
9505 stream_.nUserChannels[i] = 0;
9506 stream_.nDeviceChannels[i] = 0;
9507 stream_.channelOffset[i] = 0;
9508 stream_.deviceFormat[i] = 0;
9509 stream_.latency[i] = 0;
9510 stream_.userBuffer[i] = 0;
9511 stream_.convertInfo[i].channels = 0;
9512 stream_.convertInfo[i].inJump = 0;
9513 stream_.convertInfo[i].outJump = 0;
9514 stream_.convertInfo[i].inFormat = 0;
9515 stream_.convertInfo[i].outFormat = 0;
9516 stream_.convertInfo[i].inOffset.clear();
9517 stream_.convertInfo[i].outOffset.clear();
9518 }
9519 }
9520
9521 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9522 {
9523 if ( format == RTAUDIO_SINT16 )
9524 return 2;
9525 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9526 return 4;
9527 else if ( format == RTAUDIO_FLOAT64 )
9528 return 8;
9529 else if ( format == RTAUDIO_SINT24 )
9530 return 3;
9531 else if ( format == RTAUDIO_SINT8 )
9532 return 1;
9533
9534 errorText_ = "RtApi::formatBytes: undefined format.";
9535 error( RtAudioError::WARNING );
9536
9537 return 0;
9538 }
9539
// Populate stream_.convertInfo[mode] with the jumps (per-frame strides)
// and per-channel offsets used by convertBuffer() for format conversion
// and (de)interleaving between the user and device buffers.
// firstChannel shifts all offsets to start at that channel on the device.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have (the smaller jump).
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  // Non-interleaved data uses channel-contiguous layout, so its per-channel
  // offsets are multiples of bufferSize and its frame jump is 1.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Input side of the conversion is non-interleaved.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Output side of the conversion is non-interleaved.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // Shift the device-side offsets so conversion starts at firstChannel:
  // by channel index when interleaved, by whole channel blocks otherwise.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
9619
9620 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9621 {
9622 // This function does format conversion, input/output channel compensation, and
9623 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9624 // the lower three bytes of a 32-bit integer.
9625
9626 // Clear our device buffer when in/out duplex device channels are different
9627 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9628 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9629 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9630
9631 int j;
9632 if (info.outFormat == RTAUDIO_FLOAT64) {
9633 Float64 scale;
9634 Float64 *out = (Float64 *)outBuffer;
9635
9636 if (info.inFormat == RTAUDIO_SINT8) {
9637 signed char *in = (signed char *)inBuffer;
9638 scale = 1.0 / 127.5;
9639 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9640 for (j=0; j<info.channels; j++) {
9641 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9642 out[info.outOffset[j]] += 0.5;
9643 out[info.outOffset[j]] *= scale;
9644 }
9645 in += info.inJump;
9646 out += info.outJump;
9647 }
9648 }
9649 else if (info.inFormat == RTAUDIO_SINT16) {
9650 Int16 *in = (Int16 *)inBuffer;
9651 scale = 1.0 / 32767.5;
9652 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9653 for (j=0; j<info.channels; j++) {
9654 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9655 out[info.outOffset[j]] += 0.5;
9656 out[info.outOffset[j]] *= scale;
9657 }
9658 in += info.inJump;
9659 out += info.outJump;
9660 }
9661 }
9662 else if (info.inFormat == RTAUDIO_SINT24) {
9663 Int24 *in = (Int24 *)inBuffer;
9664 scale = 1.0 / 8388607.5;
9665 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9666 for (j=0; j<info.channels; j++) {
9667 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9668 out[info.outOffset[j]] += 0.5;
9669 out[info.outOffset[j]] *= scale;
9670 }
9671 in += info.inJump;
9672 out += info.outJump;
9673 }
9674 }
9675 else if (info.inFormat == RTAUDIO_SINT32) {
9676 Int32 *in = (Int32 *)inBuffer;
9677 scale = 1.0 / 2147483647.5;
9678 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9679 for (j=0; j<info.channels; j++) {
9680 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9681 out[info.outOffset[j]] += 0.5;
9682 out[info.outOffset[j]] *= scale;
9683 }
9684 in += info.inJump;
9685 out += info.outJump;
9686 }
9687 }
9688 else if (info.inFormat == RTAUDIO_FLOAT32) {
9689 Float32 *in = (Float32 *)inBuffer;
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9691 for (j=0; j<info.channels; j++) {
9692 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9693 }
9694 in += info.inJump;
9695 out += info.outJump;
9696 }
9697 }
9698 else if (info.inFormat == RTAUDIO_FLOAT64) {
9699 // Channel compensation and/or (de)interleaving only.
9700 Float64 *in = (Float64 *)inBuffer;
9701 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9702 for (j=0; j<info.channels; j++) {
9703 out[info.outOffset[j]] = in[info.inOffset[j]];
9704 }
9705 in += info.inJump;
9706 out += info.outJump;
9707 }
9708 }
9709 }
9710 else if (info.outFormat == RTAUDIO_FLOAT32) {
9711 Float32 scale;
9712 Float32 *out = (Float32 *)outBuffer;
9713
9714 if (info.inFormat == RTAUDIO_SINT8) {
9715 signed char *in = (signed char *)inBuffer;
9716 scale = (Float32) ( 1.0 / 127.5 );
9717 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9718 for (j=0; j<info.channels; j++) {
9719 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9720 out[info.outOffset[j]] += 0.5;
9721 out[info.outOffset[j]] *= scale;
9722 }
9723 in += info.inJump;
9724 out += info.outJump;
9725 }
9726 }
9727 else if (info.inFormat == RTAUDIO_SINT16) {
9728 Int16 *in = (Int16 *)inBuffer;
9729 scale = (Float32) ( 1.0 / 32767.5 );
9730 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9731 for (j=0; j<info.channels; j++) {
9732 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9733 out[info.outOffset[j]] += 0.5;
9734 out[info.outOffset[j]] *= scale;
9735 }
9736 in += info.inJump;
9737 out += info.outJump;
9738 }
9739 }
9740 else if (info.inFormat == RTAUDIO_SINT24) {
9741 Int24 *in = (Int24 *)inBuffer;
9742 scale = (Float32) ( 1.0 / 8388607.5 );
9743 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9744 for (j=0; j<info.channels; j++) {
9745 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9746 out[info.outOffset[j]] += 0.5;
9747 out[info.outOffset[j]] *= scale;
9748 }
9749 in += info.inJump;
9750 out += info.outJump;
9751 }
9752 }
9753 else if (info.inFormat == RTAUDIO_SINT32) {
9754 Int32 *in = (Int32 *)inBuffer;
9755 scale = (Float32) ( 1.0 / 2147483647.5 );
9756 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9757 for (j=0; j<info.channels; j++) {
9758 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9759 out[info.outOffset[j]] += 0.5;
9760 out[info.outOffset[j]] *= scale;
9761 }
9762 in += info.inJump;
9763 out += info.outJump;
9764 }
9765 }
9766 else if (info.inFormat == RTAUDIO_FLOAT32) {
9767 // Channel compensation and/or (de)interleaving only.
9768 Float32 *in = (Float32 *)inBuffer;
9769 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9770 for (j=0; j<info.channels; j++) {
9771 out[info.outOffset[j]] = in[info.inOffset[j]];
9772 }
9773 in += info.inJump;
9774 out += info.outJump;
9775 }
9776 }
9777 else if (info.inFormat == RTAUDIO_FLOAT64) {
9778 Float64 *in = (Float64 *)inBuffer;
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9780 for (j=0; j<info.channels; j++) {
9781 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9782 }
9783 in += info.inJump;
9784 out += info.outJump;
9785 }
9786 }
9787 }
9788 else if (info.outFormat == RTAUDIO_SINT32) {
9789 Int32 *out = (Int32 *)outBuffer;
9790 if (info.inFormat == RTAUDIO_SINT8) {
9791 signed char *in = (signed char *)inBuffer;
9792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9793 for (j=0; j<info.channels; j++) {
9794 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9795 out[info.outOffset[j]] <<= 24;
9796 }
9797 in += info.inJump;
9798 out += info.outJump;
9799 }
9800 }
9801 else if (info.inFormat == RTAUDIO_SINT16) {
9802 Int16 *in = (Int16 *)inBuffer;
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9804 for (j=0; j<info.channels; j++) {
9805 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9806 out[info.outOffset[j]] <<= 16;
9807 }
9808 in += info.inJump;
9809 out += info.outJump;
9810 }
9811 }
9812 else if (info.inFormat == RTAUDIO_SINT24) {
9813 Int24 *in = (Int24 *)inBuffer;
9814 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9815 for (j=0; j<info.channels; j++) {
9816 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9817 out[info.outOffset[j]] <<= 8;
9818 }
9819 in += info.inJump;
9820 out += info.outJump;
9821 }
9822 }
9823 else if (info.inFormat == RTAUDIO_SINT32) {
9824 // Channel compensation and/or (de)interleaving only.
9825 Int32 *in = (Int32 *)inBuffer;
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9827 for (j=0; j<info.channels; j++) {
9828 out[info.outOffset[j]] = in[info.inOffset[j]];
9829 }
9830 in += info.inJump;
9831 out += info.outJump;
9832 }
9833 }
9834 else if (info.inFormat == RTAUDIO_FLOAT32) {
9835 Float32 *in = (Float32 *)inBuffer;
9836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9837 for (j=0; j<info.channels; j++) {
9838 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9839 }
9840 in += info.inJump;
9841 out += info.outJump;
9842 }
9843 }
9844 else if (info.inFormat == RTAUDIO_FLOAT64) {
9845 Float64 *in = (Float64 *)inBuffer;
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9847 for (j=0; j<info.channels; j++) {
9848 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9849 }
9850 in += info.inJump;
9851 out += info.outJump;
9852 }
9853 }
9854 }
9855 else if (info.outFormat == RTAUDIO_SINT24) {
9856 Int24 *out = (Int24 *)outBuffer;
9857 if (info.inFormat == RTAUDIO_SINT8) {
9858 signed char *in = (signed char *)inBuffer;
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9860 for (j=0; j<info.channels; j++) {
9861 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9862 //out[info.outOffset[j]] <<= 16;
9863 }
9864 in += info.inJump;
9865 out += info.outJump;
9866 }
9867 }
9868 else if (info.inFormat == RTAUDIO_SINT16) {
9869 Int16 *in = (Int16 *)inBuffer;
9870 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9871 for (j=0; j<info.channels; j++) {
9872 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9873 //out[info.outOffset[j]] <<= 8;
9874 }
9875 in += info.inJump;
9876 out += info.outJump;
9877 }
9878 }
9879 else if (info.inFormat == RTAUDIO_SINT24) {
9880 // Channel compensation and/or (de)interleaving only.
9881 Int24 *in = (Int24 *)inBuffer;
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9883 for (j=0; j<info.channels; j++) {
9884 out[info.outOffset[j]] = in[info.inOffset[j]];
9885 }
9886 in += info.inJump;
9887 out += info.outJump;
9888 }
9889 }
9890 else if (info.inFormat == RTAUDIO_SINT32) {
9891 Int32 *in = (Int32 *)inBuffer;
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9893 for (j=0; j<info.channels; j++) {
9894 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9895 //out[info.outOffset[j]] >>= 8;
9896 }
9897 in += info.inJump;
9898 out += info.outJump;
9899 }
9900 }
9901 else if (info.inFormat == RTAUDIO_FLOAT32) {
9902 Float32 *in = (Float32 *)inBuffer;
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9904 for (j=0; j<info.channels; j++) {
9905 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9906 }
9907 in += info.inJump;
9908 out += info.outJump;
9909 }
9910 }
9911 else if (info.inFormat == RTAUDIO_FLOAT64) {
9912 Float64 *in = (Float64 *)inBuffer;
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9914 for (j=0; j<info.channels; j++) {
9915 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9916 }
9917 in += info.inJump;
9918 out += info.outJump;
9919 }
9920 }
9921 }
9922 else if (info.outFormat == RTAUDIO_SINT16) {
9923 Int16 *out = (Int16 *)outBuffer;
9924 if (info.inFormat == RTAUDIO_SINT8) {
9925 signed char *in = (signed char *)inBuffer;
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9927 for (j=0; j<info.channels; j++) {
9928 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
9929 out[info.outOffset[j]] <<= 8;
9930 }
9931 in += info.inJump;
9932 out += info.outJump;
9933 }
9934 }
9935 else if (info.inFormat == RTAUDIO_SINT16) {
9936 // Channel compensation and/or (de)interleaving only.
9937 Int16 *in = (Int16 *)inBuffer;
9938 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9939 for (j=0; j<info.channels; j++) {
9940 out[info.outOffset[j]] = in[info.inOffset[j]];
9941 }
9942 in += info.inJump;
9943 out += info.outJump;
9944 }
9945 }
9946 else if (info.inFormat == RTAUDIO_SINT24) {
9947 Int24 *in = (Int24 *)inBuffer;
9948 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9949 for (j=0; j<info.channels; j++) {
9950 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
9951 }
9952 in += info.inJump;
9953 out += info.outJump;
9954 }
9955 }
9956 else if (info.inFormat == RTAUDIO_SINT32) {
9957 Int32 *in = (Int32 *)inBuffer;
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9959 for (j=0; j<info.channels; j++) {
9960 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
9961 }
9962 in += info.inJump;
9963 out += info.outJump;
9964 }
9965 }
9966 else if (info.inFormat == RTAUDIO_FLOAT32) {
9967 Float32 *in = (Float32 *)inBuffer;
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9969 for (j=0; j<info.channels; j++) {
9970 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9971 }
9972 in += info.inJump;
9973 out += info.outJump;
9974 }
9975 }
9976 else if (info.inFormat == RTAUDIO_FLOAT64) {
9977 Float64 *in = (Float64 *)inBuffer;
9978 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9979 for (j=0; j<info.channels; j++) {
9980 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9981 }
9982 in += info.inJump;
9983 out += info.outJump;
9984 }
9985 }
9986 }
9987 else if (info.outFormat == RTAUDIO_SINT8) {
9988 signed char *out = (signed char *)outBuffer;
9989 if (info.inFormat == RTAUDIO_SINT8) {
9990 // Channel compensation and/or (de)interleaving only.
9991 signed char *in = (signed char *)inBuffer;
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9993 for (j=0; j<info.channels; j++) {
9994 out[info.outOffset[j]] = in[info.inOffset[j]];
9995 }
9996 in += info.inJump;
9997 out += info.outJump;
9998 }
9999 }
10000 if (info.inFormat == RTAUDIO_SINT16) {
10001 Int16 *in = (Int16 *)inBuffer;
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10003 for (j=0; j<info.channels; j++) {
10004 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10005 }
10006 in += info.inJump;
10007 out += info.outJump;
10008 }
10009 }
10010 else if (info.inFormat == RTAUDIO_SINT24) {
10011 Int24 *in = (Int24 *)inBuffer;
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10013 for (j=0; j<info.channels; j++) {
10014 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10015 }
10016 in += info.inJump;
10017 out += info.outJump;
10018 }
10019 }
10020 else if (info.inFormat == RTAUDIO_SINT32) {
10021 Int32 *in = (Int32 *)inBuffer;
10022 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10023 for (j=0; j<info.channels; j++) {
10024 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10025 }
10026 in += info.inJump;
10027 out += info.outJump;
10028 }
10029 }
10030 else if (info.inFormat == RTAUDIO_FLOAT32) {
10031 Float32 *in = (Float32 *)inBuffer;
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10033 for (j=0; j<info.channels; j++) {
10034 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10035 }
10036 in += info.inJump;
10037 out += info.outJump;
10038 }
10039 }
10040 else if (info.inFormat == RTAUDIO_FLOAT64) {
10041 Float64 *in = (Float64 *)inBuffer;
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10043 for (j=0; j<info.channels; j++) {
10044 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10045 }
10046 in += info.inJump;
10047 out += info.outJump;
10048 }
10049 }
10050 }
10051 }
10052
10053 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10054 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10055 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10056
10057 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10058 {
10059 register char val;
10060 register char *ptr;
10061
10062 ptr = buffer;
10063 if ( format == RTAUDIO_SINT16 ) {
10064 for ( unsigned int i=0; i<samples; i++ ) {
10065 // Swap 1st and 2nd bytes.
10066 val = *(ptr);
10067 *(ptr) = *(ptr+1);
10068 *(ptr+1) = val;
10069
10070 // Increment 2 bytes.
10071 ptr += 2;
10072 }
10073 }
10074 else if ( format == RTAUDIO_SINT32 ||
10075 format == RTAUDIO_FLOAT32 ) {
10076 for ( unsigned int i=0; i<samples; i++ ) {
10077 // Swap 1st and 4th bytes.
10078 val = *(ptr);
10079 *(ptr) = *(ptr+3);
10080 *(ptr+3) = val;
10081
10082 // Swap 2nd and 3rd bytes.
10083 ptr += 1;
10084 val = *(ptr);
10085 *(ptr) = *(ptr+1);
10086 *(ptr+1) = val;
10087
10088 // Increment 3 more bytes.
10089 ptr += 3;
10090 }
10091 }
10092 else if ( format == RTAUDIO_SINT24 ) {
10093 for ( unsigned int i=0; i<samples; i++ ) {
10094 // Swap 1st and 3rd bytes.
10095 val = *(ptr);
10096 *(ptr) = *(ptr+2);
10097 *(ptr+2) = val;
10098
10099 // Increment 2 more bytes.
10100 ptr += 2;
10101 }
10102 }
10103 else if ( format == RTAUDIO_FLOAT64 ) {
10104 for ( unsigned int i=0; i<samples; i++ ) {
10105 // Swap 1st and 8th bytes
10106 val = *(ptr);
10107 *(ptr) = *(ptr+7);
10108 *(ptr+7) = val;
10109
10110 // Swap 2nd and 7th bytes
10111 ptr += 1;
10112 val = *(ptr);
10113 *(ptr) = *(ptr+5);
10114 *(ptr+5) = val;
10115
10116 // Swap 3rd and 6th bytes
10117 ptr += 1;
10118 val = *(ptr);
10119 *(ptr) = *(ptr+3);
10120 *(ptr+3) = val;
10121
10122 // Swap 4th and 5th bytes
10123 ptr += 1;
10124 val = *(ptr);
10125 *(ptr) = *(ptr+1);
10126 *(ptr+1) = val;
10127
10128 // Increment 5 more bytes.
10129 ptr += 5;
10130 }
10131 }
10132 }
10133
10134 // Indentation settings for Vim and Emacs
10135 //
10136 // Local Variables:
10137 // c-basic-offset: 2
10138 // indent-tabs-mode: nil
10139 // End:
10140 //
10141 // vim: et sts=2 sw=2
10142