/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2014 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
f@0: */ f@0: /************************************************************************/ f@0: f@0: // RtAudio: Version 4.1.1 f@0: #pragma once f@0: f@0: f@0: #include "../include/RtAudio.h" f@0: #include f@0: #include f@0: #include f@0: #include f@0: f@0: template const T& max(const T& a, const T& b) { f@0: return (a &apis ) throw() f@0: { f@0: apis.clear(); f@0: f@0: // The order here will control the order of RtAudio's API search in f@0: // the constructor. f@0: #if defined(__UNIX_JACK__) f@0: apis.push_back( UNIX_JACK ); f@0: #endif f@0: #if defined(__LINUX_ALSA__) f@0: apis.push_back( LINUX_ALSA ); f@0: #endif f@0: #if defined(__LINUX_PULSE__) f@0: apis.push_back( LINUX_PULSE ); f@0: #endif f@0: #if defined(__LINUX_OSS__) f@0: apis.push_back( LINUX_OSS ); f@0: #endif f@0: #if defined(__WINDOWS_ASIO__) f@0: apis.push_back( WINDOWS_ASIO ); f@0: #endif f@0: #if defined(__WINDOWS_WASAPI__) f@0: apis.push_back( WINDOWS_WASAPI ); f@0: #endif f@0: #if defined(__WINDOWS_DS__) f@0: apis.push_back( WINDOWS_DS ); f@0: #endif f@0: #if defined(__MACOSX_CORE__) f@0: apis.push_back( MACOSX_CORE ); f@0: #endif f@0: #if defined(__RTAUDIO_DUMMY__) f@0: apis.push_back( RTAUDIO_DUMMY ); f@0: #endif f@0: } f@0: f@0: void RtAudio :: openRtApi( RtAudio::Api api ) f@0: { f@0: if ( rtapi_ ) f@0: delete rtapi_; f@0: rtapi_ = 0; f@0: f@0: #if defined(__UNIX_JACK__) f@0: if ( api == UNIX_JACK ) f@0: rtapi_ = new RtApiJack(); f@0: #endif f@0: #if defined(__LINUX_ALSA__) f@0: if ( api == LINUX_ALSA ) f@0: rtapi_ = new RtApiAlsa(); f@0: #endif f@0: #if defined(__LINUX_PULSE__) f@0: if ( api == LINUX_PULSE ) f@0: rtapi_ = new RtApiPulse(); f@0: #endif f@0: #if defined(__LINUX_OSS__) f@0: if ( api == LINUX_OSS ) f@0: rtapi_ = new RtApiOss(); f@0: #endif f@0: #if defined(__WINDOWS_ASIO__) f@0: if ( api == WINDOWS_ASIO ) f@0: rtapi_ = new RtApiAsio(); f@0: #endif f@0: #if defined(__WINDOWS_WASAPI__) f@0: if ( api == WINDOWS_WASAPI ) f@0: rtapi_ = new RtApiWasapi(); f@0: #endif f@0: #if 
defined(__WINDOWS_DS__) f@0: if ( api == WINDOWS_DS ) f@0: rtapi_ = new RtApiDs(); f@0: #endif f@0: #if defined(__MACOSX_CORE__) f@0: if ( api == MACOSX_CORE ) f@0: rtapi_ = new RtApiCore(); f@0: #endif f@0: #if defined(__RTAUDIO_DUMMY__) f@0: if ( api == RTAUDIO_DUMMY ) f@0: rtapi_ = new RtApiDummy(); f@0: #endif f@0: } f@0: f@0: RtAudio :: RtAudio( RtAudio::Api api ) f@0: { f@0: rtapi_ = 0; f@0: f@0: if ( api != UNSPECIFIED ) { f@0: // Attempt to open the specified API. f@0: openRtApi( api ); f@0: if ( rtapi_ ) return; f@0: f@0: // No compiled support for specified API value. Issue a debug f@0: // warning and continue as if no API was specified. f@0: std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl; f@0: } f@0: f@0: // Iterate through the compiled APIs and return as soon as we find f@0: // one with at least one device or we reach the end of the list. f@0: std::vector< RtAudio::Api > apis; f@0: getCompiledApi( apis ); f@0: for ( unsigned int i=0; igetDeviceCount() ) break; f@0: } f@0: f@0: if ( rtapi_ ) return; f@0: f@0: // It should not be possible to get here because the preprocessor f@0: // definition __RTAUDIO_DUMMY__ is automatically defined if no f@0: // API-specific definitions are passed to the compiler. But just in f@0: // case something weird happens, we'll thow an error. f@0: std::string errorText = "\nRtAudio: no compiled API support found ... 
critical error!!\n\n"; f@0: throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) ); f@0: } f@0: f@0: RtAudio :: ~RtAudio() throw() f@0: { f@0: if ( rtapi_ ) f@0: delete rtapi_; f@0: } f@0: f@0: void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters, f@0: RtAudio::StreamParameters *inputParameters, f@0: RtAudioFormat format, unsigned int sampleRate, f@0: unsigned int *bufferFrames, f@0: RtAudioCallback callback, void *userData, f@0: RtAudio::StreamOptions *options, f@0: RtAudioErrorCallback errorCallback ) f@0: { f@0: return rtapi_->openStream( outputParameters, inputParameters, format, f@0: sampleRate, bufferFrames, callback, f@0: userData, options, errorCallback ); f@0: } f@0: f@0: // *************************************************** // f@0: // f@0: // Public RtApi definitions (see end of file for f@0: // private or protected utility functions). f@0: // f@0: // *************************************************** // f@0: f@0: RtApi :: RtApi() f@0: { f@0: stream_.state = STREAM_CLOSED; f@0: stream_.mode = UNINITIALIZED; f@0: stream_.apiHandle = 0; f@0: stream_.userBuffer[0] = 0; f@0: stream_.userBuffer[1] = 0; f@0: MUTEX_INITIALIZE( &stream_.mutex ); f@0: showWarnings_ = true; f@0: firstErrorOccurred_ = false; f@0: } f@0: f@0: RtApi :: ~RtApi() f@0: { f@0: MUTEX_DESTROY( &stream_.mutex ); f@0: } f@0: f@0: void RtApi :: openStream( RtAudio::StreamParameters *oParams, f@0: RtAudio::StreamParameters *iParams, f@0: RtAudioFormat format, unsigned int sampleRate, f@0: unsigned int *bufferFrames, f@0: RtAudioCallback callback, void *userData, f@0: RtAudio::StreamOptions *options, f@0: RtAudioErrorCallback errorCallback ) f@0: { f@0: if ( stream_.state != STREAM_CLOSED ) { f@0: errorText_ = "RtApi::openStream: a stream is already open!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: f@0: // Clear stream information potentially left from a previously open stream. 
f@0: clearStreamInfo(); f@0: f@0: if ( oParams && oParams->nChannels < 1 ) { f@0: errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one."; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: f@0: if ( iParams && iParams->nChannels < 1 ) { f@0: errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one."; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: f@0: if ( oParams == NULL && iParams == NULL ) { f@0: errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: f@0: if ( formatBytes(format) == 0 ) { f@0: errorText_ = "RtApi::openStream: 'format' parameter value is undefined."; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: f@0: unsigned int nDevices = getDeviceCount(); f@0: unsigned int oChannels = 0; f@0: if ( oParams ) { f@0: oChannels = oParams->nChannels; f@0: if ( oParams->deviceId >= nDevices ) { f@0: errorText_ = "RtApi::openStream: output device parameter value is invalid."; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: } f@0: f@0: unsigned int iChannels = 0; f@0: if ( iParams ) { f@0: iChannels = iParams->nChannels; f@0: if ( iParams->deviceId >= nDevices ) { f@0: errorText_ = "RtApi::openStream: input device parameter value is invalid."; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: } f@0: f@0: bool result; f@0: f@0: if ( oChannels > 0 ) { f@0: f@0: result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel, f@0: sampleRate, format, bufferFrames, options ); f@0: if ( result == false ) { f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: } f@0: f@0: if ( iChannels > 0 ) { f@0: f@0: result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel, f@0: sampleRate, format, 
bufferFrames, options ); f@0: if ( result == false ) { f@0: if ( oChannels > 0 ) closeStream(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: } f@0: f@0: stream_.callbackInfo.callback = (void *) callback; f@0: stream_.callbackInfo.userData = userData; f@0: stream_.callbackInfo.errorCallback = (void *) errorCallback; f@0: f@0: if ( options ) options->numberOfBuffers = stream_.nBuffers; f@0: stream_.state = STREAM_STOPPED; f@0: } f@0: f@0: unsigned int RtApi :: getDefaultInputDevice( void ) f@0: { f@0: // Should be implemented in subclasses if possible. f@0: return 0; f@0: } f@0: f@0: unsigned int RtApi :: getDefaultOutputDevice( void ) f@0: { f@0: // Should be implemented in subclasses if possible. f@0: return 0; f@0: } f@0: f@0: void RtApi :: closeStream( void ) f@0: { f@0: // MUST be implemented in subclasses! f@0: return; f@0: } f@0: f@0: bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/, f@0: unsigned int /*firstChannel*/, unsigned int /*sampleRate*/, f@0: RtAudioFormat /*format*/, unsigned int * /*bufferSize*/, f@0: RtAudio::StreamOptions * /*options*/ ) f@0: { f@0: // MUST be implemented in subclasses! f@0: return FAILURE; f@0: } f@0: f@0: void RtApi :: tickStreamTime( void ) f@0: { f@0: // Subclasses that do not provide their own implementation of f@0: // getStreamTime should call this function once per buffer I/O to f@0: // provide basic stream time support. 
f@0: f@0: stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate ); f@0: f@0: #if defined( HAVE_GETTIMEOFDAY ) f@0: gettimeofday( &stream_.lastTickTimestamp, NULL ); f@0: #endif f@0: } f@0: f@0: long RtApi :: getStreamLatency( void ) f@0: { f@0: verifyStream(); f@0: f@0: long totalLatency = 0; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) f@0: totalLatency = stream_.latency[0]; f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) f@0: totalLatency += stream_.latency[1]; f@0: f@0: return totalLatency; f@0: } f@0: f@0: double RtApi :: getStreamTime( void ) f@0: { f@0: verifyStream(); f@0: f@0: #if defined( HAVE_GETTIMEOFDAY ) f@0: // Return a very accurate estimate of the stream time by f@0: // adding in the elapsed time since the last tick. f@0: struct timeval then; f@0: struct timeval now; f@0: f@0: if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 ) f@0: return stream_.streamTime; f@0: f@0: gettimeofday( &now, NULL ); f@0: then = stream_.lastTickTimestamp; f@0: return stream_.streamTime + f@0: ((now.tv_sec + 0.000001 * now.tv_usec) - f@0: (then.tv_sec + 0.000001 * then.tv_usec)); f@0: #else f@0: return stream_.streamTime; f@0: #endif f@0: } f@0: f@0: void RtApi :: setStreamTime( double time ) f@0: { f@0: verifyStream(); f@0: f@0: if ( time >= 0.0 ) f@0: stream_.streamTime = time; f@0: } f@0: f@0: unsigned int RtApi :: getStreamSampleRate( void ) f@0: { f@0: verifyStream(); f@0: f@0: return stream_.sampleRate; f@0: } f@0: f@0: f@0: // *************************************************** // f@0: // f@0: // OS/API-specific methods. f@0: // f@0: // *************************************************** // f@0: f@0: #if defined(__MACOSX_CORE__) f@0: f@0: // The OS X CoreAudio API is designed to use a separate callback f@0: // procedure for each of its audio devices. 
A single RtAudio duplex f@0: // stream using two different devices is supported here, though it f@0: // cannot be guaranteed to always behave correctly because we cannot f@0: // synchronize these two callbacks. f@0: // f@0: // A property listener is installed for over/underrun information. f@0: // However, no functionality is currently provided to allow property f@0: // listeners to trigger user handlers because it is unclear what could f@0: // be done if a critical stream parameter (buffer size, sample rate, f@0: // device disconnect) notification arrived. The listeners entail f@0: // quite a bit of extra code and most likely, a user program wouldn't f@0: // be prepared for the result anyway. However, we do provide a flag f@0: // to the client callback function to inform of an over/underrun. f@0: f@0: // A structure to hold various information related to the CoreAudio API f@0: // implementation. f@0: struct CoreHandle { f@0: AudioDeviceID id[2]; // device ids f@0: #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) f@0: AudioDeviceIOProcID procId[2]; f@0: #endif f@0: UInt32 iStream[2]; // device stream index (or first if using multiple) f@0: UInt32 nStreams[2]; // number of streams to use f@0: bool xrun[2]; f@0: char *deviceBuffer; f@0: pthread_cond_t condition; f@0: int drainCounter; // Tracks callback counts when draining f@0: bool internalDrain; // Indicates if stop is initiated from callback or not. f@0: f@0: CoreHandle() f@0: :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; } f@0: }; f@0: f@0: RtApiCore:: RtApiCore() f@0: { f@0: #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER ) f@0: // This is a largely undocumented but absolutely necessary f@0: // requirement starting with OS-X 10.6. If not called, queries and f@0: // updates to various audio device properties are not handled f@0: // correctly. 
f@0: CFRunLoopRef theRunLoop = NULL; f@0: AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop, f@0: kAudioObjectPropertyScopeGlobal, f@0: kAudioObjectPropertyElementMaster }; f@0: OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop); f@0: if ( result != noErr ) { f@0: errorText_ = "RtApiCore::RtApiCore: error setting run loop property!"; f@0: error( RtAudioError::WARNING ); f@0: } f@0: #endif f@0: } f@0: f@0: RtApiCore :: ~RtApiCore() f@0: { f@0: // The subclass destructor gets called before the base class f@0: // destructor, so close an existing stream before deallocating f@0: // apiDeviceId memory. f@0: if ( stream_.state != STREAM_CLOSED ) closeStream(); f@0: } f@0: f@0: unsigned int RtApiCore :: getDeviceCount( void ) f@0: { f@0: // Find out how many audio devices there are, if any. f@0: UInt32 dataSize; f@0: AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; f@0: OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize ); f@0: if ( result != noErr ) { f@0: errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!"; f@0: error( RtAudioError::WARNING ); f@0: return 0; f@0: } f@0: f@0: return dataSize / sizeof( AudioDeviceID ); f@0: } f@0: f@0: unsigned int RtApiCore :: getDefaultInputDevice( void ) f@0: { f@0: unsigned int nDevices = getDeviceCount(); f@0: if ( nDevices <= 1 ) return 0; f@0: f@0: AudioDeviceID id; f@0: UInt32 dataSize = sizeof( AudioDeviceID ); f@0: AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; f@0: OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id ); f@0: if ( result != noErr ) { f@0: errorText_ = 
"RtApiCore::getDefaultInputDevice: OS-X system error getting device."; f@0: error( RtAudioError::WARNING ); f@0: return 0; f@0: } f@0: f@0: dataSize *= nDevices; f@0: AudioDeviceID deviceList[ nDevices ]; f@0: property.mSelector = kAudioHardwarePropertyDevices; f@0: result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList ); f@0: if ( result != noErr ) { f@0: errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs."; f@0: error( RtAudioError::WARNING ); f@0: return 0; f@0: } f@0: f@0: for ( unsigned int i=0; i= nDevices ) { f@0: errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: f@0: AudioDeviceID deviceList[ nDevices ]; f@0: UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices; f@0: AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, f@0: kAudioObjectPropertyScopeGlobal, f@0: kAudioObjectPropertyElementMaster }; f@0: OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, f@0: 0, NULL, &dataSize, (void *) &deviceList ); f@0: if ( result != noErr ) { f@0: errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: AudioDeviceID id = deviceList[ device ]; f@0: f@0: // Get the device name. 
f@0: info.name.erase(); f@0: CFStringRef cfname; f@0: dataSize = sizeof( CFStringRef ); f@0: property.mSelector = kAudioObjectPropertyManufacturer; f@0: result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname ); f@0: if ( result != noErr ) { f@0: errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() ); f@0: int length = CFStringGetLength(cfname); f@0: char *mname = (char *)malloc(length * 3 + 1); f@0: #if defined( UNICODE ) || defined( _UNICODE ) f@0: CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8); f@0: #else f@0: CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding()); f@0: #endif f@0: info.name.append( (const char *)mname, strlen(mname) ); f@0: info.name.append( ": " ); f@0: CFRelease( cfname ); f@0: free(mname); f@0: f@0: property.mSelector = kAudioObjectPropertyName; f@0: result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname ); f@0: if ( result != noErr ) { f@0: errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() ); f@0: length = CFStringGetLength(cfname); f@0: char *name = (char *)malloc(length * 3 + 1); f@0: #if defined( UNICODE ) || defined( _UNICODE ) f@0: CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8); f@0: #else f@0: CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding()); f@0: #endif f@0: info.name.append( (const char *)name, strlen(name) ); f@0: CFRelease( cfname ); f@0: free(name); f@0: f@0: // Get the 
output stream "configuration". f@0: AudioBufferList *bufferList = nil; f@0: property.mSelector = kAudioDevicePropertyStreamConfiguration; f@0: property.mScope = kAudioDevicePropertyScopeOutput; f@0: // property.mElement = kAudioObjectPropertyElementWildcard; f@0: dataSize = 0; f@0: result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); f@0: if ( result != noErr || dataSize == 0 ) { f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Allocate the AudioBufferList. f@0: bufferList = (AudioBufferList *) malloc( dataSize ); f@0: if ( bufferList == NULL ) { f@0: errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); f@0: if ( result != noErr || dataSize == 0 ) { f@0: free( bufferList ); f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Get output channel information. f@0: unsigned int i, nStreams = bufferList->mNumberBuffers; f@0: for ( i=0; imBuffers[i].mNumberChannels; f@0: free( bufferList ); f@0: f@0: // Get the input stream "configuration". 
f@0: property.mScope = kAudioDevicePropertyScopeInput; f@0: result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); f@0: if ( result != noErr || dataSize == 0 ) { f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Allocate the AudioBufferList. f@0: bufferList = (AudioBufferList *) malloc( dataSize ); f@0: if ( bufferList == NULL ) { f@0: errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList ); f@0: if (result != noErr || dataSize == 0) { f@0: free( bufferList ); f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Get input channel information. f@0: nStreams = bufferList->mNumberBuffers; f@0: for ( i=0; imBuffers[i].mNumberChannels; f@0: free( bufferList ); f@0: f@0: // If device opens for both playback and capture, we determine the channels. f@0: if ( info.outputChannels > 0 && info.inputChannels > 0 ) f@0: info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; f@0: f@0: // Probe the device sample rates. f@0: bool isInput = false; f@0: if ( info.outputChannels == 0 ) isInput = true; f@0: f@0: // Determine the supported sample rates. 
f@0: property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; f@0: if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput; f@0: result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize ); f@0: if ( result != kAudioHardwareNoError || dataSize == 0 ) { f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: UInt32 nRanges = dataSize / sizeof( AudioValueRange ); f@0: AudioValueRange rangeList[ nRanges ]; f@0: result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList ); f@0: if ( result != kAudioHardwareNoError ) { f@0: errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // The sample rate reporting mechanism is a bit of a mystery. It f@0: // seems that it can either return individual rates or a range of f@0: // rates. I assume that if the min / max range values are the same, f@0: // then that represents a single supported rate and if the min / max f@0: // range values are different, the device supports an arbitrary f@0: // range of values (though there might be multiple ranges, so we'll f@0: // use the most conservative range). 
f@0: Float64 minimumRate = 1.0, maximumRate = 10000000000.0; f@0: bool haveValueRange = false; f@0: info.sampleRates.clear(); f@0: for ( UInt32 i=0; i minimumRate ) minimumRate = rangeList[i].mMinimum; f@0: if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum; f@0: } f@0: } f@0: f@0: if ( haveValueRange ) { f@0: for ( unsigned int k=0; k= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) f@0: info.sampleRates.push_back( SAMPLE_RATES[k] ); f@0: } f@0: } f@0: f@0: // Sort and remove any redundant values f@0: std::sort( info.sampleRates.begin(), info.sampleRates.end() ); f@0: info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() ); f@0: f@0: if ( info.sampleRates.size() == 0 ) { f@0: errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // CoreAudio always uses 32-bit floating point data for PCM streams. f@0: // Thus, any other "physical" formats supported by the device are of f@0: // no interest to the client. 
f@0: info.nativeFormats = RTAUDIO_FLOAT32; f@0: f@0: if ( info.outputChannels > 0 ) f@0: if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true; f@0: if ( info.inputChannels > 0 ) f@0: if ( getDefaultInputDevice() == device ) info.isDefaultInput = true; f@0: f@0: info.probed = true; f@0: return info; f@0: } f@0: f@0: static OSStatus callbackHandler( AudioDeviceID inDevice, f@0: const AudioTimeStamp* /*inNow*/, f@0: const AudioBufferList* inInputData, f@0: const AudioTimeStamp* /*inInputTime*/, f@0: AudioBufferList* outOutputData, f@0: const AudioTimeStamp* /*inOutputTime*/, f@0: void* infoPointer ) f@0: { f@0: CallbackInfo *info = (CallbackInfo *) infoPointer; f@0: f@0: RtApiCore *object = (RtApiCore *) info->object; f@0: if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false ) f@0: return kAudioHardwareUnspecifiedError; f@0: else f@0: return kAudioHardwareNoError; f@0: } f@0: f@0: static OSStatus xrunListener( AudioObjectID /*inDevice*/, f@0: UInt32 nAddresses, f@0: const AudioObjectPropertyAddress properties[], f@0: void* handlePointer ) f@0: { f@0: CoreHandle *handle = (CoreHandle *) handlePointer; f@0: for ( UInt32 i=0; ixrun[1] = true; f@0: else f@0: handle->xrun[0] = true; f@0: } f@0: } f@0: f@0: return kAudioHardwareNoError; f@0: } f@0: f@0: static OSStatus rateListener( AudioObjectID inDevice, f@0: UInt32 /*nAddresses*/, f@0: const AudioObjectPropertyAddress /*properties*/[], f@0: void* ratePointer ) f@0: { f@0: Float64 *rate = (Float64 *) ratePointer; f@0: UInt32 dataSize = sizeof( Float64 ); f@0: AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate, f@0: kAudioObjectPropertyScopeGlobal, f@0: kAudioObjectPropertyElementMaster }; f@0: AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate ); f@0: return kAudioHardwareNoError; f@0: } f@0: f@0: bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, f@0: unsigned int firstChannel, unsigned 
 int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
    return FAILURE;
  }

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
    return FAILURE;
  }

  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
    return FAILURE;
  }

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    isInput = true;
    property.mScope = kAudioDevicePropertyScopeInput;
  }
  else
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList *bufferList = nil;
  dataSize = 0;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
    return FAILURE;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams.  However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  // channels.
  UInt32 deviceChannels = 0;
  // NOTE(review): loop header restored after extraction damage — verify.
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    free( bufferList );
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  // NOTE(review): loop header restored after extraction damage — verify.
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      firstStream = iStream;
      channelOffset = offsetCounter;
      foundStream = true;
      break;
    }
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;
  }

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    monoMode = true;
    offsetCounter = firstChannel;
    // NOTE(review): loop header restored after extraction damage — verify.
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;
    }

    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;
      streamCount++;
    }
  }

  free( bufferList );

  // Determine the buffer size.
  AudioValueRange bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Clamp the requested buffer size to the device-supported range.
  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

  // Set the buffer size.  For multiple streams, I'm assuming we only
  // need to make this setting for the master channel.
  UInt32 theSize = (UInt32) *bufferSize;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  *bufferSize = theSize;
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;

  // Try to set "hog" mode ... it's not clear to me this is working.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
    pid_t hog_pid;
    dataSize = sizeof( hog_pid );
    property.mSelector = kAudioDevicePropertyHogMode;
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    if ( hog_pid != getpid() ) {
      hog_pid = getpid();
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
      if ( result != noErr ) {
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
        errorText_ = errorStream_.str();
        return FAILURE;
      }
    }
  }

  // Check and if necessary, change the sample rate for the device.
  Float64 nominalRate;
  dataSize = sizeof( Float64 );
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Only change the sample rate if off by more than 1 Hz.
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

    // Set a property listener for the sample rate change
    Float64 reportedRate = 0.0;
    AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
    result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    nominalRate = (Float64) sampleRate;
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
    if ( result != noErr ) {
      AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    // Now wait until the reported nominal rate is what we just set.
    // Poll in 5 ms steps with a 5 second overall timeout.
    UInt32 microCounter = 0;
    while ( reportedRate != nominalRate ) {
      microCounter += 5000;
      if ( microCounter > 5000000 ) break;
      usleep( 5000 );
    }

    // Remove the property listener.
    AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

    if ( microCounter > 5000000 ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Now set the stream format for all streams.  Also, check the
  // physical format of the device and change that if necessary.
  AudioStreamBasicDescription description;
  dataSize = sizeof( AudioStreamBasicDescription );
  property.mSelector = kAudioStreamPropertyVirtualFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Set the sample rate and data format id.  However, only make the
  // change if the sample rate is not within 1.0 of the desired
  // rate and the format is not linear pcm.
  bool updateFormat = false;
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
    description.mSampleRate = (Float64) sampleRate;
    updateFormat = true;
  }

  if ( description.mFormatID != kAudioFormatLinearPCM ) {
    description.mFormatID = kAudioFormatLinearPCM;
    updateFormat = true;
  }

  if ( updateFormat ) {
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // Now check the physical format.
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  //std::cout << "Current physical stream format:" << std::endl;
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
    description.mFormatID = kAudioFormatLinearPCM;
    //description.mSampleRate = (Float64) sampleRate;
    AudioStreamBasicDescription testDescription = description;
    UInt32 formatFlags;

    // We'll try higher bit rates first and then work our way down.
    std::vector< std::pair<Float32, UInt32> > physicalFormats;
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

    // NOTE(review): the format-testing loop and the stream_ setup below were
    // lost in extraction and have been restored from the canonical 4.1.x
    // sources — verify before relying on them.
    bool setPhysicalFormat = false;
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
      testDescription = description;
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
      testDescription.mFormatFlags = physicalFormats[i].second;
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
      else
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
      if ( result == noErr ) {
        setPhysicalFormat = true;
        break;
      }
    }

    if ( !setPhysicalFormat ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error setting physical data format for device (" << device << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  } // done setting virtual/physical formats.

  // Get the stream / device latency.
  UInt32 latency;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyLatency;
  if ( AudioObjectHasProperty( id, &property ) == true ) {
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
    else {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  // Byte-swapping: According to AudioHardware.h, the stream data will
  // always be presented in native-endian format, so we should never
  // need to byte swap.
  stream_.doByteSwap[mode] = false;

  // From the CoreAudio documentation, PCM data must be supplied as
  // 32-bit floats.
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

  if ( streamCount == 1 )
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
  else // multiple streams
    stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( streamCount == 1 ) {
    if ( stream_.nUserChannels[mode] > 1 &&
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
      stream_.doConvertBuffer[mode] = true;
  }
  else if ( monoMode && stream_.userInterleaved )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our CoreHandle structure for the stream.
  CoreHandle *handle = 0;
  if ( stream_.apiHandle == 0 ) {
    try {
      handle = new CoreHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
      goto error;
    }

    if ( pthread_cond_init( &handle->condition, NULL ) ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }
    stream_.apiHandle = (void *) handle;
  }
  else
    handle = (CoreHandle *) stream_.apiHandle;
  handle->iStream[mode] = firstStream;
  handle->nStreams[mode] = streamCount;
  handle->id[mode] = id;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
  // NOTE(review): this memset runs BEFORE the NULL check below — on malloc
  // failure it is undefined behavior.  The check should precede the memset
  // (or calloc should be used, as the commented-out line above did).
  memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  // If possible, we will make use of the CoreAudio stream buffers as
  // "device buffers".  However, we can't do this if using multiple
  // streams.
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) {
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
    else setConvertInfo( mode, channelOffset );
  }

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
    // Only one callback procedure per device.
    stream_.mode = DUPLEX;
  else {
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
#else
    // deprecated in favor of AudioDeviceCreateIOProcID()
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
      errorText_ = errorStream_.str();
      goto error;
    }
    if ( stream_.mode == OUTPUT && mode == INPUT )
      stream_.mode = DUPLEX;
    else
      stream_.mode = mode;
  }

  // Setup the device property listener for over/underload.
  property.mSelector = kAudioDeviceProcessorOverload;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

  return SUCCESS;

 error:
  // Failure path: release the CoreHandle, any user/device buffers, and mark
  // the stream closed so a subsequent open starts from a clean slate.
  if ( handle ) {
    pthread_cond_destroy( &handle->condition );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}

// Close the stream: stop the device(s) if running, tear down the IOProc(s),
// then free the per-stream buffers and the CoreHandle.
void RtApiCore :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
#endif
  }

  // A duplex stream on a single device shares one IOProc; only tear down the
  // input side separately when it lives on a different device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
#else
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
#endif
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  delete handle;
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}

// Start the device(s) feeding callbackHandler.  For a duplex stream spanning
// two distinct devices, both devices are started.
void RtApiCore :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiCore::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    result = AudioDeviceStart( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStart( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Reset drain state so a previous stop/drain does not affect this run.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}

// Stop the stream, letting queued output drain first: the callback is asked
// to play out remaining data (drainCounter) and signals the condition
// variable when done, after which the device(s) are stopped.
void RtApiCore :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if (
handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): pthread_cond_wait() requires stream_.mutex to be locked
      // by the caller, but no lock is visibly taken here — confirm against the
      // canonical sources before changing.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}

// Stop the stream immediately, discarding pending output: drainCounter = 2
// makes the callback write zeros instead of waiting for a drain.
void RtApiCore :: abortStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  handle->drainCounter = 2;

  stopStream();
}

// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted.  It is better to handle it this way because the
// callbackEvent() function probably should return before the AudioDeviceStop()
// function is called.
static void *coreStopStream( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiCore *object = (RtApiCore *) info->object;

  object->stopStream();
  pthread_exit( NULL );
}

// Per-buffer I/O handler, invoked (via callbackHandler) on the CoreAudio HAL
// thread.  Runs the user callback, moves data between the user buffers and
// the CoreAudio stream buffers (handling multi-stream and mono-mode layouts),
// and manages the drain/stop handshake with stopStream()/abortStream().
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was requested from inside the callback: stop from a spawned
      // thread so this callback can return before AudioDeviceStop() runs.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a stop after draining output.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        // NOTE(review): loop header restored after extraction damage — verify.
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // NOTE(review): inner copy loop restored from the canonical 4.1.x
          // sources after extraction damage — verify.
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump * inOffset;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        // NOTE(review): loop header restored after extraction damage — verify.
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ?
true : false; f@0: UInt32 outChannels = stream_.nUserChannels[1]; f@0: if ( stream_.doConvertBuffer[1] ) { f@0: outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode f@0: outChannels = stream_.nDeviceChannels[1]; f@0: } f@0: f@0: if ( outInterleaved ) outOffset = 1; f@0: else outOffset = stream_.bufferSize; f@0: f@0: channelsLeft = outChannels; f@0: for ( unsigned int i=0; inStreams[1]; i++ ) { f@0: out = outBuffer; f@0: in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData; f@0: streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels; f@0: f@0: inJump = 0; f@0: // Account for possible channel offset in first stream f@0: if ( i == 0 && stream_.channelOffset[1] > 0 ) { f@0: streamChannels -= stream_.channelOffset[1]; f@0: inJump = stream_.channelOffset[1]; f@0: in += inJump; f@0: } f@0: f@0: // Account for possible unread channels at end of the last stream f@0: if ( streamChannels > channelsLeft ) { f@0: inJump = streamChannels - channelsLeft; f@0: streamChannels = channelsLeft; f@0: } f@0: f@0: // Determine output buffer offsets and skips f@0: if ( outInterleaved ) { f@0: outJump = outChannels; f@0: out += outChannels - channelsLeft; f@0: } f@0: else { f@0: outJump = 1; f@0: out += (outChannels - channelsLeft) * outOffset; f@0: } f@0: f@0: for ( unsigned int i=0; i f@0: #include f@0: #include f@0: f@0: // A structure to hold various information related to the Jack API f@0: // implementation. f@0: struct JackHandle { f@0: jack_client_t *client; f@0: jack_port_t **ports[2]; f@0: std::string deviceName[2]; f@0: bool xrun[2]; f@0: pthread_cond_t condition; f@0: int drainCounter; // Tracks callback counts when draining f@0: bool internalDrain; // Indicates if stop is initiated from callback or not. 
  // Default-construct the handle: no client, null port arrays, xrun
  // flags cleared, drain bookkeeping reset.
  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};

// No-op error callback used to silence JACK's internal error printing
// in non-debug builds.
static void jackSilentError( const char * ) {};

RtApiJack :: RtApiJack()
{
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}

RtApiJack :: ~RtApiJack()
{
  // Tear down any open stream before the API object goes away.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}

// Count JACK "devices" by connecting as a throwaway client and grouping
// all visible ports by their client-name prefix (the text before ':').
unsigned int RtApiJack :: getDeviceCount( void )
{
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0; // no JACK server reachable

  const char **ports;
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nChannels ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        // NOTE(review): here the colon is kept (iColon + 1) while
        // getDeviceInfo() below truncates before it (iColon) — the two
        // parsers should probably agree; confirm intent.
        port = port.substr( 0, iColon + 1 );
        if ( port != previousPort ) {
          // A new client-name prefix counts as a new device.
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nChannels] );
    free( ports ); // caller owns the array returned by jack_get_ports()
  }

  jack_client_close( client );
  return nDevices;
}

// Probe one JACK "device" (client-name group of ports): fills in name,
// input/output/duplex channel counts, the server's current sample rate
// and the native (float32) format.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          // Record the name of the device'th distinct client prefix.
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  // JACK runs at a single server-wide rate, so that is the only
  // "supported" rate we report.
  info.sampleRates.clear();
  info.sampleRates.push_back( jack_get_sample_rate( client ) );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}

// JACK process callback trampoline: forward each period to the
// RtApiJack instance stored in the CallbackInfo.  Returning non-zero
// tells JACK to remove this client from the process graph.
static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiJack *object = (RtApiJack *) info->object;
  if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;

  return 0;
}

// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down.  It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
static void *jackCloseStream( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiJack *object = (RtApiJack *) info->object;

  object->closeStream();

  pthread_exit( NULL );
}
static void jackShutdown( void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;

  // Check current stream state.  If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;

  // NOTE(review): pthread_create result is not checked and the thread
  // is never joined — confirm this fire-and-forget close is intended.
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}

// JACK xrun callback: record under/overflow so the next process
// callback can report it to the user callback via the status flags.
static int jackXrun( void *infoPointer )
{
  JackHandle *handle = (JackHandle *) infoPointer;

  if ( handle->ports[0] ) handle->xrun[0] = true;
  if ( handle->ports[1] ) handle->xrun[1] = true;

  return 0;
}

bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
    else
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
      return FAILURE;
    }
  }
  else {
    // The handle must have been created on an earlier pass.
    client = handle->client;
  }

  const char **ports;
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          // Remember the client-name prefix of the requested device.
          if ( nDevices == device ) deviceName = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  // NOTE(review): unlike the sample-rate check below, this return and
  // the channel-count return do not close `client` — on a first pass
  // (client freshly opened above) the JACK connection appears to leak;
  // confirm and close it on these paths.
  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
    return FAILURE;
  }

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
  }

  // Compare the jack ports for specified client to the requested number of channels.
  if ( nChannels < (channels + firstChannel) ) {
    errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Check the jack server sample rate.  JACK fixes the rate server-wide,
  // so the request must match exactly.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }
  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
f@0: ports = jack_get_ports( client, deviceName.c_str(), NULL, flag ); f@0: if ( ports[ firstChannel ] ) { f@0: // Added by Ge Wang f@0: jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency); f@0: // the range (usually the min and max are equal) f@0: jack_latency_range_t latrange; latrange.min = latrange.max = 0; f@0: // get the latency range f@0: jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange ); f@0: // be optimistic, use the min! f@0: stream_.latency[mode] = latrange.min; f@0: //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) ); f@0: } f@0: free( ports ); f@0: f@0: // The jack server always uses 32-bit floating-point data. f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; f@0: stream_.userFormat = format; f@0: f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; f@0: else stream_.userInterleaved = true; f@0: f@0: // Jack always uses non-interleaved buffers. f@0: stream_.deviceInterleaved[mode] = false; f@0: f@0: // Jack always provides host byte-ordered data. f@0: stream_.doByteSwap[mode] = false; f@0: f@0: // Get the buffer size. The buffer size and number of buffers f@0: // (periods) is set when the jack server is started. f@0: stream_.bufferSize = (int) jack_get_buffer_size( client ); f@0: *bufferSize = stream_.bufferSize; f@0: f@0: stream_.nDeviceChannels[mode] = channels; f@0: stream_.nUserChannels[mode] = channels; f@0: f@0: // Set flags for buffer conversion. f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate our JackHandle structure for the stream. 
  // First (non-duplex-second) pass: create the handle and its
  // condition variable, and take ownership of the client connection.
  if ( handle == 0 ) {
    try {
      handle = new JackHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
      goto error;
    }

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
      goto error;
    }
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  }
  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // In duplex mode the device buffer is shared: only reallocate if
      // the existing (output) buffer is too small for input.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  // Allocate memory for the Jack ports (channels) identifiers.
f@0: handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels ); f@0: if ( handle->ports[mode] == NULL ) { f@0: errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory."; f@0: goto error; f@0: } f@0: f@0: stream_.device[mode] = device; f@0: stream_.channelOffset[mode] = firstChannel; f@0: stream_.state = STREAM_STOPPED; f@0: stream_.callbackInfo.object = (void *) this; f@0: f@0: if ( stream_.mode == OUTPUT && mode == INPUT ) f@0: // We had already set up the stream for output. f@0: stream_.mode = DUPLEX; f@0: else { f@0: stream_.mode = mode; f@0: jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo ); f@0: jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle ); f@0: jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo ); f@0: } f@0: f@0: // Register our ports. f@0: char label[64]; f@0: if ( mode == OUTPUT ) { f@0: for ( unsigned int i=0; iports[0][i] = jack_port_register( handle->client, (const char *)label, f@0: JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 ); f@0: } f@0: } f@0: else { f@0: for ( unsigned int i=0; iports[1][i] = jack_port_register( handle->client, (const char *)label, f@0: JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 ); f@0: } f@0: } f@0: f@0: // Setup the buffer conversion information structure. We don't use f@0: // buffers to do channel offsets, so we override that parameter f@0: // here. 
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  return SUCCESS;

 error:
  // Shared failure path: release everything allocated above.
  // NOTE(review): if `handle` was never allocated but a client was
  // opened, the client is not closed here — confirm against later
  // RtAudio versions.
  if ( handle ) {
    pthread_cond_destroy( &handle->condition );
    jack_client_close( handle->client );

    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );

    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  return FAILURE;
}

// Deactivate and close the JACK client, then free all per-stream
// resources and reset the stream to the CLOSED state.
void RtApiJack :: closeStream( void )
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( handle ) {

    if ( stream_.state == STREAM_RUNNING )
      jack_deactivate( handle->client );

    jack_client_close( handle->client );
  }

  if ( handle ) {
    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );
    pthread_cond_destroy( &handle->condition );
    delete handle;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}

// Activate the JACK client and wire our registered ports to the
// device's physical ports, honoring the configured channel offsets.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
return; f@0: } f@0: f@0: JackHandle *handle = (JackHandle *) stream_.apiHandle; f@0: int result = jack_activate( handle->client ); f@0: if ( result ) { f@0: errorText_ = "RtApiJack::startStream(): unable to activate JACK client!"; f@0: goto unlock; f@0: } f@0: f@0: const char **ports; f@0: f@0: // Get the list of available ports. f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: result = 1; f@0: ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput); f@0: if ( ports == NULL) { f@0: errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!"; f@0: goto unlock; f@0: } f@0: f@0: // Now make the port connections. Since RtAudio wasn't designed to f@0: // allow the user to select particular channels of a device, we'll f@0: // just open the first "nChannels" ports with offset. f@0: for ( unsigned int i=0; iclient, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] ); f@0: if ( result ) { f@0: free( ports ); f@0: errorText_ = "RtApiJack::startStream(): error connecting output ports!"; f@0: goto unlock; f@0: } f@0: } f@0: free(ports); f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: result = 1; f@0: ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput ); f@0: if ( ports == NULL) { f@0: errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!"; f@0: goto unlock; f@0: } f@0: f@0: // Now make the port connections. See note above. 
f@0: for ( unsigned int i=0; iclient, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) ); f@0: if ( result ) { f@0: free( ports ); f@0: errorText_ = "RtApiJack::startStream(): error connecting input ports!"; f@0: goto unlock; f@0: } f@0: } f@0: free(ports); f@0: } f@0: f@0: handle->drainCounter = 0; f@0: handle->internalDrain = false; f@0: stream_.state = STREAM_RUNNING; f@0: f@0: unlock: f@0: if ( result == 0 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiJack :: stopStream( void ) f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiJack::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: JackHandle *handle = (JackHandle *) stream_.apiHandle; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: if ( handle->drainCounter == 0 ) { f@0: handle->drainCounter = 2; f@0: pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled f@0: } f@0: } f@0: f@0: jack_deactivate( handle->client ); f@0: stream_.state = STREAM_STOPPED; f@0: } f@0: f@0: void RtApiJack :: abortStream( void ) f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiJack::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: JackHandle *handle = (JackHandle *) stream_.apiHandle; f@0: handle->drainCounter = 2; f@0: f@0: stopStream(); f@0: } f@0: f@0: // This function will be called by a spawned thread when the user f@0: // callback function signals that the stream should be stopped or f@0: // aborted. It is necessary to handle it this way because the f@0: // callbackEvent() function must return before the jack_deactivate() f@0: // function will return. 
f@0: static void *jackStopStream( void *ptr ) f@0: { f@0: CallbackInfo *info = (CallbackInfo *) ptr; f@0: RtApiJack *object = (RtApiJack *) info->object; f@0: f@0: object->stopStream(); f@0: pthread_exit( NULL ); f@0: } f@0: f@0: bool RtApiJack :: callbackEvent( unsigned long nframes ) f@0: { f@0: if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS; f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!"; f@0: error( RtAudioError::WARNING ); f@0: return FAILURE; f@0: } f@0: if ( stream_.bufferSize != nframes ) { f@0: errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!"; f@0: error( RtAudioError::WARNING ); f@0: return FAILURE; f@0: } f@0: f@0: CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; f@0: JackHandle *handle = (JackHandle *) stream_.apiHandle; f@0: f@0: // Check if we were draining the stream and signal is finished. f@0: if ( handle->drainCounter > 3 ) { f@0: ThreadHandle threadId; f@0: f@0: stream_.state = STREAM_STOPPING; f@0: if ( handle->internalDrain == true ) f@0: pthread_create( &threadId, NULL, jackStopStream, info ); f@0: else f@0: pthread_cond_signal( &handle->condition ); f@0: return SUCCESS; f@0: } f@0: f@0: // Invoke user callback first, to get fresh output data. 
f@0: if ( handle->drainCounter == 0 ) { f@0: RtAudioCallback callback = (RtAudioCallback) info->callback; f@0: double streamTime = getStreamTime(); f@0: RtAudioStreamStatus status = 0; f@0: if ( stream_.mode != INPUT && handle->xrun[0] == true ) { f@0: status |= RTAUDIO_OUTPUT_UNDERFLOW; f@0: handle->xrun[0] = false; f@0: } f@0: if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) { f@0: status |= RTAUDIO_INPUT_OVERFLOW; f@0: handle->xrun[1] = false; f@0: } f@0: int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], f@0: stream_.bufferSize, streamTime, status, info->userData ); f@0: if ( cbReturnValue == 2 ) { f@0: stream_.state = STREAM_STOPPING; f@0: handle->drainCounter = 2; f@0: ThreadHandle id; f@0: pthread_create( &id, NULL, jackStopStream, info ); f@0: return SUCCESS; f@0: } f@0: else if ( cbReturnValue == 1 ) { f@0: handle->drainCounter = 1; f@0: handle->internalDrain = true; f@0: } f@0: } f@0: f@0: jack_default_audio_sample_t *jackbuffer; f@0: unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t ); f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: if ( handle->drainCounter > 1 ) { // write zeros to the output stream f@0: f@0: for ( unsigned int i=0; iports[0][i], (jack_nframes_t) nframes ); f@0: memset( jackbuffer, 0, bufferBytes ); f@0: } f@0: f@0: } f@0: else if ( stream_.doConvertBuffer[0] ) { f@0: f@0: convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] ); f@0: f@0: for ( unsigned int i=0; iports[0][i], (jack_nframes_t) nframes ); f@0: memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes ); f@0: } f@0: } f@0: else { // no buffer conversion f@0: for ( unsigned int i=0; iports[0][i], (jack_nframes_t) nframes ); f@0: memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes ); f@0: } f@0: } f@0: } f@0: f@0: // Don't bother draining input f@0: if ( handle->drainCounter ) { f@0: handle->drainCounter++; f@0: goto unlock; f@0: } 
f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: f@0: if ( stream_.doConvertBuffer[1] ) { f@0: for ( unsigned int i=0; iports[1][i], (jack_nframes_t) nframes ); f@0: memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes ); f@0: } f@0: convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] ); f@0: } f@0: else { // no buffer conversion f@0: for ( unsigned int i=0; iports[1][i], (jack_nframes_t) nframes ); f@0: memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes ); f@0: } f@0: } f@0: } f@0: f@0: unlock: f@0: RtApi::tickStreamTime(); f@0: return SUCCESS; f@0: } f@0: //******************** End of __UNIX_JACK__ *********************// f@0: #endif f@0: f@0: #if defined(__WINDOWS_ASIO__) // ASIO API on Windows f@0: f@0: // The ASIO API is designed around a callback scheme, so this f@0: // implementation is similar to that used for OS-X CoreAudio and Linux f@0: // Jack. The primary constraint with ASIO is that it only allows f@0: // access to a single driver at a time. Thus, it is not possible to f@0: // have more than one simultaneous RtAudio stream. f@0: // f@0: // This implementation also requires a number of external ASIO files f@0: // and a few global variables. The ASIO callback scheme does not f@0: // allow for the passing of user data, so we must create a global f@0: // pointer to our callbackInfo structure. f@0: // f@0: // On unix systems, we make use of a pthread condition variable. f@0: // Since there is no equivalent in Windows, I hacked something based f@0: // on information found in f@0: // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html. 
f@0: f@0: #include "asiosys.h" f@0: #include "asio.h" f@0: #include "iasiothiscallresolver.h" f@0: #include "asiodrivers.h" f@0: #include f@0: f@0: static AsioDrivers drivers; f@0: static ASIOCallbacks asioCallbacks; f@0: static ASIODriverInfo driverInfo; f@0: static CallbackInfo *asioCallbackInfo; f@0: static bool asioXRun; f@0: f@0: struct AsioHandle { f@0: int drainCounter; // Tracks callback counts when draining f@0: bool internalDrain; // Indicates if stop is initiated from callback or not. f@0: ASIOBufferInfo *bufferInfos; f@0: HANDLE condition; f@0: f@0: AsioHandle() f@0: :drainCounter(0), internalDrain(false), bufferInfos(0) {} f@0: }; f@0: f@0: // Function declarations (definitions at end of section) f@0: static const char* getAsioErrorString( ASIOError result ); f@0: static void sampleRateChanged( ASIOSampleRate sRate ); f@0: static long asioMessages( long selector, long value, void* message, double* opt ); f@0: f@0: RtApiAsio :: RtApiAsio() f@0: { f@0: // ASIO cannot run on a multi-threaded appartment. You can call f@0: // CoInitialize beforehand, but it must be for appartment threading f@0: // (in which case, CoInitilialize will return S_FALSE here). f@0: coInitialized_ = false; f@0: HRESULT hr = CoInitialize( NULL ); f@0: if ( FAILED(hr) ) { f@0: errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)"; f@0: error( RtAudioError::WARNING ); f@0: } f@0: coInitialized_ = true; f@0: f@0: drivers.removeCurrentDriver(); f@0: driverInfo.asioVersion = 2; f@0: f@0: // See note in DirectSound implementation about GetDesktopWindow(). 
  driverInfo.sysRef = GetForegroundWindow();
}

RtApiAsio :: ~RtApiAsio()
{
  // Close any open stream, then balance the constructor's CoInitialize.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
}

unsigned int RtApiAsio :: getDeviceCount( void )
{
  return (unsigned int) drivers.asioGetNumDev();
}

// Probe an ASIO driver: load it, initialize it, and query channel
// counts.  While a stream is open, ASIO cannot probe, so cached
// results from saveDeviceInfo() are returned instead.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
      return info;
    }
    return devices_[ device ];
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    // Unload the driver before bailing out so another probe can run.
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
f@0: info.sampleRates.clear(); f@0: for ( unsigned int i=0; i 0 ) f@0: if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true; f@0: if ( info.inputChannels > 0 ) f@0: if ( getDefaultInputDevice() == device ) info.isDefaultInput = true; f@0: f@0: info.probed = true; f@0: drivers.removeCurrentDriver(); f@0: return info; f@0: } f@0: f@0: static void bufferSwitch( long index, ASIOBool /*processNow*/ ) f@0: { f@0: RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object; f@0: object->callbackEvent( index ); f@0: } f@0: f@0: void RtApiAsio :: saveDeviceInfo( void ) f@0: { f@0: devices_.clear(); f@0: f@0: unsigned int nDevices = getDeviceCount(); f@0: devices_.resize( nDevices ); f@0: for ( unsigned int i=0; isaveDeviceInfo(); f@0: f@0: if ( !drivers.loadDriver( driverName ) ) { f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: result = ASIOInit( &driverInfo ); f@0: if ( result != ASE_OK ) { f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: f@0: // Check the device channel count. 
f@0: long inputChannels, outputChannels; f@0: result = ASIOGetChannels( &inputChannels, &outputChannels ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) || f@0: ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: stream_.nDeviceChannels[mode] = channels; f@0: stream_.nUserChannels[mode] = channels; f@0: stream_.channelOffset[mode] = firstChannel; f@0: f@0: // Verify the sample rate is supported. 
f@0: result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Get the current sample rate f@0: ASIOSampleRate currentRate; f@0: result = ASIOGetSampleRate( ¤tRate ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the sample rate only if necessary f@0: if ( currentRate != sampleRate ) { f@0: result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: f@0: // Determine the driver data type. f@0: ASIOChannelInfo channelInfo; f@0: channelInfo.channel = 0; f@0: if ( mode == OUTPUT ) channelInfo.isInput = false; f@0: else channelInfo.isInput = true; f@0: result = ASIOGetChannelInfo( &channelInfo ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Assuming WINDOWS host is always little-endian. 
f@0: stream_.doByteSwap[mode] = false; f@0: stream_.userFormat = format; f@0: stream_.deviceFormat[mode] = 0; f@0: if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; f@0: if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT64; f@0: if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true; f@0: } f@0: f@0: if ( stream_.deviceFormat[mode] == 0 ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the buffer size. For a duplex stream, this will end up f@0: // setting the buffer size based on the input constraints, which f@0: // should be ok. 
f@0: long minSize, maxSize, preferSize, granularity; f@0: result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity ); f@0: if ( result != ASE_OK ) { f@0: drivers.removeCurrentDriver(); f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; f@0: else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; f@0: else if ( granularity == -1 ) { f@0: // Make sure bufferSize is a power of two. f@0: int log2_of_min_size = 0; f@0: int log2_of_max_size = 0; f@0: f@0: for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) { f@0: if ( minSize & ((long)1 << i) ) log2_of_min_size = i; f@0: if ( maxSize & ((long)1 << i) ) log2_of_max_size = i; f@0: } f@0: f@0: long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) ); f@0: int min_delta_num = log2_of_min_size; f@0: f@0: for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) { f@0: long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) ); f@0: if (current_delta < min_delta) { f@0: min_delta = current_delta; f@0: min_delta_num = i; f@0: } f@0: } f@0: f@0: *bufferSize = ( (unsigned int)1 << min_delta_num ); f@0: if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize; f@0: else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize; f@0: } f@0: else if ( granularity != 0 ) { f@0: // Set to an even multiple of granularity, rounding up. 
f@0: *bufferSize = (*bufferSize + granularity-1) / granularity * granularity; f@0: } f@0: f@0: if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) { f@0: drivers.removeCurrentDriver(); f@0: errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!"; f@0: return FAILURE; f@0: } f@0: f@0: stream_.bufferSize = *bufferSize; f@0: stream_.nBuffers = 2; f@0: f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; f@0: else stream_.userInterleaved = true; f@0: f@0: // ASIO always uses non-interleaved buffers. f@0: stream_.deviceInterleaved[mode] = false; f@0: f@0: // Allocate, if necessary, our AsioHandle structure for the stream. f@0: AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: if ( handle == 0 ) { f@0: try { f@0: handle = new AsioHandle; f@0: } f@0: catch ( std::bad_alloc& ) { f@0: //if ( handle == NULL ) { f@0: drivers.removeCurrentDriver(); f@0: errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory."; f@0: return FAILURE; f@0: } f@0: handle->bufferInfos = 0; f@0: f@0: // Create a manual-reset event. f@0: handle->condition = CreateEvent( NULL, // no security f@0: TRUE, // manual-reset f@0: FALSE, // non-signaled initially f@0: NULL ); // unnamed f@0: stream_.apiHandle = (void *) handle; f@0: } f@0: f@0: // Create the ASIO internal buffers. Since RtAudio sets up input f@0: // and output separately, we'll have to dispose of previously f@0: // created output buffers for a duplex stream. f@0: long inputLatency, outputLatency; f@0: if ( mode == INPUT && stream_.mode == OUTPUT ) { f@0: ASIODisposeBuffers(); f@0: if ( handle->bufferInfos ) free( handle->bufferInfos ); f@0: } f@0: f@0: // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure. 
f@0: bool buffersAllocated = false; f@0: unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1]; f@0: handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) ); f@0: if ( handle->bufferInfos == NULL ) { f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ")."; f@0: errorText_ = errorStream_.str(); f@0: goto error; f@0: } f@0: f@0: ASIOBufferInfo *infos; f@0: infos = handle->bufferInfos; f@0: for ( i=0; iisInput = ASIOFalse; f@0: infos->channelNum = i + stream_.channelOffset[0]; f@0: infos->buffers[0] = infos->buffers[1] = 0; f@0: } f@0: for ( i=0; iisInput = ASIOTrue; f@0: infos->channelNum = i + stream_.channelOffset[1]; f@0: infos->buffers[0] = infos->buffers[1] = 0; f@0: } f@0: f@0: // Set up the ASIO callback structure and create the ASIO data buffers. f@0: asioCallbacks.bufferSwitch = &bufferSwitch; f@0: asioCallbacks.sampleRateDidChange = &sampleRateChanged; f@0: asioCallbacks.asioMessage = &asioMessages; f@0: asioCallbacks.bufferSwitchTimeInfo = NULL; f@0: result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks ); f@0: if ( result != ASE_OK ) { f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers."; f@0: errorText_ = errorStream_.str(); f@0: goto error; f@0: } f@0: buffersAllocated = true; f@0: f@0: // Set flags for buffer conversion. 
f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate necessary internal buffers f@0: unsigned long bufferBytes; f@0: bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); f@0: stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.userBuffer[mode] == NULL ) { f@0: errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory."; f@0: goto error; f@0: } f@0: f@0: if ( stream_.doConvertBuffer[mode] ) { f@0: f@0: bool makeBuffer = true; f@0: bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); f@0: if ( mode == INPUT ) { f@0: if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { f@0: unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); f@0: if ( bufferBytes <= bytesOut ) makeBuffer = false; f@0: } f@0: } f@0: f@0: if ( makeBuffer ) { f@0: bufferBytes *= *bufferSize; f@0: if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.deviceBuffer == NULL ) { f@0: errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory."; f@0: goto error; f@0: } f@0: } f@0: } f@0: f@0: stream_.sampleRate = sampleRate; f@0: stream_.device[mode] = device; f@0: stream_.state = STREAM_STOPPED; f@0: asioCallbackInfo = &stream_.callbackInfo; f@0: stream_.callbackInfo.object = (void *) this; f@0: if ( stream_.mode == OUTPUT && mode == INPUT ) f@0: // We had already set up an output stream. 
f@0: stream_.mode = DUPLEX; f@0: else f@0: stream_.mode = mode; f@0: f@0: // Determine device latencies f@0: result = ASIOGetLatencies( &inputLatency, &outputLatency ); f@0: if ( result != ASE_OK ) { f@0: errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING); // warn but don't fail f@0: } f@0: else { f@0: stream_.latency[0] = outputLatency; f@0: stream_.latency[1] = inputLatency; f@0: } f@0: f@0: // Setup the buffer conversion information structure. We don't use f@0: // buffers to do channel offsets, so we override that parameter f@0: // here. f@0: if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 ); f@0: f@0: return SUCCESS; f@0: f@0: error: f@0: if ( buffersAllocated ) f@0: ASIODisposeBuffers(); f@0: drivers.removeCurrentDriver(); f@0: f@0: if ( handle ) { f@0: CloseHandle( handle->condition ); f@0: if ( handle->bufferInfos ) f@0: free( handle->bufferInfos ); f@0: delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: return FAILURE; f@0: } f@0: f@0: void RtApiAsio :: closeStream() f@0: { f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiAsio::closeStream(): no open stream to close!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: stream_.state = STREAM_STOPPED; f@0: ASIOStop(); f@0: } f@0: ASIODisposeBuffers(); f@0: drivers.removeCurrentDriver(); f@0: f@0: AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: if ( handle ) { f@0: CloseHandle( handle->condition ); f@0: if ( handle->bufferInfos ) f@0: free( handle->bufferInfos ); f@0: 
delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.mode = UNINITIALIZED; f@0: stream_.state = STREAM_CLOSED; f@0: } f@0: f@0: bool stopThreadCalled = false; f@0: f@0: void RtApiAsio :: startStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiAsio::startStream(): the stream is already running!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: ASIOError result = ASIOStart(); f@0: if ( result != ASE_OK ) { f@0: errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: handle->drainCounter = 0; f@0: handle->internalDrain = false; f@0: ResetEvent( handle->condition ); f@0: stream_.state = STREAM_RUNNING; f@0: asioXRun = false; f@0: f@0: unlock: f@0: stopThreadCalled = false; f@0: f@0: if ( result == ASE_OK ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiAsio :: stopStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: if ( handle->drainCounter == 0 ) { f@0: handle->drainCounter = 2; f@0: WaitForSingleObject( handle->condition, INFINITE ); // block until signaled f@0: } f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: f@0: ASIOError result = ASIOStop(); f@0: if ( result != ASE_OK ) { f@0: errorStream_ << 
"RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: f@0: if ( result == ASE_OK ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiAsio :: abortStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: // The following lines were commented-out because some behavior was f@0: // noted where the device buffers need to be zeroed to avoid f@0: // continuing sound, even when the device buffers are completely f@0: // disposed. So now, calling abort is the same as calling stop. f@0: // AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: // handle->drainCounter = 2; f@0: stopStream(); f@0: } f@0: f@0: // This function will be called by a spawned thread when the user f@0: // callback function signals that the stream should be stopped or f@0: // aborted. It is necessary to handle it this way because the f@0: // callbackEvent() function must return before the ASIOStop() f@0: // function will return. f@0: static unsigned __stdcall asioStopStream( void *ptr ) f@0: { f@0: CallbackInfo *info = (CallbackInfo *) ptr; f@0: RtApiAsio *object = (RtApiAsio *) info->object; f@0: f@0: object->stopStream(); f@0: _endthreadex( 0 ); f@0: return 0; f@0: } f@0: f@0: bool RtApiAsio :: callbackEvent( long bufferIndex ) f@0: { f@0: if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS; f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... 
this shouldn't happen!"; f@0: error( RtAudioError::WARNING ); f@0: return FAILURE; f@0: } f@0: f@0: CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; f@0: AsioHandle *handle = (AsioHandle *) stream_.apiHandle; f@0: f@0: // Check if we were draining the stream and signal if finished. f@0: if ( handle->drainCounter > 3 ) { f@0: f@0: stream_.state = STREAM_STOPPING; f@0: if ( handle->internalDrain == false ) f@0: SetEvent( handle->condition ); f@0: else { // spawn a thread to stop the stream f@0: unsigned threadId; f@0: stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream, f@0: &stream_.callbackInfo, 0, &threadId ); f@0: } f@0: return SUCCESS; f@0: } f@0: f@0: // Invoke user callback to get fresh output data UNLESS we are f@0: // draining stream. f@0: if ( handle->drainCounter == 0 ) { f@0: RtAudioCallback callback = (RtAudioCallback) info->callback; f@0: double streamTime = getStreamTime(); f@0: RtAudioStreamStatus status = 0; f@0: if ( stream_.mode != INPUT && asioXRun == true ) { f@0: status |= RTAUDIO_OUTPUT_UNDERFLOW; f@0: asioXRun = false; f@0: } f@0: if ( stream_.mode != OUTPUT && asioXRun == true ) { f@0: status |= RTAUDIO_INPUT_OVERFLOW; f@0: asioXRun = false; f@0: } f@0: int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], f@0: stream_.bufferSize, streamTime, status, info->userData ); f@0: if ( cbReturnValue == 2 ) { f@0: stream_.state = STREAM_STOPPING; f@0: handle->drainCounter = 2; f@0: unsigned threadId; f@0: stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream, f@0: &stream_.callbackInfo, 0, &threadId ); f@0: return SUCCESS; f@0: } f@0: else if ( cbReturnValue == 1 ) { f@0: handle->drainCounter = 1; f@0: handle->internalDrain = true; f@0: } f@0: } f@0: f@0: unsigned int nChannels, bufferBytes, i, j; f@0: nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1]; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: bufferBytes = stream_.bufferSize * 
formatBytes( stream_.deviceFormat[0] ); f@0: f@0: if ( handle->drainCounter > 1 ) { // write zeros to the output stream f@0: f@0: for ( i=0, j=0; ibufferInfos[i].isInput != ASIOTrue ) f@0: memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes ); f@0: } f@0: f@0: } f@0: else if ( stream_.doConvertBuffer[0] ) { f@0: f@0: convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] ); f@0: if ( stream_.doByteSwap[0] ) f@0: byteSwapBuffer( stream_.deviceBuffer, f@0: stream_.bufferSize * stream_.nDeviceChannels[0], f@0: stream_.deviceFormat[0] ); f@0: f@0: for ( i=0, j=0; ibufferInfos[i].isInput != ASIOTrue ) f@0: memcpy( handle->bufferInfos[i].buffers[bufferIndex], f@0: &stream_.deviceBuffer[j++*bufferBytes], bufferBytes ); f@0: } f@0: f@0: } f@0: else { f@0: f@0: if ( stream_.doByteSwap[0] ) f@0: byteSwapBuffer( stream_.userBuffer[0], f@0: stream_.bufferSize * stream_.nUserChannels[0], f@0: stream_.userFormat ); f@0: f@0: for ( i=0, j=0; ibufferInfos[i].isInput != ASIOTrue ) f@0: memcpy( handle->bufferInfos[i].buffers[bufferIndex], f@0: &stream_.userBuffer[0][bufferBytes*j++], bufferBytes ); f@0: } f@0: f@0: } f@0: } f@0: f@0: // Don't bother draining input f@0: if ( handle->drainCounter ) { f@0: handle->drainCounter++; f@0: goto unlock; f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: f@0: bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]); f@0: f@0: if (stream_.doConvertBuffer[1]) { f@0: f@0: // Always interleave ASIO input data. 
f@0: for ( i=0, j=0; ibufferInfos[i].isInput == ASIOTrue ) f@0: memcpy( &stream_.deviceBuffer[j++*bufferBytes], f@0: handle->bufferInfos[i].buffers[bufferIndex], f@0: bufferBytes ); f@0: } f@0: f@0: if ( stream_.doByteSwap[1] ) f@0: byteSwapBuffer( stream_.deviceBuffer, f@0: stream_.bufferSize * stream_.nDeviceChannels[1], f@0: stream_.deviceFormat[1] ); f@0: convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] ); f@0: f@0: } f@0: else { f@0: for ( i=0, j=0; ibufferInfos[i].isInput == ASIOTrue ) { f@0: memcpy( &stream_.userBuffer[1][bufferBytes*j++], f@0: handle->bufferInfos[i].buffers[bufferIndex], f@0: bufferBytes ); f@0: } f@0: } f@0: f@0: if ( stream_.doByteSwap[1] ) f@0: byteSwapBuffer( stream_.userBuffer[1], f@0: stream_.bufferSize * stream_.nUserChannels[1], f@0: stream_.userFormat ); f@0: } f@0: } f@0: f@0: unlock: f@0: // The following call was suggested by Malte Clasen. While the API f@0: // documentation indicates it should not be required, some device f@0: // drivers apparently do not function correctly without it. f@0: ASIOOutputReady(); f@0: f@0: RtApi::tickStreamTime(); f@0: return SUCCESS; f@0: } f@0: f@0: static void sampleRateChanged( ASIOSampleRate sRate ) f@0: { f@0: // The ASIO documentation says that this usually only happens during f@0: // external sync. Audio processing is not stopped by the driver, f@0: // actual sample rate might not have even changed, maybe only the f@0: // sample rate status of an AES/EBU or S/PDIF digital input at the f@0: // audio device. f@0: f@0: RtApi *object = (RtApi *) asioCallbackInfo->object; f@0: try { f@0: object->stopStream(); f@0: } f@0: catch ( RtAudioError &exception ) { f@0: std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl; f@0: return; f@0: } f@0: f@0: std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... 
stream stopped!!!\n" << std::endl; f@0: } f@0: f@0: static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ ) f@0: { f@0: long ret = 0; f@0: f@0: switch( selector ) { f@0: case kAsioSelectorSupported: f@0: if ( value == kAsioResetRequest f@0: || value == kAsioEngineVersion f@0: || value == kAsioResyncRequest f@0: || value == kAsioLatenciesChanged f@0: // The following three were added for ASIO 2.0, you don't f@0: // necessarily have to support them. f@0: || value == kAsioSupportsTimeInfo f@0: || value == kAsioSupportsTimeCode f@0: || value == kAsioSupportsInputMonitor) f@0: ret = 1L; f@0: break; f@0: case kAsioResetRequest: f@0: // Defer the task and perform the reset of the driver during the f@0: // next "safe" situation. You cannot reset the driver right now, f@0: // as this code is called from the driver. Reset the driver is f@0: // done by completely destruct is. I.e. ASIOStop(), f@0: // ASIODisposeBuffers(), Destruction Afterwards you initialize the f@0: // driver again. f@0: std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl; f@0: ret = 1L; f@0: break; f@0: case kAsioResyncRequest: f@0: // This informs the application that the driver encountered some f@0: // non-fatal data loss. It is used for synchronization purposes f@0: // of different media. Added mainly to work around the Win16Mutex f@0: // problems in Windows 95/98 with the Windows Multimedia system, f@0: // which could lose data because the Mutex was held too long by f@0: // another thread. However a driver can issue it in other f@0: // situations, too. f@0: // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl; f@0: asioXRun = true; f@0: ret = 1L; f@0: break; f@0: case kAsioLatenciesChanged: f@0: // This will inform the host application that the drivers were f@0: // latencies changed. Beware, it this does not mean that the f@0: // buffer sizes have changed! You might need to update internal f@0: // delay data. 
f@0: std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl; f@0: ret = 1L; f@0: break; f@0: case kAsioEngineVersion: f@0: // Return the supported ASIO version of the host application. If f@0: // a host application does not implement this selector, ASIO 1.0 f@0: // is assumed by the driver. f@0: ret = 2L; f@0: break; f@0: case kAsioSupportsTimeInfo: f@0: // Informs the driver whether the f@0: // asioCallbacks.bufferSwitchTimeInfo() callback is supported. f@0: // For compatibility with ASIO 1.0 drivers the host application f@0: // should always support the "old" bufferSwitch method, too. f@0: ret = 0; f@0: break; f@0: case kAsioSupportsTimeCode: f@0: // Informs the driver whether application is interested in time f@0: // code info. If an application does not need to know about time f@0: // code, the driver has less work to do. f@0: ret = 0; f@0: break; f@0: } f@0: return ret; f@0: } f@0: f@0: static const char* getAsioErrorString( ASIOError result ) f@0: { f@0: struct Messages f@0: { f@0: ASIOError value; f@0: const char*message; f@0: }; f@0: f@0: static const Messages m[] = f@0: { f@0: { ASE_NotPresent, "Hardware input or output is not present or available." }, f@0: { ASE_HWMalfunction, "Hardware is malfunctioning." }, f@0: { ASE_InvalidParameter, "Invalid input parameter." }, f@0: { ASE_InvalidMode, "Invalid mode." }, f@0: { ASE_SPNotAdvancing, "Sample position not advancing." }, f@0: { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." }, f@0: { ASE_NoMemory, "Not enough memory to complete the request." 
} f@0: }; f@0: f@0: for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i ) f@0: if ( m[i].value == result ) return m[i].message; f@0: f@0: return "Unknown error."; f@0: } f@0: f@0: //******************** End of __WINDOWS_ASIO__ *********************// f@0: #endif f@0: f@0: f@0: #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API f@0: f@0: // Authored by Marcus Tomlinson , April 2014 f@0: // - Introduces support for the Windows WASAPI API f@0: // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required f@0: // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface f@0: // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user f@0: f@0: #ifndef INITGUID f@0: #define INITGUID f@0: #endif f@0: #include f@0: #include f@0: #include f@0: #include f@0: f@0: //============================================================================= f@0: f@0: #define SAFE_RELEASE( objectPtr )\ f@0: if ( objectPtr )\ f@0: {\ f@0: objectPtr->Release();\ f@0: objectPtr = NULL;\ f@0: } f@0: f@0: typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex ); f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size. f@0: // Therefore we must perform all necessary conversions to user buffers in order to satisfy these f@0: // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to f@0: // provide intermediate storage for read / write synchronization. 
f@0: class WasapiBuffer f@0: { f@0: public: f@0: WasapiBuffer() f@0: : buffer_( NULL ), f@0: bufferSize_( 0 ), f@0: inIndex_( 0 ), f@0: outIndex_( 0 ) {} f@0: f@0: ~WasapiBuffer() { f@0: delete buffer_; f@0: } f@0: f@0: // sets the length of the internal ring buffer f@0: void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) { f@0: delete buffer_; f@0: f@0: buffer_ = ( char* ) calloc( bufferSize, formatBytes ); f@0: f@0: bufferSize_ = bufferSize; f@0: inIndex_ = 0; f@0: outIndex_ = 0; f@0: } f@0: f@0: // attempt to push a buffer into the ring buffer at the current "in" index f@0: bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) f@0: { f@0: if ( !buffer || // incoming buffer is NULL f@0: bufferSize == 0 || // incoming buffer has no data f@0: bufferSize > bufferSize_ ) // incoming buffer too large f@0: { f@0: return false; f@0: } f@0: f@0: unsigned int relOutIndex = outIndex_; f@0: unsigned int inIndexEnd = inIndex_ + bufferSize; f@0: if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) { f@0: relOutIndex += bufferSize_; f@0: } f@0: f@0: // "in" index can end on the "out" index but cannot begin at it f@0: if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) { f@0: return false; // not enough space between "in" index and "out" index f@0: } f@0: f@0: // copy buffer from external to internal f@0: int fromZeroSize = inIndex_ + bufferSize - bufferSize_; f@0: fromZeroSize = fromZeroSize < 0 ? 
  // Attempt to pull `bufferSize` samples of the given format out of the ring
  // buffer, starting at the current "out" index. Returns false without copying
  // when the destination is invalid or when the requested span would cross the
  // producer's "in" index (i.e. not enough queued data); on success the samples
  // are copied (wrapping past the end of the internal buffer if needed) and
  // outIndex_ advances by bufferSize.
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  {
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large
    {
      return false;
    }

    // Unwrap the "in" index into the same linear coordinate space as the
    // requested window [outIndex_, outIndexEnd) so the single overlap test
    // below also works when the window wraps past the end of the buffer.
    unsigned int relInIndex = inIndex_;
    unsigned int outIndexEnd = outIndex_ + bufferSize;
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
      relInIndex += bufferSize_;
    }

    // "out" index can begin at and end on the "in" index
    if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
      return false; // not enough data between "out" index and "in" index
    }

    // copy buffer from internal to external
    // fromZeroSize = samples that wrap around to the start of the internal
    // buffer; fromOutSize = samples read contiguously starting at outIndex_.
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromOutSize = bufferSize - fromZeroSize;

    // One case per sample format: same two-part copy, different element width.
    switch( format )
    {
      case RTAUDIO_SINT8:
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
        break;
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
        break;
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
        break;
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
        break;
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
        break;
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
        break;
    }

    // update "out" index (modulo keeps it inside the ring)
    outIndex_ += bufferSize;
    outIndex_ %= bufferSize_;

    return true;
  }
true; f@0: } f@0: f@0: private: f@0: char* buffer_; f@0: unsigned int bufferSize_; f@0: unsigned int inIndex_; f@0: unsigned int outIndex_; f@0: }; f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate f@0: // between HW and the user. The convertBufferWasapi function is used to perform this conversion f@0: // between HwIn->UserIn and UserOut->HwOut during the stream callback loop. f@0: // This sample rate converter favors speed over quality, and works best with conversions between f@0: // one rate and its multiple. f@0: void convertBufferWasapi( char* outBuffer, f@0: const char* inBuffer, f@0: const unsigned int& channelCount, f@0: const unsigned int& inSampleRate, f@0: const unsigned int& outSampleRate, f@0: const unsigned int& inSampleCount, f@0: unsigned int& outSampleCount, f@0: const RtAudioFormat& format ) f@0: { f@0: // calculate the new outSampleCount and relative sampleStep f@0: float sampleRatio = ( float ) outSampleRate / inSampleRate; f@0: float sampleStep = 1.0f / sampleRatio; f@0: float inSampleFraction = 0.0f; f@0: f@0: outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio ); f@0: f@0: // frame-by-frame, copy each relative input sample into it's corresponding output sample f@0: for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ ) f@0: { f@0: unsigned int inSample = ( unsigned int ) inSampleFraction; f@0: f@0: switch ( format ) f@0: { f@0: case RTAUDIO_SINT8: f@0: memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) ); f@0: break; f@0: case RTAUDIO_SINT16: f@0: memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) ); f@0: break; f@0: case RTAUDIO_SINT24: f@0: memcpy( &( ( S24* ) outBuffer )[ 
outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) ); f@0: break; f@0: case RTAUDIO_SINT32: f@0: memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) ); f@0: break; f@0: case RTAUDIO_FLOAT32: f@0: memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) ); f@0: break; f@0: case RTAUDIO_FLOAT64: f@0: memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) ); f@0: break; f@0: } f@0: f@0: // jump to next in sample f@0: inSampleFraction += sampleStep; f@0: } f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: // A structure to hold various information related to the WASAPI implementation. f@0: struct WasapiHandle f@0: { f@0: IAudioClient* captureAudioClient; f@0: IAudioClient* renderAudioClient; f@0: IAudioCaptureClient* captureClient; f@0: IAudioRenderClient* renderClient; f@0: HANDLE captureEvent; f@0: HANDLE renderEvent; f@0: f@0: WasapiHandle() f@0: : captureAudioClient( NULL ), f@0: renderAudioClient( NULL ), f@0: captureClient( NULL ), f@0: renderClient( NULL ), f@0: captureEvent( NULL ), f@0: renderEvent( NULL ) {} f@0: }; f@0: f@0: //============================================================================= f@0: f@0: RtApiWasapi::RtApiWasapi() f@0: : coInitialized_( false ), deviceEnumerator_( NULL ) f@0: { f@0: // WASAPI can run either apartment or multi-threaded f@0: HRESULT hr = CoInitialize( NULL ); f@0: if ( !FAILED( hr ) ) f@0: coInitialized_ = true; f@0: f@0: // Instantiate device enumerator f@0: hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL, f@0: CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ), f@0: ( void** ) &deviceEnumerator_ ); f@0: f@0: if ( FAILED( hr ) ) { 
f@0: errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator"; f@0: error( RtAudioError::DRIVER_ERROR ); f@0: } f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: RtApiWasapi::~RtApiWasapi() f@0: { f@0: if ( stream_.state != STREAM_CLOSED ) f@0: closeStream(); f@0: f@0: SAFE_RELEASE( deviceEnumerator_ ); f@0: f@0: // If this object previously called CoInitialize() f@0: if ( coInitialized_ ) f@0: CoUninitialize(); f@0: } f@0: f@0: //============================================================================= f@0: f@0: unsigned int RtApiWasapi::getDeviceCount( void ) f@0: { f@0: unsigned int captureDeviceCount = 0; f@0: unsigned int renderDeviceCount = 0; f@0: f@0: IMMDeviceCollection* captureDevices = NULL; f@0: IMMDeviceCollection* renderDevices = NULL; f@0: f@0: // Count capture devices f@0: errorText_.clear(); f@0: HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = captureDevices->GetCount( &captureDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count."; f@0: goto Exit; f@0: } f@0: f@0: // Count render devices f@0: hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderDevices->GetCount( &renderDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count."; f@0: goto Exit; f@0: } f@0: f@0: Exit: f@0: // release all references f@0: SAFE_RELEASE( captureDevices ); f@0: SAFE_RELEASE( renderDevices ); f@0: f@0: if ( errorText_.empty() ) 
f@0: return captureDeviceCount + renderDeviceCount; f@0: f@0: error( RtAudioError::DRIVER_ERROR ); f@0: return 0; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device ) f@0: { f@0: RtAudio::DeviceInfo info; f@0: unsigned int captureDeviceCount = 0; f@0: unsigned int renderDeviceCount = 0; f@0: std::wstring deviceName; f@0: std::string defaultDeviceName; f@0: bool isCaptureDevice = false; f@0: f@0: PROPVARIANT deviceNameProp; f@0: PROPVARIANT defaultDeviceNameProp; f@0: f@0: IMMDeviceCollection* captureDevices = NULL; f@0: IMMDeviceCollection* renderDevices = NULL; f@0: IMMDevice* devicePtr = NULL; f@0: IMMDevice* defaultDevicePtr = NULL; f@0: IAudioClient* audioClient = NULL; f@0: IPropertyStore* devicePropStore = NULL; f@0: IPropertyStore* defaultDevicePropStore = NULL; f@0: f@0: WAVEFORMATEX* deviceFormat = NULL; f@0: WAVEFORMATEX* closestMatchFormat = NULL; f@0: f@0: // probed f@0: info.probed = false; f@0: f@0: // Count capture devices f@0: errorText_.clear(); f@0: RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR; f@0: HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = captureDevices->GetCount( &captureDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count."; f@0: goto Exit; f@0: } f@0: f@0: // Count render devices f@0: hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderDevices->GetCount( &renderDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: 
errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count."; f@0: goto Exit; f@0: } f@0: f@0: // validate device index f@0: if ( device >= captureDeviceCount + renderDeviceCount ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index."; f@0: errorType = RtAudioError::INVALID_USE; f@0: goto Exit; f@0: } f@0: f@0: // determine whether index falls within capture or render devices f@0: if ( device >= renderDeviceCount ) { f@0: hr = captureDevices->Item( device - renderDeviceCount, &devicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle."; f@0: goto Exit; f@0: } f@0: isCaptureDevice = true; f@0: } f@0: else { f@0: hr = renderDevices->Item( device, &devicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle."; f@0: goto Exit; f@0: } f@0: isCaptureDevice = false; f@0: } f@0: f@0: // get default device name f@0: if ( isCaptureDevice ) { f@0: hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle."; f@0: goto Exit; f@0: } f@0: } f@0: else { f@0: hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle."; f@0: goto Exit; f@0: } f@0: } f@0: f@0: hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store."; f@0: goto Exit; f@0: } f@0: PropVariantInit( &defaultDeviceNameProp ); f@0: f@0: hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable 
to retrieve default device property: PKEY_Device_FriendlyName."; f@0: goto Exit; f@0: } f@0: f@0: deviceName = defaultDeviceNameProp.pwszVal; f@0: defaultDeviceName = std::string( deviceName.begin(), deviceName.end() ); f@0: f@0: // name f@0: hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store."; f@0: goto Exit; f@0: } f@0: f@0: PropVariantInit( &deviceNameProp ); f@0: f@0: hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName."; f@0: goto Exit; f@0: } f@0: f@0: deviceName = deviceNameProp.pwszVal; f@0: info.name = std::string( deviceName.begin(), deviceName.end() ); f@0: f@0: // is default f@0: if ( isCaptureDevice ) { f@0: info.isDefaultInput = info.name == defaultDeviceName; f@0: info.isDefaultOutput = false; f@0: } f@0: else { f@0: info.isDefaultInput = false; f@0: info.isDefaultOutput = info.name == defaultDeviceName; f@0: } f@0: f@0: // channel count f@0: hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client."; f@0: goto Exit; f@0: } f@0: f@0: hr = audioClient->GetMixFormat( &deviceFormat ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format."; f@0: goto Exit; f@0: } f@0: f@0: if ( isCaptureDevice ) { f@0: info.inputChannels = deviceFormat->nChannels; f@0: info.outputChannels = 0; f@0: info.duplexChannels = 0; f@0: } f@0: else { f@0: info.inputChannels = 0; f@0: info.outputChannels = deviceFormat->nChannels; f@0: info.duplexChannels = 0; f@0: } f@0: f@0: // sample rates f@0: info.sampleRates.clear(); f@0: f@0: // allow support for all sample rates as we have a built-in 
sample rate converter f@0: for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) { f@0: info.sampleRates.push_back( SAMPLE_RATES[i] ); f@0: } f@0: f@0: // native format f@0: info.nativeFormats = 0; f@0: f@0: if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT || f@0: ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && f@0: ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) ) f@0: { f@0: if ( deviceFormat->wBitsPerSample == 32 ) { f@0: info.nativeFormats |= RTAUDIO_FLOAT32; f@0: } f@0: else if ( deviceFormat->wBitsPerSample == 64 ) { f@0: info.nativeFormats |= RTAUDIO_FLOAT64; f@0: } f@0: } f@0: else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM || f@0: ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && f@0: ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) ) f@0: { f@0: if ( deviceFormat->wBitsPerSample == 8 ) { f@0: info.nativeFormats |= RTAUDIO_SINT8; f@0: } f@0: else if ( deviceFormat->wBitsPerSample == 16 ) { f@0: info.nativeFormats |= RTAUDIO_SINT16; f@0: } f@0: else if ( deviceFormat->wBitsPerSample == 24 ) { f@0: info.nativeFormats |= RTAUDIO_SINT24; f@0: } f@0: else if ( deviceFormat->wBitsPerSample == 32 ) { f@0: info.nativeFormats |= RTAUDIO_SINT32; f@0: } f@0: } f@0: f@0: // probed f@0: info.probed = true; f@0: f@0: Exit: f@0: // release all references f@0: PropVariantClear( &deviceNameProp ); f@0: PropVariantClear( &defaultDeviceNameProp ); f@0: f@0: SAFE_RELEASE( captureDevices ); f@0: SAFE_RELEASE( renderDevices ); f@0: SAFE_RELEASE( devicePtr ); f@0: SAFE_RELEASE( defaultDevicePtr ); f@0: SAFE_RELEASE( audioClient ); f@0: SAFE_RELEASE( devicePropStore ); f@0: SAFE_RELEASE( defaultDevicePropStore ); f@0: f@0: CoTaskMemFree( deviceFormat ); f@0: CoTaskMemFree( closestMatchFormat ); f@0: f@0: if ( !errorText_.empty() ) f@0: error( errorType ); f@0: return info; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: 
f@0: unsigned int RtApiWasapi::getDefaultOutputDevice( void ) f@0: { f@0: for ( unsigned int i = 0; i < getDeviceCount(); i++ ) { f@0: if ( getDeviceInfo( i ).isDefaultOutput ) { f@0: return i; f@0: } f@0: } f@0: f@0: return 0; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: unsigned int RtApiWasapi::getDefaultInputDevice( void ) f@0: { f@0: for ( unsigned int i = 0; i < getDeviceCount(); i++ ) { f@0: if ( getDeviceInfo( i ).isDefaultInput ) { f@0: return i; f@0: } f@0: } f@0: f@0: return 0; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: void RtApiWasapi::closeStream( void ) f@0: { f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiWasapi::closeStream: No open stream to close."; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: if ( stream_.state != STREAM_STOPPED ) f@0: stopStream(); f@0: f@0: // clean up stream memory f@0: SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) f@0: SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) f@0: f@0: SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient ) f@0: SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient ) f@0: f@0: if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent ) f@0: CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent ); f@0: f@0: if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent ) f@0: CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent ); f@0: f@0: delete ( WasapiHandle* ) stream_.apiHandle; f@0: stream_.apiHandle = NULL; f@0: f@0: for ( int i = 0; i < 2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: // update stream state f@0: 
stream_.state = STREAM_CLOSED; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: void RtApiWasapi::startStream( void ) f@0: { f@0: verifyStream(); f@0: f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiWasapi::startStream: The stream is already running."; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: // update stream state f@0: stream_.state = STREAM_RUNNING; f@0: f@0: // create WASAPI stream thread f@0: stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL ); f@0: f@0: if ( !stream_.callbackInfo.thread ) { f@0: errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread."; f@0: error( RtAudioError::THREAD_ERROR ); f@0: } f@0: else { f@0: SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority ); f@0: ResumeThread( ( void* ) stream_.callbackInfo.thread ); f@0: } f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: void RtApiWasapi::stopStream( void ) f@0: { f@0: verifyStream(); f@0: f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiWasapi::stopStream: The stream is already stopped."; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: // inform stream thread by setting stream state to STREAM_STOPPING f@0: stream_.state = STREAM_STOPPING; f@0: f@0: // wait until stream thread is stopped f@0: while( stream_.state != STREAM_STOPPED ) { f@0: Sleep( 1 ); f@0: } f@0: f@0: // Wait for the last buffer to play before stopping. 
// Gracefully stops a running stream: signals the worker thread via the shared
// stream state, waits for it to exit its loop, lets the final buffer drain,
// then stops the audio clients and reaps the thread handle.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): unbounded busy-wait — if the worker thread dies without
  // setting STREAM_STOPPED this never returns; consider a timeout.
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  // (one buffer's duration in milliseconds)
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle (the thread has already exited; this only releases
  // the kernel object)
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
// Immediately stops a running stream. Identical to stopStream() except that
// it deliberately skips the drain Sleep(), so the last buffer is dropped
// rather than played out. NOTE(review): the two bodies are otherwise
// duplicated — a shared private helper would keep them in sync.
void RtApiWasapi::abortStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // NOTE(review): unbounded busy-wait, same caveat as stopStream().
  while ( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // stop capture client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // stop render client if applicable
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
      error( RtAudioError::DRIVER_ERROR );
      return;
    }
  }

  // close thread handle (releases the kernel object only)
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
"RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = captureDevices->GetCount( &captureDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count."; f@0: goto Exit; f@0: } f@0: f@0: // Count render devices f@0: hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderDevices->GetCount( &renderDeviceCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count."; f@0: goto Exit; f@0: } f@0: f@0: // validate device index f@0: if ( device >= captureDeviceCount + renderDeviceCount ) { f@0: errorType = RtAudioError::INVALID_USE; f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index."; f@0: goto Exit; f@0: } f@0: f@0: // determine whether index falls within capture or render devices f@0: if ( device >= renderDeviceCount ) { f@0: if ( mode != INPUT ) { f@0: errorType = RtAudioError::INVALID_USE; f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device."; f@0: goto Exit; f@0: } f@0: f@0: // retrieve captureAudioClient from devicePtr f@0: IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient; f@0: f@0: hr = captureDevices->Item( device - renderDeviceCount, &devicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle."; f@0: goto Exit; f@0: } f@0: f@0: hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, f@0: NULL, ( void** ) &captureAudioClient ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client."; f@0: goto Exit; f@0: } f@0: f@0: hr = 
captureAudioClient->GetMixFormat( &deviceFormat ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format."; f@0: goto Exit; f@0: } f@0: f@0: stream_.nDeviceChannels[mode] = deviceFormat->nChannels; f@0: captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] ); f@0: } f@0: else { f@0: if ( mode != OUTPUT ) { f@0: errorType = RtAudioError::INVALID_USE; f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device."; f@0: goto Exit; f@0: } f@0: f@0: // retrieve renderAudioClient from devicePtr f@0: IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient; f@0: f@0: hr = renderDevices->Item( device, &devicePtr ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle."; f@0: goto Exit; f@0: } f@0: f@0: hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, f@0: NULL, ( void** ) &renderAudioClient ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderAudioClient->GetMixFormat( &deviceFormat ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format."; f@0: goto Exit; f@0: } f@0: f@0: stream_.nDeviceChannels[mode] = deviceFormat->nChannels; f@0: renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] ); f@0: } f@0: f@0: // fill stream data f@0: if ( ( stream_.mode == OUTPUT && mode == INPUT ) || f@0: ( stream_.mode == INPUT && mode == OUTPUT ) ) { f@0: stream_.mode = DUPLEX; f@0: } f@0: else { f@0: stream_.mode = mode; f@0: } f@0: f@0: stream_.device[mode] = device; f@0: stream_.doByteSwap[mode] = false; f@0: stream_.sampleRate = sampleRate; f@0: stream_.bufferSize = *bufferSize; f@0: stream_.nBuffers = 1; f@0: stream_.nUserChannels[mode] = channels; f@0: 
stream_.channelOffset[mode] = firstChannel; f@0: stream_.userFormat = format; f@0: stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats; f@0: f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) f@0: stream_.userInterleaved = false; f@0: else f@0: stream_.userInterleaved = true; f@0: stream_.deviceInterleaved[mode] = true; f@0: f@0: // Set flags for buffer conversion. f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] || f@0: stream_.nUserChannels != stream_.nDeviceChannels ) f@0: stream_.doConvertBuffer[mode] = true; f@0: else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: if ( stream_.doConvertBuffer[mode] ) f@0: setConvertInfo( mode, 0 ); f@0: f@0: // Allocate necessary internal buffers f@0: bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat ); f@0: f@0: stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 ); f@0: if ( !stream_.userBuffer[mode] ) { f@0: errorType = RtAudioError::MEMORY_ERROR; f@0: errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory."; f@0: goto Exit; f@0: } f@0: f@0: if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) f@0: stream_.callbackInfo.priority = 15; f@0: else f@0: stream_.callbackInfo.priority = 0; f@0: f@0: ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback f@0: ///! 
TODO: RTAUDIO_HOG_DEVICE // Exclusive mode f@0: f@0: methodResult = SUCCESS; f@0: f@0: Exit: f@0: //clean up f@0: SAFE_RELEASE( captureDevices ); f@0: SAFE_RELEASE( renderDevices ); f@0: SAFE_RELEASE( devicePtr ); f@0: CoTaskMemFree( deviceFormat ); f@0: f@0: // if method failed, close the stream f@0: if ( methodResult == FAILURE ) f@0: closeStream(); f@0: f@0: if ( !errorText_.empty() ) f@0: error( errorType ); f@0: return methodResult; f@0: } f@0: f@0: //============================================================================= f@0: f@0: DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr ) f@0: { f@0: if ( wasapiPtr ) f@0: ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread(); f@0: f@0: return 0; f@0: } f@0: f@0: DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr ) f@0: { f@0: if ( wasapiPtr ) f@0: ( ( RtApiWasapi* ) wasapiPtr )->stopStream(); f@0: f@0: return 0; f@0: } f@0: f@0: DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr ) f@0: { f@0: if ( wasapiPtr ) f@0: ( ( RtApiWasapi* ) wasapiPtr )->abortStream(); f@0: f@0: return 0; f@0: } f@0: f@0: //----------------------------------------------------------------------------- f@0: f@0: void RtApiWasapi::wasapiThread() f@0: { f@0: // as this is a new thread, we must CoInitialize it f@0: CoInitialize( NULL ); f@0: f@0: HRESULT hr; f@0: f@0: IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient; f@0: IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient; f@0: IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient; f@0: IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient; f@0: HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent; f@0: HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent; f@0: f@0: WAVEFORMATEX* captureFormat = NULL; f@0: WAVEFORMATEX* renderFormat = NULL; f@0: float captureSrRatio = 
0.0f; f@0: float renderSrRatio = 0.0f; f@0: WasapiBuffer captureBuffer; f@0: WasapiBuffer renderBuffer; f@0: f@0: // declare local stream variables f@0: RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback; f@0: BYTE* streamBuffer = NULL; f@0: unsigned long captureFlags = 0; f@0: unsigned int bufferFrameCount = 0; f@0: unsigned int numFramesPadding = 0; f@0: unsigned int convBufferSize = 0; f@0: bool callbackPushed = false; f@0: bool callbackPulled = false; f@0: bool callbackStopped = false; f@0: int callbackResult = 0; f@0: f@0: // convBuffer is used to store converted buffers between WASAPI and the user f@0: char* convBuffer = NULL; f@0: unsigned int convBuffSize = 0; f@0: unsigned int deviceBuffSize = 0; f@0: f@0: errorText_.clear(); f@0: RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR; f@0: f@0: // Attempt to assign "Pro Audio" characteristic to thread f@0: HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" ); f@0: if ( AvrtDll ) { f@0: DWORD taskIndex = 0; f@0: TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" ); f@0: AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex ); f@0: FreeLibrary( AvrtDll ); f@0: } f@0: f@0: // start capture stream if applicable f@0: if ( captureAudioClient ) { f@0: hr = captureAudioClient->GetMixFormat( &captureFormat ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format."; f@0: goto Exit; f@0: } f@0: f@0: captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate ); f@0: f@0: // initialize capture stream according to desire buffer size f@0: float desiredBufferSize = stream_.bufferSize * captureSrRatio; f@0: REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec ); f@0: f@0: if ( !captureClient ) { f@0: hr = 
captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED, f@0: AUDCLNT_STREAMFLAGS_EVENTCALLBACK, f@0: desiredBufferPeriod, f@0: desiredBufferPeriod, f@0: captureFormat, f@0: NULL ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client."; f@0: goto Exit; f@0: } f@0: f@0: hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ), f@0: ( void** ) &captureClient ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle."; f@0: goto Exit; f@0: } f@0: f@0: // configure captureEvent to trigger on every available capture buffer f@0: captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL ); f@0: if ( !captureEvent ) { f@0: errorType = RtAudioError::SYSTEM_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event."; f@0: goto Exit; f@0: } f@0: f@0: hr = captureAudioClient->SetEventHandle( captureEvent ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle."; f@0: goto Exit; f@0: } f@0: f@0: ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient; f@0: ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent; f@0: } f@0: f@0: unsigned int inBufferSize = 0; f@0: hr = captureAudioClient->GetBufferSize( &inBufferSize ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size."; f@0: goto Exit; f@0: } f@0: f@0: // scale outBufferSize according to stream->user sample rate ratio f@0: unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT]; f@0: inBufferSize *= stream_.nDeviceChannels[INPUT]; f@0: f@0: // set captureBuffer size f@0: captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) ); f@0: f@0: // reset the capture stream f@0: hr = captureAudioClient->Reset(); f@0: if ( FAILED( hr ) ) 
{ f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream."; f@0: goto Exit; f@0: } f@0: f@0: // start the capture stream f@0: hr = captureAudioClient->Start(); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream."; f@0: goto Exit; f@0: } f@0: } f@0: f@0: // start render stream if applicable f@0: if ( renderAudioClient ) { f@0: hr = renderAudioClient->GetMixFormat( &renderFormat ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format."; f@0: goto Exit; f@0: } f@0: f@0: renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate ); f@0: f@0: // initialize render stream according to desire buffer size f@0: float desiredBufferSize = stream_.bufferSize * renderSrRatio; f@0: REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec ); f@0: f@0: if ( !renderClient ) { f@0: hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED, f@0: AUDCLNT_STREAMFLAGS_EVENTCALLBACK, f@0: desiredBufferPeriod, f@0: desiredBufferPeriod, f@0: renderFormat, f@0: NULL ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ), f@0: ( void** ) &renderClient ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle."; f@0: goto Exit; f@0: } f@0: f@0: // configure renderEvent to trigger on every available render buffer f@0: renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL ); f@0: if ( !renderEvent ) { f@0: errorType = RtAudioError::SYSTEM_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderAudioClient->SetEventHandle( renderEvent ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = 
"RtApiWasapi::wasapiThread: Unable to set render event handle."; f@0: goto Exit; f@0: } f@0: f@0: ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient; f@0: ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent; f@0: } f@0: f@0: unsigned int outBufferSize = 0; f@0: hr = renderAudioClient->GetBufferSize( &outBufferSize ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size."; f@0: goto Exit; f@0: } f@0: f@0: // scale inBufferSize according to user->stream sample rate ratio f@0: unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT]; f@0: outBufferSize *= stream_.nDeviceChannels[OUTPUT]; f@0: f@0: // set renderBuffer size f@0: renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) ); f@0: f@0: // reset the render stream f@0: hr = renderAudioClient->Reset(); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream."; f@0: goto Exit; f@0: } f@0: f@0: // start the render stream f@0: hr = renderAudioClient->Start(); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream."; f@0: goto Exit; f@0: } f@0: } f@0: f@0: if ( stream_.mode == INPUT ) { f@0: convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ); f@0: deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ); f@0: } f@0: else if ( stream_.mode == OUTPUT ) { f@0: convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ); f@0: deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ); f@0: } f@0: else if ( stream_.mode == DUPLEX ) { f@0: convBuffSize 
= max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ), f@0: ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) ); f@0: deviceBuffSize = max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ), f@0: stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) ); f@0: } f@0: f@0: convBuffer = ( char* ) malloc( convBuffSize ); f@0: stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize ); f@0: if ( !convBuffer || !stream_.deviceBuffer ) { f@0: errorType = RtAudioError::MEMORY_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory."; f@0: goto Exit; f@0: } f@0: f@0: // stream process loop f@0: while ( stream_.state != STREAM_STOPPING ) { f@0: if ( !callbackPulled ) { f@0: // Callback Input f@0: // ============== f@0: // 1. Pull callback buffer from inputBuffer f@0: // 2. If 1. 
was successful: Convert callback buffer to user sample rate and channel count f@0: // Convert callback buffer to user format f@0: f@0: if ( captureAudioClient ) { f@0: // Pull callback buffer from inputBuffer f@0: callbackPulled = captureBuffer.pullBuffer( convBuffer, f@0: ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT], f@0: stream_.deviceFormat[INPUT] ); f@0: f@0: if ( callbackPulled ) { f@0: // Convert callback buffer to user sample rate f@0: convertBufferWasapi( stream_.deviceBuffer, f@0: convBuffer, f@0: stream_.nDeviceChannels[INPUT], f@0: captureFormat->nSamplesPerSec, f@0: stream_.sampleRate, f@0: ( unsigned int ) ( stream_.bufferSize * captureSrRatio ), f@0: convBufferSize, f@0: stream_.deviceFormat[INPUT] ); f@0: f@0: if ( stream_.doConvertBuffer[INPUT] ) { f@0: // Convert callback buffer to user format f@0: convertBuffer( stream_.userBuffer[INPUT], f@0: stream_.deviceBuffer, f@0: stream_.convertInfo[INPUT] ); f@0: } f@0: else { f@0: // no further conversion, simple copy deviceBuffer to userBuffer f@0: memcpy( stream_.userBuffer[INPUT], f@0: stream_.deviceBuffer, f@0: stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) ); f@0: } f@0: } f@0: } f@0: else { f@0: // if there is no capture stream, set callbackPulled flag f@0: callbackPulled = true; f@0: } f@0: f@0: // Execute Callback f@0: // ================ f@0: // 1. Execute user callback method f@0: // 2. Handle return value from callback f@0: f@0: // if callback has not requested the stream to stop f@0: if ( callbackPulled && !callbackStopped ) { f@0: // Execute user callback method f@0: callbackResult = callback( stream_.userBuffer[OUTPUT], f@0: stream_.userBuffer[INPUT], f@0: stream_.bufferSize, f@0: getStreamTime(), f@0: captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? 
RTAUDIO_INPUT_OVERFLOW : 0, f@0: stream_.callbackInfo.userData ); f@0: f@0: // Handle return value from callback f@0: if ( callbackResult == 1 ) { f@0: // instantiate a thread to stop this thread f@0: HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL ); f@0: if ( !threadHandle ) { f@0: errorType = RtAudioError::THREAD_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread."; f@0: goto Exit; f@0: } f@0: else if ( !CloseHandle( threadHandle ) ) { f@0: errorType = RtAudioError::THREAD_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle."; f@0: goto Exit; f@0: } f@0: f@0: callbackStopped = true; f@0: } f@0: else if ( callbackResult == 2 ) { f@0: // instantiate a thread to stop this thread f@0: HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL ); f@0: if ( !threadHandle ) { f@0: errorType = RtAudioError::THREAD_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread."; f@0: goto Exit; f@0: } f@0: else if ( !CloseHandle( threadHandle ) ) { f@0: errorType = RtAudioError::THREAD_ERROR; f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle."; f@0: goto Exit; f@0: } f@0: f@0: callbackStopped = true; f@0: } f@0: } f@0: } f@0: f@0: // Callback Output f@0: // =============== f@0: // 1. Convert callback buffer to stream format f@0: // 2. Convert callback buffer to stream sample rate and channel count f@0: // 3. 
Push callback buffer into outputBuffer f@0: f@0: if ( renderAudioClient && callbackPulled ) { f@0: if ( stream_.doConvertBuffer[OUTPUT] ) { f@0: // Convert callback buffer to stream format f@0: convertBuffer( stream_.deviceBuffer, f@0: stream_.userBuffer[OUTPUT], f@0: stream_.convertInfo[OUTPUT] ); f@0: f@0: } f@0: f@0: // Convert callback buffer to stream sample rate f@0: convertBufferWasapi( convBuffer, f@0: stream_.deviceBuffer, f@0: stream_.nDeviceChannels[OUTPUT], f@0: stream_.sampleRate, f@0: renderFormat->nSamplesPerSec, f@0: stream_.bufferSize, f@0: convBufferSize, f@0: stream_.deviceFormat[OUTPUT] ); f@0: f@0: // Push callback buffer into outputBuffer f@0: callbackPushed = renderBuffer.pushBuffer( convBuffer, f@0: convBufferSize * stream_.nDeviceChannels[OUTPUT], f@0: stream_.deviceFormat[OUTPUT] ); f@0: } f@0: else { f@0: // if there is no render stream, set callbackPushed flag f@0: callbackPushed = true; f@0: } f@0: f@0: // Stream Capture f@0: // ============== f@0: // 1. Get capture buffer from stream f@0: // 2. Push capture buffer into inputBuffer f@0: // 3. If 2. 
was successful: Release capture buffer f@0: f@0: if ( captureAudioClient ) { f@0: // if the callback input buffer was not pulled from captureBuffer, wait for next capture event f@0: if ( !callbackPulled ) { f@0: WaitForSingleObject( captureEvent, INFINITE ); f@0: } f@0: f@0: // Get capture buffer from stream f@0: hr = captureClient->GetBuffer( &streamBuffer, f@0: &bufferFrameCount, f@0: &captureFlags, NULL, NULL ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer."; f@0: goto Exit; f@0: } f@0: f@0: if ( bufferFrameCount != 0 ) { f@0: // Push capture buffer into inputBuffer f@0: if ( captureBuffer.pushBuffer( ( char* ) streamBuffer, f@0: bufferFrameCount * stream_.nDeviceChannels[INPUT], f@0: stream_.deviceFormat[INPUT] ) ) f@0: { f@0: // Release capture buffer f@0: hr = captureClient->ReleaseBuffer( bufferFrameCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer."; f@0: goto Exit; f@0: } f@0: } f@0: else f@0: { f@0: // Inform WASAPI that capture was unsuccessful f@0: hr = captureClient->ReleaseBuffer( 0 ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer."; f@0: goto Exit; f@0: } f@0: } f@0: } f@0: else f@0: { f@0: // Inform WASAPI that capture was unsuccessful f@0: hr = captureClient->ReleaseBuffer( 0 ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer."; f@0: goto Exit; f@0: } f@0: } f@0: } f@0: f@0: // Stream Render f@0: // ============= f@0: // 1. Get render buffer from stream f@0: // 2. Pull next buffer from outputBuffer f@0: // 3. If 2. 
was successful: Fill render buffer with next buffer f@0: // Release render buffer f@0: f@0: if ( renderAudioClient ) { f@0: // if the callback output buffer was not pushed to renderBuffer, wait for next render event f@0: if ( callbackPulled && !callbackPushed ) { f@0: WaitForSingleObject( renderEvent, INFINITE ); f@0: } f@0: f@0: // Get render buffer from stream f@0: hr = renderAudioClient->GetBufferSize( &bufferFrameCount ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size."; f@0: goto Exit; f@0: } f@0: f@0: hr = renderAudioClient->GetCurrentPadding( &numFramesPadding ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding."; f@0: goto Exit; f@0: } f@0: f@0: bufferFrameCount -= numFramesPadding; f@0: f@0: if ( bufferFrameCount != 0 ) { f@0: hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer."; f@0: goto Exit; f@0: } f@0: f@0: // Pull next buffer from outputBuffer f@0: // Fill render buffer with next buffer f@0: if ( renderBuffer.pullBuffer( ( char* ) streamBuffer, f@0: bufferFrameCount * stream_.nDeviceChannels[OUTPUT], f@0: stream_.deviceFormat[OUTPUT] ) ) f@0: { f@0: // Release render buffer f@0: hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer."; f@0: goto Exit; f@0: } f@0: } f@0: else f@0: { f@0: // Inform WASAPI that render was unsuccessful f@0: hr = renderClient->ReleaseBuffer( 0, 0 ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer."; f@0: goto Exit; f@0: } f@0: } f@0: } f@0: else f@0: { f@0: // Inform WASAPI that render was unsuccessful f@0: hr = renderClient->ReleaseBuffer( 0, 0 ); f@0: if ( FAILED( hr ) ) { f@0: errorText_ = 
"RtApiWasapi::wasapiThread: Unable to release render buffer."; f@0: goto Exit; f@0: } f@0: } f@0: } f@0: f@0: // if the callback buffer was pushed renderBuffer reset callbackPulled flag f@0: if ( callbackPushed ) { f@0: callbackPulled = false; f@0: } f@0: f@0: // tick stream time f@0: RtApi::tickStreamTime(); f@0: } f@0: f@0: Exit: f@0: // clean up f@0: CoTaskMemFree( captureFormat ); f@0: CoTaskMemFree( renderFormat ); f@0: f@0: free ( convBuffer ); f@0: f@0: CoUninitialize(); f@0: f@0: // update stream state f@0: stream_.state = STREAM_STOPPED; f@0: f@0: if ( errorText_.empty() ) f@0: return; f@0: else f@0: error( errorType ); f@0: } f@0: f@0: //******************** End of __WINDOWS_WASAPI__ *********************// f@0: #endif f@0: f@0: f@0: #if defined(__WINDOWS_DS__) // Windows DirectSound API f@0: f@0: // Modified by Robin Davies, October 2005 f@0: // - Improvements to DirectX pointer chasing. f@0: // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30. f@0: // - Auto-call CoInitialize for DSOUND and ASIO platforms. f@0: // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007 f@0: // Changed device query structure for RtAudio 4.0.7, January 2010 f@0: f@0: #include f@0: #include f@0: #include f@0: f@0: #if defined(__MINGW32__) f@0: // missing from latest mingw winapi f@0: #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */ f@0: #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */ f@0: #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */ f@0: #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */ f@0: #endif f@0: f@0: #define MINIMUM_DEVICE_BUFFER_SIZE 32768 f@0: f@0: #ifdef _MSC_VER // if Microsoft Visual C++ f@0: #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually. 
f@0: #endif f@0: f@0: static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize ) f@0: { f@0: if ( pointer > bufferSize ) pointer -= bufferSize; f@0: if ( laterPointer < earlierPointer ) laterPointer += bufferSize; f@0: if ( pointer < earlierPointer ) pointer += bufferSize; f@0: return pointer >= earlierPointer && pointer < laterPointer; f@0: } f@0: f@0: // A structure to hold various information related to the DirectSound f@0: // API implementation. f@0: struct DsHandle { f@0: unsigned int drainCounter; // Tracks callback counts when draining f@0: bool internalDrain; // Indicates if stop is initiated from callback or not. f@0: void *id[2]; f@0: void *buffer[2]; f@0: bool xrun[2]; f@0: UINT bufferPointer[2]; f@0: DWORD dsBufferSize[2]; f@0: DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by. f@0: HANDLE condition; f@0: f@0: DsHandle() f@0: :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; } f@0: }; f@0: f@0: // Declarations for utility functions, callbacks, and structures f@0: // specific to the DirectSound implementation. f@0: static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid, f@0: LPCTSTR description, f@0: LPCTSTR module, f@0: LPVOID lpContext ); f@0: f@0: static const char* getErrorString( int code ); f@0: f@0: static unsigned __stdcall callbackHandler( void *ptr ); f@0: f@0: struct DsDevice { f@0: LPGUID id[2]; f@0: bool validId[2]; f@0: bool found; f@0: std::string name; f@0: f@0: DsDevice() f@0: : found(false) { validId[0] = false; validId[1] = false; } f@0: }; f@0: f@0: struct DsProbeData { f@0: bool isInput; f@0: std::vector* dsDevices; f@0: }; f@0: f@0: RtApiDs :: RtApiDs() f@0: { f@0: // Dsound will run both-threaded. If CoInitialize fails, then just f@0: // accept whatever the mainline chose for a threading model. 
f@0: coInitialized_ = false; f@0: HRESULT hr = CoInitialize( NULL ); f@0: if ( !FAILED( hr ) ) coInitialized_ = true; f@0: } f@0: f@0: RtApiDs :: ~RtApiDs() f@0: { f@0: if ( coInitialized_ ) CoUninitialize(); // balanced call. f@0: if ( stream_.state != STREAM_CLOSED ) closeStream(); f@0: } f@0: f@0: // The DirectSound default output is always the first device. f@0: unsigned int RtApiDs :: getDefaultOutputDevice( void ) f@0: { f@0: return 0; f@0: } f@0: f@0: // The DirectSound default input is always the first input device, f@0: // which is the first capture device enumerated. f@0: unsigned int RtApiDs :: getDefaultInputDevice( void ) f@0: { f@0: return 0; f@0: } f@0: f@0: unsigned int RtApiDs :: getDeviceCount( void ) f@0: { f@0: // Set query flag for previously found devices to false, so that we f@0: // can check for any devices that have disappeared. f@0: for ( unsigned int i=0; i indices; f@0: for ( unsigned int i=0; i(dsDevices.size()); f@0: } f@0: f@0: RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device ) f@0: { f@0: RtAudio::DeviceInfo info; f@0: info.probed = false; f@0: f@0: if ( dsDevices.size() == 0 ) { f@0: // Force a query of all devices f@0: getDeviceCount(); f@0: if ( dsDevices.size() == 0 ) { f@0: errorText_ = "RtApiDs::getDeviceInfo: no devices found!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: } f@0: f@0: if ( device >= dsDevices.size() ) { f@0: errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: f@0: HRESULT result; f@0: if ( dsDevices[ device ].validId[0] == false ) goto probeInput; f@0: f@0: LPDIRECTSOUND output; f@0: DSCAPS outCaps; f@0: result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = 
errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto probeInput; f@0: } f@0: f@0: outCaps.dwSize = sizeof( outCaps ); f@0: result = output->GetCaps( &outCaps ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto probeInput; f@0: } f@0: f@0: // Get output channel information. f@0: info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1; f@0: f@0: // Get sample rate information. f@0: info.sampleRates.clear(); f@0: for ( unsigned int k=0; k= (unsigned int) outCaps.dwMinSecondarySampleRate && f@0: SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) f@0: info.sampleRates.push_back( SAMPLE_RATES[k] ); f@0: } f@0: f@0: // Get format information. f@0: if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8; f@0: f@0: output->Release(); f@0: f@0: if ( getDefaultOutputDevice() == device ) f@0: info.isDefaultOutput = true; f@0: f@0: if ( dsDevices[ device ].validId[1] == false ) { f@0: info.name = dsDevices[ device ].name; f@0: info.probed = true; f@0: return info; f@0: } f@0: f@0: probeInput: f@0: f@0: LPDIRECTSOUNDCAPTURE input; f@0: result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: DSCCAPS inCaps; f@0: inCaps.dwSize = sizeof( inCaps ); f@0: result = input->GetCaps( &inCaps ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); f@0: errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( 
result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Get input channel information. f@0: info.inputChannels = inCaps.dwChannels; f@0: f@0: // Get sample rate and format information. f@0: std::vector rates; f@0: if ( inCaps.dwChannels >= 2 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: f@0: if ( info.nativeFormats & RTAUDIO_SINT16 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 ); f@0: } f@0: else if ( info.nativeFormats & RTAUDIO_SINT8 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 ); f@0: } f@0: } f@0: else if ( inCaps.dwChannels == 1 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= 
RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8; f@0: f@0: if ( info.nativeFormats & RTAUDIO_SINT16 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 ); f@0: } f@0: else if ( info.nativeFormats & RTAUDIO_SINT8 ) { f@0: if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 ); f@0: if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 ); f@0: } f@0: } f@0: else info.inputChannels = 0; // technically, this would be an error f@0: f@0: input->Release(); f@0: f@0: if ( info.inputChannels == 0 ) return info; f@0: f@0: // Copy the supported rates to the info structure but avoid duplication. f@0: bool found; f@0: for ( unsigned int i=0; i 0 && info.inputChannels > 0 ) f@0: info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; f@0: f@0: if ( device == 0 ) info.isDefaultInput = true; f@0: f@0: // Copy name and return. 
f@0: info.name = dsDevices[ device ].name; f@0: info.probed = true; f@0: return info; f@0: } f@0: f@0: bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, f@0: unsigned int firstChannel, unsigned int sampleRate, f@0: RtAudioFormat format, unsigned int *bufferSize, f@0: RtAudio::StreamOptions *options ) f@0: { f@0: if ( channels + firstChannel > 2 ) { f@0: errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device."; f@0: return FAILURE; f@0: } f@0: f@0: size_t nDevices = dsDevices.size(); f@0: if ( nDevices == 0 ) { f@0: // This should not happen because a check is made before this function is called. f@0: errorText_ = "RtApiDs::probeDeviceOpen: no devices found!"; f@0: return FAILURE; f@0: } f@0: f@0: if ( device >= nDevices ) { f@0: // This should not happen because a check is made before this function is called. f@0: errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!"; f@0: return FAILURE; f@0: } f@0: f@0: if ( mode == OUTPUT ) { f@0: if ( dsDevices[ device ].validId[0] == false ) { f@0: errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: else { // mode == INPUT f@0: if ( dsDevices[ device ].validId[1] == false ) { f@0: errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: f@0: // According to a note in PortAudio, using GetDesktopWindow() f@0: // instead of GetForegroundWindow() is supposed to avoid problems f@0: // that occur when the application's window is not the foreground f@0: // window. Also, if the application window closes before the f@0: // DirectSound buffer, DirectSound can crash. In the past, I had f@0: // problems when using GetDesktopWindow() but it seems fine now f@0: // (January 2010). I'll leave it commented here. 
f@0: // HWND hWnd = GetForegroundWindow(); f@0: HWND hWnd = GetDesktopWindow(); f@0: f@0: // Check the numberOfBuffers parameter and limit the lowest value to f@0: // two. This is a judgement call and a value of two is probably too f@0: // low for capture, but it should work for playback. f@0: int nBuffers = 0; f@0: if ( options ) nBuffers = options->numberOfBuffers; f@0: if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2; f@0: if ( nBuffers < 2 ) nBuffers = 3; f@0: f@0: // Check the lower range of the user-specified buffer size and set f@0: // (arbitrarily) to a lower bound of 32. f@0: if ( *bufferSize < 32 ) *bufferSize = 32; f@0: f@0: // Create the wave format structure. The data format setting will f@0: // be determined later. f@0: WAVEFORMATEX waveFormat; f@0: ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) ); f@0: waveFormat.wFormatTag = WAVE_FORMAT_PCM; f@0: waveFormat.nChannels = channels + firstChannel; f@0: waveFormat.nSamplesPerSec = (unsigned long) sampleRate; f@0: f@0: // Determine the device buffer size. By default, we'll use the value f@0: // defined above (32K), but we will grow it to make allowances for f@0: // very large software buffer sizes. 
f@0: DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE; f@0: DWORD dsPointerLeadTime = 0; f@0: f@0: void *ohandle = 0, *bhandle = 0; f@0: HRESULT result; f@0: if ( mode == OUTPUT ) { f@0: f@0: LPDIRECTSOUND output; f@0: result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: DSCAPS outCaps; f@0: outCaps.dwSize = sizeof( outCaps ); f@0: result = output->GetCaps( &outCaps ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Check channel information. f@0: if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) { f@0: errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Check format information. Use 16-bit format unless not f@0: // supported or user requests 8-bit. f@0: if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT && f@0: !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) { f@0: waveFormat.wBitsPerSample = 16; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: } f@0: else { f@0: waveFormat.wBitsPerSample = 8; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: } f@0: stream_.userFormat = format; f@0: f@0: // Update wave format structure and buffer information. 
f@0: waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8; f@0: waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; f@0: dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels; f@0: f@0: // If the user wants an even bigger buffer, increase the device buffer size accordingly. f@0: while ( dsPointerLeadTime * 2U > dsBufferSize ) f@0: dsBufferSize *= 2; f@0: f@0: // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes. f@0: // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE ); f@0: // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes. f@0: result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Even though we will write to the secondary buffer, we need to f@0: // access the primary buffer to set the correct output format f@0: // (since the default is 8-bit, 22 kHz!). Setup the DS primary f@0: // buffer description. f@0: DSBUFFERDESC bufferDescription; f@0: ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) ); f@0: bufferDescription.dwSize = sizeof( DSBUFFERDESC ); f@0: bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER; f@0: f@0: // Obtain the primary buffer f@0: LPDIRECTSOUNDBUFFER buffer; f@0: result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the primary DS buffer sound format. 
f@0: result = buffer->SetFormat( &waveFormat ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Setup the secondary DS buffer description. f@0: ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) ); f@0: bufferDescription.dwSize = sizeof( DSBUFFERDESC ); f@0: bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS | f@0: DSBCAPS_GLOBALFOCUS | f@0: DSBCAPS_GETCURRENTPOSITION2 | f@0: DSBCAPS_LOCHARDWARE ); // Force hardware mixing f@0: bufferDescription.dwBufferBytes = dsBufferSize; f@0: bufferDescription.lpwfxFormat = &waveFormat; f@0: f@0: // Try to create the secondary DS buffer. If that doesn't work, f@0: // try to use software mixing. Otherwise, there's a problem. f@0: result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL ); f@0: if ( FAILED( result ) ) { f@0: bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS | f@0: DSBCAPS_GLOBALFOCUS | f@0: DSBCAPS_GETCURRENTPOSITION2 | f@0: DSBCAPS_LOCSOFTWARE ); // Force software mixing f@0: result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: f@0: // Get the buffer size ... might be different from what we specified. 
f@0: DSBCAPS dsbcaps; f@0: dsbcaps.dwSize = sizeof( DSBCAPS ); f@0: result = buffer->GetCaps( &dsbcaps ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: dsBufferSize = dsbcaps.dwBufferBytes; f@0: f@0: // Lock the DS buffer f@0: LPVOID audioPtr; f@0: DWORD dataLen; f@0: result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Zero the DS buffer f@0: ZeroMemory( audioPtr, dataLen ); f@0: f@0: // Unlock the DS buffer f@0: result = buffer->Unlock( audioPtr, dataLen, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: output->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: ohandle = (void *) output; f@0: bhandle = (void *) buffer; f@0: } f@0: f@0: if ( mode == INPUT ) { f@0: f@0: LPDIRECTSOUNDCAPTURE input; f@0: result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: DSCCAPS inCaps; f@0: inCaps.dwSize = sizeof( inCaps ); f@0: result = input->GetCaps( &inCaps ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); 
f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Check channel information. f@0: if ( inCaps.dwChannels < channels + firstChannel ) { f@0: errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels."; f@0: return FAILURE; f@0: } f@0: f@0: // Check format information. Use 16-bit format unless user f@0: // requests 8-bit. f@0: DWORD deviceFormats; f@0: if ( channels + firstChannel == 2 ) { f@0: deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08; f@0: if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) { f@0: waveFormat.wBitsPerSample = 8; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: } f@0: else { // assume 16-bit is supported f@0: waveFormat.wBitsPerSample = 16; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: } f@0: } f@0: else { // channel == 1 f@0: deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08; f@0: if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) { f@0: waveFormat.wBitsPerSample = 8; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: } f@0: else { // assume 16-bit is supported f@0: waveFormat.wBitsPerSample = 16; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: } f@0: } f@0: stream_.userFormat = format; f@0: f@0: // Update wave format structure and buffer information. f@0: waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8; f@0: waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; f@0: dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels; f@0: f@0: // If the user wants an even bigger buffer, increase the device buffer size accordingly. 
f@0: while ( dsPointerLeadTime * 2U > dsBufferSize ) f@0: dsBufferSize *= 2; f@0: f@0: // Setup the secondary DS buffer description. f@0: DSCBUFFERDESC bufferDescription; f@0: ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) ); f@0: bufferDescription.dwSize = sizeof( DSCBUFFERDESC ); f@0: bufferDescription.dwFlags = 0; f@0: bufferDescription.dwReserved = 0; f@0: bufferDescription.dwBufferBytes = dsBufferSize; f@0: bufferDescription.lpwfxFormat = &waveFormat; f@0: f@0: // Create the capture buffer. f@0: LPDIRECTSOUNDCAPTUREBUFFER buffer; f@0: result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Get the buffer size ... might be different from what we specified. f@0: DSCBCAPS dscbcaps; f@0: dscbcaps.dwSize = sizeof( DSCBCAPS ); f@0: result = buffer->GetCaps( &dscbcaps ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: dsBufferSize = dscbcaps.dwBufferBytes; f@0: f@0: // NOTE: We could have a problem here if this is a duplex stream f@0: // and the play and capture hardware buffer sizes are different f@0: // (I'm actually not sure if that is a problem or not). f@0: // Currently, we are not verifying that. 
f@0: f@0: // Lock the capture buffer f@0: LPVOID audioPtr; f@0: DWORD dataLen; f@0: result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Zero the buffer f@0: ZeroMemory( audioPtr, dataLen ); f@0: f@0: // Unlock the buffer f@0: result = buffer->Unlock( audioPtr, dataLen, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: input->Release(); f@0: buffer->Release(); f@0: errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!"; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: ohandle = (void *) input; f@0: bhandle = (void *) buffer; f@0: } f@0: f@0: // Set various stream parameters f@0: DsHandle *handle = 0; f@0: stream_.nDeviceChannels[mode] = channels + firstChannel; f@0: stream_.nUserChannels[mode] = channels; f@0: stream_.bufferSize = *bufferSize; f@0: stream_.channelOffset[mode] = firstChannel; f@0: stream_.deviceInterleaved[mode] = true; f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; f@0: else stream_.userInterleaved = true; f@0: f@0: // Set flag for buffer conversion f@0: stream_.doConvertBuffer[mode] = false; f@0: if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode]) f@0: stream_.doConvertBuffer[mode] = true; f@0: if (stream_.userFormat != stream_.deviceFormat[mode]) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate necessary internal buffers f@0: long bufferBytes = stream_.nUserChannels[mode] * 
*bufferSize * formatBytes( stream_.userFormat ); f@0: stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.userBuffer[mode] == NULL ) { f@0: errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory."; f@0: goto error; f@0: } f@0: f@0: if ( stream_.doConvertBuffer[mode] ) { f@0: f@0: bool makeBuffer = true; f@0: bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); f@0: if ( mode == INPUT ) { f@0: if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { f@0: unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); f@0: if ( bufferBytes <= (long) bytesOut ) makeBuffer = false; f@0: } f@0: } f@0: f@0: if ( makeBuffer ) { f@0: bufferBytes *= *bufferSize; f@0: if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.deviceBuffer == NULL ) { f@0: errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory."; f@0: goto error; f@0: } f@0: } f@0: } f@0: f@0: // Allocate our DsHandle structures for the stream. f@0: if ( stream_.apiHandle == 0 ) { f@0: try { f@0: handle = new DsHandle; f@0: } f@0: catch ( std::bad_alloc& ) { f@0: errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory."; f@0: goto error; f@0: } f@0: f@0: // Create a manual-reset event. 
f@0: handle->condition = CreateEvent( NULL, // no security f@0: TRUE, // manual-reset f@0: FALSE, // non-signaled initially f@0: NULL ); // unnamed f@0: stream_.apiHandle = (void *) handle; f@0: } f@0: else f@0: handle = (DsHandle *) stream_.apiHandle; f@0: handle->id[mode] = ohandle; f@0: handle->buffer[mode] = bhandle; f@0: handle->dsBufferSize[mode] = dsBufferSize; f@0: handle->dsPointerLeadTime[mode] = dsPointerLeadTime; f@0: f@0: stream_.device[mode] = device; f@0: stream_.state = STREAM_STOPPED; f@0: if ( stream_.mode == OUTPUT && mode == INPUT ) f@0: // We had already set up an output stream. f@0: stream_.mode = DUPLEX; f@0: else f@0: stream_.mode = mode; f@0: stream_.nBuffers = nBuffers; f@0: stream_.sampleRate = sampleRate; f@0: f@0: // Setup the buffer conversion information structure. f@0: if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel ); f@0: f@0: // Setup the callback thread. f@0: if ( stream_.callbackInfo.isRunning == false ) { f@0: unsigned threadId; f@0: stream_.callbackInfo.isRunning = true; f@0: stream_.callbackInfo.object = (void *) this; f@0: stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler, f@0: &stream_.callbackInfo, 0, &threadId ); f@0: if ( stream_.callbackInfo.thread == 0 ) { f@0: errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!"; f@0: goto error; f@0: } f@0: f@0: // Boost DS thread priority f@0: SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST ); f@0: } f@0: return SUCCESS; f@0: f@0: error: f@0: if ( handle ) { f@0: if ( handle->buffer[0] ) { // the object pointer can be NULL and valid f@0: LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0]; f@0: LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: if ( buffer ) buffer->Release(); f@0: object->Release(); f@0: } f@0: if ( handle->buffer[1] ) { f@0: LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1]; f@0: LPDIRECTSOUNDCAPTUREBUFFER buffer = 
(LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: if ( buffer ) buffer->Release(); f@0: object->Release(); f@0: } f@0: CloseHandle( handle->condition ); f@0: delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.state = STREAM_CLOSED; f@0: return FAILURE; f@0: } f@0: f@0: void RtApiDs :: closeStream() f@0: { f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiDs::closeStream(): no open stream to close!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: // Stop the callback thread. f@0: stream_.callbackInfo.isRunning = false; f@0: WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE ); f@0: CloseHandle( (HANDLE) stream_.callbackInfo.thread ); f@0: f@0: DsHandle *handle = (DsHandle *) stream_.apiHandle; f@0: if ( handle ) { f@0: if ( handle->buffer[0] ) { // the object pointer can be NULL and valid f@0: LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0]; f@0: LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: if ( buffer ) { f@0: buffer->Stop(); f@0: buffer->Release(); f@0: } f@0: object->Release(); f@0: } f@0: if ( handle->buffer[1] ) { f@0: LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1]; f@0: LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: if ( buffer ) { f@0: buffer->Stop(); f@0: buffer->Release(); f@0: } f@0: object->Release(); f@0: } f@0: CloseHandle( handle->condition ); f@0: delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( 
stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.mode = UNINITIALIZED; f@0: stream_.state = STREAM_CLOSED; f@0: } f@0: f@0: void RtApiDs :: startStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiDs::startStream(): the stream is already running!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: DsHandle *handle = (DsHandle *) stream_.apiHandle; f@0: f@0: // Increase scheduler frequency on lesser windows (a side-effect of f@0: // increasing timer accuracy). On greater windows (Win2K or later), f@0: // this is already in effect. f@0: timeBeginPeriod( 1 ); f@0: f@0: buffersRolling = false; f@0: duplexPrerollBytes = 0; f@0: f@0: if ( stream_.mode == DUPLEX ) { f@0: // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize. f@0: duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] ); f@0: } f@0: f@0: HRESULT result = 0; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: result = buffer->Play( 0, 0, DSBPLAY_LOOPING ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: f@0: LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: result = buffer->Start( DSCBSTART_LOOPING ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: handle->drainCounter = 0; f@0: handle->internalDrain = false; f@0: ResetEvent( handle->condition ); f@0: stream_.state = 
STREAM_RUNNING; f@0: f@0: unlock: f@0: if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiDs :: stopStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiDs::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: HRESULT result = 0; f@0: LPVOID audioPtr; f@0: DWORD dataLen; f@0: DsHandle *handle = (DsHandle *) stream_.apiHandle; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: if ( handle->drainCounter == 0 ) { f@0: handle->drainCounter = 2; f@0: WaitForSingleObject( handle->condition, INFINITE ); // block until signaled f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: // Stop the buffer and clear memory f@0: LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: result = buffer->Stop(); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // Lock the buffer and clear it so that if we start to play again, f@0: // we won't have old data playing. 
f@0: result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // Zero the DS buffer f@0: ZeroMemory( audioPtr, dataLen ); f@0: f@0: // Unlock the DS buffer f@0: result = buffer->Unlock( audioPtr, dataLen, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // If we start playing again, we must begin at beginning of buffer. f@0: handle->bufferPointer[0] = 0; f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: audioPtr = NULL; f@0: dataLen = 0; f@0: f@0: stream_.state = STREAM_STOPPED; f@0: f@0: if ( stream_.mode != DUPLEX ) f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: result = buffer->Stop(); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // Lock the buffer and clear it so that if we start to play again, f@0: // we won't have old data playing. 
f@0: result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // Zero the DS buffer f@0: ZeroMemory( audioPtr, dataLen ); f@0: f@0: // Unlock the DS buffer f@0: result = buffer->Unlock( audioPtr, dataLen, NULL, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!"; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: f@0: // If we start recording again, we must begin at beginning of buffer. f@0: handle->bufferPointer[1] = 0; f@0: } f@0: f@0: unlock: f@0: timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows. f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiDs :: abortStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiDs::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: DsHandle *handle = (DsHandle *) stream_.apiHandle; f@0: handle->drainCounter = 2; f@0: f@0: stopStream(); f@0: } f@0: f@0: void RtApiDs :: callbackEvent() f@0: { f@0: if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) { f@0: Sleep( 50 ); // sleep 50 milliseconds f@0: return; f@0: } f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo; f@0: DsHandle *handle = (DsHandle *) stream_.apiHandle; f@0: f@0: // Check if we were draining the stream and signal is finished. 
f@0: if ( handle->drainCounter > stream_.nBuffers + 2 ) { f@0: f@0: stream_.state = STREAM_STOPPING; f@0: if ( handle->internalDrain == false ) f@0: SetEvent( handle->condition ); f@0: else f@0: stopStream(); f@0: return; f@0: } f@0: f@0: // Invoke user callback to get fresh output data UNLESS we are f@0: // draining stream. f@0: if ( handle->drainCounter == 0 ) { f@0: RtAudioCallback callback = (RtAudioCallback) info->callback; f@0: double streamTime = getStreamTime(); f@0: RtAudioStreamStatus status = 0; f@0: if ( stream_.mode != INPUT && handle->xrun[0] == true ) { f@0: status |= RTAUDIO_OUTPUT_UNDERFLOW; f@0: handle->xrun[0] = false; f@0: } f@0: if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) { f@0: status |= RTAUDIO_INPUT_OVERFLOW; f@0: handle->xrun[1] = false; f@0: } f@0: int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1], f@0: stream_.bufferSize, streamTime, status, info->userData ); f@0: if ( cbReturnValue == 2 ) { f@0: stream_.state = STREAM_STOPPING; f@0: handle->drainCounter = 2; f@0: abortStream(); f@0: return; f@0: } f@0: else if ( cbReturnValue == 1 ) { f@0: handle->drainCounter = 1; f@0: handle->internalDrain = true; f@0: } f@0: } f@0: f@0: HRESULT result; f@0: DWORD currentWritePointer, safeWritePointer; f@0: DWORD currentReadPointer, safeReadPointer; f@0: UINT nextWritePointer; f@0: f@0: LPVOID buffer1 = NULL; f@0: LPVOID buffer2 = NULL; f@0: DWORD bufferSize1 = 0; f@0: DWORD bufferSize2 = 0; f@0: f@0: char *buffer; f@0: long bufferBytes; f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: return; f@0: } f@0: f@0: if ( buffersRolling == false ) { f@0: if ( stream_.mode == DUPLEX ) { f@0: //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] ); f@0: f@0: // It takes a while for the devices to get rolling. As a result, f@0: // there's no guarantee that the capture and write device pointers f@0: // will move in lockstep. 
Wait here for both devices to start f@0: // rolling, and then set our buffer pointers accordingly. f@0: // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600 f@0: // bytes later than the write buffer. f@0: f@0: // Stub: a serious risk of having a pre-emptive scheduling round f@0: // take place between the two GetCurrentPosition calls... but I'm f@0: // really not sure how to solve the problem. Temporarily boost to f@0: // Realtime priority, maybe; but I'm not sure what priority the f@0: // DirectSound service threads run at. We *should* be roughly f@0: // within a ms or so of correct. f@0: f@0: LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: f@0: DWORD startSafeWritePointer, startSafeReadPointer; f@0: f@0: result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: while ( true ) { f@0: result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer ); f@0: if ( FAILED( result ) ) { f@0: 
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break; f@0: Sleep( 1 ); f@0: } f@0: f@0: //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] ); f@0: f@0: handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0]; f@0: if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0]; f@0: handle->bufferPointer[1] = safeReadPointer; f@0: } f@0: else if ( stream_.mode == OUTPUT ) { f@0: f@0: // Set the proper nextWritePosition after initial startup. f@0: LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0]; f@0: if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0]; f@0: } f@0: f@0: buffersRolling = true; f@0: } f@0: f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; f@0: f@0: if ( handle->drainCounter > 1 ) { // write zeros to the output stream f@0: bufferBytes = stream_.bufferSize * stream_.nUserChannels[0]; f@0: bufferBytes *= formatBytes( stream_.userFormat ); f@0: memset( stream_.userBuffer[0], 0, bufferBytes ); f@0: } f@0: f@0: // Setup parameters and do buffer conversion if necessary. 
f@0: if ( stream_.doConvertBuffer[0] ) { f@0: buffer = stream_.deviceBuffer; f@0: convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] ); f@0: bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0]; f@0: bufferBytes *= formatBytes( stream_.deviceFormat[0] ); f@0: } f@0: else { f@0: buffer = stream_.userBuffer[0]; f@0: bufferBytes = stream_.bufferSize * stream_.nUserChannels[0]; f@0: bufferBytes *= formatBytes( stream_.userFormat ); f@0: } f@0: f@0: // No byte swapping necessary in DirectSound implementation. f@0: f@0: // Ahhh ... windoze. 16-bit data is signed but 8-bit data is f@0: // unsigned. So, we need to convert our signed 8-bit data here to f@0: // unsigned. f@0: if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 ) f@0: for ( int i=0; idsBufferSize[0]; f@0: nextWritePointer = handle->bufferPointer[0]; f@0: f@0: DWORD endWrite, leadPointer; f@0: while ( true ) { f@0: // Find out where the read and "safe write" pointers are. f@0: result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: f@0: // We will copy our output buffer into the region between f@0: // safeWritePointer and leadPointer. If leadPointer is not f@0: // beyond the next endWrite position, wait until it is. 
f@0: leadPointer = safeWritePointer + handle->dsPointerLeadTime[0]; f@0: //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl; f@0: if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize; f@0: if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset f@0: endWrite = nextWritePointer + bufferBytes; f@0: f@0: // Check whether the entire write region is behind the play pointer. f@0: if ( leadPointer >= endWrite ) break; f@0: f@0: // If we are here, then we must wait until the leadPointer advances f@0: // beyond the end of our next write region. We use the f@0: // Sleep() function to suspend operation until that happens. f@0: double millis = ( endWrite - leadPointer ) * 1000.0; f@0: millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate); f@0: if ( millis < 1.0 ) millis = 1.0; f@0: Sleep( (DWORD) millis ); f@0: } f@0: f@0: if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize ) f@0: || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) { f@0: // We've strayed into the forbidden zone ... resync the read pointer. 
f@0: handle->xrun[0] = true; f@0: nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes; f@0: if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize; f@0: handle->bufferPointer[0] = nextWritePointer; f@0: endWrite = nextWritePointer + bufferBytes; f@0: } f@0: f@0: // Lock free space in the buffer f@0: result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1, f@0: &bufferSize1, &buffer2, &bufferSize2, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: f@0: // Copy our buffer into the DS buffer f@0: CopyMemory( buffer1, buffer, bufferSize1 ); f@0: if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 ); f@0: f@0: // Update our buffer offset and unlock sound buffer f@0: dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize; f@0: handle->bufferPointer[0] = nextWritePointer; f@0: } f@0: f@0: // Don't bother draining input f@0: if ( handle->drainCounter ) { f@0: handle->drainCounter++; f@0: goto unlock; f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: f@0: // Setup parameters. 
f@0: if ( stream_.doConvertBuffer[1] ) { f@0: buffer = stream_.deviceBuffer; f@0: bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1]; f@0: bufferBytes *= formatBytes( stream_.deviceFormat[1] ); f@0: } f@0: else { f@0: buffer = stream_.userBuffer[1]; f@0: bufferBytes = stream_.bufferSize * stream_.nUserChannels[1]; f@0: bufferBytes *= formatBytes( stream_.userFormat ); f@0: } f@0: f@0: LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; f@0: long nextReadPointer = handle->bufferPointer[1]; f@0: DWORD dsBufferSize = handle->dsBufferSize[1]; f@0: f@0: // Find out where the write and "safe read" pointers are. f@0: result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: f@0: if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset f@0: DWORD endRead = nextReadPointer + bufferBytes; f@0: f@0: // Handling depends on whether we are INPUT or DUPLEX. f@0: // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode, f@0: // then a wait here will drag the write pointers into the forbidden zone. f@0: // f@0: // In DUPLEX mode, rather than wait, we will back off the read pointer until f@0: // it's in a safe position. This causes dropouts, but it seems to be the only f@0: // practical way to sync up the read and write pointers reliably, given the f@0: // the very complex relationship between phase and increment of the read and write f@0: // pointers. f@0: // f@0: // In order to minimize audible dropouts in DUPLEX mode, we will f@0: // provide a pre-roll period of 0.5 seconds in which we return f@0: // zeros from the read buffer while the pointers sync up. 
f@0: f@0: if ( stream_.mode == DUPLEX ) { f@0: if ( safeReadPointer < endRead ) { f@0: if ( duplexPrerollBytes <= 0 ) { f@0: // Pre-roll time over. Be more agressive. f@0: int adjustment = endRead-safeReadPointer; f@0: f@0: handle->xrun[1] = true; f@0: // Two cases: f@0: // - large adjustments: we've probably run out of CPU cycles, so just resync exactly, f@0: // and perform fine adjustments later. f@0: // - small adjustments: back off by twice as much. f@0: if ( adjustment >= 2*bufferBytes ) f@0: nextReadPointer = safeReadPointer-2*bufferBytes; f@0: else f@0: nextReadPointer = safeReadPointer-bufferBytes-adjustment; f@0: f@0: if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize; f@0: f@0: } f@0: else { f@0: // In pre=roll time. Just do it. f@0: nextReadPointer = safeReadPointer - bufferBytes; f@0: while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize; f@0: } f@0: endRead = nextReadPointer + bufferBytes; f@0: } f@0: } f@0: else { // mode == INPUT f@0: while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) { f@0: // See comments for playback. f@0: double millis = (endRead - safeReadPointer) * 1000.0; f@0: millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate); f@0: if ( millis < 1.0 ) millis = 1.0; f@0: Sleep( (DWORD) millis ); f@0: f@0: // Wake up and find out where we are now. 
f@0: result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: f@0: if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset f@0: } f@0: } f@0: f@0: // Lock free space in the buffer f@0: result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1, f@0: &bufferSize1, &buffer2, &bufferSize2, 0 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: f@0: if ( duplexPrerollBytes <= 0 ) { f@0: // Copy our buffer into the DS buffer f@0: CopyMemory( buffer, buffer1, bufferSize1 ); f@0: if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 ); f@0: } f@0: else { f@0: memset( buffer, 0, bufferSize1 ); f@0: if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 ); f@0: duplexPrerollBytes -= bufferSize1 + bufferSize2; f@0: } f@0: f@0: // Update our buffer offset and unlock sound buffer f@0: nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize; f@0: dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 ); f@0: if ( FAILED( result ) ) { f@0: errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!"; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: handle->bufferPointer[1] = nextReadPointer; f@0: f@0: // No byte swapping necessary in DirectSound implementation. f@0: f@0: // If necessary, convert 8-bit data from unsigned to signed. 
f@0: if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 ) f@0: for ( int j=0; jobject; f@0: bool* isRunning = &info->isRunning; f@0: f@0: while ( *isRunning == true ) { f@0: object->callbackEvent(); f@0: } f@0: f@0: _endthreadex( 0 ); f@0: return 0; f@0: } f@0: f@0: #include "tchar.h" f@0: f@0: static std::string convertTChar( LPCTSTR name ) f@0: { f@0: #if defined( UNICODE ) || defined( _UNICODE ) f@0: int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL); f@0: std::string s( length-1, '\0' ); f@0: WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL); f@0: #else f@0: std::string s( name ); f@0: #endif f@0: f@0: return s; f@0: } f@0: f@0: static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid, f@0: LPCTSTR description, f@0: LPCTSTR /*module*/, f@0: LPVOID lpContext ) f@0: { f@0: struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext; f@0: std::vector& dsDevices = *probeInfo.dsDevices; f@0: f@0: HRESULT hr; f@0: bool validDevice = false; f@0: if ( probeInfo.isInput == true ) { f@0: DSCCAPS caps; f@0: LPDIRECTSOUNDCAPTURE object; f@0: f@0: hr = DirectSoundCaptureCreate( lpguid, &object, NULL ); f@0: if ( hr != DS_OK ) return TRUE; f@0: f@0: caps.dwSize = sizeof(caps); f@0: hr = object->GetCaps( &caps ); f@0: if ( hr == DS_OK ) { f@0: if ( caps.dwChannels > 0 && caps.dwFormats > 0 ) f@0: validDevice = true; f@0: } f@0: object->Release(); f@0: } f@0: else { f@0: DSCAPS caps; f@0: LPDIRECTSOUND object; f@0: hr = DirectSoundCreate( lpguid, &object, NULL ); f@0: if ( hr != DS_OK ) return TRUE; f@0: f@0: caps.dwSize = sizeof(caps); f@0: hr = object->GetCaps( &caps ); f@0: if ( hr == DS_OK ) { f@0: if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO ) f@0: validDevice = true; f@0: } f@0: object->Release(); f@0: } f@0: f@0: // If good device, then save its name and guid. 
f@0: std::string name = convertTChar( description ); f@0: //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" ) f@0: if ( lpguid == NULL ) f@0: name = "Default Device"; f@0: if ( validDevice ) { f@0: for ( unsigned int i=0; i f@0: #include f@0: f@0: // A structure to hold various information related to the ALSA API f@0: // implementation. f@0: struct AlsaHandle { f@0: snd_pcm_t *handles[2]; f@0: bool synchronized; f@0: bool xrun[2]; f@0: pthread_cond_t runnable_cv; f@0: bool runnable; f@0: f@0: AlsaHandle() f@0: :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; } f@0: }; f@0: f@0: static void *alsaCallbackHandler( void * ptr ); f@0: f@0: RtApiAlsa :: RtApiAlsa() f@0: { f@0: // Nothing to do here. f@0: } f@0: f@0: RtApiAlsa :: ~RtApiAlsa() f@0: { f@0: if ( stream_.state != STREAM_CLOSED ) closeStream(); f@0: } f@0: f@0: unsigned int RtApiAlsa :: getDeviceCount( void ) f@0: { f@0: unsigned nDevices = 0; f@0: int result, subdevice, card; f@0: char name[64]; f@0: snd_ctl_t *handle; f@0: f@0: // Count cards and devices f@0: card = -1; f@0: snd_card_next( &card ); f@0: while ( card >= 0 ) { f@0: sprintf( name, "hw:%d", card ); f@0: result = snd_ctl_open( &handle, name, 0 ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto nextcard; f@0: } f@0: subdevice = -1; f@0: while( 1 ) { f@0: result = snd_ctl_pcm_next_device( handle, &subdevice ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: break; f@0: } f@0: if ( subdevice < 0 ) f@0: break; f@0: nDevices++; f@0: } f@0: nextcard: f@0: snd_ctl_close( handle ); f@0: snd_card_next( &card ); f@0: } f@0: f@0: result = 
snd_ctl_open( &handle, "default", 0 ); f@0: if (result == 0) { f@0: nDevices++; f@0: snd_ctl_close( handle ); f@0: } f@0: f@0: return nDevices; f@0: } f@0: f@0: RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device ) f@0: { f@0: RtAudio::DeviceInfo info; f@0: info.probed = false; f@0: f@0: unsigned nDevices = 0; f@0: int result, subdevice, card; f@0: char name[64]; f@0: snd_ctl_t *chandle; f@0: f@0: // Count cards and devices f@0: card = -1; f@0: snd_card_next( &card ); f@0: while ( card >= 0 ) { f@0: sprintf( name, "hw:%d", card ); f@0: result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto nextcard; f@0: } f@0: subdevice = -1; f@0: while( 1 ) { f@0: result = snd_ctl_pcm_next_device( chandle, &subdevice ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: break; f@0: } f@0: if ( subdevice < 0 ) break; f@0: if ( nDevices == device ) { f@0: sprintf( name, "hw:%d,%d", card, subdevice ); f@0: goto foundDevice; f@0: } f@0: nDevices++; f@0: } f@0: nextcard: f@0: snd_ctl_close( chandle ); f@0: snd_card_next( &card ); f@0: } f@0: f@0: result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK ); f@0: if ( result == 0 ) { f@0: if ( nDevices == device ) { f@0: strcpy( name, "default" ); f@0: goto foundDevice; f@0: } f@0: nDevices++; f@0: } f@0: f@0: if ( nDevices == 0 ) { f@0: errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: f@0: if ( device >= nDevices ) { f@0: errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!"; f@0: error( RtAudioError::INVALID_USE ); 
f@0: return info; f@0: } f@0: f@0: foundDevice: f@0: f@0: // If a stream is already open, we cannot probe the stream devices. f@0: // Thus, use the saved results. f@0: if ( stream_.state != STREAM_CLOSED && f@0: ( stream_.device[0] == device || stream_.device[1] == device ) ) { f@0: snd_ctl_close( chandle ); f@0: if ( device >= devices_.size() ) { f@0: errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: return devices_[ device ]; f@0: } f@0: f@0: int openMode = SND_PCM_ASYNC; f@0: snd_pcm_stream_t stream; f@0: snd_pcm_info_t *pcminfo; f@0: snd_pcm_info_alloca( &pcminfo ); f@0: snd_pcm_t *phandle; f@0: snd_pcm_hw_params_t *params; f@0: snd_pcm_hw_params_alloca( ¶ms ); f@0: f@0: // First try for playback unless default device (which has subdev -1) f@0: stream = SND_PCM_STREAM_PLAYBACK; f@0: snd_pcm_info_set_stream( pcminfo, stream ); f@0: if ( subdevice != -1 ) { f@0: snd_pcm_info_set_device( pcminfo, subdevice ); f@0: snd_pcm_info_set_subdevice( pcminfo, 0 ); f@0: f@0: result = snd_ctl_pcm_info( chandle, pcminfo ); f@0: if ( result < 0 ) { f@0: // Device probably doesn't support playback. f@0: goto captureProbe; f@0: } f@0: } f@0: f@0: result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto captureProbe; f@0: } f@0: f@0: // The device is open ... fill the parameter structure. 
f@0: result = snd_pcm_hw_params_any( phandle, params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto captureProbe; f@0: } f@0: f@0: // Get output channel information. f@0: unsigned int value; f@0: result = snd_pcm_hw_params_get_channels_max( params, &value ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: goto captureProbe; f@0: } f@0: info.outputChannels = value; f@0: snd_pcm_close( phandle ); f@0: f@0: captureProbe: f@0: stream = SND_PCM_STREAM_CAPTURE; f@0: snd_pcm_info_set_stream( pcminfo, stream ); f@0: f@0: // Now try for capture unless default device (with subdev = -1) f@0: if ( subdevice != -1 ) { f@0: result = snd_ctl_pcm_info( chandle, pcminfo ); f@0: snd_ctl_close( chandle ); f@0: if ( result < 0 ) { f@0: // Device probably doesn't support capture. f@0: if ( info.outputChannels == 0 ) return info; f@0: goto probeParameters; f@0: } f@0: } f@0: else f@0: snd_ctl_close( chandle ); f@0: f@0: result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: if ( info.outputChannels == 0 ) return info; f@0: goto probeParameters; f@0: } f@0: f@0: // The device is open ... fill the parameter structure. 
f@0: result = snd_pcm_hw_params_any( phandle, params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: if ( info.outputChannels == 0 ) return info; f@0: goto probeParameters; f@0: } f@0: f@0: result = snd_pcm_hw_params_get_channels_max( params, &value ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: if ( info.outputChannels == 0 ) return info; f@0: goto probeParameters; f@0: } f@0: info.inputChannels = value; f@0: snd_pcm_close( phandle ); f@0: f@0: // If device opens for both playback and capture, we determine the channels. f@0: if ( info.outputChannels > 0 && info.inputChannels > 0 ) f@0: info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; f@0: f@0: // ALSA doesn't provide default devices so we'll use the first available one. f@0: if ( device == 0 && info.outputChannels > 0 ) f@0: info.isDefaultOutput = true; f@0: if ( device == 0 && info.inputChannels > 0 ) f@0: info.isDefaultInput = true; f@0: f@0: probeParameters: f@0: // At this point, we just need to figure out the supported data f@0: // formats and sample rates. We'll proceed by opening the device in f@0: // the direction with the maximum number of channels, or playback if f@0: // they are equal. This might limit our sample rate options, but so f@0: // be it. 
f@0: f@0: if ( info.outputChannels >= info.inputChannels ) f@0: stream = SND_PCM_STREAM_PLAYBACK; f@0: else f@0: stream = SND_PCM_STREAM_CAPTURE; f@0: snd_pcm_info_set_stream( pcminfo, stream ); f@0: f@0: result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // The device is open ... fill the parameter structure. f@0: result = snd_pcm_hw_params_any( phandle, params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Test our discrete set of sample rate values. f@0: info.sampleRates.clear(); f@0: for ( unsigned int i=0; i= 0 ) { f@0: sprintf( name, "hw:%s,%d", cardname, subdevice ); f@0: free( cardname ); f@0: } f@0: info.name = name; f@0: f@0: // That's all ... 
close the device and return f@0: snd_pcm_close( phandle ); f@0: info.probed = true; f@0: return info; f@0: } f@0: f@0: void RtApiAlsa :: saveDeviceInfo( void ) f@0: { f@0: devices_.clear(); f@0: f@0: unsigned int nDevices = getDeviceCount(); f@0: devices_.resize( nDevices ); f@0: for ( unsigned int i=0; iflags & RTAUDIO_ALSA_USE_DEFAULT ) f@0: snprintf(name, sizeof(name), "%s", "default"); f@0: else { f@0: // Count cards and devices f@0: card = -1; f@0: snd_card_next( &card ); f@0: while ( card >= 0 ) { f@0: sprintf( name, "hw:%d", card ); f@0: result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: subdevice = -1; f@0: while( 1 ) { f@0: result = snd_ctl_pcm_next_device( chandle, &subdevice ); f@0: if ( result < 0 ) break; f@0: if ( subdevice < 0 ) break; f@0: if ( nDevices == device ) { f@0: sprintf( name, "hw:%d,%d", card, subdevice ); f@0: snd_ctl_close( chandle ); f@0: goto foundDevice; f@0: } f@0: nDevices++; f@0: } f@0: snd_ctl_close( chandle ); f@0: snd_card_next( &card ); f@0: } f@0: f@0: result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK ); f@0: if ( result == 0 ) { f@0: if ( nDevices == device ) { f@0: strcpy( name, "default" ); f@0: goto foundDevice; f@0: } f@0: nDevices++; f@0: } f@0: f@0: if ( nDevices == 0 ) { f@0: // This should not happen because a check is made before this function is called. f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!"; f@0: return FAILURE; f@0: } f@0: f@0: if ( device >= nDevices ) { f@0: // This should not happen because a check is made before this function is called. 
f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!"; f@0: return FAILURE; f@0: } f@0: } f@0: f@0: foundDevice: f@0: f@0: // The getDeviceInfo() function will not work for a device that is f@0: // already open. Thus, we'll probe the system before opening a f@0: // stream and save the results for use by getDeviceInfo(). f@0: if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once f@0: this->saveDeviceInfo(); f@0: f@0: snd_pcm_stream_t stream; f@0: if ( mode == OUTPUT ) f@0: stream = SND_PCM_STREAM_PLAYBACK; f@0: else f@0: stream = SND_PCM_STREAM_CAPTURE; f@0: f@0: snd_pcm_t *phandle; f@0: int openMode = SND_PCM_ASYNC; f@0: result = snd_pcm_open( &phandle, name, stream, openMode ); f@0: if ( result < 0 ) { f@0: if ( mode == OUTPUT ) f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output."; f@0: else f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Fill the parameter structure. f@0: snd_pcm_hw_params_t *hw_params; f@0: snd_pcm_hw_params_alloca( &hw_params ); f@0: result = snd_pcm_hw_params_any( phandle, hw_params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: #if defined(__RTAUDIO_DEBUG__) f@0: fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" ); f@0: snd_pcm_hw_params_dump( hw_params, out ); f@0: #endif f@0: f@0: // Set access ... check user preference. 
f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) { f@0: stream_.userInterleaved = false; f@0: result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED ); f@0: if ( result < 0 ) { f@0: result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED ); f@0: stream_.deviceInterleaved[mode] = true; f@0: } f@0: else f@0: stream_.deviceInterleaved[mode] = false; f@0: } f@0: else { f@0: stream_.userInterleaved = true; f@0: result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED ); f@0: if ( result < 0 ) { f@0: result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED ); f@0: stream_.deviceInterleaved[mode] = false; f@0: } f@0: else f@0: stream_.deviceInterleaved[mode] = true; f@0: } f@0: f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Determine how to set the device format. f@0: stream_.userFormat = format; f@0: snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN; f@0: f@0: if ( format == RTAUDIO_SINT8 ) f@0: deviceFormat = SND_PCM_FORMAT_S8; f@0: else if ( format == RTAUDIO_SINT16 ) f@0: deviceFormat = SND_PCM_FORMAT_S16; f@0: else if ( format == RTAUDIO_SINT24 ) f@0: deviceFormat = SND_PCM_FORMAT_S24; f@0: else if ( format == RTAUDIO_SINT32 ) f@0: deviceFormat = SND_PCM_FORMAT_S32; f@0: else if ( format == RTAUDIO_FLOAT32 ) f@0: deviceFormat = SND_PCM_FORMAT_FLOAT; f@0: else if ( format == RTAUDIO_FLOAT64 ) f@0: deviceFormat = SND_PCM_FORMAT_FLOAT64; f@0: f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) { f@0: stream_.deviceFormat[mode] = format; f@0: goto setFormat; f@0: } f@0: f@0: // The user requested format is not natively supported by the device. 
f@0: deviceFormat = SND_PCM_FORMAT_FLOAT64; f@0: if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT64; f@0: goto setFormat; f@0: } f@0: f@0: deviceFormat = SND_PCM_FORMAT_FLOAT; f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; f@0: goto setFormat; f@0: } f@0: f@0: deviceFormat = SND_PCM_FORMAT_S32; f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: goto setFormat; f@0: } f@0: f@0: deviceFormat = SND_PCM_FORMAT_S24; f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: goto setFormat; f@0: } f@0: f@0: deviceFormat = SND_PCM_FORMAT_S16; f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: goto setFormat; f@0: } f@0: f@0: deviceFormat = SND_PCM_FORMAT_S8; f@0: if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) { f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: goto setFormat; f@0: } f@0: f@0: // If we get here, no supported format was found. f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: f@0: setFormat: f@0: result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Determine whether byte-swaping is necessary. 
f@0: stream_.doByteSwap[mode] = false; f@0: if ( deviceFormat != SND_PCM_FORMAT_S8 ) { f@0: result = snd_pcm_format_cpu_endian( deviceFormat ); f@0: if ( result == 0 ) f@0: stream_.doByteSwap[mode] = true; f@0: else if (result < 0) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: f@0: // Set the sample rate. f@0: result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Determine the number of channels for this device. We support a possible f@0: // minimum device channel number > than the value requested by the user. 
f@0: stream_.nUserChannels[mode] = channels; f@0: unsigned int value; f@0: result = snd_pcm_hw_params_get_channels_max( hw_params, &value ); f@0: unsigned int deviceChannels = value; f@0: if ( result < 0 || deviceChannels < channels + firstChannel ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: result = snd_pcm_hw_params_get_channels_min( hw_params, &value ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: deviceChannels = value; f@0: if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel; f@0: stream_.nDeviceChannels[mode] = deviceChannels; f@0: f@0: // Set the device channels. f@0: result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the buffer (or period) size. f@0: int dir = 0; f@0: snd_pcm_uframes_t periodSize = *bufferSize; f@0: result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: *bufferSize = periodSize; f@0: f@0: // Set the buffer number, which in ALSA is referred to as the "period". 
f@0: unsigned int periods = 0; f@0: if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2; f@0: if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers; f@0: if ( periods < 2 ) periods = 4; // a fairly safe default value f@0: result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // If attempting to setup a duplex stream, the bufferSize parameter f@0: // MUST be the same in both directions! f@0: if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: stream_.bufferSize = *bufferSize; f@0: f@0: // Install the hardware configuration f@0: result = snd_pcm_hw_params( phandle, hw_params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: #if defined(__RTAUDIO_DEBUG__) f@0: fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n"); f@0: snd_pcm_hw_params_dump( hw_params, out ); f@0: #endif f@0: f@0: // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns. 
f@0: snd_pcm_sw_params_t *sw_params = NULL; f@0: snd_pcm_sw_params_alloca( &sw_params ); f@0: snd_pcm_sw_params_current( phandle, sw_params ); f@0: snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize ); f@0: snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX ); f@0: snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 ); f@0: f@0: // The following two settings were suggested by Theo Veenker f@0: //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize ); f@0: //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 ); f@0: f@0: // here are two options for a fix f@0: //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX ); f@0: snd_pcm_uframes_t val; f@0: snd_pcm_sw_params_get_boundary( sw_params, &val ); f@0: snd_pcm_sw_params_set_silence_size( phandle, sw_params, val ); f@0: f@0: result = snd_pcm_sw_params( phandle, sw_params ); f@0: if ( result < 0 ) { f@0: snd_pcm_close( phandle ); f@0: errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: #if defined(__RTAUDIO_DEBUG__) f@0: fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n"); f@0: snd_pcm_sw_params_dump( sw_params, out ); f@0: #endif f@0: f@0: // Set flags for buffer conversion f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate the ApiHandle if necessary and then save. 
f@0: AlsaHandle *apiInfo = 0; f@0: if ( stream_.apiHandle == 0 ) { f@0: try { f@0: apiInfo = (AlsaHandle *) new AlsaHandle; f@0: } f@0: catch ( std::bad_alloc& ) { f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory."; f@0: goto error; f@0: } f@0: f@0: if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) { f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable."; f@0: goto error; f@0: } f@0: f@0: stream_.apiHandle = (void *) apiInfo; f@0: apiInfo->handles[0] = 0; f@0: apiInfo->handles[1] = 0; f@0: } f@0: else { f@0: apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: } f@0: apiInfo->handles[mode] = phandle; f@0: phandle = 0; f@0: f@0: // Allocate necessary internal buffers. f@0: unsigned long bufferBytes; f@0: bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); f@0: stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.userBuffer[mode] == NULL ) { f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory."; f@0: goto error; f@0: } f@0: f@0: if ( stream_.doConvertBuffer[mode] ) { f@0: f@0: bool makeBuffer = true; f@0: bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); f@0: if ( mode == INPUT ) { f@0: if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { f@0: unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); f@0: if ( bufferBytes <= bytesOut ) makeBuffer = false; f@0: } f@0: } f@0: f@0: if ( makeBuffer ) { f@0: bufferBytes *= *bufferSize; f@0: if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.deviceBuffer == NULL ) { f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory."; f@0: goto error; f@0: } f@0: } f@0: } f@0: f@0: stream_.sampleRate = sampleRate; f@0: stream_.nBuffers = periods; f@0: stream_.device[mode] 
= device; f@0: stream_.state = STREAM_STOPPED; f@0: f@0: // Setup the buffer conversion information structure. f@0: if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel ); f@0: f@0: // Setup thread if necessary. f@0: if ( stream_.mode == OUTPUT && mode == INPUT ) { f@0: // We had already set up an output stream. f@0: stream_.mode = DUPLEX; f@0: // Link the streams if possible. f@0: apiInfo->synchronized = false; f@0: if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 ) f@0: apiInfo->synchronized = true; f@0: else { f@0: errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices."; f@0: error( RtAudioError::WARNING ); f@0: } f@0: } f@0: else { f@0: stream_.mode = mode; f@0: f@0: // Setup callback thread. f@0: stream_.callbackInfo.object = (void *) this; f@0: f@0: // Set the thread attributes for joinable and realtime scheduling f@0: // priority (optional). The higher priority will only take affect f@0: // if the program is run as root or suid. Note, under Linux f@0: // processes with CAP_SYS_NICE privilege, a user can change f@0: // scheduling policy and priority (thus need not be root). See f@0: // POSIX "capabilities". f@0: pthread_attr_t attr; f@0: pthread_attr_init( &attr ); f@0: pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE ); f@0: f@0: #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread) f@0: if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) { f@0: // We previously attempted to increase the audio callback priority f@0: // to SCHED_RR here via the attributes. However, while no errors f@0: // were reported in doing so, it did not work. So, now this is f@0: // done in the alsaCallbackHandler function. 
f@0: stream_.callbackInfo.doRealtime = true; f@0: int priority = options->priority; f@0: int min = sched_get_priority_min( SCHED_RR ); f@0: int max = sched_get_priority_max( SCHED_RR ); f@0: if ( priority < min ) priority = min; f@0: else if ( priority > max ) priority = max; f@0: stream_.callbackInfo.priority = priority; f@0: } f@0: #endif f@0: f@0: stream_.callbackInfo.isRunning = true; f@0: result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo ); f@0: pthread_attr_destroy( &attr ); f@0: if ( result ) { f@0: stream_.callbackInfo.isRunning = false; f@0: errorText_ = "RtApiAlsa::error creating callback thread!"; f@0: goto error; f@0: } f@0: } f@0: f@0: return SUCCESS; f@0: f@0: error: f@0: if ( apiInfo ) { f@0: pthread_cond_destroy( &apiInfo->runnable_cv ); f@0: if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] ); f@0: if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] ); f@0: delete apiInfo; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: if ( phandle) snd_pcm_close( phandle ); f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.state = STREAM_CLOSED; f@0: return FAILURE; f@0: } f@0: f@0: void RtApiAlsa :: closeStream() f@0: { f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiAlsa::closeStream(): no open stream to close!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: stream_.callbackInfo.isRunning = false; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: apiInfo->runnable = true; f@0: pthread_cond_signal( &apiInfo->runnable_cv ); f@0: } f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: pthread_join( stream_.callbackInfo.thread, NULL ); 
f@0: f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: stream_.state = STREAM_STOPPED; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) f@0: snd_pcm_drop( apiInfo->handles[0] ); f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) f@0: snd_pcm_drop( apiInfo->handles[1] ); f@0: } f@0: f@0: if ( apiInfo ) { f@0: pthread_cond_destroy( &apiInfo->runnable_cv ); f@0: if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] ); f@0: if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] ); f@0: delete apiInfo; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.mode = UNINITIALIZED; f@0: stream_.state = STREAM_CLOSED; f@0: } f@0: f@0: void RtApiAlsa :: startStream() f@0: { f@0: // This method calls snd_pcm_prepare if the device isn't already in that state. 
f@0: f@0: verifyStream(); f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiAlsa::startStream(): the stream is already running!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: int result = 0; f@0: snd_pcm_state_t state; f@0: AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: state = snd_pcm_state( handle[0] ); f@0: if ( state != SND_PCM_STATE_PREPARED ) { f@0: result = snd_pcm_prepare( handle[0] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: } f@0: f@0: if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) { f@0: result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open f@0: state = snd_pcm_state( handle[1] ); f@0: if ( state != SND_PCM_STATE_PREPARED ) { f@0: result = snd_pcm_prepare( handle[1] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: } f@0: f@0: stream_.state = STREAM_RUNNING; f@0: f@0: unlock: f@0: apiInfo->runnable = true; f@0: pthread_cond_signal( &apiInfo->runnable_cv ); f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( result >= 0 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiAlsa :: stopStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: int result = 0; 
f@0: AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: if ( apiInfo->synchronized ) f@0: result = snd_pcm_drop( handle[0] ); f@0: else f@0: result = snd_pcm_drain( handle[0] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) { f@0: result = snd_pcm_drop( handle[1] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: unlock: f@0: apiInfo->runnable = false; // fixes high CPU usage when stopped f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( result >= 0 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiAlsa :: abortStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: int result = 0; f@0: AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: result = snd_pcm_drop( handle[0] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) { f@0: result = snd_pcm_drop( 
handle[1] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: unlock: f@0: apiInfo->runnable = false; // fixes high CPU usage when stopped f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( result >= 0 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiAlsa :: callbackEvent() f@0: { f@0: AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle; f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: MUTEX_LOCK( &stream_.mutex ); f@0: while ( !apiInfo->runnable ) f@0: pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex ); f@0: f@0: if ( stream_.state != STREAM_RUNNING ) { f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: return; f@0: } f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: } f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: int doStopStream = 0; f@0: RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback; f@0: double streamTime = getStreamTime(); f@0: RtAudioStreamStatus status = 0; f@0: if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) { f@0: status |= RTAUDIO_OUTPUT_UNDERFLOW; f@0: apiInfo->xrun[0] = false; f@0: } f@0: if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) { f@0: status |= RTAUDIO_INPUT_OVERFLOW; f@0: apiInfo->xrun[1] = false; f@0: } f@0: doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1], f@0: stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData ); f@0: f@0: if ( doStopStream == 2 ) { f@0: abortStream(); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: // The state might change while waiting on a mutex. 
f@0: if ( stream_.state == STREAM_STOPPED ) goto unlock; f@0: f@0: int result; f@0: char *buffer; f@0: int channels; f@0: snd_pcm_t **handle; f@0: snd_pcm_sframes_t frames; f@0: RtAudioFormat format; f@0: handle = (snd_pcm_t **) apiInfo->handles; f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) { f@0: f@0: // Setup parameters. f@0: if ( stream_.doConvertBuffer[1] ) { f@0: buffer = stream_.deviceBuffer; f@0: channels = stream_.nDeviceChannels[1]; f@0: format = stream_.deviceFormat[1]; f@0: } f@0: else { f@0: buffer = stream_.userBuffer[1]; f@0: channels = stream_.nUserChannels[1]; f@0: format = stream_.userFormat; f@0: } f@0: f@0: // Read samples from device in interleaved/non-interleaved format. f@0: if ( stream_.deviceInterleaved[1] ) f@0: result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize ); f@0: else { f@0: void *bufs[channels]; f@0: size_t offset = stream_.bufferSize * formatBytes( format ); f@0: for ( int i=0; ixrun[1] = true; f@0: result = snd_pcm_prepare( handle[1] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: } f@0: else { f@0: errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: } f@0: else { f@0: errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: error( RtAudioError::WARNING ); f@0: goto tryOutput; f@0: } f@0: f@0: // Do byte swapping if necessary. f@0: if ( stream_.doByteSwap[1] ) f@0: byteSwapBuffer( buffer, stream_.bufferSize * channels, format ); f@0: f@0: // Do buffer conversion if necessary. 
f@0: if ( stream_.doConvertBuffer[1] ) f@0: convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] ); f@0: f@0: // Check stream latency f@0: result = snd_pcm_delay( handle[1], &frames ); f@0: if ( result == 0 && frames > 0 ) stream_.latency[1] = frames; f@0: } f@0: f@0: tryOutput: f@0: f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: // Setup parameters and do buffer conversion if necessary. f@0: if ( stream_.doConvertBuffer[0] ) { f@0: buffer = stream_.deviceBuffer; f@0: convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] ); f@0: channels = stream_.nDeviceChannels[0]; f@0: format = stream_.deviceFormat[0]; f@0: } f@0: else { f@0: buffer = stream_.userBuffer[0]; f@0: channels = stream_.nUserChannels[0]; f@0: format = stream_.userFormat; f@0: } f@0: f@0: // Do byte swapping if necessary. f@0: if ( stream_.doByteSwap[0] ) f@0: byteSwapBuffer(buffer, stream_.bufferSize * channels, format); f@0: f@0: // Write samples to device in interleaved/non-interleaved format. 
f@0: if ( stream_.deviceInterleaved[0] ) f@0: result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize ); f@0: else { f@0: void *bufs[channels]; f@0: size_t offset = stream_.bufferSize * formatBytes( format ); f@0: for ( int i=0; ixrun[0] = true; f@0: result = snd_pcm_prepare( handle[0] ); f@0: if ( result < 0 ) { f@0: errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: } f@0: else { f@0: errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: } f@0: else { f@0: errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << "."; f@0: errorText_ = errorStream_.str(); f@0: } f@0: error( RtAudioError::WARNING ); f@0: goto unlock; f@0: } f@0: f@0: // Check stream latency f@0: result = snd_pcm_delay( handle[0], &frames ); f@0: if ( result == 0 && frames > 0 ) stream_.latency[0] = frames; f@0: } f@0: f@0: unlock: f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: RtApi::tickStreamTime(); f@0: if ( doStopStream == 1 ) this->stopStream(); f@0: } f@0: f@0: static void *alsaCallbackHandler( void *ptr ) f@0: { f@0: CallbackInfo *info = (CallbackInfo *) ptr; f@0: RtApiAlsa *object = (RtApiAlsa *) info->object; f@0: bool *isRunning = &info->isRunning; f@0: f@0: #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread) f@0: if ( &info->doRealtime ) { f@0: pthread_t tID = pthread_self(); // ID of this thread f@0: sched_param prio = { info->priority }; // scheduling priority of thread f@0: pthread_setschedparam( tID, SCHED_RR, &prio ); f@0: } f@0: #endif f@0: f@0: while ( *isRunning == true ) { f@0: pthread_testcancel(); f@0: object->callbackEvent(); f@0: } f@0: f@0: pthread_exit( NULL ); f@0: } f@0: f@0: //******************** End of __LINUX_ALSA__ *********************// f@0: 
#endif f@0: f@0: #if defined(__LINUX_PULSE__) f@0: f@0: // Code written by Peter Meerwald, pmeerw@pmeerw.net f@0: // and Tristan Matthews. f@0: f@0: #include f@0: #include f@0: #include f@0: f@0: static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000, f@0: 44100, 48000, 96000, 0}; f@0: f@0: struct rtaudio_pa_format_mapping_t { f@0: RtAudioFormat rtaudio_format; f@0: pa_sample_format_t pa_format; f@0: }; f@0: f@0: static const rtaudio_pa_format_mapping_t supported_sampleformats[] = { f@0: {RTAUDIO_SINT16, PA_SAMPLE_S16LE}, f@0: {RTAUDIO_SINT32, PA_SAMPLE_S32LE}, f@0: {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE}, f@0: {0, PA_SAMPLE_INVALID}}; f@0: f@0: struct PulseAudioHandle { f@0: pa_simple *s_play; f@0: pa_simple *s_rec; f@0: pthread_t thread; f@0: pthread_cond_t runnable_cv; f@0: bool runnable; f@0: PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { } f@0: }; f@0: f@0: RtApiPulse::~RtApiPulse() f@0: { f@0: if ( stream_.state != STREAM_CLOSED ) f@0: closeStream(); f@0: } f@0: f@0: unsigned int RtApiPulse::getDeviceCount( void ) f@0: { f@0: return 1; f@0: } f@0: f@0: RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ ) f@0: { f@0: RtAudio::DeviceInfo info; f@0: info.probed = true; f@0: info.name = "PulseAudio"; f@0: info.outputChannels = 2; f@0: info.inputChannels = 2; f@0: info.duplexChannels = 2; f@0: info.isDefaultOutput = true; f@0: info.isDefaultInput = true; f@0: f@0: for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) f@0: info.sampleRates.push_back( *sr ); f@0: f@0: info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32; f@0: f@0: return info; f@0: } f@0: f@0: static void *pulseaudio_callback( void * user ) f@0: { f@0: CallbackInfo *cbi = static_cast( user ); f@0: RtApiPulse *context = static_cast( cbi->object ); f@0: volatile bool *isRunning = &cbi->isRunning; f@0: f@0: while ( *isRunning ) { f@0: pthread_testcancel(); f@0: context->callbackEvent(); f@0: } f@0: f@0: pthread_exit( 
NULL ); f@0: } f@0: f@0: void RtApiPulse::closeStream( void ) f@0: { f@0: PulseAudioHandle *pah = static_cast( stream_.apiHandle ); f@0: f@0: stream_.callbackInfo.isRunning = false; f@0: if ( pah ) { f@0: MUTEX_LOCK( &stream_.mutex ); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: pah->runnable = true; f@0: pthread_cond_signal( &pah->runnable_cv ); f@0: } f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: pthread_join( pah->thread, 0 ); f@0: if ( pah->s_play ) { f@0: pa_simple_flush( pah->s_play, NULL ); f@0: pa_simple_free( pah->s_play ); f@0: } f@0: if ( pah->s_rec ) f@0: pa_simple_free( pah->s_rec ); f@0: f@0: pthread_cond_destroy( &pah->runnable_cv ); f@0: delete pah; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: if ( stream_.userBuffer[0] ) { f@0: free( stream_.userBuffer[0] ); f@0: stream_.userBuffer[0] = 0; f@0: } f@0: if ( stream_.userBuffer[1] ) { f@0: free( stream_.userBuffer[1] ); f@0: stream_.userBuffer[1] = 0; f@0: } f@0: f@0: stream_.state = STREAM_CLOSED; f@0: stream_.mode = UNINITIALIZED; f@0: } f@0: f@0: void RtApiPulse::callbackEvent( void ) f@0: { f@0: PulseAudioHandle *pah = static_cast( stream_.apiHandle ); f@0: f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: MUTEX_LOCK( &stream_.mutex ); f@0: while ( !pah->runnable ) f@0: pthread_cond_wait( &pah->runnable_cv, &stream_.mutex ); f@0: f@0: if ( stream_.state != STREAM_RUNNING ) { f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: return; f@0: } f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: } f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... 
" f@0: "this shouldn't happen!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback; f@0: double streamTime = getStreamTime(); f@0: RtAudioStreamStatus status = 0; f@0: int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT], f@0: stream_.bufferSize, streamTime, status, f@0: stream_.callbackInfo.userData ); f@0: f@0: if ( doStopStream == 2 ) { f@0: abortStream(); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT]; f@0: void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT]; f@0: f@0: if ( stream_.state != STREAM_RUNNING ) f@0: goto unlock; f@0: f@0: int pa_error; f@0: size_t bytes; f@0: if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: if ( stream_.doConvertBuffer[OUTPUT] ) { f@0: convertBuffer( stream_.deviceBuffer, f@0: stream_.userBuffer[OUTPUT], f@0: stream_.convertInfo[OUTPUT] ); f@0: bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize * f@0: formatBytes( stream_.deviceFormat[OUTPUT] ); f@0: } else f@0: bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize * f@0: formatBytes( stream_.userFormat ); f@0: f@0: if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) { f@0: errorStream_ << "RtApiPulse::callbackEvent: audio write error, " << f@0: pa_strerror( pa_error ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: } f@0: } f@0: f@0: if ( stream_.mode == INPUT || stream_.mode == DUPLEX) { f@0: if ( stream_.doConvertBuffer[INPUT] ) f@0: bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize * f@0: formatBytes( stream_.deviceFormat[INPUT] ); f@0: else f@0: bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize * f@0: formatBytes( stream_.userFormat ); f@0: f@0: if ( pa_simple_read( pah->s_rec, pulse_in, 
bytes, &pa_error ) < 0 ) { f@0: errorStream_ << "RtApiPulse::callbackEvent: audio read error, " << f@0: pa_strerror( pa_error ) << "."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: } f@0: if ( stream_.doConvertBuffer[INPUT] ) { f@0: convertBuffer( stream_.userBuffer[INPUT], f@0: stream_.deviceBuffer, f@0: stream_.convertInfo[INPUT] ); f@0: } f@0: } f@0: f@0: unlock: f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: RtApi::tickStreamTime(); f@0: f@0: if ( doStopStream == 1 ) f@0: stopStream(); f@0: } f@0: f@0: void RtApiPulse::startStream( void ) f@0: { f@0: PulseAudioHandle *pah = static_cast( stream_.apiHandle ); f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiPulse::startStream(): the stream is not open!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiPulse::startStream(): the stream is already running!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: stream_.state = STREAM_RUNNING; f@0: f@0: pah->runnable = true; f@0: pthread_cond_signal( &pah->runnable_cv ); f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: } f@0: f@0: void RtApiPulse::stopStream( void ) f@0: { f@0: PulseAudioHandle *pah = static_cast( stream_.apiHandle ); f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiPulse::stopStream(): the stream is not open!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: if ( pah && pah->s_play ) { f@0: int pa_error; f@0: if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) { f@0: errorStream_ << "RtApiPulse::stopStream: error draining output device, " << f@0: pa_strerror( pa_error 
) << "."; f@0: errorText_ = errorStream_.str(); f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: } f@0: f@0: void RtApiPulse::abortStream( void ) f@0: { f@0: PulseAudioHandle *pah = static_cast( stream_.apiHandle ); f@0: f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiPulse::abortStream(): the stream is not open!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return; f@0: } f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: if ( pah && pah->s_play ) { f@0: int pa_error; f@0: if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) { f@0: errorStream_ << "RtApiPulse::abortStream: error flushing output device, " << f@0: pa_strerror( pa_error ) << "."; f@0: errorText_ = errorStream_.str(); f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: return; f@0: } f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: } f@0: f@0: bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode, f@0: unsigned int channels, unsigned int firstChannel, f@0: unsigned int sampleRate, RtAudioFormat format, f@0: unsigned int *bufferSize, RtAudio::StreamOptions *options ) f@0: { f@0: PulseAudioHandle *pah = 0; f@0: unsigned long bufferBytes = 0; f@0: pa_sample_spec ss; f@0: f@0: if ( device != 0 ) return false; f@0: if ( mode != INPUT && mode != OUTPUT ) return false; f@0: if ( channels != 1 && channels != 2 ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels."; f@0: return false; f@0: } f@0: ss.channels = channels; f@0: f@0: if ( firstChannel != 0 ) return false; f@0: f@0: bool sr_found = false; f@0: for ( const 
unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) { f@0: if ( sampleRate == *sr ) { f@0: sr_found = true; f@0: stream_.sampleRate = sampleRate; f@0: ss.rate = sampleRate; f@0: break; f@0: } f@0: } f@0: if ( !sr_found ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate."; f@0: return false; f@0: } f@0: f@0: bool sf_found = 0; f@0: for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats; f@0: sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) { f@0: if ( format == sf->rtaudio_format ) { f@0: sf_found = true; f@0: stream_.userFormat = sf->rtaudio_format; f@0: stream_.deviceFormat[mode] = stream_.userFormat; f@0: ss.format = sf->pa_format; f@0: break; f@0: } f@0: } f@0: if ( !sf_found ) { // Use internal data format conversion. f@0: stream_.userFormat = format; f@0: stream_.deviceFormat[mode] = RTAUDIO_FLOAT32; f@0: ss.format = PA_SAMPLE_FLOAT32LE; f@0: } f@0: f@0: // Set other stream parameters. f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false; f@0: else stream_.userInterleaved = true; f@0: stream_.deviceInterleaved[mode] = true; f@0: stream_.nBuffers = 1; f@0: stream_.doByteSwap[mode] = false; f@0: stream_.nUserChannels[mode] = channels; f@0: stream_.nDeviceChannels[mode] = channels + firstChannel; f@0: stream_.channelOffset[mode] = 0; f@0: std::string streamName = "RtAudio"; f@0: f@0: // Set flags for buffer conversion. f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate necessary internal buffers. 
f@0: bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); f@0: stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.userBuffer[mode] == NULL ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory."; f@0: goto error; f@0: } f@0: stream_.bufferSize = *bufferSize; f@0: f@0: if ( stream_.doConvertBuffer[mode] ) { f@0: f@0: bool makeBuffer = true; f@0: bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); f@0: if ( mode == INPUT ) { f@0: if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { f@0: unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); f@0: if ( bufferBytes <= bytesOut ) makeBuffer = false; f@0: } f@0: } f@0: f@0: if ( makeBuffer ) { f@0: bufferBytes *= *bufferSize; f@0: if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.deviceBuffer == NULL ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory."; f@0: goto error; f@0: } f@0: } f@0: } f@0: f@0: stream_.device[mode] = device; f@0: f@0: // Setup the buffer conversion information structure. 
f@0: if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel ); f@0: f@0: if ( !stream_.apiHandle ) { f@0: PulseAudioHandle *pah = new PulseAudioHandle; f@0: if ( !pah ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle."; f@0: goto error; f@0: } f@0: f@0: stream_.apiHandle = pah; f@0: if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable."; f@0: goto error; f@0: } f@0: } f@0: pah = static_cast( stream_.apiHandle ); f@0: f@0: int error; f@0: if ( !options->streamName.empty() ) streamName = options->streamName; f@0: switch ( mode ) { f@0: case INPUT: f@0: pa_buffer_attr buffer_attr; f@0: buffer_attr.fragsize = bufferBytes; f@0: buffer_attr.maxlength = -1; f@0: f@0: pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error ); f@0: if ( !pah->s_rec ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server."; f@0: goto error; f@0: } f@0: break; f@0: case OUTPUT: f@0: pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error ); f@0: if ( !pah->s_play ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server."; f@0: goto error; f@0: } f@0: break; f@0: default: f@0: goto error; f@0: } f@0: f@0: if ( stream_.mode == UNINITIALIZED ) f@0: stream_.mode = mode; f@0: else if ( stream_.mode == mode ) f@0: goto error; f@0: else f@0: stream_.mode = DUPLEX; f@0: f@0: if ( !stream_.callbackInfo.isRunning ) { f@0: stream_.callbackInfo.object = this; f@0: stream_.callbackInfo.isRunning = true; f@0: if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) { f@0: errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread."; f@0: goto error; f@0: } f@0: } f@0: f@0: stream_.state = STREAM_STOPPED; f@0: return true; f@0: 
f@0: error: f@0: if ( pah && stream_.callbackInfo.isRunning ) { f@0: pthread_cond_destroy( &pah->runnable_cv ); f@0: delete pah; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: return FAILURE; f@0: } f@0: f@0: //******************** End of __LINUX_PULSE__ *********************// f@0: #endif f@0: f@0: #if defined(__LINUX_OSS__) f@0: f@0: #include f@0: #include f@0: #include f@0: #include f@0: #include f@0: #include f@0: #include f@0: f@0: static void *ossCallbackHandler(void * ptr); f@0: f@0: // A structure to hold various information related to the OSS API f@0: // implementation. f@0: struct OssHandle { f@0: int id[2]; // device ids f@0: bool xrun[2]; f@0: bool triggered; f@0: pthread_cond_t runnable; f@0: f@0: OssHandle() f@0: :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; } f@0: }; f@0: f@0: RtApiOss :: RtApiOss() f@0: { f@0: // Nothing to do here. 
f@0: } f@0: f@0: RtApiOss :: ~RtApiOss() f@0: { f@0: if ( stream_.state != STREAM_CLOSED ) closeStream(); f@0: } f@0: f@0: unsigned int RtApiOss :: getDeviceCount( void ) f@0: { f@0: int mixerfd = open( "/dev/mixer", O_RDWR, 0 ); f@0: if ( mixerfd == -1 ) { f@0: errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'."; f@0: error( RtAudioError::WARNING ); f@0: return 0; f@0: } f@0: f@0: oss_sysinfo sysinfo; f@0: if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) { f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required."; f@0: error( RtAudioError::WARNING ); f@0: return 0; f@0: } f@0: f@0: close( mixerfd ); f@0: return sysinfo.numaudios; f@0: } f@0: f@0: RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device ) f@0: { f@0: RtAudio::DeviceInfo info; f@0: info.probed = false; f@0: f@0: int mixerfd = open( "/dev/mixer", O_RDWR, 0 ); f@0: if ( mixerfd == -1 ) { f@0: errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: oss_sysinfo sysinfo; f@0: int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ); f@0: if ( result == -1 ) { f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required."; f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: unsigned nDevices = sysinfo.numaudios; f@0: if ( nDevices == 0 ) { f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::getDeviceInfo: no devices found!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: f@0: if ( device >= nDevices ) { f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!"; f@0: error( RtAudioError::INVALID_USE ); f@0: return info; f@0: } f@0: f@0: oss_audioinfo ainfo; f@0: ainfo.dev = device; f@0: result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo ); f@0: close( mixerfd ); f@0: if ( result == -1 ) { f@0: 
errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Probe channels f@0: if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels; f@0: if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels; f@0: if ( ainfo.caps & PCM_CAP_DUPLEX ) { f@0: if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX ) f@0: info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; f@0: } f@0: f@0: // Probe data formats ... do for input f@0: unsigned long mask = ainfo.iformats; f@0: if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE ) f@0: info.nativeFormats |= RTAUDIO_SINT16; f@0: if ( mask & AFMT_S8 ) f@0: info.nativeFormats |= RTAUDIO_SINT8; f@0: if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE ) f@0: info.nativeFormats |= RTAUDIO_SINT32; f@0: if ( mask & AFMT_FLOAT ) f@0: info.nativeFormats |= RTAUDIO_FLOAT32; f@0: if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE ) f@0: info.nativeFormats |= RTAUDIO_SINT24; f@0: f@0: // Check that we have at least one supported format f@0: if ( info.nativeFormats == 0 ) { f@0: errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: return info; f@0: } f@0: f@0: // Probe the supported sample rates. 
f@0: info.sampleRates.clear(); f@0: if ( ainfo.nrates ) { f@0: for ( unsigned int i=0; i= (int) SAMPLE_RATES[k] ) f@0: info.sampleRates.push_back( SAMPLE_RATES[k] ); f@0: } f@0: } f@0: f@0: if ( info.sampleRates.size() == 0 ) { f@0: errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: error( RtAudioError::WARNING ); f@0: } f@0: else { f@0: info.probed = true; f@0: info.name = ainfo.name; f@0: } f@0: f@0: return info; f@0: } f@0: f@0: f@0: bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels, f@0: unsigned int firstChannel, unsigned int sampleRate, f@0: RtAudioFormat format, unsigned int *bufferSize, f@0: RtAudio::StreamOptions *options ) f@0: { f@0: int mixerfd = open( "/dev/mixer", O_RDWR, 0 ); f@0: if ( mixerfd == -1 ) { f@0: errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'."; f@0: return FAILURE; f@0: } f@0: f@0: oss_sysinfo sysinfo; f@0: int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ); f@0: if ( result == -1 ) { f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required."; f@0: return FAILURE; f@0: } f@0: f@0: unsigned nDevices = sysinfo.numaudios; f@0: if ( nDevices == 0 ) { f@0: // This should not happen because a check is made before this function is called. f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::probeDeviceOpen: no devices found!"; f@0: return FAILURE; f@0: } f@0: f@0: if ( device >= nDevices ) { f@0: // This should not happen because a check is made before this function is called. 
f@0: close( mixerfd ); f@0: errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!"; f@0: return FAILURE; f@0: } f@0: f@0: oss_audioinfo ainfo; f@0: ainfo.dev = device; f@0: result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo ); f@0: close( mixerfd ); f@0: if ( result == -1 ) { f@0: errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Check if device supports input or output f@0: if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) || f@0: ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) { f@0: if ( mode == OUTPUT ) f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output."; f@0: else f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: int flags = 0; f@0: OssHandle *handle = (OssHandle *) stream_.apiHandle; f@0: if ( mode == OUTPUT ) f@0: flags |= O_WRONLY; f@0: else { // mode == INPUT f@0: if (stream_.mode == OUTPUT && stream_.device[0] == device) { f@0: // We just set the same device for playback ... close and reopen for duplex (OSS only). f@0: close( handle->id[0] ); f@0: handle->id[0] = 0; f@0: if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) { f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: // Check that the number previously set channels is the same. f@0: if ( stream_.nUserChannels[0] != channels ) { f@0: errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: flags |= O_RDWR; f@0: } f@0: else f@0: flags |= O_RDONLY; f@0: } f@0: f@0: // Set exclusive access if specified. 
f@0: if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL; f@0: f@0: // Try to open the device. f@0: int fd; f@0: fd = open( ainfo.devnode, flags, 0 ); f@0: if ( fd == -1 ) { f@0: if ( errno == EBUSY ) f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy."; f@0: else f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // For duplex operation, specifically set this mode (this doesn't seem to work). f@0: /* f@0: if ( flags | O_RDWR ) { f@0: result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL ); f@0: if ( result == -1) { f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: } f@0: */ f@0: f@0: // Check the device channel support. f@0: stream_.nUserChannels[mode] = channels; f@0: if ( ainfo.max_channels < (int)(channels + firstChannel) ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the number of channels. 
f@0: int deviceChannels = channels + firstChannel; f@0: result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels ); f@0: if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: stream_.nDeviceChannels[mode] = deviceChannels; f@0: f@0: // Get the data format mask f@0: int mask; f@0: result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask ); f@0: if ( result == -1 ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Determine how to set the device format. f@0: stream_.userFormat = format; f@0: int deviceFormat = -1; f@0: stream_.doByteSwap[mode] = false; f@0: if ( format == RTAUDIO_SINT8 ) { f@0: if ( mask & AFMT_S8 ) { f@0: deviceFormat = AFMT_S8; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: } f@0: } f@0: else if ( format == RTAUDIO_SINT16 ) { f@0: if ( mask & AFMT_S16_NE ) { f@0: deviceFormat = AFMT_S16_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: } f@0: else if ( mask & AFMT_S16_OE ) { f@0: deviceFormat = AFMT_S16_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: } f@0: else if ( format == RTAUDIO_SINT24 ) { f@0: if ( mask & AFMT_S24_NE ) { f@0: deviceFormat = AFMT_S24_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: } f@0: else if ( mask & AFMT_S24_OE ) { f@0: deviceFormat = AFMT_S24_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: } f@0: else if ( format == RTAUDIO_SINT32 ) { f@0: if ( mask & AFMT_S32_NE ) { f@0: deviceFormat = AFMT_S32_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: } f@0: else if ( mask & AFMT_S32_OE ) { f@0: deviceFormat = 
AFMT_S32_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: } f@0: f@0: if ( deviceFormat == -1 ) { f@0: // The user requested format is not natively supported by the device. f@0: if ( mask & AFMT_S16_NE ) { f@0: deviceFormat = AFMT_S16_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: } f@0: else if ( mask & AFMT_S32_NE ) { f@0: deviceFormat = AFMT_S32_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: } f@0: else if ( mask & AFMT_S24_NE ) { f@0: deviceFormat = AFMT_S24_NE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: } f@0: else if ( mask & AFMT_S16_OE ) { f@0: deviceFormat = AFMT_S16_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT16; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( mask & AFMT_S32_OE ) { f@0: deviceFormat = AFMT_S32_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT32; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( mask & AFMT_S24_OE ) { f@0: deviceFormat = AFMT_S24_OE; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT24; f@0: stream_.doByteSwap[mode] = true; f@0: } f@0: else if ( mask & AFMT_S8) { f@0: deviceFormat = AFMT_S8; f@0: stream_.deviceFormat[mode] = RTAUDIO_SINT8; f@0: } f@0: } f@0: f@0: if ( stream_.deviceFormat[mode] == 0 ) { f@0: // This really shouldn't happen ... f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Set the data format. f@0: int temp = deviceFormat; f@0: result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat ); f@0: if ( result == -1 || deviceFormat != temp ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Attempt to set the buffer size. 
According to OSS, the minimum f@0: // number of buffers is two. The supposed minimum buffer size is 16 f@0: // bytes, so that will be our lower bound. The argument to this f@0: // call is in the form 0xMMMMSSSS (hex), where the buffer size (in f@0: // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM. f@0: // We'll check the actual value used near the end of the setup f@0: // procedure. f@0: int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels; f@0: if ( ossBufferBytes < 16 ) ossBufferBytes = 16; f@0: int buffers = 0; f@0: if ( options ) buffers = options->numberOfBuffers; f@0: if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2; f@0: if ( buffers < 2 ) buffers = 3; f@0: temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) ); f@0: result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp ); f@0: if ( result == -1 ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: stream_.nBuffers = buffers; f@0: f@0: // Save buffer size (in sample frames). f@0: *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels ); f@0: stream_.bufferSize = *bufferSize; f@0: f@0: // Set the sample rate. f@0: int srate = sampleRate; f@0: result = ioctl( fd, SNDCTL_DSP_SPEED, &srate ); f@0: if ( result == -1 ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: f@0: // Verify the sample rate setup worked. 
f@0: if ( abs( srate - sampleRate ) > 100 ) { f@0: close( fd ); f@0: errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ")."; f@0: errorText_ = errorStream_.str(); f@0: return FAILURE; f@0: } f@0: stream_.sampleRate = sampleRate; f@0: f@0: if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) { f@0: // We're doing duplex setup here. f@0: stream_.deviceFormat[0] = stream_.deviceFormat[1]; f@0: stream_.nDeviceChannels[0] = deviceChannels; f@0: } f@0: f@0: // Set interleaving parameters. f@0: stream_.userInterleaved = true; f@0: stream_.deviceInterleaved[mode] = true; f@0: if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) f@0: stream_.userInterleaved = false; f@0: f@0: // Set flags for buffer conversion f@0: stream_.doConvertBuffer[mode] = false; f@0: if ( stream_.userFormat != stream_.deviceFormat[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] ) f@0: stream_.doConvertBuffer[mode] = true; f@0: if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && f@0: stream_.nUserChannels[mode] > 1 ) f@0: stream_.doConvertBuffer[mode] = true; f@0: f@0: // Allocate the stream handles if necessary and then save. f@0: if ( stream_.apiHandle == 0 ) { f@0: try { f@0: handle = new OssHandle; f@0: } f@0: catch ( std::bad_alloc& ) { f@0: errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory."; f@0: goto error; f@0: } f@0: f@0: if ( pthread_cond_init( &handle->runnable, NULL ) ) { f@0: errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable."; f@0: goto error; f@0: } f@0: f@0: stream_.apiHandle = (void *) handle; f@0: } f@0: else { f@0: handle = (OssHandle *) stream_.apiHandle; f@0: } f@0: handle->id[mode] = fd; f@0: f@0: // Allocate necessary internal buffers. 
f@0: unsigned long bufferBytes; f@0: bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat ); f@0: stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.userBuffer[mode] == NULL ) { f@0: errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory."; f@0: goto error; f@0: } f@0: f@0: if ( stream_.doConvertBuffer[mode] ) { f@0: f@0: bool makeBuffer = true; f@0: bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] ); f@0: if ( mode == INPUT ) { f@0: if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) { f@0: unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] ); f@0: if ( bufferBytes <= bytesOut ) makeBuffer = false; f@0: } f@0: } f@0: f@0: if ( makeBuffer ) { f@0: bufferBytes *= *bufferSize; f@0: if ( stream_.deviceBuffer ) free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 ); f@0: if ( stream_.deviceBuffer == NULL ) { f@0: errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory."; f@0: goto error; f@0: } f@0: } f@0: } f@0: f@0: stream_.device[mode] = device; f@0: stream_.state = STREAM_STOPPED; f@0: f@0: // Setup the buffer conversion information structure. f@0: if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel ); f@0: f@0: // Setup thread if necessary. f@0: if ( stream_.mode == OUTPUT && mode == INPUT ) { f@0: // We had already set up an output stream. f@0: stream_.mode = DUPLEX; f@0: if ( stream_.device[0] == device ) handle->id[0] = fd; f@0: } f@0: else { f@0: stream_.mode = mode; f@0: f@0: // Setup callback thread. f@0: stream_.callbackInfo.object = (void *) this; f@0: f@0: // Set the thread attributes for joinable and realtime scheduling f@0: // priority. The higher priority will only take affect if the f@0: // program is run as root or suid. 
f@0: pthread_attr_t attr; f@0: pthread_attr_init( &attr ); f@0: pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE ); f@0: #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread) f@0: if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) { f@0: struct sched_param param; f@0: int priority = options->priority; f@0: int min = sched_get_priority_min( SCHED_RR ); f@0: int max = sched_get_priority_max( SCHED_RR ); f@0: if ( priority < min ) priority = min; f@0: else if ( priority > max ) priority = max; f@0: param.sched_priority = priority; f@0: pthread_attr_setschedparam( &attr, ¶m ); f@0: pthread_attr_setschedpolicy( &attr, SCHED_RR ); f@0: } f@0: else f@0: pthread_attr_setschedpolicy( &attr, SCHED_OTHER ); f@0: #else f@0: pthread_attr_setschedpolicy( &attr, SCHED_OTHER ); f@0: #endif f@0: f@0: stream_.callbackInfo.isRunning = true; f@0: result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo ); f@0: pthread_attr_destroy( &attr ); f@0: if ( result ) { f@0: stream_.callbackInfo.isRunning = false; f@0: errorText_ = "RtApiOss::error creating callback thread!"; f@0: goto error; f@0: } f@0: } f@0: f@0: return SUCCESS; f@0: f@0: error: f@0: if ( handle ) { f@0: pthread_cond_destroy( &handle->runnable ); f@0: if ( handle->id[0] ) close( handle->id[0] ); f@0: if ( handle->id[1] ) close( handle->id[1] ); f@0: delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: return FAILURE; f@0: } f@0: f@0: void RtApiOss :: closeStream() f@0: { f@0: if ( stream_.state == STREAM_CLOSED ) { f@0: errorText_ = "RtApiOss::closeStream(): no open stream to close!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: OssHandle 
*handle = (OssHandle *) stream_.apiHandle; f@0: stream_.callbackInfo.isRunning = false; f@0: MUTEX_LOCK( &stream_.mutex ); f@0: if ( stream_.state == STREAM_STOPPED ) f@0: pthread_cond_signal( &handle->runnable ); f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: pthread_join( stream_.callbackInfo.thread, NULL ); f@0: f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) f@0: ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 ); f@0: else f@0: ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 ); f@0: stream_.state = STREAM_STOPPED; f@0: } f@0: f@0: if ( handle ) { f@0: pthread_cond_destroy( &handle->runnable ); f@0: if ( handle->id[0] ) close( handle->id[0] ); f@0: if ( handle->id[1] ) close( handle->id[1] ); f@0: delete handle; f@0: stream_.apiHandle = 0; f@0: } f@0: f@0: for ( int i=0; i<2; i++ ) { f@0: if ( stream_.userBuffer[i] ) { f@0: free( stream_.userBuffer[i] ); f@0: stream_.userBuffer[i] = 0; f@0: } f@0: } f@0: f@0: if ( stream_.deviceBuffer ) { f@0: free( stream_.deviceBuffer ); f@0: stream_.deviceBuffer = 0; f@0: } f@0: f@0: stream_.mode = UNINITIALIZED; f@0: stream_.state = STREAM_CLOSED; f@0: } f@0: f@0: void RtApiOss :: startStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_RUNNING ) { f@0: errorText_ = "RtApiOss::startStream(): the stream is already running!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: stream_.state = STREAM_RUNNING; f@0: f@0: // No need to do anything else here ... OSS automatically starts f@0: // when fed samples. 
f@0: f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: OssHandle *handle = (OssHandle *) stream_.apiHandle; f@0: pthread_cond_signal( &handle->runnable ); f@0: } f@0: f@0: void RtApiOss :: stopStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiOss::stopStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: // The state might change while waiting on a mutex. f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: return; f@0: } f@0: f@0: int result = 0; f@0: OssHandle *handle = (OssHandle *) stream_.apiHandle; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: f@0: // Flush the output with zeros a few times. f@0: char *buffer; f@0: int samples; f@0: RtAudioFormat format; f@0: f@0: if ( stream_.doConvertBuffer[0] ) { f@0: buffer = stream_.deviceBuffer; f@0: samples = stream_.bufferSize * stream_.nDeviceChannels[0]; f@0: format = stream_.deviceFormat[0]; f@0: } f@0: else { f@0: buffer = stream_.userBuffer[0]; f@0: samples = stream_.bufferSize * stream_.nUserChannels[0]; f@0: format = stream_.userFormat; f@0: } f@0: f@0: memset( buffer, 0, samples * formatBytes(format) ); f@0: for ( unsigned int i=0; iid[0], buffer, samples * formatBytes(format) ); f@0: if ( result == -1 ) { f@0: errorText_ = "RtApiOss::stopStream: audio write error."; f@0: error( RtAudioError::WARNING ); f@0: } f@0: } f@0: f@0: result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 ); f@0: if ( result == -1 ) { f@0: errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ")."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: handle->triggered = false; f@0: } f@0: f@0: if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) { f@0: result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 ); f@0: if ( result 
== -1 ) { f@0: errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ")."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: unlock: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( result != -1 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiOss :: abortStream() f@0: { f@0: verifyStream(); f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: errorText_ = "RtApiOss::abortStream(): the stream is already stopped!"; f@0: error( RtAudioError::WARNING ); f@0: return; f@0: } f@0: f@0: MUTEX_LOCK( &stream_.mutex ); f@0: f@0: // The state might change while waiting on a mutex. f@0: if ( stream_.state == STREAM_STOPPED ) { f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: return; f@0: } f@0: f@0: int result = 0; f@0: OssHandle *handle = (OssHandle *) stream_.apiHandle; f@0: if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) { f@0: result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 ); f@0: if ( result == -1 ) { f@0: errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ")."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: handle->triggered = false; f@0: } f@0: f@0: if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) { f@0: result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 ); f@0: if ( result == -1 ) { f@0: errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ")."; f@0: errorText_ = errorStream_.str(); f@0: goto unlock; f@0: } f@0: } f@0: f@0: unlock: f@0: stream_.state = STREAM_STOPPED; f@0: MUTEX_UNLOCK( &stream_.mutex ); f@0: f@0: if ( result != -1 ) return; f@0: error( RtAudioError::SYSTEM_ERROR ); f@0: } f@0: f@0: void RtApiOss :: callbackEvent() f@0: { f@0: OssHandle *handle = (OssHandle *) stream_.apiHandle; f@0: if 
( stream_.state == STREAM_STOPPED ) {
    // Stream is stopped: park on the condition variable until
    // startStream() or closeStream() signals us.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      // Woken for shutdown (or a spurious wakeup while not running).
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report (and clear) any over/underflow flags set by earlier read/write failures.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // A return value of 2 requests an immediate abort (drop pending output).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex buffer: prime the output, then enable both
      // directions at once via SNDCTL_DSP_SETTRIGGER so input and
      // output start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  // A callback return value of 1 requests a graceful (drained) stop.
  if ( doStopStream == 1 ) this->stopStream();
}

// Thread entry point for the OSS callback thread: loop calling
// callbackEvent() until closeStream() clears isRunning.
static void *ossCallbackHandler( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiOss *object = (RtApiOss *) info->object;
  bool *isRunning = &info->isRunning;

  while ( *isRunning == true ) {
    pthread_testcancel();
    object->callbackEvent();
  }

  pthread_exit( NULL );
}

//******************** End of __LINUX_OSS__ *********************//
#endif


// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
void RtApi :: error( RtAudioError::Type type )
{
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Re-entrancy guard: abortStream() below may call error() again;
    // only the first (original) message is forwarded to the callback.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  // No user callback registered: print warnings (if enabled) or throw.
  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}

// Raise INVALID_USE if no stream is currently open.
void RtApi :: verifyStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApi:: a stream is not open!";
    error( RtAudioError::INVALID_USE );
  }
}

// Reset every field of the stream structure to its pristine state.
// Called before probing a new stream and after closing one.
void RtApi :: clearStreamInfo()
{
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
  stream_.sampleRate = 0;
  stream_.bufferSize = 0;
  stream_.nBuffers = 0;
  stream_.userFormat = 0;
  stream_.userInterleaved = true;
  stream_.streamTime = 0.0;
  stream_.apiHandle = 0;
  stream_.deviceBuffer = 0;
  stream_.callbackInfo.callback = 0;
  stream_.callbackInfo.userData = 0;
  stream_.callbackInfo.isRunning = false;
  stream_.callbackInfo.errorCallback = 0;
  for ( int i=0; i<2; i++ ) {
    stream_.device[i] = 11111; // sentinel: "no device selected"
    stream_.doConvertBuffer[i] = false;
    stream_.deviceInterleaved[i] = true;
    stream_.doByteSwap[i] = false;
    stream_.nUserChannels[i] = 0;
    stream_.nDeviceChannels[i] = 0;
    stream_.channelOffset[i] = 0;
    stream_.deviceFormat[i] = 0;
    stream_.latency[i] = 0;
    stream_.userBuffer[i] = 0;
    stream_.convertInfo[i].channels = 0;
    stream_.convertInfo[i].inJump = 0;
    stream_.convertInfo[i].outJump = 0;
    stream_.convertInfo[i].inFormat = 0;
    stream_.convertInfo[i].outFormat = 0;
    stream_.convertInfo[i].inOffset.clear();
    stream_.convertInfo[i].outOffset.clear();
  }
}

// Byte width of one sample of the given RtAudio format.
// Returns 0 (with a WARNING) for an unrecognized format value.
unsigned int RtApi :: formatBytes( RtAudioFormat format )
{
  if ( format == RTAUDIO_SINT16 )
    return 2;
  else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
    return 4;
  else if ( format == RTAUDIO_FLOAT64 )
    return 8;
  else if ( format == RTAUDIO_SINT24 )
    return 3;
  else if ( format == RTAUDIO_SINT8 )
    return 1;

  errorText_ = "RtApi::formatBytes: undefined format.";
  error( RtAudioError::WARNING );

  return 0;
}

// Fill in stream_.convertInfo[mode] for the user<->device buffer
// conversion: jump sizes, formats, and channel count.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
f@0: if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) { f@0: if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) || f@0: ( mode == INPUT && stream_.userInterleaved ) ) { f@0: for ( int k=0; k 0 ) { f@0: if ( stream_.deviceInterleaved[mode] ) { f@0: if ( mode == OUTPUT ) { f@0: for ( int k=0; k> 8); f@0: //out[info.outOffset[j]] >>= 8; f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_FLOAT32) { f@0: Float32 *in = (Float32 *)inBuffer; f@0: for (unsigned int i=0; i> 8); f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_SINT32) { f@0: Int32 *in = (Int32 *)inBuffer; f@0: for (unsigned int i=0; i> 16) & 0x0000ffff); f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_FLOAT32) { f@0: Float32 *in = (Float32 *)inBuffer; f@0: for (unsigned int i=0; i> 8) & 0x00ff); f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_SINT24) { f@0: Int24 *in = (Int24 *)inBuffer; f@0: for (unsigned int i=0; i> 16); f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_SINT32) { f@0: Int32 *in = (Int32 *)inBuffer; f@0: for (unsigned int i=0; i> 24) & 0x000000ff); f@0: } f@0: in += info.inJump; f@0: out += info.outJump; f@0: } f@0: } f@0: else if (info.inFormat == RTAUDIO_FLOAT32) { f@0: Float32 *in = (Float32 *)inBuffer; f@0: for (unsigned int i=0; i>8) | (x<<8); } f@0: //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); } f@0: //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); } f@0: f@0: void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format ) f@0: { f@0: register char val; f@0: register char *ptr; f@0: f@0: ptr = buffer; 
f@0: if ( format == RTAUDIO_SINT16 ) { f@0: for ( unsigned int i=0; i