To check out this repository, run hg clone with the following URL, or open the URL in EasyMercurial or your preferred Mercurial client.

The primary repository for this project is hosted at https://github.com/sonic-visualiser/sv-dependency-builds.
This repository is a read-only copy that is updated automatically every hour.

File: src/portaudio_20161030_catalina_patch/src/hostapi/coreaudio/pa_mac_core.c (revision 162:d43aab368df9)


/*
 * Implementation of the PortAudio API for Apple AUHAL
 *
 * PortAudio Portable Real-Time Audio Library
 * Latest Version at: http://www.portaudio.com
 *
 * Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
 * Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
 *
 * Dominic's code was based on code by Phil Burk, Darren Gibbs,
 * Gord Peters, Stephane Letz, and Greg Pfiel.
 *
 * The following people also deserve acknowledgements:
 *
 * Olivier Tristan for feedback and testing
 * Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
 * interface.
 *
 *
 * Based on the Open Source API proposed by Ross Bencina
 * Copyright (c) 1999-2002 Ross Bencina, Phil Burk
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * The text above constitutes the entire PortAudio license; however,
 * the PortAudio community also makes the following non-binding requests:
 *
 * Any person wishing to distribute modifications to the Software is
 * requested to send the modifications to the original developer so that
 * they can be incorporated into the canonical version. It is also
 * requested that these non-binding requests be included along with the
 * license above.
 */

/**
 @file pa_mac_core
 @ingroup hostapi_src
 @author Bjorn Roche
 @brief AUHAL implementation of PortAudio
*/

/* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
 * PaMacCore_SetError() will do this.
 */

#include "pa_mac_core_internal.h"

#include <string.h> /* strlen(), memcmp() etc. */
#include <libkern/OSAtomic.h>

#include "pa_mac_core.h"
#include "pa_mac_core_utilities.h"
#include "pa_mac_core_blocking.h"


#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */

/* This is a reasonable size for a small buffer based on experience. */
#define PA_MAC_SMALL_BUFFER_SIZE    (64)

/* prototypes for functions declared in this file */
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );

/*
 * Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
 * with the requested flags and initializes channel map.
 */
void PaMacCore_SetupStreamInfo(  PaMacCoreStreamInfo *data, const unsigned long flags )
{
   bzero( data, sizeof( PaMacCoreStreamInfo ) );
   data->size = sizeof( PaMacCoreStreamInfo );
   data->hostApiType = paCoreAudio;
   data->version = 0x01;
   data->flags = flags;
   data->channelMap = NULL;
   data->channelMapSize = 0;
}

/*
 * Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
 */
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
{
   data->channelMap = channelMap;
   data->channelMapSize = channelMapSize;
}
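/*
 * Illustrative usage sketch (not compiled; the function name and the map values
 * are placeholders, only the PaMacCore_* calls above and the
 * hostApiSpecificStreamInfo field are real API): a client would typically fill
 * in a PaMacCoreStreamInfo and attach it to the PaStreamParameters passed to
 * Pa_OpenStream(), e.g. to request the "play nice" flag and, for a stereo
 * device, a channel map that swaps the two channels (map semantics are
 * documented in pa_mac_core.h).
 */
#if 0
static void example_setup_stream_info( PaStreamParameters *outputParameters )
{
    static PaMacCoreStreamInfo macInfo;
    static const SInt32 channelMap[2] = { 1, 0 }; /* swap left and right */

    PaMacCore_SetupStreamInfo( &macInfo, paMacCorePlayNice );
    PaMacCore_SetupChannelMap( &macInfo, channelMap, 2 );

    /* Attach the host-API-specific info to the stream parameters that will
       later be passed to Pa_OpenStream(). */
    outputParameters->hostApiSpecificStreamInfo = &macInfo;
}
#endif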
static char *channelName = NULL;
static int channelNameSize = 0;
static bool ensureChannelNameSize( int size )
{
   if( size >= channelNameSize ) {
      free( channelName );
      channelName = (char *) malloc( ( channelNameSize = size ) + 1 );
      if( !channelName ) {
         channelNameSize = 0;
         return false;
      }
   }
   return true;
}
/*
 * Function declared in pa_mac_core.h. Retrieves channel names.
 */
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
{
        struct PaUtilHostApiRepresentation *hostApi;
        PaError err;
        OSStatus error;
        err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
        assert(err == paNoError);
        if( err != paNoError )
                return NULL;
        PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
        AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
        CFStringRef nameRef;

        /* First try with CFString */
        UInt32 size = sizeof(nameRef);
        error = AudioDeviceGetProperty( hostApiDevice,
                                        channelIndex + 1,
                                        input,
                                        kAudioDevicePropertyChannelNameCFString,
                                        &size,
                                        &nameRef );
        if( error )
        {
                /* try the C String */
                size = 0;
                error = AudioDeviceGetPropertyInfo( hostApiDevice,
                                                    channelIndex + 1,
                                                    input,
                                                    kAudioDevicePropertyChannelName,
                                                    &size,
                                                    NULL);
                if( !error )
                {
                        if( !ensureChannelNameSize( size ) )
                                return NULL;

                        error = AudioDeviceGetProperty( hostApiDevice,
                                                        channelIndex + 1,
                                                        input,
                                                        kAudioDevicePropertyChannelName,
                                                        &size,
                                                        channelName );

                        if( !error )
                                return channelName;
                }

                /* as a last-ditch effort, we use the device name and append the channel number. */
                nameRef = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%s: %d"), hostApi->deviceInfos[device]->name, channelIndex + 1 );

                size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
                if( !ensureChannelNameSize( size ) )
                {
                        CFRelease( nameRef );
                        return NULL;
                }
                CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
                CFRelease( nameRef );
        }
        else
        {
                size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
                if( !ensureChannelNameSize( size ) )
                {
                        CFRelease( nameRef );
                        return NULL;
                }
                CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
                CFRelease( nameRef );
        }

        return channelName;
}
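/*
 * Illustrative usage sketch (not compiled; the function name, loop bounds and
 * printf are placeholders): once Pa_Initialize() has been called, input channel
 * names can be listed for a Core Audio device index. Note that the returned
 * pointer refers to the shared static buffer above, so it must be copied if it
 * needs to outlive the next call.
 */
#if 0
static void example_print_input_channel_names( int deviceIndex, int numInputChannels )
{
    int c;
    for( c = 0; c < numInputChannels; ++c )
    {
        const char *name = PaMacCore_GetChannelName( deviceIndex, c, true );
        if( name )
            printf( "input channel %d: %s\n", c, name );
    }
}
#endif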


PaError PaMacCore_GetBufferSizeRange( PaDeviceIndex device,
                                      long *minBufferSizeFrames, long *maxBufferSizeFrames )
{
    PaError result;
    PaUtilHostApiRepresentation *hostApi;

    result = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );

    if( result == paNoError )
    {
        PaDeviceIndex hostApiDeviceIndex;
        result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDeviceIndex, device, hostApi );
        if( result == paNoError )
        {
            PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
            AudioDeviceID macCoreDeviceId = macCoreHostApi->devIds[hostApiDeviceIndex];
            AudioValueRange audioRange;
            UInt32 propSize = sizeof( audioRange );

            // return the size range for the output scope unless we only have inputs
            Boolean isInput = 0;
            if( macCoreHostApi->inheritedHostApiRep.deviceInfos[hostApiDeviceIndex]->maxOutputChannels == 0 )
                isInput = 1;

            result = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );

            *minBufferSizeFrames = audioRange.mMinimum;
            *maxBufferSizeFrames = audioRange.mMaximum;
        }
    }

    return result;
}
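/*
 * Illustrative usage sketch (not compiled; the function name and printf are
 * placeholders): query the allowable hardware buffer size range for a device
 * before choosing a framesPerBuffer value.
 */
#if 0
static void example_query_buffer_size_range( PaDeviceIndex device )
{
    long minFrames = 0, maxFrames = 0;
    PaError err = PaMacCore_GetBufferSizeRange( device, &minFrames, &maxFrames );
    if( err == paNoError )
        printf( "device %d supports %ld to %ld frames per host buffer\n",
                (int) device, minFrames, maxFrames );
}
#endif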


AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamInputDevice()\n"));

    return ( stream->inputDevice );
}

AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamOutputDevice()\n"));

    return ( stream->outputDevice );
}
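/*
 * Illustrative usage sketch (not compiled; the function name and printf are
 * placeholders): once a stream has been opened on the Core Audio host API, the
 * underlying AudioDeviceIDs can be retrieved for use with other CoreAudio calls.
 */
#if 0
static void example_query_stream_devices( PaStream *openedStream )
{
    AudioDeviceID inDev  = PaMacCore_GetStreamInputDevice( openedStream );
    AudioDeviceID outDev = PaMacCore_GetStreamOutputDevice( openedStream );
    printf( "input device: %u, output device: %u\n",
            (unsigned int) inDev, (unsigned int) outDev );
}
#endif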

#ifdef __cplusplus
}
#endif /* __cplusplus */

#define RING_BUFFER_ADVANCE_DENOMINATOR (4)

static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate );
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
                           PaStream** s,
                           const PaStreamParameters *inputParameters,
                           const PaStreamParameters *outputParameters,
                           double sampleRate,
                           unsigned long framesPerBuffer,
                           PaStreamFlags streamFlags,
                           PaStreamCallback *streamCallback,
                           void *userData );
static PaError CloseStream( PaStream* stream );
static PaError StartStream( PaStream *stream );
static PaError StopStream( PaStream *stream );
static PaError AbortStream( PaStream *stream );
static PaError IsStreamStopped( PaStream *s );
static PaError IsStreamActive( PaStream *stream );
static PaTime GetStreamTime( PaStream *stream );
static OSStatus AudioIOProc( void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData );
static double GetStreamCpuLoad( PaStream* stream );

static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput);

static PaError OpenAndSetupOneAudioUnit(
                                   const PaMacCoreStream *stream,
                                   const PaStreamParameters *inStreamParams,
                                   const PaStreamParameters *outStreamParams,
                                   const UInt32 requestedFramesPerBuffer,
                                   UInt32 *actualInputFramesPerBuffer,
                                   UInt32 *actualOutputFramesPerBuffer,
                                   const PaMacAUHAL *auhalHostApi,
                                   AudioUnit *audioUnit,
                                   AudioConverterRef *srConverter,
                                   AudioDeviceID *audioDevice,
                                   const double sampleRate,
                                   void *refCon );

/* for setting errors. */
#define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
    PaUtil_SetLastHostErrorInfo( paCoreAudio, errorCode, errorText )

/*
 * Callback called when starting or stopping a stream.
 */
static void startStopCallback(
   void *               inRefCon,
   AudioUnit            ci,
   AudioUnitPropertyID  inID,
   AudioUnitScope       inScope,
   AudioUnitElement     inElement )
{
   PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
   UInt32 isRunning;
   UInt32 size = sizeof( isRunning );
   OSStatus err;
   err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
   assert( !err );
   if( err )
      isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
   if( isRunning )
      return; //We are only interested in when we are stopping
   // -- if we are using 2 I/O units, we only need one notification!
   if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
      return;
   PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
   if( stream->state == STOPPING )
      stream->state = STOPPED ;
   if( sfc )
      sfc( stream->streamRepresentation.userData );
}
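/*
 * Illustrative usage sketch (not compiled; the function names and printf are
 * placeholders): the streamFinishedCallback invoked above is the one a client
 * registers through the portable API with Pa_SetStreamFinishedCallback().
 */
#if 0
static void example_stream_finished( void *userData )
{
    printf( "stream finished\n" );
}

static void example_register_finished_callback( PaStream *openedStream )
{
    /* After this call, example_stream_finished() is invoked once the stream
       has fully stopped (see startStopCallback() above). */
    Pa_SetStreamFinishedCallback( openedStream, example_stream_finished );
}
#endif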


/*currently, this is only used in initialization, but it might be modified
  to be used when the list of devices changes.*/
static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
{
    UInt32 size;
    UInt32 propsize;
    VVDBUG(("gatherDeviceInfo()\n"));
    /* -- free any previous allocations -- */
    if( auhalHostApi->devIds )
        PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
    auhalHostApi->devIds = NULL;

    /* -- figure out how many devices there are -- */
    AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  NULL );
    auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );

    VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );

    /* -- copy the device IDs -- */
    auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
                             auhalHostApi->allocations,
                             propsize );
    if( !auhalHostApi->devIds )
        return paInsufficientMemory;
    AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  auhalHostApi->devIds );
#ifdef MAC_CORE_VERBOSE_DEBUG
    {
       int i;
       for( i=0; i<auhalHostApi->devCount; ++i )
          printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
    }
#endif

    size = sizeof(AudioDeviceID);
    auhalHostApi->defaultIn  = kAudioDeviceUnknown;
    auhalHostApi->defaultOut = kAudioDeviceUnknown;

    /* determine the default device. */
    /* I am not sure how these calls to AudioHardwareGetProperty()
       could fail, but in case they do, we use the first available
       device as the default. */
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
                     &size,
                     &auhalHostApi->defaultIn) ) {
       int i;
       auhalHostApi->defaultIn  = kAudioDeviceUnknown;
       VDBUG(("Failed to get default input device from OS."));
       VDBUG((" I will substitute the first available input Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], TRUE ) )
             if( devInfo.maxInputChannels ) {
                auhalHostApi->defaultIn = auhalHostApi->devIds[i];
                break;
             }
       }
    }
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
                     &size,
                     &auhalHostApi->defaultOut) ) {
       int i;
       auhalHostApi->defaultIn  = kAudioDeviceUnknown;
       VDBUG(("Failed to get default output device from OS."));
       VDBUG((" I will substitute the first available output Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], FALSE ) )
             if( devInfo.maxOutputChannels ) {
                auhalHostApi->defaultOut = auhalHostApi->devIds[i];
                break;
             }
       }
    }

    VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn  ) );
    VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );

    return paNoError;
}

/* =================================================================================================== */
/**
 * @internal
 * @brief Clip the desired size against the allowed IO buffer size range for the device.
 */
static PaError ClipToDeviceBufferSize( AudioDeviceID macCoreDeviceId,
                                       int isInput, UInt32 desiredSize, UInt32 *allowedSize )
{
        UInt32 resultSize = desiredSize;
        AudioValueRange audioRange;
        UInt32 propSize = sizeof( audioRange );
        PaError err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
        resultSize = MAX( resultSize, audioRange.mMinimum );
        resultSize = MIN( resultSize, audioRange.mMaximum );
        *allowedSize = resultSize;
        return err;
}

/* =================================================================================================== */
#if 0
static void DumpDeviceProperties( AudioDeviceID macCoreDeviceId,
                          int isInput )
{
    PaError err;
    int i;
    UInt32 propSize;
    UInt32 deviceLatency;
    UInt32 streamLatency;
    UInt32 bufferFrames;
    UInt32 safetyOffset;
    AudioStreamID streamIDs[128];

    printf("\n======= latency query : macCoreDeviceId = %d, isInput %d =======\n", (int)macCoreDeviceId, isInput );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &bufferFrames));
    printf("kAudioDevicePropertyBufferFrameSize: err = %d, propSize = %d, value = %d\n", err, propSize, bufferFrames );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
    printf("kAudioDevicePropertySafetyOffset: err = %d, propSize = %d, value = %d\n", err, propSize, safetyOffset );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
    printf("kAudioDevicePropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, deviceLatency );

    AudioValueRange audioRange;
    propSize = sizeof( audioRange );
    err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, minimum = %g\n", err, propSize, audioRange.mMinimum);
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, maximum = %g\n", err, propSize, audioRange.mMaximum );

    /* Get the streams from the device and query their latency. */
    propSize = sizeof(streamIDs);
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
    int numStreams = propSize / sizeof(AudioStreamID);
    for( i=0; i<numStreams; i++ )
    {
        printf("Stream #%d = %d---------------------- \n", i, streamIDs[i] );

        propSize = sizeof(UInt32);
        err  = WARNING(AudioStreamGetProperty(streamIDs[i], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
        printf("  kAudioStreamPropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, streamLatency );
    }
}
#endif

/* =================================================================================================== */
/**
 * @internal
 * Calculate the fixed latency from the system and the device.
 * Sum of kAudioStreamPropertyLatency +
 *        kAudioDevicePropertySafetyOffset +
 *        kAudioDevicePropertyLatency
 *
 * Some useful info from Jeff Moore on latency.
 * http://osdir.com/ml/coreaudio-api/2010-01/msg00046.html
 * http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
 */
static PaError CalculateFixedDeviceLatency( AudioDeviceID macCoreDeviceId, int isInput, UInt32 *fixedLatencyPtr )
{
    PaError err;
    UInt32 propSize;
    UInt32 deviceLatency;
    UInt32 streamLatency;
    UInt32 safetyOffset;
    AudioStreamID streamIDs[1];

    // To get stream latency we have to get a streamID from the device.
    // We are only going to look at the first stream so only fetch one stream.
    propSize = sizeof(streamIDs);
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
    if( err != paNoError ) goto error;
    if( propSize == sizeof(AudioStreamID) )
    {
        propSize = sizeof(UInt32);
        err  = WARNING(AudioStreamGetProperty(streamIDs[0], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
    }

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
    if( err != paNoError ) goto error;

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
    if( err != paNoError ) goto error;

    *fixedLatencyPtr = deviceLatency + streamLatency + safetyOffset;
    return err;
error:
    return err;
}
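/*
 * Worked example with hypothetical figures: a device reporting a stream latency
 * of 11 frames, a safety offset of 66 frames and a device latency of 64 frames
 * yields a fixed latency of 141 frames, i.e. roughly 3.2 ms at 44100 Hz.
 */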

/* =================================================================================================== */
static PaError CalculateDefaultDeviceLatencies( AudioDeviceID macCoreDeviceId,
                                               int isInput, UInt32 *lowLatencyFramesPtr,
                                               UInt32 *highLatencyFramesPtr )
{
    UInt32 propSize;
    UInt32 bufferFrames = 0;
    UInt32 fixedLatency = 0;
    UInt32 clippedMinBufferSize = 0;

    //DumpDeviceProperties( macCoreDeviceId, isInput );

    PaError err = CalculateFixedDeviceLatency( macCoreDeviceId, isInput, &fixedLatency );
    if( err != paNoError ) goto error;

    // For low latency use a small fixed size buffer clipped to the device range.
    err = ClipToDeviceBufferSize( macCoreDeviceId, isInput, PA_MAC_SMALL_BUFFER_SIZE, &clippedMinBufferSize );
    if( err != paNoError ) goto error;

    // For high latency use the default device buffer size.
    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &bufferFrames));
    if( err != paNoError ) goto error;

    *lowLatencyFramesPtr = fixedLatency + clippedMinBufferSize;
    *highLatencyFramesPtr = fixedLatency + bufferFrames;

    return err;
error:
    return err;
}

/* =================================================================================================== */

static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput)
{
    UInt32 propSize;
    PaError err = paNoError;
    UInt32 i;
    int numChannels = 0;
    AudioBufferList *buflist = NULL;

    VVDBUG(("GetChannelInfo()\n"));

    /* Get the number of channels from the stream configuration.
       Fail if we can't get this. */

    err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
    if (err)
        return err;

    buflist = PaUtil_AllocateMemory(propSize);
    if( !buflist )
       return paInsufficientMemory;
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
    if (err)
        goto error;

    for (i = 0; i < buflist->mNumberBuffers; ++i)
        numChannels += buflist->mBuffers[i].mNumberChannels;

    if (isInput)
        deviceInfo->maxInputChannels = numChannels;
    else
        deviceInfo->maxOutputChannels = numChannels;

    if (numChannels > 0) /* do not try to retrieve the latency if there are no channels. */
    {
        /* Get the latency.  Don't fail if we can't get this. */
        /* default to something reasonable */
        deviceInfo->defaultLowInputLatency = .01;
        deviceInfo->defaultHighInputLatency = .10;
        deviceInfo->defaultLowOutputLatency = .01;
        deviceInfo->defaultHighOutputLatency = .10;
        UInt32 lowLatencyFrames = 0;
        UInt32 highLatencyFrames = 0;
        err = CalculateDefaultDeviceLatencies( macCoreDeviceId, isInput, &lowLatencyFrames, &highLatencyFrames );
        if( err == 0 )
        {
            double lowLatencySeconds = lowLatencyFrames / deviceInfo->defaultSampleRate;
            double highLatencySeconds = highLatencyFrames / deviceInfo->defaultSampleRate;
            if (isInput)
            {
                deviceInfo->defaultLowInputLatency = lowLatencySeconds;
                deviceInfo->defaultHighInputLatency = highLatencySeconds;
            }
            else
            {
                deviceInfo->defaultLowOutputLatency = lowLatencySeconds;
                deviceInfo->defaultHighOutputLatency = highLatencySeconds;
            }
        }
    }
    PaUtil_FreeMemory( buflist );
    return paNoError;
 error:
    PaUtil_FreeMemory( buflist );
    return err;
}

/* =================================================================================================== */
static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
                                     PaDeviceInfo *deviceInfo,
                                     AudioDeviceID macCoreDeviceId,
                                     PaHostApiIndex hostApiIndex )
{
    Float64 sampleRate;
    char *name;
    PaError err = paNoError;
    CFStringRef nameRef;
    UInt32 propSize;

    VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));

    memset(deviceInfo, 0, sizeof(PaDeviceInfo));

    deviceInfo->structVersion = 2;
    deviceInfo->hostApi = hostApiIndex;

    /* Get the device name using CFString */
    propSize = sizeof(nameRef);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceNameCFString, &propSize, &nameRef));
    if (err)
    {
        /* Get the device name using c string.  Fail if we can't get it. */
        err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
        if (err)
            return err;

        name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize+1);
        if ( !name )
            return paInsufficientMemory;
        err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
        if (err)
            return err;
    }
    else
    {
        /* valid CFString so we just allocate a c string big enough to contain the data */
        propSize = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
        name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations, propSize+1);
        if ( !name )
        {
            CFRelease(nameRef);
            return paInsufficientMemory;
        }
        CFStringGetCString(nameRef, name, propSize+1, kCFStringEncodingUTF8);
        CFRelease(nameRef);
    }
    deviceInfo->name = name;

    /* Try to get the default sample rate.  Don't fail if we can't get this. */
    propSize = sizeof(Float64);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
    if (err)
        deviceInfo->defaultSampleRate = 0.0;
    else
        deviceInfo->defaultSampleRate = sampleRate;

    /* Get the maximum number of input and output channels.  Fail if we can't get this. */

    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
    if (err)
        return err;

    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
    if (err)
        return err;

    return paNoError;
}

PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
{
    PaError result = paNoError;
    int i;
    PaMacAUHAL *auhalHostApi = NULL;
    PaDeviceInfo *deviceInfoArray;
    int unixErr;

    VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));

    SInt32 major;
    SInt32 minor;
    Gestalt(gestaltSystemVersionMajor, &major);
    Gestalt(gestaltSystemVersionMinor, &minor);

    // Starting with 10.6 systems, the HAL notification thread is created internally
    if (major == 10 && minor >= 6) {
        CFRunLoopRef theRunLoop = NULL;
        AudioObjectPropertyAddress theAddress = { kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
        OSStatus osErr = AudioObjectSetPropertyData (kAudioObjectSystemObject, &theAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
        if (osErr != noErr) {
            goto error;
        }
    }

    unixErr = initializeXRunListenerList();
    if( 0 != unixErr ) {
       return UNIX_ERR(unixErr);
    }

    auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
    if( !auhalHostApi )
    {
        result = paInsufficientMemory;
        goto error;
    }

    auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
    if( !auhalHostApi->allocations )
    {
        result = paInsufficientMemory;
        goto error;
    }

    auhalHostApi->devIds = NULL;
    auhalHostApi->devCount = 0;

    /* get the info we need about the devices */
    result = gatherDeviceInfo( auhalHostApi );
    if( result != paNoError )
       goto error;

    *hostApi = &auhalHostApi->inheritedHostApiRep;
    (*hostApi)->info.structVersion = 1;
    (*hostApi)->info.type = paCoreAudio;
    (*hostApi)->info.name = "Core Audio";

    (*hostApi)->info.defaultInputDevice = paNoDevice;
    (*hostApi)->info.defaultOutputDevice = paNoDevice;

    (*hostApi)->info.deviceCount = 0;

    if( auhalHostApi->devCount > 0 )
    {
        (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
        if( !(*hostApi)->deviceInfos )
        {
            result = paInsufficientMemory;
            goto error;
        }

        /* allocate all device info structs in a contiguous block */
        deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
        if( !deviceInfoArray )
        {
            result = paInsufficientMemory;
            goto error;
        }

        for( i=0; i < auhalHostApi->devCount; ++i )
        {
            int err;
            err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
                                      auhalHostApi->devIds[i],
                                      hostApiIndex );
            if (err == paNoError)
            { /* copy some info and set the defaults */
                (*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
                    (*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
                    (*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
                (*hostApi)->info.deviceCount++;
            }
            else
            { /* there was an error. we need to shift the devices down, so we ignore this one */
                int j;
                auhalHostApi->devCount--;
                for( j=i; j<auhalHostApi->devCount; ++j )
                   auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
                i--;
            }
        }
    }

    (*hostApi)->Terminate = Terminate;
    (*hostApi)->OpenStream = OpenStream;
    (*hostApi)->IsFormatSupported = IsFormatSupported;

    PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, GetStreamCpuLoad,
                                      PaUtil_DummyRead, PaUtil_DummyWrite,
                                      PaUtil_DummyGetReadAvailable,
                                      PaUtil_DummyGetWriteAvailable );

    PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, PaUtil_DummyGetCpuLoad,
                                      ReadStream, WriteStream,
                                      GetStreamReadAvailable,
                                      GetStreamWriteAvailable );

    return result;

error:
    if( auhalHostApi )
    {
        if( auhalHostApi->allocations )
        {
            PaUtil_FreeAllAllocations( auhalHostApi->allocations );
            PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
        }

        PaUtil_FreeMemory( auhalHostApi );
    }
    return result;
}


static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
{
    int unixErr;

    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;

    VVDBUG(("Terminate()\n"));

    unixErr = destroyXRunListenerList();
    if( 0 != unixErr )
       UNIX_ERR(unixErr);

    /*
        IMPLEMENT ME:
            - clean up any resources not handled by the allocation group
        TODO: Double check that everything is handled by alloc group
    */

    if( auhalHostApi->allocations )
    {
        PaUtil_FreeAllAllocations( auhalHostApi->allocations );
        PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
    }

    PaUtil_FreeMemory( auhalHostApi );
}


static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate )
{
    int inputChannelCount, outputChannelCount;
    PaSampleFormat inputSampleFormat, outputSampleFormat;

    VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
                inputParameters  ? inputParameters->channelCount  : -1,
                inputParameters  ? inputParameters->sampleFormat  : -1,
                outputParameters ? outputParameters->channelCount : -1,
                outputParameters ? outputParameters->sampleFormat : -1,
                (float) sampleRate ));

    /** These first checks are standard PA checks. We do some fancier checks
        later. */
    if( inputParameters )
    {
        inputChannelCount = inputParameters->channelCount;
        inputSampleFormat = inputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( inputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;

        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that input device can support inputChannelCount */
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
            return paInvalidChannelCount;
    }
    else
    {
        inputChannelCount = 0;
    }

    if( outputParameters )
    {
        outputChannelCount = outputParameters->channelCount;
        outputSampleFormat = outputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( outputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;

        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that output device can support outputChannelCount */
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
            return paInvalidChannelCount;

    }
    else
    {
        outputChannelCount = 0;
    }

    /* FEEDBACK */
    /*        I think the only way to check a given format SR combo is     */
    /*        to try opening it. This could be disruptive, is that Okay?   */
    /*        The alternative is to just read off available sample rates,  */
    /*        but this will not work 100% of the time (eg, a device that   */
    /*        supports N output at one rate but only N/2 at a higher rate.)*/

    /* The following code opens the device with the requested parameters to
       see if it works. */
    {
       PaError err;
       PaStream *s;
       err = OpenStream( hostApi, &s, inputParameters, outputParameters,
                           sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
       if( err != paNoError && err != paInvalidSampleRate )
          DBUG( ( "OpenStream @ %g returned: %d: %s\n",
                  (float) sampleRate, err, Pa_GetErrorText( err ) ) );
       if( err )
          return err;
       err = CloseStream( s );
       if( err ) {
          /* FEEDBACK: is this more serious? should we assert? */
          DBUG( ( "WARNING: could not close Stream. %d: %s\n",
                  err, Pa_GetErrorText( err ) ) );
       }
    }

    return paFormatIsSupported;
}
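/*
 * Illustrative usage sketch (not compiled; the function name is a placeholder):
 * clients reach the check above through the portable entry point
 * Pa_IsFormatSupported(), e.g. to test stereo float output at a given rate.
 */
#if 0
static int example_is_stereo_float_supported( PaDeviceIndex device, double sampleRate )
{
    PaStreamParameters out;
    out.device = device;
    out.channelCount = 2;
    out.sampleFormat = paFloat32;
    out.suggestedLatency = Pa_GetDeviceInfo( device )->defaultLowOutputLatency;
    out.hostApiSpecificStreamInfo = NULL;
    return Pa_IsFormatSupported( NULL, &out, sampleRate ) == paFormatIsSupported;
}
#endif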

/* ================================================================================= */
static void InitializeDeviceProperties( PaMacCoreDeviceProperties *deviceProperties )
{
    memset( deviceProperties, 0, sizeof(PaMacCoreDeviceProperties) );
    deviceProperties->sampleRate = 1.0; // Better than random. Overwritten by actual values later on.
    deviceProperties->samplePeriod = 1.0 / deviceProperties->sampleRate;
}

static Float64 CalculateSoftwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    UInt32 latencyFrames = deviceProperties->bufferFrameSize + deviceProperties->deviceLatency + deviceProperties->safetyOffset;
    return latencyFrames * deviceProperties->samplePeriod; // same as dividing by sampleRate but faster
}

static Float64 CalculateHardwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    return deviceProperties->deviceLatency * deviceProperties->samplePeriod; // same as dividing by sampleRate but faster
}

/* Calculate values used to convert Apple timestamps into PA timestamps
 * from the device properties. The final results of this calculation
 * will be used in the audio callback function.
 */
static void UpdateTimeStampOffsets( PaMacCoreStream *stream )
{
    Float64 inputSoftwareLatency = 0.0;
    Float64 inputHardwareLatency = 0.0;
    Float64 outputSoftwareLatency = 0.0;
    Float64 outputHardwareLatency = 0.0;

    if( stream->inputUnit != NULL )
    {
        inputSoftwareLatency = CalculateSoftwareLatencyFromProperties( stream, &stream->inputProperties );
        inputHardwareLatency = CalculateHardwareLatencyFromProperties( stream, &stream->inputProperties );
    }
    if( stream->outputUnit != NULL )
    {
        outputSoftwareLatency = CalculateSoftwareLatencyFromProperties( stream, &stream->outputProperties );
        outputHardwareLatency = CalculateHardwareLatencyFromProperties( stream, &stream->outputProperties );
    }

    /* We only need a mutex around setting these variables as a group. */
    pthread_mutex_lock( &stream->timingInformationMutex );
    stream->timestampOffsetCombined = inputSoftwareLatency + outputSoftwareLatency;
    stream->timestampOffsetInputDevice = inputHardwareLatency;
    stream->timestampOffsetOutputDevice = outputHardwareLatency;
    pthread_mutex_unlock( &stream->timingInformationMutex );
}

/* ================================================================================= */

/* can be used to update from nominal or actual sample rate */
static OSStatus UpdateSampleRateFromDeviceProperty( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID sampleRatePropertyID )
{
    PaMacCoreDeviceProperties * deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;

    Float64 sampleRate = 0.0;
    UInt32 propSize = sizeof(Float64);
    OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, sampleRatePropertyID, &propSize, &sampleRate);
    if( (osErr == noErr) && (sampleRate > 1000.0) ) /* avoid divide by zero if there's an error */
    {
        deviceProperties->sampleRate = sampleRate;
        deviceProperties->samplePeriod = 1.0 / sampleRate;
    }
    return osErr;
}

static OSStatus AudioDevicePropertyActualSampleRateListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;

    // Make sure the callback is operating on a stream that is still valid!
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );

    OSStatus osErr = UpdateSampleRateFromDeviceProperty( stream, inDevice, isInput, kAudioDevicePropertyActualSampleRate );
    if( osErr == noErr )
    {
        UpdateTimeStampOffsets( stream );
    }
    return osErr;
}

/* ================================================================================= */
static OSStatus QueryUInt32DeviceProperty( AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID propertyID, UInt32 *outValue )
{
    UInt32 propertyValue = 0;
    UInt32 propertySize = sizeof(UInt32);
    OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, propertyID, &propertySize, &propertyValue);
    if( osErr == noErr )
    {
        *outValue = propertyValue;
    }
    return osErr;
}

static OSStatus AudioDevicePropertyGenericListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
{
    OSStatus osErr = noErr;
    PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;

    // Make sure the callback is operating on a stream that is still valid!
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );

    PaMacCoreDeviceProperties *deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;
    UInt32 *valuePtr = NULL;
    switch( inPropertyID )
    {
        case kAudioDevicePropertySafetyOffset:
            valuePtr = &deviceProperties->safetyOffset;
            break;

        case kAudioDevicePropertyLatency:
            valuePtr = &deviceProperties->deviceLatency;
            break;

        case kAudioDevicePropertyBufferFrameSize:
            valuePtr = &deviceProperties->bufferFrameSize;
            break;
    }
    if( valuePtr != NULL )
    {
        osErr = QueryUInt32DeviceProperty( inDevice, isInput, inPropertyID, valuePtr );
        if( osErr == noErr )
        {
            UpdateTimeStampOffsets( stream );
        }
    }
    return osErr;
}

/* ================================================================================= */
/*
 * Setup listeners in case device properties change during the run. */
static OSStatus SetupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
{
    OSStatus osErr = noErr;
    PaMacCoreDeviceProperties *deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;

    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertyLatency, &deviceProperties->deviceLatency )) != noErr ) return osErr;
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertyBufferFrameSize, &deviceProperties->bufferFrameSize )) != noErr ) return osErr;
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertySafetyOffset, &deviceProperties->safetyOffset )) != noErr ) return osErr;

    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate,
                                   AudioDevicePropertyActualSampleRateListenerProc, stream );

    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioStreamPropertyLatency,
                                   AudioDevicePropertyGenericListenerProc, stream );
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize,
                                   AudioDevicePropertyGenericListenerProc, stream );
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset,
                                   AudioDevicePropertyGenericListenerProc, stream );

    return osErr;
}

static void CleanupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
{
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate,
                                   AudioDevicePropertyActualSampleRateListenerProc );

    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyLatency,
                                   AudioDevicePropertyGenericListenerProc );
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize,
                                   AudioDevicePropertyGenericListenerProc );
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset,
                                   AudioDevicePropertyGenericListenerProc );
}
1160

    
1161
/* ================================================================================= */
1162
static PaError OpenAndSetupOneAudioUnit(
1163
                                   const PaMacCoreStream *stream,
1164
                                   const PaStreamParameters *inStreamParams,
1165
                                   const PaStreamParameters *outStreamParams,
1166
                                   const UInt32 requestedFramesPerBuffer,
1167
                                   UInt32 *actualInputFramesPerBuffer,
1168
                                   UInt32 *actualOutputFramesPerBuffer,
1169
                                   const PaMacAUHAL *auhalHostApi,
1170
                                   AudioUnit *audioUnit,
1171
                                   AudioConverterRef *srConverter,
1172
                                   AudioDeviceID *audioDevice,
1173
                                   const double sampleRate,
1174
                                   void *refCon )
1175
{
1176
    ComponentDescription desc;
1177
    Component comp;
1178
    /*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
1179
    AudioStreamBasicDescription desiredFormat;
1180
    OSStatus result = noErr;
1181
    PaError paResult = paNoError;
1182
    int line = 0;
1183
    UInt32 callbackKey;
1184
    AURenderCallbackStruct rcbs;
1185
    unsigned long macInputStreamFlags  = paMacCorePlayNice;
1186
    unsigned long macOutputStreamFlags = paMacCorePlayNice;
1187
    SInt32 const *inChannelMap = NULL;
1188
    SInt32 const *outChannelMap = NULL;
1189
    unsigned long inChannelMapSize = 0;
1190
    unsigned long outChannelMapSize = 0;
1191

    
1192
    VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
1193
                inStreamParams  ? inStreamParams->channelCount  : -1,
1194
                inStreamParams  ? inStreamParams->sampleFormat  : -1,
1195
                outStreamParams ? outStreamParams->channelCount : -1,
1196
                outStreamParams ? outStreamParams->sampleFormat : -1,
1197
                requestedFramesPerBuffer ));
1198

    
1199
    /* -- handle the degenerate case  -- */
1200
    if( !inStreamParams && !outStreamParams ) {
1201
       *audioUnit = NULL;
1202
       *audioDevice = kAudioDeviceUnknown;
1203
       return paNoError;
1204
    }
1205

    
1206
    /* -- get the user's api specific info, if they set any -- */
1207
    if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
1208
    {
1209
       macInputStreamFlags=
1210
            ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1211
                  ->flags;
1212
       inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1213
                  ->channelMap;
1214
       inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1215
                  ->channelMapSize;
1216
    }
1217
    if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
1218
    {
1219
       macOutputStreamFlags=
1220
            ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1221
                  ->flags;
1222
       outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1223
                  ->channelMap;
1224
       outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1225
                  ->channelMapSize; 
1226
    }
1227
    /* Override user's flags here, if desired for testing. */
1228

    
1229
    /*
1230
     * The HAL AU is a Mac OS style "component".
1231
     * the first few steps deal with that.
1232
     * Later steps work on a combination of Mac OS
1233
     * components and the slightly lower level
1234
     * HAL.
1235
     */
1236

    
1237
    /* -- describe the output type AudioUnit -- */
1238
    /*  Note: for the default AudioUnit, we could use the
1239
     *  componentSubType value kAudioUnitSubType_DefaultOutput;
1240
     *  but I don't think that's relevant here.
1241
     */
1242
    desc.componentType         = kAudioUnitType_Output;
1243
    desc.componentSubType      = kAudioUnitSubType_HALOutput;
1244
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1245
    desc.componentFlags        = 0;
1246
    desc.componentFlagsMask    = 0;
1247
    /* -- find the component -- */
1248
    comp = FindNextComponent( NULL, &desc );
1249
    if( !comp )
1250
    {
1251
       DBUG( ( "AUHAL component not found." ) );
1252
       *audioUnit = NULL;
1253
       *audioDevice = kAudioDeviceUnknown;
1254
       return paUnanticipatedHostError;
1255
    }
1256
    /* -- open it -- */
1257
    result = OpenAComponent( comp, audioUnit );
1258
    if( result )
1259
    {
1260
       DBUG( ( "Failed to open AUHAL component." ) );
1261
       *audioUnit = NULL;
1262
       *audioDevice = kAudioDeviceUnknown;
1263
       return ERR( result );
1264
    }
1265
    /* -- prepare a little error handling logic / hackery -- */
1266
#define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
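/* ERR_WRAP() stashes the OSStatus result and the failing __LINE__ and jumps to
   the error: label at the bottom of this function, where the half-opened unit
   is closed and PaMacCore_SetError() converts the host error for PortAudio. */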
1267

    
1268
    /* -- if there is input, we have to explicitly enable input -- */
1269
    if( inStreamParams )
1270
    {
1271
       UInt32 enableIO = 1;
1272
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1273
                 kAudioOutputUnitProperty_EnableIO,
1274
                 kAudioUnitScope_Input,
1275
                 INPUT_ELEMENT,
1276
                 &enableIO,
1277
                 sizeof(enableIO) ) );
1278
    }
1279
    /* -- if there is no output, we must explicitly disable output -- */
1280
    if( !outStreamParams )
1281
    {
1282
       UInt32 enableIO = 0;
1283
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1284
                 kAudioOutputUnitProperty_EnableIO,
1285
                 kAudioUnitScope_Output,
1286
                 OUTPUT_ELEMENT,
1287
                 &enableIO,
1288
                 sizeof(enableIO) ) );
1289
    }
1290

    
1291
    /* -- set the devices -- */
1292
    /* make sure input and output are the same device if we are doing input and
1293
       output. */
1294
    if( inStreamParams && outStreamParams )
1295
    {
1296
       assert( outStreamParams->device == inStreamParams->device );
1297
    }
1298
    if( inStreamParams )
1299
    {
1300
       *audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
1301
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1302
                    kAudioOutputUnitProperty_CurrentDevice,
1303
                    kAudioUnitScope_Global,
1304
                    INPUT_ELEMENT,
1305
                    audioDevice,
1306
                    sizeof(AudioDeviceID) ) );
1307
    }
1308
    if( outStreamParams && outStreamParams != inStreamParams )
1309
    {
1310
       *audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
1311
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1312
                    kAudioOutputUnitProperty_CurrentDevice,
1313
                    kAudioUnitScope_Global,
1314
                    OUTPUT_ELEMENT,
1315
                    audioDevice,
1316
                    sizeof(AudioDeviceID) ) );
1317
    }
1318
    /* -- add listener for dropouts -- */
1319
    result = AudioDeviceAddPropertyListener( *audioDevice,
1320
                                             0,
1321
                                             outStreamParams ? false : true,
1322
                                             kAudioDeviceProcessorOverload,
1323
                                             xrunCallback,
1324
                                             addToXRunListenerList( (void *)stream ) ) ;
1325
    if( result == kAudioHardwareIllegalOperationError ) {
1326
       // -- already registered, we're good
1327
    } else {
1328
       // -- not already registered, just check for errors
1329
       ERR_WRAP( result );
1330
    }
1331
    /* -- listen for stream start and stop -- */
1332
    ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
1333
                                            kAudioOutputUnitProperty_IsRunning,
1334
                                            startStopCallback,
1335
                                            (void *)stream ) );
1336

    
1337
    /* -- set format -- */
1338
    bzero( &desiredFormat, sizeof(desiredFormat) );
1339
    desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1340
    desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1341
    desiredFormat.mFramesPerPacket  = 1;
1342
    desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1343

    
1344
    result = 0;
1345
    /*  set device format first, but only touch the device if the user asked */
1346
    if( inStreamParams ) {
1347
       /*The callback never calls back if we don't set the FPB */
1348
       /*This seems weird, because I would think setting anything on the device
1349
         would be disruptive.*/
1350
       paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
1351
                                          requestedFramesPerBuffer,
1352
                                          actualInputFramesPerBuffer );
1353
       if( paResult ) goto error;
1354
       if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
1355
          bool requireExact;
1356
          requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
1357
          paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
1358
                                                 requireExact, sampleRate );
1359
          if( paResult ) goto error;
1360
       }
1361
       if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
1362
          *actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
1363
    }
1364
    if( outStreamParams && !inStreamParams ) {
1365
       /*The callback never calls back if we don't set the FPB */
1366
       /*This seems weird, because I would think setting anything on the device
1367
         would be disruptive.*/
1368
       paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
1369
                                          requestedFramesPerBuffer,
1370
                                          actualOutputFramesPerBuffer );
1371
       if( paResult ) goto error;
1372
       if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
1373
          bool requireExact;
1374
          requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
1375
          paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1376
                                                 requireExact, sampleRate );
1377
          if( paResult ) goto error;
1378
       }
1379
    }
1380

    
1381
    /* -- set the quality of the output converter -- */
1382
    if( outStreamParams ) {
1383
       UInt32 value = kAudioConverterQuality_Max;
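       /* The 0x0700 bits of the paMacCore flags carry the requested conversion
          quality (the paMacCore_ConversionQuality* values noted in the case
          labels below); anything else, including no request at all, falls
          through to the maximum-quality value initialised above. */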
1384
       switch( macOutputStreamFlags & 0x0700 ) {
1385
       case 0x0100: /*paMacCore_ConversionQualityMin:*/
1386
          value=kRenderQuality_Min;
1387
          break;
1388
       case 0x0200: /*paMacCore_ConversionQualityLow:*/
1389
          value=kRenderQuality_Low;
1390
          break;
1391
       case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1392
          value=kRenderQuality_Medium;
1393
          break;
1394
       case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1395
          value=kRenderQuality_High;
1396
          break;
1397
       }
1398
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1399
                    kAudioUnitProperty_RenderQuality,
1400
                    kAudioUnitScope_Global,
1401
                    OUTPUT_ELEMENT,
1402
                    &value,
1403
                    sizeof(value) ) );
1404
    }
1405
    /* now set the format on the Audio Units. */
1406
    if( outStreamParams )
1407
    {
1408
       desiredFormat.mSampleRate    =sampleRate;
1409
       desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1410
       desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1411
       desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1412
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1413
                            kAudioUnitProperty_StreamFormat,
1414
                            kAudioUnitScope_Input,
1415
                            OUTPUT_ELEMENT,
1416
                            &desiredFormat,
1417
                            sizeof(AudioStreamBasicDescription) ) );
1418
    }
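    /* For the input element the data flow is mirrored: the hardware drives the
       element's input scope, and our float32 client format is read from its
       output scope, which is why the calls below use kAudioUnitScope_Output
       with INPUT_ELEMENT. */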
1419
    if( inStreamParams )
1420
    {
1421
       AudioStreamBasicDescription sourceFormat;
1422
       UInt32 size = sizeof( AudioStreamBasicDescription );
1423

    
1424
       /* keep the sample rate of the device, or we confuse AUHAL */
1425
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1426
                            kAudioUnitProperty_StreamFormat,
1427
                            kAudioUnitScope_Input,
1428
                            INPUT_ELEMENT,
1429
                            &sourceFormat,
1430
                            &size ) );
1431
       desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1432
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1433
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1434
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1435
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1436
                            kAudioUnitProperty_StreamFormat,
1437
                            kAudioUnitScope_Output,
1438
                            INPUT_ELEMENT,
1439
                            &desiredFormat,
1440
                            sizeof(AudioStreamBasicDescription) ) );
1441
    }
1442
    /* set the maximumFramesPerSlice */
1443
    /* not doing this causes real problems
1444
       (e.g. the callback might not be called). The idea of setting both this
1445
       and the frames per buffer on the device is that we'll be most likely
1446
       to actually get the frame size we requested in the callback with the
1447
       minimum latency. */
1448
    if( outStreamParams ) {
1449
       UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1450
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1451
                            kAudioUnitProperty_MaximumFramesPerSlice,
1452
                            kAudioUnitScope_Input,
1453
                            OUTPUT_ELEMENT,
1454
                            actualOutputFramesPerBuffer,
1455
                            sizeof(*actualOutputFramesPerBuffer) ) );
1456
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1457
                            kAudioUnitProperty_MaximumFramesPerSlice,
1458
                            kAudioUnitScope_Global,
1459
                            OUTPUT_ELEMENT,
1460
                            actualOutputFramesPerBuffer,
1461
                            &size ) );
1462
    }
1463
    if( inStreamParams ) {
1464
       /*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1465
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1466
                            kAudioUnitProperty_MaximumFramesPerSlice,
1467
                            kAudioUnitScope_Output,
1468
                            INPUT_ELEMENT,
1469
                            actualInputFramesPerBuffer,
1470
                            sizeof(*actualInputFramesPerBuffer) ) );
1471
/* Don't know why this causes problems
1472
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1473
                            kAudioUnitProperty_MaximumFramesPerSlice,
1474
                            kAudioUnitScope_Global, //Output,
1475
                            INPUT_ELEMENT,
1476
                            actualInputFramesPerBuffer,
1477
                            &size ) );
1478
*/
1479
    }
1480

    
1481
    /* -- if we have input, we may need to setup an SR converter -- */
1482
    /* even if we got the sample rate we asked for, we need to do
1483
       the conversion in case another program changes the underlying SR. */
1484
    /* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1485
    if( inStreamParams ) {
1486
       AudioStreamBasicDescription desiredFormat;
1487
       AudioStreamBasicDescription sourceFormat;
1488
       UInt32 sourceSize = sizeof( sourceFormat );
1489
       bzero( &desiredFormat, sizeof(desiredFormat) );
1490
       desiredFormat.mSampleRate       = sampleRate;
1491
       desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1492
       desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1493
       desiredFormat.mFramesPerPacket  = 1;
1494
       desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1495
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1496
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1497
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1498

    
1499
       /* get the source format */
1500
       ERR_WRAP( AudioUnitGetProperty(
1501
                         *audioUnit,
1502
                         kAudioUnitProperty_StreamFormat,
1503
                         kAudioUnitScope_Output,
1504
                         INPUT_ELEMENT,
1505
                         &sourceFormat,
1506
                         &sourceSize ) );
1507

    
1508
       if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1509
       {
1510
          UInt32 value = kAudioConverterQuality_Max;
1511
          switch( macInputStreamFlags & 0x0700 ) {
1512
          case 0x0100: /*paMacCore_ConversionQualityMin:*/
1513
             value=kAudioConverterQuality_Min;
1514
             break;
1515
          case 0x0200: /*paMacCore_ConversionQualityLow:*/
1516
             value=kAudioConverterQuality_Low;
1517
             break;
1518
          case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1519
             value=kAudioConverterQuality_Medium;
1520
             break;
1521
          case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1522
             value=kAudioConverterQuality_High;
1523
             break;
1524
          }
1525
          VDBUG(( "Creating sample rate converter for input"
1526
                  " to convert from %g to %g\n",
1527
                  (float)sourceFormat.mSampleRate,
1528
                  (float)desiredFormat.mSampleRate ) );
1529
          /* create our converter */
1530
          ERR_WRAP( AudioConverterNew( 
1531
                             &sourceFormat,
1532
                             &desiredFormat,
1533
                             srConverter ) );
1534
          /* Set quality */
1535
          ERR_WRAP( AudioConverterSetProperty(
1536
                             *srConverter,
1537
                             kAudioConverterSampleRateConverterQuality,
1538
                             sizeof( value ),
1539
                             &value ) );
1540
       }
1541
    }
1542
    /* -- set IOProc (callback) -- */
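    /* A unit that produces output (simplex output or full duplex) gets
       AudioIOProc attached as its render callback; an input-only unit instead
       uses the AUHAL input callback, which fires when freshly captured data is
       ready to be fetched with AudioUnitRender(). */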
1543
    callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1544
                                  : kAudioOutputUnitProperty_SetInputCallback ;
1545
    rcbs.inputProc = AudioIOProc;
1546
    rcbs.inputProcRefCon = refCon;
1547
    ERR_WRAP( AudioUnitSetProperty(
1548
                               *audioUnit,
1549
                               callbackKey,
1550
                               kAudioUnitScope_Output,
1551
                               outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1552
                               &rcbs,
1553
                               sizeof(rcbs)) );
1554

    
1555
    if( inStreamParams && outStreamParams && *srConverter )
1556
           ERR_WRAP( AudioUnitSetProperty(
1557
                               *audioUnit,
1558
                               kAudioOutputUnitProperty_SetInputCallback,
1559
                               kAudioUnitScope_Output,
1560
                               INPUT_ELEMENT,
1561
                               &rcbs,
1562
                               sizeof(rcbs)) );
1563

    
1564
    /* channel mapping. */
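    /* The maps applied below come straight from the PaMacCoreStreamInfo fields
       read near the top of this function (channelMap / channelMapSize).
       Roughly, and only as an illustrative sketch of the caller's side
       (assuming the PaMacCore_SetupStreamInfo() and PaMacCore_SetupChannelMap()
       helpers from the public pa_mac_core.h header), a client might do:

           SInt32 map[2] = { -1, 0 };  // leave one channel unconnected, feed the other from PA channel 0
           PaMacCoreStreamInfo info;
           PaMacCore_SetupStreamInfo( &info, paMacCorePlayNice );
           PaMacCore_SetupChannelMap( &info, map, 2 );
           outputParameters.hostApiSpecificStreamInfo = &info;  // hypothetical caller-side variable

       A value of -1 leaves a channel unconnected, following the
       kAudioOutputUnitProperty_ChannelMap convention used below. */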
1565
    if(inChannelMap)
1566
    {
1567
        UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1568

    
1569
        //for each channel of desired input, map the channel from
1570
        //the device's output channel.
1571
        ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1572
                                kAudioOutputUnitProperty_ChannelMap,
1573
                                kAudioUnitScope_Output,
1574
                                INPUT_ELEMENT,
1575
                                inChannelMap,
1576
                                mapSize));
1577
    }
1578
    if(outChannelMap)
1579
    {
1580
        UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1581

    
1582
        //for each channel of desired output, map the channel from
1583
        //the device's output channel.
1584
        ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1585
                                kAudioOutputUnitProperty_ChannelMap,
1586
                                kAudioUnitScope_Output,
1587
                                OUTPUT_ELEMENT,
1588
                                outChannelMap,
1589
                                mapSize));
1590
    }
1591
    /* initialize the audio unit */
1592
    ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1593

    
1594
    if( inStreamParams && outStreamParams )
1595
    {
1596
        VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1597
    }
1598
    else if( inStreamParams )
1599
    {
1600
        VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1601
    }
1602
    else if( outStreamParams )
1603
    {
1604
        VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1605
    }
1606
    return paNoError;
1607
#undef ERR_WRAP
1608

    
1609
    error:
1610
       CloseComponent( *audioUnit );
1611
       *audioUnit = NULL;
1612
       if( result )
1613
          return PaMacCore_SetError( result, line, 1 );
1614
       return paResult;
1615
}
1616

    
1617
/* =================================================================================================== */
1618

    
1619
static UInt32 CalculateOptimalBufferSize( PaMacAUHAL *auhalHostApi,
1620
                                  const PaStreamParameters *inputParameters,
1621
                                  const PaStreamParameters *outputParameters,
1622
                                  UInt32 fixedInputLatency,
1623
                                  UInt32 fixedOutputLatency,
1624
                                  double sampleRate,
1625
                                  UInt32 requestedFramesPerBuffer )
1626
{
1627
    UInt32 resultBufferSizeFrames = 0;  
1628
    // Use maximum of suggested input and output latencies.
1629
    if( inputParameters )
1630
    {
1631
        UInt32 suggestedLatencyFrames = inputParameters->suggestedLatency * sampleRate;
1632
        // Calculate a buffer size assuming we are double buffered.
1633
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedInputLatency;
1634
        // Prevent negative latency.
1635
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );       
1636
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1637
    }
1638
    if( outputParameters )
1639
    {        
1640
        UInt32 suggestedLatencyFrames = outputParameters->suggestedLatency * sampleRate;
1641
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedOutputLatency;
1642
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );
1643
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1644
    }
1645
    
1646
    // can't have zero frames. code to round up to next user buffer requires non-zero
1647
    resultBufferSizeFrames = MAX( resultBufferSizeFrames, 1 );
1648
    
1649
    if( requestedFramesPerBuffer != paFramesPerBufferUnspecified )
1650
    {
1651
        // make host buffer the next highest integer multiple of user frames per buffer
1652
        UInt32 n = (resultBufferSizeFrames + requestedFramesPerBuffer - 1) / requestedFramesPerBuffer;
1653
        resultBufferSizeFrames = n * requestedFramesPerBuffer;
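        /* e.g. (illustrative figures only): a latency-derived size of 441
           frames with a user request of 256 gives n = (441 + 255) / 256 = 2,
           so the host buffer becomes 512 frames -- the smallest multiple of
           the user's frames-per-buffer that still covers the request. */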
1654

    
1655
        
1656
        // FIXME: really we should be searching for a multiple of requestedFramesPerBuffer
1657
        // that is >= suggested latency and also fits within device buffer min/max
1658
        
1659
    }else{
1660
            VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1661
            resultBufferSizeFrames ) );
1662
    }
1663
    
1664
    // Clip to the capabilities of the device.
1665
    if( inputParameters )
1666
    {
1667
        ClipToDeviceBufferSize( auhalHostApi->devIds[inputParameters->device],
1668
                               true, // In the old code isInput was false!
1669
                               resultBufferSizeFrames, &resultBufferSizeFrames );
1670
    }
1671
    if( outputParameters )
1672
    {
1673
        ClipToDeviceBufferSize( auhalHostApi->devIds[outputParameters->device],
1674
                               false, resultBufferSizeFrames, &resultBufferSizeFrames );
1675
    }
1676
    VDBUG(("After querying hardware, setting block size to %ld.\n", resultBufferSizeFrames));
1677

    
1678
    return resultBufferSizeFrames;
1679
}
1680

    
1681
/* =================================================================================================== */
1682
/* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
1683
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1684
                           PaStream** s,
1685
                           const PaStreamParameters *inputParameters,
1686
                           const PaStreamParameters *outputParameters,
1687
                           double sampleRate,
1688
                           unsigned long requestedFramesPerBuffer,
1689
                           PaStreamFlags streamFlags,
1690
                           PaStreamCallback *streamCallback,
1691
                           void *userData )
1692
{
1693
    PaError result = paNoError;
1694
    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1695
    PaMacCoreStream *stream = 0;
1696
    int inputChannelCount, outputChannelCount;
1697
    PaSampleFormat inputSampleFormat, outputSampleFormat;
1698
    PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1699
    UInt32 fixedInputLatency = 0;
1700
    UInt32 fixedOutputLatency = 0;
1701
    // Accumulate contributions to latency in these variables.
1702
    UInt32 inputLatencyFrames = 0;
1703
    UInt32 outputLatencyFrames = 0;
1704
    UInt32 suggestedLatencyFramesPerBuffer = requestedFramesPerBuffer;
1705
    
1706
    VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1707
                inputParameters  ? inputParameters->channelCount  : -1,
1708
                inputParameters  ? inputParameters->sampleFormat  : -1,
1709
                outputParameters ? outputParameters->channelCount : -1,
1710
                outputParameters ? outputParameters->sampleFormat : -1,
1711
                (float) sampleRate,
1712
                requestedFramesPerBuffer ));
1713
    VDBUG( ("Opening Stream.\n") );
1714
        
1715
    /* These first few bits of code are from paSkeleton with a few modifications. */
1716
    if( inputParameters )
1717
    {
1718
        inputChannelCount = inputParameters->channelCount;
1719
        inputSampleFormat = inputParameters->sampleFormat;
1720

    
1721
                /* @todo Blocking read/write on Mac is not yet supported. */
1722
                if( !streamCallback && inputSampleFormat & paNonInterleaved )
1723
                {
1724
                        return paSampleFormatNotSupported;
1725
                }
1726
                
1727
        /* unless alternate device specification is supported, reject the use of
1728
            paUseHostApiSpecificDeviceSpecification */
1729

    
1730
        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1731
            return paInvalidDevice;
1732

    
1733
        /* check that input device can support inputChannelCount */
1734
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1735
            return paInvalidChannelCount;
1736

    
1737
        /* Host supports interleaved float32 */
1738
        hostInputSampleFormat = paFloat32;
1739
    }
1740
    else
1741
    {
1742
        inputChannelCount = 0;
1743
        inputSampleFormat = hostInputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1744
    }
1745

    
1746
    if( outputParameters )
1747
    {
1748
        outputChannelCount = outputParameters->channelCount;
1749
        outputSampleFormat = outputParameters->sampleFormat;
1750
        
1751
                /* @todo Blocking read/write on Mac is not yet supported. */
1752
                if( !streamCallback && outputSampleFormat & paNonInterleaved )
1753
                {
1754
                        return paSampleFormatNotSupported;
1755
                }
1756
                
1757
        /* unless alternate device specification is supported, reject the use of
1758
            paUseHostApiSpecificDeviceSpecification */
1759

    
1760
        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1761
            return paInvalidDevice;
1762

    
1763
        /* check that output device can support outputChannelCount */
1764
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1765
            return paInvalidChannelCount;
1766

    
1767
        /* Host supports interleaved float32 */
1768
        hostOutputSampleFormat = paFloat32;
1769
    }
1770
    else
1771
    {
1772
        outputChannelCount = 0;
1773
        outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1774
    }
1775

    
1776
    /* validate platform specific flags */
1777
    if( (streamFlags & paPlatformSpecificFlags) != 0 )
1778
        return paInvalidFlag; /* unexpected platform specific flag */
1779

    
1780
    stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1781
    if( !stream )
1782
    {
1783
        result = paInsufficientMemory;
1784
        goto error;
1785
    }
1786

    
1787
    /* If we fail after this point, we may be left in a bad state, with
1788
       some data structures setup and others not. So, first thing we
1789
       do is initialize everything so that if we fail, we know what hasn't
1790
       been touched.
1791
     */
1792
    bzero( stream, sizeof( PaMacCoreStream ) );
1793
    
1794
    /*
1795
    stream->blio.inputRingBuffer.buffer = NULL;
1796
    stream->blio.outputRingBuffer.buffer = NULL;
1797
    stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1798
    stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1799
    stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1800
    stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1801
    */
1802

    
1803
    /* assert( streamCallback ) ; */ /* only callback mode is implemented */
1804
    if( streamCallback )
1805
    {
1806
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1807
                                        &auhalHostApi->callbackStreamInterface,
1808
                                        streamCallback, userData );
1809
    }
1810
    else
1811
    {
1812
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1813
                                        &auhalHostApi->blockingStreamInterface,
1814
                                        BlioCallback, &stream->blio );
1815
    }
1816

    
1817
    PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1818

    
1819
    
1820
    if( inputParameters )
1821
    {
1822
        CalculateFixedDeviceLatency( auhalHostApi->devIds[inputParameters->device], true, &fixedInputLatency );
1823
        inputLatencyFrames += fixedInputLatency;
1824
    }
1825
    if( outputParameters )
1826
    {        
1827
        CalculateFixedDeviceLatency( auhalHostApi->devIds[outputParameters->device], false, &fixedOutputLatency );
1828
        outputLatencyFrames += fixedOutputLatency;
1829

    
1830
    }
1831
    
1832
    suggestedLatencyFramesPerBuffer = CalculateOptimalBufferSize( auhalHostApi, inputParameters, outputParameters,
1833
                                                                 fixedInputLatency, fixedOutputLatency,
1834
                                                                 sampleRate, requestedFramesPerBuffer );
1835
    if( requestedFramesPerBuffer == paFramesPerBufferUnspecified )
1836
        {
1837
        requestedFramesPerBuffer = suggestedLatencyFramesPerBuffer;
1838
    }
1839

    
1840
    /* -- Now we actually open and setup streams. -- */
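    /* When input and output use the same device we open a single AUHAL unit
       that serves both directions (the lowest-latency path); otherwise each
       direction gets its own unit and the input side is decoupled from the
       output side by the ring buffer set up further below. */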
1841
    if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1842
    { /* full duplex. One device. */
1843
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1844
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1845
       result = OpenAndSetupOneAudioUnit( stream,
1846
                                          inputParameters,
1847
                                          outputParameters,
1848
                                          suggestedLatencyFramesPerBuffer,
1849
                                          &inputFramesPerBuffer,
1850
                                          &outputFramesPerBuffer,
1851
                                          auhalHostApi,
1852
                                          &(stream->inputUnit),
1853
                                          &(stream->inputSRConverter),
1854
                                          &(stream->inputDevice),
1855
                                          sampleRate,
1856
                                          stream );
1857
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1858
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1859
       stream->outputUnit = stream->inputUnit;
1860
       stream->outputDevice = stream->inputDevice;
1861
       if( result != paNoError )
1862
           goto error;
1863
    }
1864
    else
1865
    { /* full duplex, different devices OR simplex */
1866
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1867
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1868
       result = OpenAndSetupOneAudioUnit( stream,
1869
                                          NULL,
1870
                                          outputParameters,
1871
                                          suggestedLatencyFramesPerBuffer,
1872
                                          NULL,
1873
                                          &outputFramesPerBuffer,
1874
                                          auhalHostApi,
1875
                                          &(stream->outputUnit),
1876
                                          NULL,
1877
                                          &(stream->outputDevice),
1878
                                          sampleRate,
1879
                                          stream );
1880
       if( result != paNoError )
1881
           goto error;
1882
       result = OpenAndSetupOneAudioUnit( stream,
1883
                                          inputParameters,
1884
                                          NULL,
1885
                                          suggestedLatencyFramesPerBuffer,
1886
                                          &inputFramesPerBuffer,
1887
                                          NULL,
1888
                                          auhalHostApi,
1889
                                          &(stream->inputUnit),
1890
                                          &(stream->inputSRConverter),
1891
                                          &(stream->inputDevice),
1892
                                          sampleRate,
1893
                                          stream );
1894
       if( result != paNoError )
1895
           goto error;
1896
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1897
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1898
    }
1899
    
1900
    inputLatencyFrames += stream->inputFramesPerBuffer;
1901
    outputLatencyFrames += stream->outputFramesPerBuffer;
1902
    
1903
    if( stream->inputUnit ) {
1904
       const size_t szfl = sizeof(float);
1905
       /* setup the AudioBufferList used for input */
1906
       bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1907
       stream->inputAudioBufferList.mNumberBuffers = 1;
1908
       stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1909
                 = inputChannelCount;
1910
       stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1911
                 = stream->inputFramesPerBuffer*inputChannelCount*szfl;
1912
       stream->inputAudioBufferList.mBuffers[0].mData
1913
                 = (float *) calloc(
1914
                               stream->inputFramesPerBuffer*inputChannelCount,
1915
                               szfl );
1916
       if( !stream->inputAudioBufferList.mBuffers[0].mData )
1917
       {
1918
          result = paInsufficientMemory;
1919
          goto error;
1920
       }
1921
        
1922
       /*
1923
        * If input and output devs are different or we are doing SR conversion,
1924
        * we also need a ring buffer to store input data while waiting for
1925
        * output data.
1926
        */
1927
       if( (stream->outputUnit && (stream->inputUnit != stream->outputUnit))
1928
           || stream->inputSRConverter )
1929
       {
1930
          /* May want the ringSize or initial position in
1931
             ring buffer to depend somewhat on sample rate change */
1932

    
1933
          void *data;
1934
          long ringSize;
1935

    
1936
          ringSize = computeRingBufferSize( inputParameters,
1937
                                            outputParameters,
1938
                                            stream->inputFramesPerBuffer,
1939
                                            stream->outputFramesPerBuffer,
1940
                                            sampleRate );
1941
          /*ringSize <<= 4; *//*16x bigger, for testing */
1942

    
1943

    
1944
          /*now, we need to allocate memory for the ring buffer*/
1945
          data = calloc( ringSize, szfl*inputParameters->channelCount );
1946
          if( !data )
1947
          {
1948
             result = paInsufficientMemory;
1949
             goto error;
1950
          }
1951

    
1952
          /* now we can initialize the ring buffer */
1953
          result = PaUtil_InitializeRingBuffer( &stream->inputRingBuffer, szfl*inputParameters->channelCount, ringSize, data );
1954
          if( result != 0 )
1955
          {
1956
              /* The only reason this should fail is if ringSize is not a power of 2, which we do not anticipate happening. */
1957
              result = paUnanticipatedHostError;
1958
              free(data);
1959
              goto error;
1960
          }
1961

    
1962
          /* advance the read point a little, so we are reading from the
1963
             middle of the buffer */
1964
          if( stream->outputUnit )
1965
             PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize / RING_BUFFER_ADVANCE_DENOMINATOR );
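           /* Because the backing store was calloc()'d above, the region we just
              "wrote" is silence; the reader simply starts
              ringSize / RING_BUFFER_ADVANCE_DENOMINATOR frames behind the
              writer, and the code just below folds the ring buffer into the
              reported input latency. */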
1966
           
1967
           // Just adds to input latency between input device and PA full duplex callback.
1968
           inputLatencyFrames += ringSize;
1969
       }
1970
    }
1971

    
1972
    /* -- initialize Blio Buffer Processors -- */
1973
    if( !streamCallback )
1974
    {
1975
       long ringSize;
1976

    
1977
       ringSize = computeRingBufferSize( inputParameters,
1978
                                         outputParameters,
1979
                                         stream->inputFramesPerBuffer,
1980
                                         stream->outputFramesPerBuffer,
1981
                                         sampleRate );
1982
       result = initializeBlioRingBuffers( &stream->blio,
1983
              inputParameters ? inputParameters->sampleFormat : 0,
1984
              outputParameters ? outputParameters->sampleFormat : 0,
1985
              ringSize,
1986
              inputParameters ? inputChannelCount : 0,
1987
              outputParameters ? outputChannelCount : 0 ) ;
1988
       if( result != paNoError )
1989
          goto error;
1990
        
1991
        inputLatencyFrames += ringSize;
1992
        outputLatencyFrames += ringSize;
1993
        
1994
    }
1995

    
1996
    /* -- initialize Buffer Processor -- */
1997
    {
1998
       unsigned long maxHostFrames = stream->inputFramesPerBuffer;
1999
       if( stream->outputFramesPerBuffer > maxHostFrames )
2000
          maxHostFrames = stream->outputFramesPerBuffer;
2001
       result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
2002
                 inputChannelCount, inputSampleFormat,
2003
                 hostInputSampleFormat,
2004
                 outputChannelCount, outputSampleFormat,
2005
                 hostOutputSampleFormat,
2006
                 sampleRate,
2007
                 streamFlags,
2008
                 requestedFramesPerBuffer,
2009
                 /* If sample rate conversion takes place, the buffer size
2010
                    will not be known. */
2011
                 maxHostFrames,
2012
                 stream->inputSRConverter
2013
                              ? paUtilUnknownHostBufferSize
2014
                              : paUtilBoundedHostBufferSize,
2015
                 streamCallback ? streamCallback : BlioCallback,
2016
                 streamCallback ? userData : &stream->blio );
2017
       if( result != paNoError )
2018
           goto error;
2019
    }
2020
    stream->bufferProcessorIsInitialized = TRUE;
2021

    
2022
    // Calculate actual latency from the sum of individual latencies.
2023
    if( inputParameters ) 
2024
    {
2025
        inputLatencyFrames += PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor);
2026
        stream->streamRepresentation.streamInfo.inputLatency = inputLatencyFrames / sampleRate;
2027
    }
2028
    else
2029
    {
2030
        stream->streamRepresentation.streamInfo.inputLatency = 0.0;
2031
    }
2032
    
2033
    if( outputParameters ) 
2034
    {
2035
        outputLatencyFrames += PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor);
2036
        stream->streamRepresentation.streamInfo.outputLatency = outputLatencyFrames / sampleRate;
2037
    }
2038
    else
2039
    {
2040
        stream->streamRepresentation.streamInfo.outputLatency = 0.0;
2041
    }
2042
    
2043
    stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
2044

    
2045
    stream->sampleRate = sampleRate;
2046
    
2047
    stream->userInChan  = inputChannelCount;
2048
    stream->userOutChan = outputChannelCount;
2049

    
2050
    // Setup property listeners for timestamp and latency calculations.
2051
        pthread_mutex_init( &stream->timingInformationMutex, NULL );
2052
        stream->timingInformationMutexIsInitialized = 1;
2053
    InitializeDeviceProperties( &stream->inputProperties );     // zeros the struct. doesn't actually init it to useful values
2054
    InitializeDeviceProperties( &stream->outputProperties );    // zeros the struct. doesn't actually init it to useful values
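    /* For each direction in use below: seed with the device's nominal sample
       rate (treated as fatal if unreadable), opportunistically refine it from
       kAudioDevicePropertyActualSampleRate, then install the listeners that
       keep these values current while the stream runs. */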
2055
        if( stream->outputUnit )
2056
    {
2057
        Boolean isInput = FALSE;
2058
        
2059
        // Start with the current values for the device properties.
2060
        // Init with nominal sample rate. Use actual sample rate where available
2061
        
2062
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2063
                stream, stream->outputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2064
        if( result )
2065
            goto error; /* fail if we can't even get a nominal device sample rate */
2066
        
2067
        UpdateSampleRateFromDeviceProperty( stream, stream->outputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2068
        
2069
        SetupDevicePropertyListeners( stream, stream->outputDevice, isInput );
2070
    }
2071
        if( stream->inputUnit )
2072
    {
2073
        Boolean isInput = TRUE;
2074
       
2075
        // as above
2076
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2077
                stream, stream->inputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2078
        if( result )
2079
            goto error;
2080
        
2081
        UpdateSampleRateFromDeviceProperty( stream, stream->inputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2082
        
2083
        SetupDevicePropertyListeners( stream, stream->inputDevice, isInput );
2084
        }
2085
    UpdateTimeStampOffsets( stream );
2086
    // Setup timestamp copies to be used by audio callback.
2087
    stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
2088
    stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
2089
    stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
2090

    
2091
    stream->state = STOPPED;
2092
    stream->xrunFlags = 0;
2093

    
2094
    *s = (PaStream*)stream;
2095

    
2096
    return result;
2097

    
2098
error:
2099
    CloseStream( stream );
2100
    return result;
2101
}
2102

    
2103

    
2104
#define HOST_TIME_TO_PA_TIME( x ) ( AudioConvertHostTimeToNanos( (x) ) * 1.0E-09) /* convert to nanoseconds and then to seconds */
2105

    
2106
PaTime GetStreamTime( PaStream *s )
2107
{
2108
        return HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() ); 
2109
}
2110

    
2111
#define RING_BUFFER_EMPTY (1000)
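/* ringBufferIOProc() is the data-supply callback handed to
   AudioConverterFillBuffer() when input is pulled through the sample rate
   converter. It translates the requested byte count into ring buffer
   elements, hands back one contiguous readable region, and advances the read
   index; RING_BUFFER_EMPTY is a private, arbitrary non-noErr status used to
   tell the caller that the ring buffer has underflowed. */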
2112

    
2113
static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter, 
2114
                             UInt32*ioDataSize, 
2115
                             void** outData, 
2116
                             void*inUserData )
2117
{
2118
   void *dummyData;
2119
   ring_buffer_size_t dummySize;
2120
   PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
2121

    
2122
   VVDBUG(("ringBufferIOProc()\n"));
2123

    
2124
   if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
2125
      *outData = NULL;
2126
      *ioDataSize = 0;
2127
      return RING_BUFFER_EMPTY;
2128
   }
2129
   assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
2130
   assert( ( (*ioDataSize) / rb->elementSizeBytes ) * rb->elementSizeBytes == (*ioDataSize) ) ;
2131
   (*ioDataSize) /= rb->elementSizeBytes ;
2132
   PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
2133
                                    outData, (ring_buffer_size_t *)ioDataSize, 
2134
                                    &dummyData, &dummySize );
2135
   assert( *ioDataSize );
2136
   PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
2137
   (*ioDataSize) *= rb->elementSizeBytes ;
2138

    
2139
   return noErr;
2140
}
2141

    
2142
/*
2143
 * Called by the AudioUnit API to process audio from the sound card.
2144
 * This is where the magic happens.
2145
 */
2146
/* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestions for cleaning it up, I'm all ears. */
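/* Rough map of the cases handled below: (1) full duplex on a single AUHAL
 * unit with no sample rate conversion -- input is rendered and output filled
 * in one pass; (2) the render side of a split-device or rate-converted
 * stream, or simplex output -- output is filled here and any input is pulled
 * from the ring buffer, through the converter when one exists; (3) calls made
 * for the input element, which deal with freshly captured data and are
 * handled further below. */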
2147
static OSStatus AudioIOProc( void *inRefCon,
2148
                               AudioUnitRenderActionFlags *ioActionFlags,
2149
                               const AudioTimeStamp *inTimeStamp,
2150
                               UInt32 inBusNumber,
2151
                               UInt32 inNumberFrames,
2152
                               AudioBufferList *ioData )
2153
{
2154
   unsigned long framesProcessed     = 0;
2155
   PaStreamCallbackTimeInfo timeInfo = {0,0,0};
2156
   PaMacCoreStream *stream           = (PaMacCoreStream*)inRefCon;
2157
   const bool isRender               = inBusNumber == OUTPUT_ELEMENT;
2158
   int callbackResult                = paContinue ;
2159
   double hostTimeStampInPaTime      = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime);
2160
    
2161
   VVDBUG(("AudioIOProc()\n"));
2162

    
2163
   PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
2164
    
2165
   /* -----------------------------------------------------------------*\
2166
      This output may be useful for debugging,
2167
      but printing during the callback is a bad enough idea that
2168
      this is not enabled by enabling the usual debugging calls.
2169
   \* -----------------------------------------------------------------*/
2170
   /*
2171
   static int renderCount = 0;
2172
   static int inputCount = 0;
2173
   printf( "-------------------  starting reder/input\n" );
2174
   if( isRender )
2175
      printf("Render callback (%d):\t", ++renderCount);
2176
   else
2177
      printf("Input callback  (%d):\t", ++inputCount);
2178
   printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );
2179

2180
   printf( "--- inBusNumber: %lu\n", inBusNumber );
2181
   printf( "--- inNumberFrames: %lu\n", inNumberFrames );
2182
   printf( "--- %x ioData\n", (unsigned) ioData );
2183
   if( ioData )
2184
   {
2185
      int i=0;
2186
      printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
2187
      for( i=0; i<ioData->mNumberBuffers; ++i )
2188
         printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
2189
   }
2190
      ----------------------------------------------------------------- */
2191

    
2192
        /* compute PaStreamCallbackTimeInfo */
2193
        
2194
        if( pthread_mutex_trylock( &stream->timingInformationMutex ) == 0 ){
2195
                /* snapshot the ioproc copy of timing information */
2196
                stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
2197
                stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
2198
                stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
2199
                pthread_mutex_unlock( &stream->timingInformationMutex );
2200
        }
2201
        
2202
        /* For timeInfo.currentTime we could calculate current time backwards from the HAL audio 
2203
         output time to give a more accurate impression of the current timeslice but it doesn't 
2204
         seem worth it at the moment since other PA host APIs don't do any better.
2205
         */
2206
        timeInfo.currentTime = HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() );
2207
        
2208
        /*
2209
         For an input HAL AU, inTimeStamp is the time the samples are received from the hardware,
2210
         for an output HAL AU inTimeStamp is the time the samples are sent to the hardware. 
2211
         PA expresses timestamps in terms of when the samples enter the ADC or leave the DAC
2212
         so we add or subtract kAudioDevicePropertyLatency below.
2213
         */
2214
        
2215
        /* FIXME: not sure what to do below if the host timestamps aren't valid (kAudioTimeStampHostTimeValid isn't set)
2216
         Could ask on CA mailing list if it is possible for it not to be set. If so, could probably grab a now timestamp
2217
         at the top and compute from there (modulo scheduling jitter) or ask on mailing list for other options. */
2218
        
2219
        if( isRender )
2220
        {
2221
                if( stream->inputUnit ) /* full duplex */
2222
                {
2223
                        if( stream->inputUnit == stream->outputUnit ) /* full duplex AUHAL IOProc */
2224
                        {
2225
                // Ross and Phil agreed that the following calculation is correct based on an email from Jeff Moore:
2226
                // http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
2227
                // Basically the difference between the Apple output timestamp and the PA timestamp is kAudioDevicePropertyLatency.
2228
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
2229
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
2230
                                 timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2231
                        }
2232
                        else /* full duplex with ring-buffer from a separate input AUHAL ioproc */
2233
                        {
2234
                                /* FIXME: take the ring buffer latency into account */
2235
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
2236
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
2237
                                timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2238
                        }
2239
                }
2240
                else /* output only */
2241
                {
2242
                        timeInfo.inputBufferAdcTime = 0;
2243
                        timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2244
                }
2245
        }
2246
        else /* input only */
2247
        {
2248
                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - stream->timestampOffsetInputDevice_ioProcCopy; 
2249
                timeInfo.outputBufferDacTime = 0;
2250
        }
2251
        
2252
   //printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );
2253

    
2254
   if( isRender && stream->inputUnit == stream->outputUnit
2255
                && !stream->inputSRConverter )
2256
   {
2257
      /* --------- Full Duplex, One Device, no SR Conversion -------
2258
       *
2259
       * This is the lowest latency case, and also the simplest.
2260
       * Input data and output data are available at the same time.
2261
       * we do not use the input SR converter or the input ring buffer.
2262
       *
2263
       */
2264
      OSStatus err = 0;
2265
       unsigned long frames;
2266
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
2267

    
2268
      /* -- start processing -- */
2269
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2270
                                    &timeInfo,
2271
                                    stream->xrunFlags );
2272
      stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback, which calls the xrunCallback function. It should be in the same thread as the main audio callback, but the Apple docs just use the word "usually" so it may be possible to lose an xrun notification, if that callback happens here.
2273

    
2274
      /* -- compute frames. do some checks -- */
2275
      assert( ioData->mNumberBuffers == 1 );
2276
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
2277

    
2278
      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
2279
      /* -- copy and process input data -- */
2280
      err= AudioUnitRender(stream->inputUnit,
2281
                    ioActionFlags,
2282
                    inTimeStamp,
2283
                    INPUT_ELEMENT,
2284
                    inNumberFrames,
2285
                    &stream->inputAudioBufferList );
2286
      if(err != noErr)
2287
      {
2288
        goto stop_stream;
2289
      }
2290

    
2291
      PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
2292
      PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                          0,
                          stream->inputAudioBufferList.mBuffers[0].mData,
                          stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
      /* -- Copy and process output data -- */
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                        0,
                                        ioData->mBuffers[0].mData,
                                        ioData->mBuffers[0].mNumberChannels);
      /* -- complete processing -- */
      framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
   }
   else if( isRender )
   {
      /* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
       *       -- OR Simplex Output
       *
       * This case handles output data as in the full duplex case,
       * and, if there is input data, reads it off the ring buffer
       * and into the PA buffer processor. If sample rate conversion
       * is required on input, that is done here as well.
       */
       unsigned long frames;
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;

      /* Sometimes, when stopping a duplex stream we get erroneous
         xrun flags, so if this is our last run, clear the flags. */
      int xrunFlags = stream->xrunFlags;
/*
      if( xrunFlags & paInputUnderflow )
         printf( "input underflow.\n" );
      if( xrunFlags & paInputOverflow )
         printf( "input overflow.\n" );
*/
      if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
         xrunFlags = 0;

      /* -- start processing -- */
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                    &timeInfo,
                                    xrunFlags );
      stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */

      /* -- Copy and process output data -- */
      assert( ioData->mNumberBuffers == 1 );
      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                     0,
                                     ioData->mBuffers[0].mData,
                                     ioData->mBuffers[0].mNumberChannels);

      /* -- copy and process input data, and complete processing -- */
      if( stream->inputUnit ) {
         const int flsz = sizeof( float );
         /* Here, we read the data out of the ring buffer, through the
            audio converter. */
         int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
         long bytesPerFrame = flsz * inChan;

         if( stream->inputSRConverter )
         {
               OSStatus err;
               UInt32 size;
               float data[ inChan * frames ];
               size = sizeof( data );
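               /* A rough sketch of the pull-model read performed next, assuming
                * the legacy AudioConverterFillBuffer() semantics (its size
                * argument is in/out and counted in bytes): the converter calls
                * ringBufferIOProc to pull raw frames from stream->inputRingBuffer,
                * converts them, and writes up to `size` bytes of converted audio
                * into `data`. On return, `size` holds the bytes actually produced,
                * so when the input proc reports RING_BUFFER_EMPTY the remaining
                * sizeof(data) - size bytes can simply be zero-filled, as the
                * underflow branch below does.
                */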
               err = AudioConverterFillBuffer(
                             stream->inputSRConverter,
                             ringBufferIOProc,
                             &stream->inputRingBuffer,
                             &size,
                             (void *)&data );
               if( err == RING_BUFFER_EMPTY )
               { /* the ring buffer callback underflowed */
                  err = 0;
                  bzero( ((char *)data) + size, sizeof(data)-size );
                  /* The ring buffer can underflow normally when the stream is stopping.
                   * So only report an error if the stream is active. */
                  if( stream->state == ACTIVE )
                  {
                      stream->xrunFlags |= paInputUnderflow;
                  }
               }
               ERR( err );
               if(err != noErr)
               {
                 goto stop_stream;
               }

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
         }
         else
         {
            /* Without the AudioConverter this is actually a bit more complex
               because we have to do a little buffer processing that the
               AudioConverter would otherwise handle for us. */
            void *data1, *data2;
            ring_buffer_size_t size1, size2;
            ring_buffer_size_t framesReadable = PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
                                             frames,
                                             &data1, &size1,
                                             &data2, &size2 );
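            /* A sketch of the contract this branch assumes for
             * PaUtil_GetRingBufferReadRegions() (standard pa_ringbuffer
             * behaviour): up to `frames` elements are exposed in place, split
             * into at most two contiguous regions because the buffer can wrap:
             *
             *    ring_buffer_size_t got =
             *        PaUtil_GetRingBufferReadRegions( &rb, frames,
             *                                         &p1, &n1, &p2, &n2 );
             *    // got == n1 + n2 and got <= frames; consume p1[0..n1) then
             *    // p2[0..n2), then release with
             *    PaUtil_AdvanceRingBufferReadIndex( &rb, got );
             *
             * The three branches below cover: everything in the first region,
             * too little data (underflow, zero-padded), and data split across
             * both regions.
             */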
            if( size1 == frames ) {
               /* simplest case: all in first buffer */
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
            } else if( framesReadable < frames ) {

                long sizeBytes1 = size1 * bytesPerFrame;
                long sizeBytes2 = size2 * bytesPerFrame;
               /* we underflowed. take what data we can, zero the rest. */
               unsigned char data[ frames * bytesPerFrame ];
               if( size1 > 0 )
               {
                   memcpy( data, data1, sizeBytes1 );
               }
               if( size2 > 0 )
               {
                   memcpy( data+sizeBytes1, data2, sizeBytes2 );
               }
               bzero( data+sizeBytes1+sizeBytes2, (frames*bytesPerFrame) - sizeBytes1 - sizeBytes2 );

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
                                                  framesReadable );
               /* flag underflow */
               stream->xrunFlags |= paInputUnderflow;
            } else {
               /* we got all the data, but split between buffers */
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), size1 );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor), size2 );
               PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data2,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, framesReadable );
            }
         }
      } else {
         framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
      }

   }
   else
   {
      /* ------------------ Input
       *
       * First, we read off the audio data and put it in the ring buffer.
       * If this is an input-only stream, we need to process it more;
       * otherwise, we let the output case deal with it.
       */
      OSStatus err = 0;
      int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
      /* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
      do {
         err = AudioUnitRender(stream->inputUnit,
                 ioActionFlags,
                 inTimeStamp,
                 INPUT_ELEMENT,
                 inNumberFrames,
                 &stream->inputAudioBufferList );
         if( err == -10874 )
            inNumberFrames /= 2;
      } while( err == -10874 && inNumberFrames > 1 );
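      /* The -10874 being retried above appears to be
         kAudioUnitErr_TooManyFramesToProcess: the AUHAL refuses a render
         request larger than its current maximum slice, so this implementation
         halves the frame count and tries again rather than failing outright. */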
      ERR( err );
      if(err != noErr)
      {
          goto stop_stream;
      }

      if( stream->inputSRConverter || stream->outputUnit )
      {
         /* If this is duplex or we use a converter, put the data
            into the ring buffer. */
          ring_buffer_size_t framesWritten = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
                                            stream->inputAudioBufferList.mBuffers[0].mData,
                                            inNumberFrames );
         if( framesWritten != inNumberFrames )
         {
             stream->xrunFlags |= paInputOverflow ;
         }
      }
      else
      {
         /* for simplex input w/o SR conversion,
            just pop the data into the buffer processor. */
         PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                              &timeInfo,
                              stream->xrunFlags );
         stream->xrunFlags = 0;

         PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
         PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                             0,
                             stream->inputAudioBufferList.mBuffers[0].mData,
                             chan );
         framesProcessed =
              PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                          &callbackResult );
      }
      if( !stream->outputUnit && stream->inputSRConverter )
      {
         /* ------------------ Simplex Input w/ SR Conversion
          *
          * If this is a simplex input stream, we need to read off the buffer,
          * do our sample rate conversion and pass the results to the buffer
          * processor.
          * The logic here is complicated somewhat by the fact that we don't
          * know how much data is available, so we loop on reasonably sized
          * chunks, and let the BufferProcessor deal with the rest.
          *
          */
         /* This might be too big or small depending on SR conversion. */
         float data[ chan * inNumberFrames ];
         OSStatus err;
         do
         { /* Run the buffer processor until we are out of data. */
            UInt32 size;
            long f;

            size = sizeof( data );
            err = AudioConverterFillBuffer(
                          stream->inputSRConverter,
                          ringBufferIOProc,
                          &stream->inputRingBuffer,
                          &size,
                          (void *)data );
            if( err != RING_BUFFER_EMPTY )
               ERR( err );
            if( err != noErr && err != RING_BUFFER_EMPTY )
            {
                goto stop_stream;
            }

            f = size / ( chan * sizeof(float) );
            PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
            if( f )
            {
               PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                             &timeInfo,
                                             stream->xrunFlags );
               stream->xrunFlags = 0;

               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                0,
                                data,
                                chan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
            }
         } while( callbackResult == paContinue && !err );
      }
   }

    // Should we return successfully or fall through to stopping the stream?
    if( callbackResult == paContinue )
    {
        PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
        return noErr;
    }

stop_stream:
    stream->state = CALLBACK_STOPPED ;
    if( stream->outputUnit )
        AudioOutputUnitStop(stream->outputUnit);
    if( stream->inputUnit )
        AudioOutputUnitStop(stream->inputUnit);

    PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
    return noErr;
}
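
/*
    A rough summary of the stream-state values as they are used in the render
    proc above and in the stop/close paths below (the state values themselves
    are defined elsewhere in this implementation):

      ACTIVE            normal operation; set by StartStream()
      STOPPING          StopStream()/AbortStream() requested a stop; xrun
                        flags from the dying stream are suppressed
      CALLBACK_STOPPED  the callback bailed out, either because the user
                        callback returned something other than paContinue or
                        because a CoreAudio error reached stop_stream
      STOPPED           FinishStoppingStream() completed; IsStreamStopped()
                        reports 1 only in this state
*/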

/*
    When CloseStream() is called, the multi-api layer ensures that
    the stream has already been stopped or aborted.
*/
static PaError CloseStream( PaStream* s )
{
    /* This may be called from a failed OpenStream.
       Therefore, each piece of info is treated separately. */
    PaError result = paNoError;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;

    VVDBUG(("CloseStream()\n"));
    VDBUG( ( "Closing stream.\n" ) );

    if( stream ) {

        if( stream->outputUnit )
        {
            Boolean isInput = FALSE;
            CleanupDevicePropertyListeners( stream, stream->outputDevice, isInput );
        }

        if( stream->inputUnit )
        {
            Boolean isInput = TRUE;
            CleanupDevicePropertyListeners( stream, stream->inputDevice, isInput );
        }

       if( stream->outputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->outputDevice,
                                                0,
                                                false,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
       if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->inputDevice,
                                                0,
                                                true,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
       if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
          AudioUnitUninitialize( stream->outputUnit );
          CloseComponent( stream->outputUnit );
       }
       stream->outputUnit = NULL;
       if( stream->inputUnit )
       {
          AudioUnitUninitialize( stream->inputUnit );
          CloseComponent( stream->inputUnit );
          stream->inputUnit = NULL;
       }
       if( stream->inputRingBuffer.buffer )
          free( (void *) stream->inputRingBuffer.buffer );
       stream->inputRingBuffer.buffer = NULL;
       /* TODO: is there more that needs to be done on error
                from AudioConverterDispose? */
       if( stream->inputSRConverter )
          ERR( AudioConverterDispose( stream->inputSRConverter ) );
       stream->inputSRConverter = NULL;
       if( stream->inputAudioBufferList.mBuffers[0].mData )
          free( stream->inputAudioBufferList.mBuffers[0].mData );
       stream->inputAudioBufferList.mBuffers[0].mData = NULL;

       result = destroyBlioRingBuffers( &stream->blio );
       if( result )
          return result;
       if( stream->bufferProcessorIsInitialized )
          PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );

       if( stream->timingInformationMutexIsInitialized )
          pthread_mutex_destroy( &stream->timingInformationMutex );

       PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
       PaUtil_FreeMemory( stream );
    }

    return result;
}
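
/*
    For orientation, a minimal sketch of the public-API sequence that reaches
    the routines in this file (assuming the usual PortAudio host-API dispatch;
    Pa_Initialize()/Pa_Terminate() and error handling omitted):

        PaStream *s;
        Pa_OpenDefaultStream( &s, 2, 2, paFloat32, 44100, 256, myCallback, NULL );
        Pa_StartStream( s );   // -> StartStream() below
        ...
        Pa_StopStream( s );    // -> StopStream(): waits for any pending
                               //    blocking-interface output, then calls
                               //    FinishStoppingStream()
        Pa_CloseStream( s );   // -> CloseStream() above

    myCallback and its parameters are illustrative only.
*/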

static PaError StartStream( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    OSStatus result = noErr;
    VVDBUG(("StartStream()\n"));
    VDBUG( ( "Starting stream.\n" ) );

#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)

    /* FIXME: maybe want to do this on close/abort for faster start? */
    PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
    if( stream->inputSRConverter )
       ERR_WRAP( AudioConverterReset( stream->inputSRConverter ) );

    /* -- start -- */
    stream->state = ACTIVE;
    if( stream->inputUnit ) {
       ERR_WRAP( AudioOutputUnitStart(stream->inputUnit) );
    }
    if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
       ERR_WRAP( AudioOutputUnitStart(stream->outputUnit) );
    }

    return paNoError;
#undef ERR_WRAP
}

// It's not clear from Apple's docs that this really waits
// until all data is flushed.
static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
{
    Boolean isRunning = 1;
    while( isRunning ) {
       UInt32 s = sizeof( isRunning );
       ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element, &isRunning, &s );
       if( err )
          return err;
       Pa_Sleep( 100 );
    }
    return noErr;
}
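
/* BlockWhileAudioUnitIsRunning() is what FinishStoppingStream() below uses to
   wait for the AUHAL to report that it has actually stopped: it polls the
   kAudioOutputUnitProperty_IsRunning property on the given element roughly
   every 100 ms until the unit reads back FALSE. As the comment above notes,
   this is a best-effort wait and is not documented to guarantee that all
   buffered audio has been flushed. */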

static PaError FinishStoppingStream( PaMacCoreStream *stream )
{
    OSStatus result = noErr;
    PaError paErr;

#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
    /* -- stop and reset -- */
    if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
    {
       ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
    }
    else
    {
       if( stream->inputUnit )
       {
          ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
          ERR_WRAP( AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1) );
       }
       if( stream->outputUnit )
       {
          ERR_WRAP( AudioOutputUnitStop(stream->outputUnit) );
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
          ERR_WRAP( AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0) );
       }
    }
    if( stream->inputRingBuffer.buffer ) {
       PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
       bzero( (void *)stream->inputRingBuffer.buffer,
              stream->inputRingBuffer.bufferSize );
       /* advance the write point a little, so we are reading from the
          middle of the buffer. We'll need extra at the end because
          testing has shown that this helps. */
       if( stream->outputUnit )
          PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
                                              stream->inputRingBuffer.bufferSize
                                              / RING_BUFFER_ADVANCE_DENOMINATOR );
    }

    stream->xrunFlags = 0;
    stream->state = STOPPED;

    paErr = resetBlioRingBuffers( &stream->blio );
    if( paErr )
       return paErr;

    VDBUG( ( "Stream Stopped.\n" ) );
    return paNoError;
#undef ERR_WRAP
}

/* Block until the buffer is empty, then stop the stream. */
static PaError StopStream( PaStream *s )
{
    PaError paErr;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("StopStream()\n"));

    /* Tell WriteStream to stop filling the buffer. */
    stream->state = STOPPING;

    if( stream->userOutChan > 0 ) /* Does this stream do output? */
    {
        size_t maxHostFrames = MAX( stream->inputFramesPerBuffer, stream->outputFramesPerBuffer );
        VDBUG( ("Waiting for write buffer to be drained.\n") );
        paErr = waitUntilBlioWriteBufferIsEmpty( &stream->blio, stream->sampleRate,
                                                maxHostFrames );
        VDBUG( ( "waitUntilBlioWriteBufferIsEmpty returned %d\n", paErr ) );
    }
    return FinishStoppingStream( stream );
}
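
/* StopStream() above drains the blocking-interface write buffer before
   stopping; AbortStream() below skips that wait and stops immediately. Both
   end up in FinishStoppingStream(). */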

/* Immediately stop the stream. */
static PaError AbortStream( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VDBUG( ( "AbortStream()\n" ) );
    stream->state = STOPPING;
    return FinishStoppingStream( stream );
}


static PaError IsStreamStopped( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamStopped()\n"));

    return stream->state == STOPPED ? 1 : 0;
}


static PaError IsStreamActive( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamActive()\n"));
    return ( stream->state == ACTIVE || stream->state == STOPPING );
}


static double GetStreamCpuLoad( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("GetStreamCpuLoad()\n"));

    return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer );
}