To check out this repository, run `hg clone` with the following URL, or open the URL in EasyMercurial or your preferred Mercurial client.

The primary repository for this project is hosted at https://github.com/sonic-visualiser/sv-dependency-builds .
This repository is a read-only copy which is updated automatically every hour.

Statistics Download as Zip
| Branch: | Tag: | Revision:

root / src / portaudio_20161030_catalina_patch / src / hostapi / coreaudio / pa_mac_core.c @ 164:9fa11135915a

History | View | Annotate | Download (115 KB)

1
/*
2
 * Implementation of the PortAudio API for Apple AUHAL
3
 *
4
 * PortAudio Portable Real-Time Audio Library
5
 * Latest Version at: http://www.portaudio.com
6
 *
7
 * Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
8
 * Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
9
 *
10
 * Dominic's code was based on code by Phil Burk, Darren Gibbs,
11
 * Gord Peters, Stephane Letz, and Greg Pfiel.
12
 *
13
 * The following people also deserve acknowledgements:
14
 *
15
 * Olivier Tristan for feedback and testing
16
 * Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
17
 * interface.
18
 * 
19
 *
20
 * Based on the Open Source API proposed by Ross Bencina
21
 * Copyright (c) 1999-2002 Ross Bencina, Phil Burk
22
 *
23
 * Permission is hereby granted, free of charge, to any person obtaining
24
 * a copy of this software and associated documentation files
25
 * (the "Software"), to deal in the Software without restriction,
26
 * including without limitation the rights to use, copy, modify, merge,
27
 * publish, distribute, sublicense, and/or sell copies of the Software,
28
 * and to permit persons to whom the Software is furnished to do so,
29
 * subject to the following conditions:
30
 *
31
 * The above copyright notice and this permission notice shall be
32
 * included in all copies or substantial portions of the Software.
33
 *
34
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
38
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
39
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41
 */
42

    
43
/*
44
 * The text above constitutes the entire PortAudio license; however, 
45
 * the PortAudio community also makes the following non-binding requests:
46
 *
47
 * Any person wishing to distribute modifications to the Software is
48
 * requested to send the modifications to the original developer so that
49
 * they can be incorporated into the canonical version. It is also 
50
 * requested that these non-binding requests be included along with the 
51
 * license above.
52
 */
53

    
54
/**
55
 @file pa_mac_core
56
 @ingroup hostapi_src
57
 @author Bjorn Roche
58
 @brief AUHAL implementation of PortAudio
59
*/
60

    
61
/* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
62
 * PaMacCore_SetError() will do this.
63
 */
64

    
65
#include "pa_mac_core_internal.h"
66

    
67
#include <string.h> /* strlen(), memcmp() etc. */
68
#include <libkern/OSAtomic.h>
69

    
70
#include "pa_mac_core.h"
71
#include "pa_mac_core_utilities.h"
72
#include "pa_mac_core_blocking.h"
73

    
74

    
75
#ifdef __cplusplus
76
extern "C"
77
{
78
#endif /* __cplusplus */
79
        
80
/* This is a reasonable size for a small buffer based on experience. */
81
#define PA_MAC_SMALL_BUFFER_SIZE    (64)
82
        
83
/* prototypes for functions declared in this file */
84
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );
85

    
86
/*
87
 * Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
88
 * with the requested flags and initializes channel map.
89
 */
90
/*
 * Function declared in pa_mac_core.h. Initializes a PaMacCoreStreamInfo
 * with the requested flags; the channel map starts out empty.
 */
void PaMacCore_SetupStreamInfo(  PaMacCoreStreamInfo *data, const unsigned long flags )
{
   /* Start from an all-zero struct, then fill in each field explicitly. */
   memset( data, 0, sizeof( PaMacCoreStreamInfo ) );
   data->size           = sizeof( PaMacCoreStreamInfo );
   data->hostApiType    = paCoreAudio;
   data->version        = 0x01;
   data->flags          = flags;
   data->channelMap     = NULL;  /* no channel map until SetupChannelMap() is called */
   data->channelMapSize = 0;
}
100

    
101
/*
102
 * Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
103
 */
104
/*
 * Function declared in pa_mac_core.h. Attaches a channel map to a
 * PaMacCoreStreamInfo. The map is stored by pointer, not copied, so the
 * caller's array must outlive any stream opened with this struct.
 */
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
{
   data->channelMapSize = channelMapSize;
   data->channelMap     = channelMap;
}
109
/* Shared scratch buffer used to return channel names from
   PaMacCore_GetChannelName(). Not thread-safe: one static buffer is
   reused across calls, and each call may invalidate the previous result. */
static char *channelName = NULL;
static int channelNameSize = 0;

/* Ensure the shared name buffer can hold at least `size` characters plus a
   NUL terminator. On allocation failure the buffer state is reset and false
   is returned; otherwise returns true. */
static bool ensureChannelNameSize( int size )
{
   if( size < channelNameSize )
      return true;   /* existing buffer is already big enough */

   free( channelName );
   channelNameSize = size;
   channelName = (char *) malloc( channelNameSize + 1 );
   if( channelName )
      return true;

   channelNameSize = 0;
   return false;
}
123
/*
 * Function declared in pa_mac_core.h. Retrieves channel names.
 *
 * Returns a pointer to a shared static buffer (see ensureChannelNameSize),
 * so the result is only valid until the next call, and the function is not
 * thread-safe. Returns NULL on failure.
 * NOTE(review): `device` indexes macCoreHostApi->devIds without bounds
 * checking — callers are assumed to pass a valid host-API device index.
 */
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
{
        struct PaUtilHostApiRepresentation *hostApi;
        PaError err;
        OSStatus error;
        /* Locate the CoreAudio host API representation registered with PortAudio. */
        err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
        assert(err == paNoError);
        if( err != paNoError )
                return NULL;
        PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
        AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
        CFStringRef nameRef;

        /* First try with CFString */
        /* Channel numbers in the HAL property API are 1-based (0 is the master
           channel), hence channelIndex + 1 below. */
        UInt32 size = sizeof(nameRef);
        error = AudioDeviceGetProperty( hostApiDevice,
                                        channelIndex + 1,
                                        input,
                                        kAudioDevicePropertyChannelNameCFString,
                                        &size,
                                        &nameRef );
        if( error )
        {
                /* try the C String */
                /* First ask for the required buffer size, then fetch into the
                   shared static buffer. */
                size = 0;
                error = AudioDeviceGetPropertyInfo( hostApiDevice,
                                                    channelIndex + 1,
                                                    input,
                                                    kAudioDevicePropertyChannelName,
                                                    &size,
                                                    NULL);
                if( !error )
                {
                        if( !ensureChannelNameSize( size ) )
                                return NULL;

                        error = AudioDeviceGetProperty( hostApiDevice,
                                                        channelIndex + 1,
                                                        input,
                                                        kAudioDevicePropertyChannelName,
                                                        &size,
                                                        channelName );

                        if( !error )
                                return channelName;
                }

                /* as a last-ditch effort, we use the device name and append the channel number. */
                nameRef = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%s: %d"), hostApi->deviceInfos[device]->name, channelIndex + 1 );

                /* Worst-case UTF-8 byte count for the string; buffer gets one
                   extra byte for the NUL terminator. */
                size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);;
                if( !ensureChannelNameSize( size ) )
                {
                        CFRelease( nameRef );
                        return NULL;
                }
                CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
                CFRelease( nameRef );
        }
        else
        {
                /* CFString path succeeded: convert to UTF-8 into the shared buffer. */
                size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);;
                if( !ensureChannelNameSize( size ) )
                {
                        CFRelease( nameRef );
                        return NULL;
                }
                CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
                CFRelease( nameRef );
        }

        return channelName;
}
201

    
202
    
203
/*
 * Function declared in pa_mac_core.h. Queries the device's allowed I/O
 * buffer size range (in frames) and stores it in *minBufferSizeFrames /
 * *maxBufferSizeFrames.
 *
 * Returns paNoError on success; on failure the outputs are set to 0 and the
 * error is returned.
 */
PaError PaMacCore_GetBufferSizeRange( PaDeviceIndex device,
                                      long *minBufferSizeFrames, long *maxBufferSizeFrames )
{
    PaError result;
    PaUtilHostApiRepresentation *hostApi;

    result = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );

    if( result == paNoError )
    {
        PaDeviceIndex hostApiDeviceIndex;
        result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDeviceIndex, device, hostApi );
        if( result == paNoError )
        {
            PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
            AudioDeviceID macCoreDeviceId = macCoreHostApi->devIds[hostApiDeviceIndex];
            /* BUGFIX: zero-initialize so a failed property query below does not
               leave us copying uninitialized stack data into the outputs. */
            AudioValueRange audioRange = { 0, 0 };
            UInt32 propSize = sizeof( audioRange );

            // return the size range for the output scope unless we only have inputs
            Boolean isInput = 0;
            if( macCoreHostApi->inheritedHostApiRep.deviceInfos[hostApiDeviceIndex]->maxOutputChannels == 0 )
                isInput = 1;

            result = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );

            *minBufferSizeFrames = audioRange.mMinimum;
            *maxBufferSizeFrames = audioRange.mMaximum;
        }
    }

    return result;
}
236

    
237

    
238
/* Function declared in pa_mac_core.h: returns the AudioDeviceID the
   stream uses for input. */
AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
{
    PaMacCoreStream *macStream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamInputHandle()\n"));
    return macStream->inputDevice;
}
245

    
246
/* Function declared in pa_mac_core.h: returns the AudioDeviceID the
   stream uses for output. */
AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
{
    PaMacCoreStream *macStream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamOutputHandle()\n"));
    return macStream->outputDevice;
}
253

    
254
#ifdef __cplusplus
255
}
256
#endif /* __cplusplus */
257

    
258
/* Fraction of the ring buffer consumed per advance in the blocking
   interface (1/4 of the buffer). */
#define RING_BUFFER_ADVANCE_DENOMINATOR (4)

/* -- forward declarations for the PaUtilHostApiRepresentation entry points
      and internal helpers defined later in this file -- */
static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate );
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
                           PaStream** s,
                           const PaStreamParameters *inputParameters,
                           const PaStreamParameters *outputParameters,
                           double sampleRate,
                           unsigned long framesPerBuffer,
                           PaStreamFlags streamFlags,
                           PaStreamCallback *streamCallback,
                           void *userData );
static PaError CloseStream( PaStream* stream );
static PaError StartStream( PaStream *stream );
static PaError StopStream( PaStream *stream );
static PaError AbortStream( PaStream *stream );
static PaError IsStreamStopped( PaStream *s );
static PaError IsStreamActive( PaStream *stream );
static PaTime GetStreamTime( PaStream *stream );
/* AUHAL render callback: services one I/O cycle. */
static OSStatus AudioIOProc( void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData );
static double GetStreamCpuLoad( PaStream* stream );

/* Fills in channel counts and latency defaults for one device/direction. */
static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput);

/* Creates and configures a single AUHAL audio unit for input, output, or
   full-duplex use. The audio-unit parameter type depends on whether the
   AudioComponent API (AUDIO_COMPONENT_FIX) is in use. */
static PaError OpenAndSetupOneAudioUnit(
                                   const PaMacCoreStream *stream,
                                   const PaStreamParameters *inStreamParams,
                                   const PaStreamParameters *outStreamParams,
                                   const UInt32 requestedFramesPerBuffer,
                                   UInt32 *actualInputFramesPerBuffer,
                                   UInt32 *actualOutputFramesPerBuffer,
                                   const PaMacAUHAL *auhalHostApi,
#ifndef AUDIO_COMPONENT_FIX
                                   AudioUnit *audioUnit,
#else
                                   AudioComponentInstance *audioUnit,
#endif
                                   AudioConverterRef *srConverter,
                                   AudioDeviceID *audioDevice,
                                   const double sampleRate,
                                   void *refCon );

/* for setting errors. */
#define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
    PaUtil_SetLastHostErrorInfo( paCoreAudio, errorCode, errorText )
315

    
316
/*
317
 * Callback called when starting or stopping a stream.
318
 */
319
/*
 * Callback called when starting or stopping a stream.
 *
 * Reads kAudioOutputUnitProperty_IsRunning from the audio unit; when the
 * unit has stopped, advances the stream state STOPPING -> STOPPED and
 * invokes the user's stream-finished callback.
 * NOTE(review): signature matches an AudioUnit property-listener proc;
 * the registration site is elsewhere in this file — confirm there.
 */
static void startStopCallback(
   void *               inRefCon,
#ifndef AUDIO_COMPONENT_FIX
   AudioUnit            ci,
#else
   AudioComponentInstance            ci,
#endif
   AudioUnitPropertyID  inID,
   AudioUnitScope       inScope,
   AudioUnitElement     inElement )
{
   PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
   UInt32 isRunning;
   UInt32 size = sizeof( isRunning );
   OSStatus err;
   err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
   assert( !err );
   if( err )
      isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
   if( isRunning )
      return; //We are only interested in when we are stopping
   // -- if we are using 2 I/O units, we only need one notification!
   /* With separate input and output units this fires once per unit; ignore
      the input unit's notification and act only on the output unit's. */
   if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
      return;
   PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
   if( stream->state == STOPPING )
      stream->state = STOPPED ;
   if( sfc )
      sfc( stream->streamRepresentation.userData );
}
349

    
350

    
351
/*currently, this is only used in initialization, but it might be modified
352
  to be used when the list of devices changes.*/
353
/*currently, this is only used in initialization, but it might be modified
  to be used when the list of devices changes.*/
/* Enumerates all CoreAudio devices into auhalHostApi->devIds/devCount and
   determines the default input and output devices, substituting the first
   usable device when the OS query fails.
   Returns paNoError on success or paInsufficientMemory if the device-ID
   array cannot be allocated. */
static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
{
    UInt32 size;
    UInt32 propsize;
    VVDBUG(("gatherDeviceInfo()\n"));
    /* -- free any previous allocations -- */
    if( auhalHostApi->devIds )
        PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
    auhalHostApi->devIds = NULL;

    /* -- figure out how many devices there are -- */
    AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  NULL );
    auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );

    VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );

    /* -- copy the device IDs -- */
    auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
                             auhalHostApi->allocations,
                             propsize );
    if( !auhalHostApi->devIds )
        return paInsufficientMemory;
    AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  auhalHostApi->devIds );
#ifdef MAC_CORE_VERBOSE_DEBUG
    {
       int i;
       for( i=0; i<auhalHostApi->devCount; ++i )
          printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
    }
#endif

    size = sizeof(AudioDeviceID);
    auhalHostApi->defaultIn  = kAudioDeviceUnknown;
    auhalHostApi->defaultOut = kAudioDeviceUnknown;

    /* determine the default device. */
    /* I am not sure how these calls to AudioHardwareGetProperty()
       could fail, but in case they do, we use the first available
       device as the default. */
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
                     &size,
                     &auhalHostApi->defaultIn) ) {
       int i;
       auhalHostApi->defaultIn  = kAudioDeviceUnknown;
       VDBUG(("Failed to get default input device from OS."));
       VDBUG((" I will substitute the first available input Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          /* BUGFIX: only consult devInfo when GetChannelInfo() succeeded
             (paNoError == 0). The original tested for failure, which read
             uninitialized devInfo fields and never found a real device. */
          if( 0 == GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], TRUE ) )
             if( devInfo.maxInputChannels ) {
                auhalHostApi->defaultIn = auhalHostApi->devIds[i];
                break;
             }
       }
    }
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
                     &size,
                     &auhalHostApi->defaultOut) ) {
       int i;
       /* BUGFIX: reset defaultOut here. The original reset defaultIn (a
          copy/paste slip), clobbering a default input that may already have
          been determined above. */
       auhalHostApi->defaultOut = kAudioDeviceUnknown;
       VDBUG(("Failed to get default output device from OS."));
       VDBUG((" I will substitute the first available output Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          /* BUGFIX: success check, as in the input branch above. */
          if( 0 == GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], FALSE ) )
             if( devInfo.maxOutputChannels ) {
                auhalHostApi->defaultOut = auhalHostApi->devIds[i];
                break;
             }
       }
    }

    VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn  ) );
    VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );

    return paNoError;
}
436

    
437
/* =================================================================================================== */
438
/**
439
 * @internal
440
 * @brief Clip the desired size against the allowed IO buffer size range for the device.
441
 */
442
/**
 * @internal
 * @brief Clip the desired size against the allowed IO buffer size range for the device.
 *
 * On success *allowedSize receives desiredSize clamped to the device's
 * reported [mMinimum, mMaximum] frame range. If the range query fails,
 * *allowedSize receives desiredSize unchanged and the error is returned.
 */
static PaError ClipToDeviceBufferSize( AudioDeviceID macCoreDeviceId,
                                       int isInput, UInt32 desiredSize, UInt32 *allowedSize )
{
    UInt32 resultSize = desiredSize;
    AudioValueRange audioRange;
    UInt32 propSize = sizeof( audioRange );
    PaError err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
    if( err != paNoError )
    {
        /* BUGFIX: don't clamp against uninitialized range data when the
           property query failed; pass the request through as-is. */
        *allowedSize = desiredSize;
        return err;
    }
    resultSize = MAX( resultSize, audioRange.mMinimum );
    resultSize = MIN( resultSize, audioRange.mMaximum );
    *allowedSize = resultSize;
    return err;
}
454

    
455
/* =================================================================================================== */
456
#if 0
457
static void DumpDeviceProperties( AudioDeviceID macCoreDeviceId,
458
                          int isInput )
459
{
460
    PaError err;
461
    int i;
462
    UInt32 propSize;
463
    UInt32 deviceLatency;
464
    UInt32 streamLatency;
465
    UInt32 bufferFrames;
466
    UInt32 safetyOffset;
467
    AudioStreamID streamIDs[128];
468
    
469
    printf("\n======= latency query : macCoreDeviceId = %d, isInput %d =======\n", (int)macCoreDeviceId, isInput );    
470
    
471
    propSize = sizeof(UInt32);
472
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &bufferFrames));
473
    printf("kAudioDevicePropertyBufferFrameSize: err = %d, propSize = %d, value = %d\n", err, propSize, bufferFrames );
474
    
475
    propSize = sizeof(UInt32);
476
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
477
    printf("kAudioDevicePropertySafetyOffset: err = %d, propSize = %d, value = %d\n", err, propSize, safetyOffset );
478
    
479
    propSize = sizeof(UInt32);
480
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
481
    printf("kAudioDevicePropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, deviceLatency );
482
    
483
    AudioValueRange audioRange;
484
    propSize = sizeof( audioRange );
485
    err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
486
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, minimum = %g\n", err, propSize, audioRange.mMinimum);
487
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, maximum = %g\n", err, propSize, audioRange.mMaximum );
488
    
489
    /* Get the streams from the device and query their latency. */
490
    propSize = sizeof(streamIDs);
491
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
492
    int numStreams = propSize / sizeof(AudioStreamID);
493
    for( i=0; i<numStreams; i++ )
494
    {
495
        printf("Stream #%d = %d---------------------- \n", i, streamIDs[i] );
496
        
497
        propSize = sizeof(UInt32);
498
        err  = WARNING(AudioStreamGetProperty(streamIDs[i], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
499
        printf("  kAudioStreamPropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, streamLatency );
500
    }
501
}
502
#endif
503

    
504
/* =================================================================================================== */
505
/**
506
 * @internal
507
 * Calculate the fixed latency from the system and the device.
508
 * Sum of kAudioStreamPropertyLatency +
509
 *        kAudioDevicePropertySafetyOffset +
510
 *        kAudioDevicePropertyLatency
511
 *
512
 * Some useful info from Jeff Moore on latency.
513
 * http://osdir.com/ml/coreaudio-api/2010-01/msg00046.html
514
 * http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
515
 */
516
/**
 * @internal
 * Calculate the fixed latency from the system and the device:
 * kAudioStreamPropertyLatency + kAudioDevicePropertySafetyOffset +
 * kAudioDevicePropertyLatency, summed into *fixedLatencyPtr (frames).
 *
 * Returns paNoError on success; on failure *fixedLatencyPtr is untouched.
 */
static PaError CalculateFixedDeviceLatency( AudioDeviceID macCoreDeviceId, int isInput, UInt32 *fixedLatencyPtr )
{
    PaError err;
    UInt32 propSize;
    /* BUGFIX: initialize all three latency components. streamLatency in
       particular was read uninitialized when the device reported no streams
       (propSize != sizeof(AudioStreamID)) or the stream query failed. */
    UInt32 deviceLatency = 0;
    UInt32 streamLatency = 0;
    UInt32 safetyOffset = 0;
    AudioStreamID streamIDs[1];

    // To get stream latency we have to get a streamID from the device.
    // We are only going to look at the first stream so only fetch one stream.
    propSize = sizeof(streamIDs);
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
    if( err != paNoError ) goto error;
    if( propSize == sizeof(AudioStreamID) )
    {
        /* Stream latency is best-effort: on failure streamLatency stays 0. */
        propSize = sizeof(UInt32);
        err  = WARNING(AudioStreamGetProperty(streamIDs[0], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
    }

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
    if( err != paNoError ) goto error;

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
    if( err != paNoError ) goto error;

    *fixedLatencyPtr = deviceLatency + streamLatency + safetyOffset;
    return err;
error:
    return err;
}
549

    
550
/* =================================================================================================== */
551
/* Compute default low/high latency estimates (in frames) for a device
   direction. Both estimates include the fixed hardware/driver latency; the
   low estimate adds a small clipped buffer, the high estimate adds the
   device's current default buffer size. On failure the outputs are left
   untouched and the error is returned. */
static PaError CalculateDefaultDeviceLatencies( AudioDeviceID macCoreDeviceId,
                                               int isInput, UInt32 *lowLatencyFramesPtr,
                                               UInt32 *highLatencyFramesPtr )
{
    UInt32 propSize;
    UInt32 defaultBufferFrames = 0;
    UInt32 fixedLatency = 0;
    UInt32 clippedMinBufferSize = 0;

    //DumpDeviceProperties( macCoreDeviceId, isInput );

    /* Fixed (stream + safety-offset + device) latency common to both estimates. */
    PaError err = CalculateFixedDeviceLatency( macCoreDeviceId, isInput, &fixedLatency );
    if( err != paNoError )
        return err;

    // For low latency use a small fixed size buffer clipped to the device range.
    err = ClipToDeviceBufferSize( macCoreDeviceId, isInput, PA_MAC_SMALL_BUFFER_SIZE, &clippedMinBufferSize );
    if( err != paNoError )
        return err;

    // For high latency use the default device buffer size.
    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &defaultBufferFrames));
    if( err != paNoError )
        return err;

    *lowLatencyFramesPtr  = fixedLatency + clippedMinBufferSize;
    *highLatencyFramesPtr = fixedLatency + defaultBufferFrames;

    return err;
}
581

    
582
/* =================================================================================================== */
583

    
584
/* Query a device's stream configuration to count channels in the given
   direction, storing the count and default latency estimates into
   *deviceInfo. Fails only if the channel count itself cannot be obtained;
   latency retrieval is best-effort with hard-coded fallbacks.
   NOTE(review): divides by deviceInfo->defaultSampleRate when converting
   frames to seconds — assumes the caller set a non-zero defaultSampleRate
   beforehand; confirm against the call sites. */
static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput)
{
    UInt32 propSize;
    PaError err = paNoError;
    UInt32 i;
    int numChannels = 0;
    AudioBufferList *buflist = NULL;

    VVDBUG(("GetChannelInfo()\n"));

    /* Get the number of channels from the stream configuration.
       Fail if we can't get this. */

    err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
    if (err)
        return err;

    /* The configuration is variable-sized, so allocate the exact size the
       HAL reported before fetching it. */
    buflist = PaUtil_AllocateMemory(propSize);
    if( !buflist )
       return paInsufficientMemory;
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
    if (err)
        goto error;

    /* Total channels = sum over all buffers in the stream configuration. */
    for (i = 0; i < buflist->mNumberBuffers; ++i)
        numChannels += buflist->mBuffers[i].mNumberChannels;

    if (isInput)
        deviceInfo->maxInputChannels = numChannels;
    else
        deviceInfo->maxOutputChannels = numChannels;

    if (numChannels > 0) /* do not try to retrieve the latency if there are no channels. */
    {
        /* Get the latency.  Don't fail if we can't get this. */
        /* default to something reasonable */
        deviceInfo->defaultLowInputLatency = .01;
        deviceInfo->defaultHighInputLatency = .10;
        deviceInfo->defaultLowOutputLatency = .01;
        deviceInfo->defaultHighOutputLatency = .10;        
        UInt32 lowLatencyFrames = 0;
        UInt32 highLatencyFrames = 0;
        err = CalculateDefaultDeviceLatencies( macCoreDeviceId, isInput, &lowLatencyFrames, &highLatencyFrames );
        if( err == 0 )
        {
            /* Convert frames to seconds, overriding the fallbacks above. */
            double lowLatencySeconds = lowLatencyFrames / deviceInfo->defaultSampleRate;
            double highLatencySeconds = highLatencyFrames / deviceInfo->defaultSampleRate;
            if (isInput)
            {
                deviceInfo->defaultLowInputLatency = lowLatencySeconds;
                deviceInfo->defaultHighInputLatency = highLatencySeconds;
            }
            else
            {
                deviceInfo->defaultLowOutputLatency = lowLatencySeconds;
                deviceInfo->defaultHighOutputLatency = highLatencySeconds;
            }
        }
    }
    PaUtil_FreeMemory( buflist );
    return paNoError;
 error:
    PaUtil_FreeMemory( buflist );
    return err;
}
653

    
654
/* =================================================================================================== */
655
/* Populate one PaDeviceInfo from a CoreAudio device.
 *
 * Fails only if the device name or the channel counts cannot be retrieved;
 * a missing nominal sample rate is tolerated and recorded as 0.0.
 * The name string is allocated from the host api's allocation group and is
 * therefore freed when the group is destroyed (see Terminate).
 */
static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
                                     PaDeviceInfo *deviceInfo,
                                     AudioDeviceID macCoreDeviceId,
                                     PaHostApiIndex hostApiIndex )
{
    Float64 sampleRate;
    char *name;
    PaError err = paNoError;
        CFStringRef nameRef;
    UInt32 propSize;

    VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));

    /* Start from a clean slate so every field has a defined value. */
    memset(deviceInfo, 0, sizeof(PaDeviceInfo));

    deviceInfo->structVersion = 2;
    deviceInfo->hostApi = hostApiIndex;

    /* Get the device name using CFString */
        propSize = sizeof(nameRef);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceNameCFString, &propSize, &nameRef));
    if (err)
    {
                /* Get the device name using c string.  Fail if we can't get it. */
                err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
                if (err)
                        return err;

                /* +1 leaves room for a NUL terminator beyond the reported size. */
                name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize+1);
                if ( !name )
                        return paInsufficientMemory;
                err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
                if (err)
                        return err;
        }
        else
        {
                /* valid CFString so we just allocate a c string big enough to contain the data */
                propSize = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
                name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations, propSize+1);
                if ( !name )
                {
                        CFRelease(nameRef);
                        return paInsufficientMemory;
                }
                CFStringGetCString(nameRef, name, propSize+1, kCFStringEncodingUTF8);
                CFRelease(nameRef);
        }
    deviceInfo->name = name;

    /* Try to get the default sample rate.  Don't fail if we can't get this. */
    propSize = sizeof(Float64);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
    if (err)
        deviceInfo->defaultSampleRate = 0.0;
    else
        deviceInfo->defaultSampleRate = sampleRate;

    /* Get the maximum number of input and output channels.  Fail if we can't get this. */

    /* isInput = 1: fills maxInputChannels and input latency defaults. */
    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
    if (err)
        return err;

    /* isInput = 0: fills maxOutputChannels and output latency defaults. */
    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
    if (err)
        return err;

    return paNoError;
}
725

    
726
/* Initialize the CoreAudio (AUHAL) host api.
 *
 * On success, *hostApi points at the newly allocated representation and
 * paNoError is returned. On failure, all partially-built state is released
 * and an error code is returned; *hostApi must not be used.
 */
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
{
    PaError result = paNoError;
    int i;
    PaMacAUHAL *auhalHostApi = NULL;
    PaDeviceInfo *deviceInfoArray;
    int unixErr;

    VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));

    SInt32 major;
    SInt32 minor;
    Gestalt(gestaltSystemVersionMajor, &major);
    Gestalt(gestaltSystemVersionMinor, &minor);

    /* Starting with 10.6 systems, the HAL notification thread is created
       internally; setting a NULL run loop asks the HAL to use its own
       internal thread for property notifications. */
    if (major == 10 && minor >= 6) {
        CFRunLoopRef theRunLoop = NULL;
        AudioObjectPropertyAddress theAddress = { kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
        OSStatus osErr = AudioObjectSetPropertyData (kAudioObjectSystemObject, &theAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
        if (osErr != noErr) {
            /* BUG FIX: previously this jumped to the error label while
               result was still paNoError, so the failure was reported as
               success even though *hostApi was never set. Convert the
               OSStatus to a PaError first. */
            result = ERR( osErr );
            goto error;
        }
    }

    /* Set up the process-wide xrun (overload) listener machinery. */
    unixErr = initializeXRunListenerList();
    if( 0 != unixErr ) {
       return UNIX_ERR(unixErr);
    }

    auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
    if( !auhalHostApi )
    {
        result = paInsufficientMemory;
        goto error;
    }

    /* All per-host-api allocations below go into this group so they can be
       released together in Terminate (or in the error path here). */
    auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
    if( !auhalHostApi->allocations )
    {
        result = paInsufficientMemory;
        goto error;
    }

    auhalHostApi->devIds = NULL;
    auhalHostApi->devCount = 0;

    /* get the info we need about the devices */
    result = gatherDeviceInfo( auhalHostApi );
    if( result != paNoError )
       goto error;

    *hostApi = &auhalHostApi->inheritedHostApiRep;
    (*hostApi)->info.structVersion = 1;
    (*hostApi)->info.type = paCoreAudio;
    (*hostApi)->info.name = "Core Audio";

    (*hostApi)->info.defaultInputDevice = paNoDevice;
    (*hostApi)->info.defaultOutputDevice = paNoDevice;

    (*hostApi)->info.deviceCount = 0;

    if( auhalHostApi->devCount > 0 )
    {
        (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
        if( !(*hostApi)->deviceInfos )
        {
            result = paInsufficientMemory;
            goto error;
        }

        /* allocate all device info structs in a contiguous block */
        deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
        if( !deviceInfoArray )
        {
            result = paInsufficientMemory;
            goto error;
        }

        for( i=0; i < auhalHostApi->devCount; ++i )
        {
            int err;
            err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
                                      auhalHostApi->devIds[i],
                                      hostApiIndex );
            if (err == paNoError)
            { /* copy some info and set the defaults */
                (*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
                    (*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
                    (*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
                (*hostApi)->info.deviceCount++;
            }
            else
            { /* there was an error. we need to shift the devices down, so we ignore this one */
                int j;
                auhalHostApi->devCount--;
                for( j=i; j<auhalHostApi->devCount; ++j )
                   auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
                i--; /* re-examine the slot we just shifted into */
            }
        }
    }

    (*hostApi)->Terminate = Terminate;
    (*hostApi)->OpenStream = OpenStream;
    (*hostApi)->IsFormatSupported = IsFormatSupported;

    /* Callback streams: real CPU-load accounting, dummy blocking I/O. */
    PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, GetStreamCpuLoad,
                                      PaUtil_DummyRead, PaUtil_DummyWrite,
                                      PaUtil_DummyGetReadAvailable,
                                      PaUtil_DummyGetWriteAvailable );

    /* Blocking streams: real read/write, dummy CPU-load accounting. */
    PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, PaUtil_DummyGetCpuLoad,
                                      ReadStream, WriteStream,
                                      GetStreamReadAvailable,
                                      GetStreamWriteAvailable );

    return result;

error:
    if( auhalHostApi )
    {
        if( auhalHostApi->allocations )
        {
            PaUtil_FreeAllAllocations( auhalHostApi->allocations );
            PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
        }

        PaUtil_FreeMemory( auhalHostApi );
    }
    return result;
}
870

    
871

    
872
static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
873
{
874
    int unixErr;
875

    
876
    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
877

    
878
    VVDBUG(("Terminate()\n"));
879

    
880
    unixErr = destroyXRunListenerList();
881
    if( 0 != unixErr )
882
       UNIX_ERR(unixErr);
883

    
884
    /*
885
        IMPLEMENT ME:
886
            - clean up any resources not handled by the allocation group
887
        TODO: Double check that everything is handled by alloc group
888
    */
889

    
890
    if( auhalHostApi->allocations )
891
    {
892
        PaUtil_FreeAllAllocations( auhalHostApi->allocations );
893
        PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
894
    }
895

    
896
    PaUtil_FreeMemory( auhalHostApi );
897
}
898

    
899

    
900
/* Report whether the given stream parameters / sample rate combination is
 * supported.
 *
 * After the standard PortAudio parameter checks, this actually opens and
 * immediately closes a stream with the requested settings, because that is
 * the only reliable way to test a format/rate combination (see the FEEDBACK
 * comment below).
 *
 * Returns paFormatIsSupported on success, otherwise the error from the
 * failed check or the trial OpenStream call.
 */
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate )
{
    int inputChannelCount, outputChannelCount;
    PaSampleFormat inputSampleFormat, outputSampleFormat;

    VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
                inputParameters  ? inputParameters->channelCount  : -1,
                inputParameters  ? inputParameters->sampleFormat  : -1,
                outputParameters ? outputParameters->channelCount : -1,
                outputParameters ? outputParameters->sampleFormat : -1,
                (float) sampleRate ));
 
    /** These first checks are standard PA checks. We do some fancier checks
        later. */
    if( inputParameters )
    {
        inputChannelCount = inputParameters->channelCount;
        inputSampleFormat = inputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( inputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;
            
        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that input device can support inputChannelCount */
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
            return paInvalidChannelCount;
    }
    else
    {
        inputChannelCount = 0;
    }

    if( outputParameters )
    {
        outputChannelCount = outputParameters->channelCount;
        outputSampleFormat = outputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( outputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;
            
        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that output device can support outputChannelCount */
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
            return paInvalidChannelCount;

    }
    else
    {
        outputChannelCount = 0;
    }
 
    /* FEEDBACK */
    /*        I think the only way to check a given format SR combo is     */
    /*        to try opening it. This could be disruptive, is that Okay?   */
    /*        The alternative is to just read off available sample rates,  */
    /*        but this will not work %100 of the time (eg, a device that   */
    /*        supports N output at one rate but only N/2 at a higher rate.)*/

    /* The following code opens the device with the requested parameters to
       see if it works. */
    {
       PaError err;
       PaStream *s;
       /* The (PaStreamCallback *)1 dummy marks this as a callback stream;
          the callback is never invoked because the stream is closed here
          without ever being started. */
       err = OpenStream( hostApi, &s, inputParameters, outputParameters,
                           sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
       if( err != paNoError && err != paInvalidSampleRate )
          DBUG( ( "OpenStream @ %g returned: %d: %s\n",
                  (float) sampleRate, err, Pa_GetErrorText( err ) ) );
       if( err ) 
          return err;
       err = CloseStream( s );
       if( err ) {
          /* FEEDBACK: is this more serious? should we assert? */
          DBUG( ( "WARNING: could not close Stream. %d: %s\n",
                  err, Pa_GetErrorText( err ) ) );
       }
    }

    return paFormatIsSupported;
}
997

    
998
/* ================================================================================= */
999
/* Reset a device-properties cache to a safe initial state.
 * The sample rate is seeded with a harmless placeholder so the derived
 * sample period is finite; real values are filled in later from the device.
 */
static void InitializeDeviceProperties( PaMacCoreDeviceProperties *props )
{
    memset( props, 0, sizeof(PaMacCoreDeviceProperties) );
    props->sampleRate = 1.0; // Better than random. Overwritten by actual values later on.
    props->samplePeriod = 1.0 / props->sampleRate;
}
1005

    
1006
/* Total software-path latency in seconds: IO buffer plus device latency
 * plus safety offset, converted from frames via the cached sample period.
 */
static Float64 CalculateSoftwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    UInt32 totalFrames = deviceProperties->bufferFrameSize
                       + deviceProperties->deviceLatency
                       + deviceProperties->safetyOffset;
    /* Multiplying by samplePeriod is the same as dividing by sampleRate,
       but cheaper. */
    return totalFrames * deviceProperties->samplePeriod;
}
1011

    
1012
/* Hardware-only latency in seconds, derived from the cached device latency
 * (frames) and sample period. */
static Float64 CalculateHardwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    /* Multiply by the period instead of dividing by the rate (faster). */
    return deviceProperties->deviceLatency * deviceProperties->samplePeriod;
}
1016

    
1017
/* Calculate values used to convert Apple timestamps into PA timestamps
1018
 * from the device properties. The final results of this calculation
1019
 * will be used in the audio callback function.
1020
 */
1021
/* Recompute the offsets used to convert Apple timestamps into PA timestamps
 * from the cached device properties. The results are read by the audio
 * callback, so the three fields are published under the timing mutex as a
 * consistent group.
 */
static void UpdateTimeStampOffsets( PaMacCoreStream *stream )
{
    Float64 combinedOffset = 0.0;
    Float64 inputDeviceOffset = 0.0;
    Float64 outputDeviceOffset = 0.0;

    if( stream->inputUnit != NULL )
    {
        combinedOffset += CalculateSoftwareLatencyFromProperties( stream, &stream->inputProperties );
        inputDeviceOffset = CalculateHardwareLatencyFromProperties( stream, &stream->inputProperties );
    }
    if( stream->outputUnit != NULL )
    {
        combinedOffset += CalculateSoftwareLatencyFromProperties( stream, &stream->outputProperties );
        outputDeviceOffset = CalculateHardwareLatencyFromProperties( stream, &stream->outputProperties );
    }

    /* We only need a mutex around setting these variables as a group. */
    pthread_mutex_lock( &stream->timingInformationMutex );
    stream->timestampOffsetCombined = combinedOffset;
    stream->timestampOffsetInputDevice = inputDeviceOffset;
    stream->timestampOffsetOutputDevice = outputDeviceOffset;
    pthread_mutex_unlock( &stream->timingInformationMutex );
}
1046

    
1047
/* ================================================================================= */
1048

    
1049
/* can be used to update from nominal or actual sample rate */
1050
/* Refresh the cached sample rate (and derived period) for one direction of
 * the stream from a device property; usable with either the nominal or the
 * actual sample-rate property id. Returns the OSStatus of the query. */
static OSStatus UpdateSampleRateFromDeviceProperty( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID sampleRatePropertyID )
{
    PaMacCoreDeviceProperties *props = isInput ? &stream->inputProperties
                                               : &stream->outputProperties;
    Float64 rate = 0.0;
    UInt32 size = sizeof(Float64);
    OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, sampleRatePropertyID, &size, &rate );

    /* Only accept plausible rates; this also avoids a divide by zero when
       the query fails or returns garbage. */
    if( osErr == noErr && rate > 1000.0 )
    {
        props->sampleRate = rate;
        props->samplePeriod = 1.0 / rate;
    }
    return osErr;
}
1064

    
1065
/* HAL property listener: the device's actual sample rate changed, so pull
 * the new value and recompute the timestamp offsets that depend on it. */
static OSStatus AudioDevicePropertyActualSampleRateListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
    OSStatus osErr;

    // Make sure the callback is operating on a stream that is still valid!
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );

    osErr = UpdateSampleRateFromDeviceProperty( stream, inDevice, isInput, kAudioDevicePropertyActualSampleRate );
    if( osErr == noErr )
        UpdateTimeStampOffsets( stream );

    return osErr;
}
1079

    
1080
/* ================================================================================= */
1081
/* Read a single UInt32-valued device property into *outValue.
 * On failure *outValue is left untouched; the OSStatus is returned either way.
 */
static OSStatus QueryUInt32DeviceProperty( AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID propertyID, UInt32 *outValue )
{
    UInt32 value = 0;
    UInt32 size = sizeof(UInt32);
    OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, propertyID, &size, &value );

    if( osErr == noErr )
        *outValue = value;

    return osErr;
}
1092

    
1093
/* HAL property listener shared by the latency-related UInt32 properties
 * (safety offset, device latency, buffer frame size). Re-reads the changed
 * property into the matching cached field and, on success, recomputes the
 * timestamp offsets. Unrecognized property ids are ignored. */
static OSStatus AudioDevicePropertyGenericListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
{
    OSStatus osErr = noErr;
    PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
    PaMacCoreDeviceProperties *deviceProperties;
    UInt32 *target = NULL;

    // Make sure the callback is operating on a stream that is still valid!
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );

    deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;

    /* Map the property id to the cached field it refreshes. */
    if( inPropertyID == kAudioDevicePropertySafetyOffset )
        target = &deviceProperties->safetyOffset;
    else if( inPropertyID == kAudioDevicePropertyLatency )
        target = &deviceProperties->deviceLatency;
    else if( inPropertyID == kAudioDevicePropertyBufferFrameSize )
        target = &deviceProperties->bufferFrameSize;

    if( target != NULL )
    {
        osErr = QueryUInt32DeviceProperty( inDevice, isInput, inPropertyID, target );
        if( osErr == noErr )
            UpdateTimeStampOffsets( stream );
    }
    return osErr;
}
1127

    
1128
/* ================================================================================= */
1129
/*
1130
 * Setup listeners in case device properties change during the run. */
1131
/*
 * Prime the cached latency-related device properties and install listeners
 * so the cache stays current if the properties change during the run.
 * Returns the OSStatus of the first failing initial query, or noErr.
 */
static OSStatus SetupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
{
    OSStatus osErr = noErr;
    PaMacCoreDeviceProperties *deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;

    /* Initial values; fail if any of them cannot be read. */
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertyLatency, &deviceProperties->deviceLatency )) != noErr ) return osErr;
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertyBufferFrameSize, &deviceProperties->bufferFrameSize )) != noErr ) return osErr;
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
                                           kAudioDevicePropertySafetyOffset, &deviceProperties->safetyOffset )) != noErr ) return osErr;

    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate, 
                                   AudioDevicePropertyActualSampleRateListenerProc, stream );

    /* BUG FIX: this previously registered the generic listener for
     * kAudioStreamPropertyLatency, a property the generic listener's switch
     * does not handle and which CleanupDevicePropertyListeners never removes
     * (it removes kAudioDevicePropertyLatency). Register the device latency
     * property instead, matching both the listener and the cleanup. */
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyLatency, 
                                   AudioDevicePropertyGenericListenerProc, stream );
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize, 
                                   AudioDevicePropertyGenericListenerProc, stream );
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset, 
                                   AudioDevicePropertyGenericListenerProc, stream );

    return osErr;
}
1155

    
1156
/* Remove the per-device property listeners: the actual-sample-rate listener
 * and the three generic listeners (device latency, buffer frame size,
 * safety offset). Counterpart of SetupDevicePropertyListeners. */
static void CleanupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
{    
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate, 
                                   AudioDevicePropertyActualSampleRateListenerProc );
    
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyLatency, 
                                   AudioDevicePropertyGenericListenerProc );        
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize, 
                                   AudioDevicePropertyGenericListenerProc );
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset, 
                                   AudioDevicePropertyGenericListenerProc );
}
1168

    
1169
/* ================================================================================= */
1170
static PaError OpenAndSetupOneAudioUnit(
1171
                                   const PaMacCoreStream *stream,
1172
                                   const PaStreamParameters *inStreamParams,
1173
                                   const PaStreamParameters *outStreamParams,
1174
                                   const UInt32 requestedFramesPerBuffer,
1175
                                   UInt32 *actualInputFramesPerBuffer,
1176
                                   UInt32 *actualOutputFramesPerBuffer,
1177
                                   const PaMacAUHAL *auhalHostApi,
1178
#ifndef AUDIO_COMPONENT_FIX
1179
                                   AudioUnit *audioUnit,
1180
#else
1181
                                   AudioComponentInstance *audioUnit,
1182
#endif
1183
                                   AudioConverterRef *srConverter,
1184
                                   AudioDeviceID *audioDevice,
1185
                                   const double sampleRate,
1186
                                   void *refCon )
1187
{
1188
#ifndef AUDIO_COMPONENT_FIX
1189
    ComponentDescription desc;
1190
    Component comp;
1191
#else
1192
    AudioComponentDescription desc;
1193
    AudioComponent comp;
1194
#endif
1195
    /*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
1196
    AudioStreamBasicDescription desiredFormat;
1197
    OSStatus result = noErr;
1198
    PaError paResult = paNoError;
1199
    int line = 0;
1200
    UInt32 callbackKey;
1201
    AURenderCallbackStruct rcbs;
1202
    unsigned long macInputStreamFlags  = paMacCorePlayNice;
1203
    unsigned long macOutputStreamFlags = paMacCorePlayNice;
1204
    SInt32 const *inChannelMap = NULL;
1205
    SInt32 const *outChannelMap = NULL;
1206
    unsigned long inChannelMapSize = 0;
1207
    unsigned long outChannelMapSize = 0;
1208

    
1209
    VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
1210
                inStreamParams  ? inStreamParams->channelCount  : -1,
1211
                inStreamParams  ? inStreamParams->sampleFormat  : -1,
1212
                outStreamParams ? outStreamParams->channelCount : -1,
1213
                outStreamParams ? outStreamParams->sampleFormat : -1,
1214
                requestedFramesPerBuffer ));
1215

    
1216
    /* -- handle the degenerate case  -- */
1217
    if( !inStreamParams && !outStreamParams ) {
1218
       *audioUnit = NULL;
1219
       *audioDevice = kAudioDeviceUnknown;
1220
       return paNoError;
1221
    }
1222

    
1223
    /* -- get the user's api specific info, if they set any -- */
1224
    if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
1225
    {
1226
       macInputStreamFlags=
1227
            ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1228
                  ->flags;
1229
       inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1230
                  ->channelMap;
1231
       inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1232
                  ->channelMapSize;
1233
    }
1234
    if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
1235
    {
1236
       macOutputStreamFlags=
1237
            ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1238
                  ->flags;
1239
       outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1240
                  ->channelMap;
1241
       outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1242
                  ->channelMapSize; 
1243
    }
1244
    /* Override user's flags here, if desired for testing. */
1245

    
1246
    /*
1247
     * The HAL AU is a Mac OS style "component".
1248
     * the first few steps deal with that.
1249
     * Later steps work on a combination of Mac OS
1250
     * components and the slightly lower level
1251
     * HAL.
1252
     */
1253

    
1254
    /* -- describe the output type AudioUnit -- */
1255
    /*  Note: for the default AudioUnit, we could use the
1256
     *  componentSubType value kAudioUnitSubType_DefaultOutput;
1257
     *  but I don't think that's relevant here.
1258
     */
1259
    desc.componentType         = kAudioUnitType_Output;
1260
    desc.componentSubType      = kAudioUnitSubType_HALOutput;
1261
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1262
    desc.componentFlags        = 0;
1263
    desc.componentFlagsMask    = 0;
1264

    
1265
    /* -- find the component -- */
1266
#ifndef AUDIO_COMPONENT_FIX
1267
    comp = FindNextComponent( NULL, &desc );
1268
#else
1269
    comp = AudioComponentFindNext( NULL, &desc );
1270
#endif
1271
    if( !comp )
1272
    {
1273
       DBUG( ( "AUHAL component not found." ) );
1274
       *audioUnit = NULL;
1275
       *audioDevice = kAudioDeviceUnknown;
1276
       return paUnanticipatedHostError;
1277
    }
1278
    /* -- open it -- */
1279
#ifndef AUDIO_COMPONENT_FIX
1280
                result = OpenAComponent(comp, audioUnit);
1281
#else
1282
                result = AudioComponentInstanceNew(comp, audioUnit);
1283
#endif
1284
    if( result )
1285
    {
1286
       DBUG( ( "Failed to open AUHAL component." ) );
1287
       *audioUnit = NULL;
1288
       *audioDevice = kAudioDeviceUnknown;
1289
       return ERR( result );
1290
    }
1291
    /* -- prepare a little error handling logic / hackery -- */
1292
#define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
1293

    
1294
    /* -- if there is input, we have to explicitly enable input -- */
1295
    if( inStreamParams )
1296
    {
1297
       UInt32 enableIO = 1;
1298
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1299
                 kAudioOutputUnitProperty_EnableIO,
1300
                 kAudioUnitScope_Input,
1301
                 INPUT_ELEMENT,
1302
                 &enableIO,
1303
                 sizeof(enableIO) ) );
1304
    }
1305
    /* -- if there is no output, we must explicitly disable output -- */
1306
    if( !outStreamParams )
1307
    {
1308
       UInt32 enableIO = 0;
1309
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1310
                 kAudioOutputUnitProperty_EnableIO,
1311
                 kAudioUnitScope_Output,
1312
                 OUTPUT_ELEMENT,
1313
                 &enableIO,
1314
                 sizeof(enableIO) ) );
1315
    }
1316

    
1317
    /* -- set the devices -- */
1318
    /* make sure input and output are the same device if we are doing input and
1319
       output. */
1320
    if( inStreamParams && outStreamParams )
1321
    {
1322
       assert( outStreamParams->device == inStreamParams->device );
1323
    }
1324
    if( inStreamParams )
1325
    {
1326
       *audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
1327
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1328
                    kAudioOutputUnitProperty_CurrentDevice,
1329
                    kAudioUnitScope_Global,
1330
                    INPUT_ELEMENT,
1331
                    audioDevice,
1332
                    sizeof(AudioDeviceID) ) );
1333
    }
1334
    if( outStreamParams && outStreamParams != inStreamParams )
1335
    {
1336
       *audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
1337
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1338
                    kAudioOutputUnitProperty_CurrentDevice,
1339
                    kAudioUnitScope_Global,
1340
                    OUTPUT_ELEMENT,
1341
                    audioDevice,
1342
                    sizeof(AudioDeviceID) ) );
1343
    }
1344
    /* -- add listener for dropouts -- */
1345
    result = AudioDeviceAddPropertyListener( *audioDevice,
1346
                                             0,
1347
                                             outStreamParams ? false : true,
1348
                                             kAudioDeviceProcessorOverload,
1349
                                             xrunCallback,
1350
                                             addToXRunListenerList( (void *)stream ) ) ;
1351
    if( result == kAudioHardwareIllegalOperationError ) {
1352
       // -- already registered, we're good
1353
    } else {
1354
       // -- not already registered, just check for errors
1355
       ERR_WRAP( result );
1356
    }
1357
    /* -- listen for stream start and stop -- */
1358
    ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
1359
                                            kAudioOutputUnitProperty_IsRunning,
1360
                                            startStopCallback,
1361
                                            (void *)stream ) );
1362

    
1363
    /* -- set format -- */
1364
    bzero( &desiredFormat, sizeof(desiredFormat) );
1365
    desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1366
    desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1367
    desiredFormat.mFramesPerPacket  = 1;
1368
    desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1369

    
1370
    result = 0;
1371
    /*  set device format first, but only touch the device if the user asked */
1372
    if( inStreamParams ) {
1373
       /*The callback never calls back if we don't set the FPB */
1374
       /*This seems wierd, because I would think setting anything on the device
1375
         would be disruptive.*/
1376
       paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
1377
                                          requestedFramesPerBuffer,
1378
                                          actualInputFramesPerBuffer );
1379
       if( paResult ) goto error;
1380
       if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
1381
          bool requireExact;
1382
          requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
1383
          paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
1384
                                                 requireExact, sampleRate );
1385
          if( paResult ) goto error;
1386
       }
1387
       if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
1388
          *actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
1389
    }
1390
    if( outStreamParams && !inStreamParams ) {
1391
       /*The callback never calls back if we don't set the FPB */
1392
       /*This seems wierd, because I would think setting anything on the device
1393
         would be disruptive.*/
1394
       paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
1395
                                          requestedFramesPerBuffer,
1396
                                          actualOutputFramesPerBuffer );
1397
       if( paResult ) goto error;
1398
       if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
1399
          bool requireExact;
1400
          requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
1401
          paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1402
                                                 requireExact, sampleRate );
1403
          if( paResult ) goto error;
1404
       }
1405
    }
1406

    
1407
    /* -- set the quality of the output converter -- */
1408
    if( outStreamParams ) {
1409
       UInt32 value = kAudioConverterQuality_Max;
1410
       switch( macOutputStreamFlags & 0x0700 ) {
1411
       case 0x0100: /*paMacCore_ConversionQualityMin:*/
1412
          value=kRenderQuality_Min;
1413
          break;
1414
       case 0x0200: /*paMacCore_ConversionQualityLow:*/
1415
          value=kRenderQuality_Low;
1416
          break;
1417
       case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1418
          value=kRenderQuality_Medium;
1419
          break;
1420
       case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1421
          value=kRenderQuality_High;
1422
          break;
1423
       }
1424
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1425
                    kAudioUnitProperty_RenderQuality,
1426
                    kAudioUnitScope_Global,
1427
                    OUTPUT_ELEMENT,
1428
                    &value,
1429
                    sizeof(value) ) );
1430
    }
1431
    /* now set the format on the Audio Units. */
1432
    if( outStreamParams )
1433
    {
1434
       desiredFormat.mSampleRate    =sampleRate;
1435
       desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1436
       desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1437
       desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1438
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1439
                            kAudioUnitProperty_StreamFormat,
1440
                            kAudioUnitScope_Input,
1441
                            OUTPUT_ELEMENT,
1442
                            &desiredFormat,
1443
                            sizeof(AudioStreamBasicDescription) ) );
1444
    }
1445
    if( inStreamParams )
1446
    {
1447
       AudioStreamBasicDescription sourceFormat;
1448
       UInt32 size = sizeof( AudioStreamBasicDescription );
1449

    
1450
       /* keep the sample rate of the device, or we confuse AUHAL */
1451
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1452
                            kAudioUnitProperty_StreamFormat,
1453
                            kAudioUnitScope_Input,
1454
                            INPUT_ELEMENT,
1455
                            &sourceFormat,
1456
                            &size ) );
1457
       desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1458
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1459
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1460
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1461
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1462
                            kAudioUnitProperty_StreamFormat,
1463
                            kAudioUnitScope_Output,
1464
                            INPUT_ELEMENT,
1465
                            &desiredFormat,
1466
                            sizeof(AudioStreamBasicDescription) ) );
1467
    }
1468
    /* set the maximumFramesPerSlice */
1469
    /* not doing this causes real problems
1470
       (eg. the callback might not be called). The idea of setting both this
1471
       and the frames per buffer on the device is that we'll be most likely
1472
       to actually get the frame size we requested in the callback with the
1473
       minimum latency. */
1474
    if( outStreamParams ) {
1475
       UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1476
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1477
                            kAudioUnitProperty_MaximumFramesPerSlice,
1478
                            kAudioUnitScope_Input,
1479
                            OUTPUT_ELEMENT,
1480
                            actualOutputFramesPerBuffer,
1481
                            sizeof(*actualOutputFramesPerBuffer) ) );
1482
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1483
                            kAudioUnitProperty_MaximumFramesPerSlice,
1484
                            kAudioUnitScope_Global,
1485
                            OUTPUT_ELEMENT,
1486
                            actualOutputFramesPerBuffer,
1487
                            &size ) );
1488
    }
1489
    if( inStreamParams ) {
1490
       /*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1491
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1492
                            kAudioUnitProperty_MaximumFramesPerSlice,
1493
                            kAudioUnitScope_Output,
1494
                            INPUT_ELEMENT,
1495
                            actualInputFramesPerBuffer,
1496
                            sizeof(*actualInputFramesPerBuffer) ) );
1497
/* Don't know why this causes problems
1498
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1499
                            kAudioUnitProperty_MaximumFramesPerSlice,
1500
                            kAudioUnitScope_Global, //Output,
1501
                            INPUT_ELEMENT,
1502
                            actualInputFramesPerBuffer,
1503
                            &size ) );
1504
*/
1505
    }
1506

    
1507
    /* -- if we have input, we may need to setup an SR converter -- */
1508
    /* even if we got the sample rate we asked for, we need to do
1509
       the conversion in case another program changes the underlying SR. */
1510
    /* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1511
    if( inStreamParams ) {
1512
       AudioStreamBasicDescription desiredFormat;
1513
       AudioStreamBasicDescription sourceFormat;
1514
       UInt32 sourceSize = sizeof( sourceFormat );
1515
       bzero( &desiredFormat, sizeof(desiredFormat) );
1516
       desiredFormat.mSampleRate       = sampleRate;
1517
       desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1518
       desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1519
       desiredFormat.mFramesPerPacket  = 1;
1520
       desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1521
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1522
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1523
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1524

    
1525
       /* get the source format */
1526
       ERR_WRAP( AudioUnitGetProperty(
1527
                         *audioUnit,
1528
                         kAudioUnitProperty_StreamFormat,
1529
                         kAudioUnitScope_Output,
1530
                         INPUT_ELEMENT,
1531
                         &sourceFormat,
1532
                         &sourceSize ) );
1533

    
1534
       if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1535
       {
1536
          UInt32 value = kAudioConverterQuality_Max;
1537
          switch( macInputStreamFlags & 0x0700 ) {
1538
          case 0x0100: /*paMacCore_ConversionQualityMin:*/
1539
             value=kAudioConverterQuality_Min;
1540
             break;
1541
          case 0x0200: /*paMacCore_ConversionQualityLow:*/
1542
             value=kAudioConverterQuality_Low;
1543
             break;
1544
          case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1545
             value=kAudioConverterQuality_Medium;
1546
             break;
1547
          case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1548
             value=kAudioConverterQuality_High;
1549
             break;
1550
          }
1551
          VDBUG(( "Creating sample rate converter for input"
1552
                  " to convert from %g to %g\n",
1553
                  (float)sourceFormat.mSampleRate,
1554
                  (float)desiredFormat.mSampleRate ) );
1555
          /* create our converter */
1556
          ERR_WRAP( AudioConverterNew( 
1557
                             &sourceFormat,
1558
                             &desiredFormat,
1559
                             srConverter ) );
1560
          /* Set quality */
1561
          ERR_WRAP( AudioConverterSetProperty(
1562
                             *srConverter,
1563
                             kAudioConverterSampleRateConverterQuality,
1564
                             sizeof( value ),
1565
                             &value ) );
1566
       }
1567
    }
1568
    /* -- set IOProc (callback) -- */
1569
    callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1570
                                  : kAudioOutputUnitProperty_SetInputCallback ;
1571
    rcbs.inputProc = AudioIOProc;
1572
    rcbs.inputProcRefCon = refCon;
1573
    ERR_WRAP( AudioUnitSetProperty(
1574
                               *audioUnit,
1575
                               callbackKey,
1576
                               kAudioUnitScope_Output,
1577
                               outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1578
                               &rcbs,
1579
                               sizeof(rcbs)) );
1580

    
1581
    if( inStreamParams && outStreamParams && *srConverter )
1582
           ERR_WRAP( AudioUnitSetProperty(
1583
                               *audioUnit,
1584
                               kAudioOutputUnitProperty_SetInputCallback,
1585
                               kAudioUnitScope_Output,
1586
                               INPUT_ELEMENT,
1587
                               &rcbs,
1588
                               sizeof(rcbs)) );
1589

    
1590
    /* channel mapping. */
1591
    if(inChannelMap)
1592
    {
1593
        UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1594

    
1595
        //for each channel of desired input, map the channel from
1596
        //the device's output channel.
1597
        ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1598
                                kAudioOutputUnitProperty_ChannelMap,
1599
                                kAudioUnitScope_Output,
1600
                                INPUT_ELEMENT,
1601
                                inChannelMap,
1602
                                mapSize));
1603
    }
1604
    if(outChannelMap)
1605
    {
1606
        UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1607

    
1608
        //for each channel of desired output, map the channel from
1609
        //the device's output channel.
1610
        ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1611
                                kAudioOutputUnitProperty_ChannelMap,
1612
                                kAudioUnitScope_Output,
1613
                                OUTPUT_ELEMENT,
1614
                                outChannelMap,
1615
                                mapSize));
1616
    }
1617
    /* initialize the audio unit */
1618
    ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1619

    
1620
    if( inStreamParams && outStreamParams )
1621
    {
1622
        VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1623
    }
1624
    else if( inStreamParams )
1625
    {
1626
        VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1627
    }
1628
    else if( outStreamParams )
1629
    {
1630
        VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1631
    }
1632
    return paNoError;
1633
#undef ERR_WRAP
1634

    
1635
    error:
1636
#ifndef AUDIO_COMPONENT_FIX
1637
       CloseComponent( *audioUnit );
1638
#else
1639
       AudioComponentInstanceDispose( *audioUnit );
1640
#endif
1641
       *audioUnit = NULL;
1642
       if( result )
1643
          return PaMacCore_SetError( result, line, 1 );
1644
       return paResult;
1645
}
1646

    
1647
/* =================================================================================================== */
1648

    
1649
static UInt32 CalculateOptimalBufferSize( PaMacAUHAL *auhalHostApi,
1650
                                  const PaStreamParameters *inputParameters,
1651
                                  const PaStreamParameters *outputParameters,
1652
                                  UInt32 fixedInputLatency,
1653
                                  UInt32 fixedOutputLatency,
1654
                                  double sampleRate,
1655
                                  UInt32 requestedFramesPerBuffer )
1656
{
1657
    UInt32 resultBufferSizeFrames = 0;  
1658
    // Use maximum of suggested input and output latencies.
1659
    if( inputParameters )
1660
    {
1661
        UInt32 suggestedLatencyFrames = inputParameters->suggestedLatency * sampleRate;
1662
        // Calculate a buffer size assuming we are double buffered.
1663
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedInputLatency;
1664
        // Prevent negative latency.
1665
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );       
1666
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1667
    }
1668
    if( outputParameters )
1669
    {        
1670
        UInt32 suggestedLatencyFrames = outputParameters->suggestedLatency * sampleRate;
1671
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedOutputLatency;
1672
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );
1673
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1674
    }
1675
    
1676
    // can't have zero frames. code to round up to next user buffer requires non-zero
1677
    resultBufferSizeFrames = MAX( resultBufferSizeFrames, 1 );
1678
    
1679
    if( requestedFramesPerBuffer != paFramesPerBufferUnspecified )
1680
    {
1681
        // make host buffer the next highest integer multiple of user frames per buffer
1682
        UInt32 n = (resultBufferSizeFrames + requestedFramesPerBuffer - 1) / requestedFramesPerBuffer;
1683
        resultBufferSizeFrames = n * requestedFramesPerBuffer;
1684

    
1685
        
1686
        // FIXME: really we should be searching for a multiple of requestedFramesPerBuffer
1687
        // that is >= suggested latency and also fits within device buffer min/max
1688
        
1689
    }else{
1690
            VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1691
            resultBufferSizeFrames ) );
1692
    }
1693
    
1694
    // Clip to the capabilities of the device.
1695
    if( inputParameters )
1696
    {
1697
        ClipToDeviceBufferSize( auhalHostApi->devIds[inputParameters->device],
1698
                               true, // In the old code isInput was false!
1699
                               resultBufferSizeFrames, &resultBufferSizeFrames );
1700
    }
1701
    if( outputParameters )
1702
    {
1703
        ClipToDeviceBufferSize( auhalHostApi->devIds[outputParameters->device],
1704
                               false, resultBufferSizeFrames, &resultBufferSizeFrames );
1705
    }
1706
    VDBUG(("After querying hardware, setting block size to %ld.\n", resultBufferSizeFrames));
1707

    
1708
    return resultBufferSizeFrames;
1709
}
1710

    
1711
/* =================================================================================================== */
1712
/* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
1713
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1714
                           PaStream** s,
1715
                           const PaStreamParameters *inputParameters,
1716
                           const PaStreamParameters *outputParameters,
1717
                           double sampleRate,
1718
                           unsigned long requestedFramesPerBuffer,
1719
                           PaStreamFlags streamFlags,
1720
                           PaStreamCallback *streamCallback,
1721
                           void *userData )
1722
{
1723
    PaError result = paNoError;
1724
    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1725
    PaMacCoreStream *stream = 0;
1726
    int inputChannelCount, outputChannelCount;
1727
    PaSampleFormat inputSampleFormat, outputSampleFormat;
1728
    PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1729
    UInt32 fixedInputLatency = 0;
1730
    UInt32 fixedOutputLatency = 0;
1731
    // Accumulate contributions to latency in these variables.
1732
    UInt32 inputLatencyFrames = 0;
1733
    UInt32 outputLatencyFrames = 0;
1734
    UInt32 suggestedLatencyFramesPerBuffer = requestedFramesPerBuffer;
1735
    
1736
    VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1737
                inputParameters  ? inputParameters->channelCount  : -1,
1738
                inputParameters  ? inputParameters->sampleFormat  : -1,
1739
                outputParameters ? outputParameters->channelCount : -1,
1740
                outputParameters ? outputParameters->sampleFormat : -1,
1741
                (float) sampleRate,
1742
                requestedFramesPerBuffer ));
1743
    VDBUG( ("Opening Stream.\n") );
1744
        
1745
    /* These first few bits of code are from paSkeleton with few modifications. */
1746
    if( inputParameters )
1747
    {
1748
        inputChannelCount = inputParameters->channelCount;
1749
        inputSampleFormat = inputParameters->sampleFormat;
1750

    
1751
                /* @todo Blocking read/write on Mac is not yet supported. */
1752
                if( !streamCallback && inputSampleFormat & paNonInterleaved )
1753
                {
1754
                        return paSampleFormatNotSupported;
1755
                }
1756
                
1757
        /* unless alternate device specification is supported, reject the use of
1758
            paUseHostApiSpecificDeviceSpecification */
1759

    
1760
        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1761
            return paInvalidDevice;
1762

    
1763
        /* check that input device can support inputChannelCount */
1764
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1765
            return paInvalidChannelCount;
1766

    
1767
        /* Host supports interleaved float32 */
1768
        hostInputSampleFormat = paFloat32;
1769
    }
1770
    else
1771
    {
1772
        inputChannelCount = 0;
1773
        inputSampleFormat = hostInputSampleFormat = paFloat32; /* Surpress 'uninitialised var' warnings. */
1774
    }
1775

    
1776
    if( outputParameters )
1777
    {
1778
        outputChannelCount = outputParameters->channelCount;
1779
        outputSampleFormat = outputParameters->sampleFormat;
1780
        
1781
                /* @todo Blocking read/write on Mac is not yet supported. */
1782
                if( !streamCallback && outputSampleFormat & paNonInterleaved )
1783
                {
1784
                        return paSampleFormatNotSupported;
1785
                }
1786
                
1787
        /* unless alternate device specification is supported, reject the use of
1788
            paUseHostApiSpecificDeviceSpecification */
1789

    
1790
        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1791
            return paInvalidDevice;
1792

    
1793
        /* check that output device can support inputChannelCount */
1794
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1795
            return paInvalidChannelCount;
1796

    
1797
        /* Host supports interleaved float32 */
1798
        hostOutputSampleFormat = paFloat32;
1799
    }
1800
    else
1801
    {
1802
        outputChannelCount = 0;
1803
        outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Surpress 'uninitialized var' warnings. */
1804
    }
1805

    
1806
    /* validate platform specific flags */
1807
    if( (streamFlags & paPlatformSpecificFlags) != 0 )
1808
        return paInvalidFlag; /* unexpected platform specific flag */
1809

    
1810
    stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1811
    if( !stream )
1812
    {
1813
        result = paInsufficientMemory;
1814
        goto error;
1815
    }
1816

    
1817
    /* If we fail after this point, we my be left in a bad state, with
1818
       some data structures setup and others not. So, first thing we
1819
       do is initialize everything so that if we fail, we know what hasn't
1820
       been touched.
1821
     */
1822
    bzero( stream, sizeof( PaMacCoreStream ) );
1823
    
1824
    /*
1825
    stream->blio.inputRingBuffer.buffer = NULL;
1826
    stream->blio.outputRingBuffer.buffer = NULL;
1827
    stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1828
    stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1829
    stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1830
    stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1831
    */
1832

    
1833
    /* assert( streamCallback ) ; */ /* only callback mode is implemented */
1834
    if( streamCallback )
1835
    {
1836
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1837
                                        &auhalHostApi->callbackStreamInterface,
1838
                                        streamCallback, userData );
1839
    }
1840
    else
1841
    {
1842
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1843
                                        &auhalHostApi->blockingStreamInterface,
1844
                                        BlioCallback, &stream->blio );
1845
    }
1846

    
1847
    PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1848

    
1849
    
1850
    if( inputParameters )
1851
    {
1852
        CalculateFixedDeviceLatency( auhalHostApi->devIds[inputParameters->device], true, &fixedInputLatency );
1853
        inputLatencyFrames += fixedInputLatency;
1854
    }
1855
    if( outputParameters )
1856
    {        
1857
        CalculateFixedDeviceLatency( auhalHostApi->devIds[outputParameters->device], false, &fixedOutputLatency );
1858
        outputLatencyFrames += fixedOutputLatency;
1859

    
1860
    }
1861
    
1862
    suggestedLatencyFramesPerBuffer = CalculateOptimalBufferSize( auhalHostApi, inputParameters, outputParameters,
1863
                                                                 fixedInputLatency, fixedOutputLatency,
1864
                                                                 sampleRate, requestedFramesPerBuffer );
1865
    if( requestedFramesPerBuffer == paFramesPerBufferUnspecified )
1866
        {
1867
        requestedFramesPerBuffer = suggestedLatencyFramesPerBuffer;
1868
    }
1869

    
1870
    /* -- Now we actually open and setup streams. -- */
1871
    if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1872
    { /* full duplex. One device. */
1873
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1874
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1875
       result = OpenAndSetupOneAudioUnit( stream,
1876
                                          inputParameters,
1877
                                          outputParameters,
1878
                                          suggestedLatencyFramesPerBuffer,
1879
                                          &inputFramesPerBuffer,
1880
                                          &outputFramesPerBuffer,
1881
                                          auhalHostApi,
1882
                                          &(stream->inputUnit),
1883
                                          &(stream->inputSRConverter),
1884
                                          &(stream->inputDevice),
1885
                                          sampleRate,
1886
                                          stream );
1887
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1888
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1889
       stream->outputUnit = stream->inputUnit;
1890
       stream->outputDevice = stream->inputDevice;
1891
       if( result != paNoError )
1892
           goto error;
1893
    }
1894
    else
1895
    { /* full duplex, different devices OR simplex */
1896
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1897
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1898
       result = OpenAndSetupOneAudioUnit( stream,
1899
                                          NULL,
1900
                                          outputParameters,
1901
                                          suggestedLatencyFramesPerBuffer,
1902
                                          NULL,
1903
                                          &outputFramesPerBuffer,
1904
                                          auhalHostApi,
1905
                                          &(stream->outputUnit),
1906
                                          NULL,
1907
                                          &(stream->outputDevice),
1908
                                          sampleRate,
1909
                                          stream );
1910
       if( result != paNoError )
1911
           goto error;
1912
       result = OpenAndSetupOneAudioUnit( stream,
1913
                                          inputParameters,
1914
                                          NULL,
1915
                                          suggestedLatencyFramesPerBuffer,
1916
                                          &inputFramesPerBuffer,
1917
                                          NULL,
1918
                                          auhalHostApi,
1919
                                          &(stream->inputUnit),
1920
                                          &(stream->inputSRConverter),
1921
                                          &(stream->inputDevice),
1922
                                          sampleRate,
1923
                                          stream );
1924
       if( result != paNoError )
1925
           goto error;
1926
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1927
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1928
    }
1929
    
1930
    inputLatencyFrames += stream->inputFramesPerBuffer;
1931
    outputLatencyFrames += stream->outputFramesPerBuffer;
1932
    
1933
    if( stream->inputUnit ) {
1934
       const size_t szfl = sizeof(float);
1935
       /* setup the AudioBufferList used for input */
1936
       bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1937
       stream->inputAudioBufferList.mNumberBuffers = 1;
1938
       stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1939
                 = inputChannelCount;
1940
       stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1941
                 = stream->inputFramesPerBuffer*inputChannelCount*szfl;
1942
       stream->inputAudioBufferList.mBuffers[0].mData
1943
                 = (float *) calloc(
1944
                               stream->inputFramesPerBuffer*inputChannelCount,
1945
                               szfl );
1946
       if( !stream->inputAudioBufferList.mBuffers[0].mData )
1947
       {
1948
          result = paInsufficientMemory;
1949
          goto error;
1950
       }
1951
        
1952
       /*
1953
        * If input and output devs are different or we are doing SR conversion,
1954
        * we also need a ring buffer to store input data while waiting for
1955
        * output data.
1956
        */
1957
       if( (stream->outputUnit && (stream->inputUnit != stream->outputUnit))
1958
           || stream->inputSRConverter )
1959
       {
1960
          /* May want the ringSize or initial position in
1961
             ring buffer to depend somewhat on sample rate change */
1962

    
1963
          void *data;
1964
          long ringSize;
1965

    
1966
          ringSize = computeRingBufferSize( inputParameters,
1967
                                            outputParameters,
1968
                                            stream->inputFramesPerBuffer,
1969
                                            stream->outputFramesPerBuffer,
1970
                                            sampleRate );
1971
          /*ringSize <<= 4; *//*16x bigger, for testing */
1972

    
1973

    
1974
          /*now, we need to allocate memory for the ring buffer*/
1975
          data = calloc( ringSize, szfl*inputParameters->channelCount );
1976
          if( !data )
1977
          {
1978
             result = paInsufficientMemory;
1979
             goto error;
1980
          }
1981

    
1982
          /* now we can initialize the ring buffer */
1983
          result = PaUtil_InitializeRingBuffer( &stream->inputRingBuffer, szfl*inputParameters->channelCount, ringSize, data );
1984
          if( result != 0 )
1985
          {
1986
              /* The only reason this should fail is if ringSize is not a power of 2, which we do not anticipate happening. */
1987
              result = paUnanticipatedHostError;
1988
              free(data);
1989
              goto error;
1990
          }
1991

    
1992
          /* advance the read point a little, so we are reading from the
1993
             middle of the buffer */
1994
          if( stream->outputUnit )
1995
             PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize / RING_BUFFER_ADVANCE_DENOMINATOR );
1996
           
1997
           // Just adds to input latency between input device and PA full duplex callback.
1998
           inputLatencyFrames += ringSize;
1999
       }
2000
    }
2001

    
2002
    /* -- initialize Blio Buffer Processors -- */
2003
    if( !streamCallback )
2004
    {
2005
       long ringSize;
2006

    
2007
       ringSize = computeRingBufferSize( inputParameters,
2008
                                         outputParameters,
2009
                                         stream->inputFramesPerBuffer,
2010
                                         stream->outputFramesPerBuffer,
2011
                                         sampleRate );
2012
       result = initializeBlioRingBuffers( &stream->blio,
2013
              inputParameters ? inputParameters->sampleFormat : 0,
2014
              outputParameters ? outputParameters->sampleFormat : 0,
2015
              ringSize,
2016
              inputParameters ? inputChannelCount : 0,
2017
              outputParameters ? outputChannelCount : 0 ) ;
2018
       if( result != paNoError )
2019
          goto error;
2020
        
2021
        inputLatencyFrames += ringSize;
2022
        outputLatencyFrames += ringSize;
2023
        
2024
    }
2025

    
2026
    /* -- initialize Buffer Processor -- */
2027
    {
2028
       unsigned long maxHostFrames = stream->inputFramesPerBuffer;
2029
       if( stream->outputFramesPerBuffer > maxHostFrames )
2030
          maxHostFrames = stream->outputFramesPerBuffer;
2031
       result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
2032
                 inputChannelCount, inputSampleFormat,
2033
                 hostInputSampleFormat,
2034
                 outputChannelCount, outputSampleFormat,
2035
                 hostOutputSampleFormat,
2036
                 sampleRate,
2037
                 streamFlags,
2038
                 requestedFramesPerBuffer,
2039
                 /* If sample rate conversion takes place, the buffer size
2040
                    will not be known. */
2041
                 maxHostFrames,
2042
                 stream->inputSRConverter
2043
                              ? paUtilUnknownHostBufferSize
2044
                              : paUtilBoundedHostBufferSize,
2045
                 streamCallback ? streamCallback : BlioCallback,
2046
                 streamCallback ? userData : &stream->blio );
2047
       if( result != paNoError )
2048
           goto error;
2049
    }
2050
    stream->bufferProcessorIsInitialized = TRUE;
2051

    
2052
    // Calculate actual latency from the sum of individual latencies.
2053
    if( inputParameters ) 
2054
    {
2055
        inputLatencyFrames += PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor);
2056
        stream->streamRepresentation.streamInfo.inputLatency = inputLatencyFrames / sampleRate;
2057
    }
2058
    else
2059
    {
2060
        stream->streamRepresentation.streamInfo.inputLatency = 0.0;
2061
    }
2062
    
2063
    if( outputParameters ) 
2064
    {
2065
        outputLatencyFrames += PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor);
2066
        stream->streamRepresentation.streamInfo.outputLatency = outputLatencyFrames / sampleRate;
2067
    }
2068
    else
2069
    {
2070
        stream->streamRepresentation.streamInfo.outputLatency = 0.0;
2071
    }
2072
    
2073
    stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
2074

    
2075
    stream->sampleRate = sampleRate;
2076
    
2077
    stream->userInChan  = inputChannelCount;
2078
    stream->userOutChan = outputChannelCount;
2079

    
2080
    // Setup property listeners for timestamp and latency calculations.
2081
        pthread_mutex_init( &stream->timingInformationMutex, NULL );
2082
        stream->timingInformationMutexIsInitialized = 1;
2083
    InitializeDeviceProperties( &stream->inputProperties );     // zeros the struct. doesn't actually init it to useful values
2084
    InitializeDeviceProperties( &stream->outputProperties );    // zeros the struct. doesn't actually init it to useful values
2085
        if( stream->outputUnit )
2086
    {
2087
        Boolean isInput = FALSE;
2088
        
2089
        // Start with the current values for the device properties.
2090
        // Init with nominal sample rate. Use actual sample rate where available
2091
        
2092
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2093
                stream, stream->outputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2094
        if( result )
2095
            goto error; /* fail if we can't even get a nominal device sample rate */
2096
        
2097
        UpdateSampleRateFromDeviceProperty( stream, stream->outputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2098
        
2099
        SetupDevicePropertyListeners( stream, stream->outputDevice, isInput );
2100
    }
2101
        if( stream->inputUnit )
2102
    {
2103
        Boolean isInput = TRUE;
2104
       
2105
        // as above
2106
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2107
                stream, stream->inputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2108
        if( result )
2109
            goto error;
2110
        
2111
        UpdateSampleRateFromDeviceProperty( stream, stream->inputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2112
        
2113
        SetupDevicePropertyListeners( stream, stream->inputDevice, isInput );
2114
        }
2115
    UpdateTimeStampOffsets( stream );
2116
    // Setup timestamp copies to be used by audio callback.
2117
    stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
2118
    stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
2119
    stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
2120

    
2121
    stream->state = STOPPED;
2122
    stream->xrunFlags = 0;
2123

    
2124
    *s = (PaStream*)stream;
2125

    
2126
    return result;
2127

    
2128
error:
2129
    CloseStream( stream );
2130
    return result;
2131
}
2132

    
2133

    
2134
#define HOST_TIME_TO_PA_TIME( x ) ( AudioConvertHostTimeToNanos( (x) ) * 1.0E-09) /* convert to nanoseconds and then to seconds */

/*
 * Return the stream time for the PortAudio stream.
 *
 * Note: the stream parameter is unused here — the value returned is the
 * current CoreAudio host clock (AudioGetCurrentHostTime) converted to
 * seconds, not a per-stream sample position. All streams on this host API
 * therefore share the same time base.
 */
PaTime GetStreamTime( PaStream *s )
{
        return HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() ); 
}
2140

    
2141
#define RING_BUFFER_EMPTY (1000)
2142

    
2143
static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter, 
2144
                             UInt32*ioDataSize, 
2145
                             void** outData, 
2146
                             void*inUserData )
2147
{
2148
   void *dummyData;
2149
   ring_buffer_size_t dummySize;
2150
   PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
2151

    
2152
   VVDBUG(("ringBufferIOProc()\n"));
2153

    
2154
   if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
2155
      *outData = NULL;
2156
      *ioDataSize = 0;
2157
      return RING_BUFFER_EMPTY;
2158
   }
2159
   assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
2160
   assert( ( (*ioDataSize) / rb->elementSizeBytes ) * rb->elementSizeBytes == (*ioDataSize) ) ;
2161
   (*ioDataSize) /= rb->elementSizeBytes ;
2162
   PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
2163
                                    outData, (ring_buffer_size_t *)ioDataSize, 
2164
                                    &dummyData, &dummySize );
2165
   assert( *ioDataSize );
2166
   PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
2167
   (*ioDataSize) *= rb->elementSizeBytes ;
2168

    
2169
   return noErr;
2170
}
2171

    
2172
/*
 * Called by the AudioUnit API to process audio from the sound card.
 * This is where the magic happens.
 *
 * This single IOProc serves every stream configuration:
 *  - full duplex on one device (no SR conversion): lowest-latency path,
 *    input is rendered and output filled in the same callback;
 *  - output side of full duplex with separate devices and/or SR
 *    conversion: output is filled here, input is pulled from the ring
 *    buffer (optionally through the AudioConverter);
 *  - input side: input is rendered and pushed into the ring buffer, or
 *    (simplex input) processed directly.
 *
 * inRefCon is the PaMacCoreStream; inBusNumber selects input vs. render
 * (isRender below). Returns noErr in all cases — on callback-requested
 * stop or render error it stops the AudioUnits and marks the stream
 * CALLBACK_STOPPED instead of propagating an error code.
 */
/* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestions for cleaning it up, I'm all ears. */
static OSStatus AudioIOProc( void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData )
{
   unsigned long framesProcessed     = 0;
   PaStreamCallbackTimeInfo timeInfo = {0,0,0};
   PaMacCoreStream *stream           = (PaMacCoreStream*)inRefCon;
   const bool isRender               = inBusNumber == OUTPUT_ELEMENT;
   int callbackResult                = paContinue ;
   double hostTimeStampInPaTime      = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime);

   VVDBUG(("AudioIOProc()\n"));

   PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );

   /* -----------------------------------------------------------------*\
      This output may be useful for debugging,
      But printing during the callback is a bad enough idea that
      this is not enabled by enabling the usual debugging calls.
   \* -----------------------------------------------------------------*/
   /*
   static int renderCount = 0;
   static int inputCount = 0;
   printf( "-------------------  starting render/input\n" );
   if( isRender )
      printf("Render callback (%d):\t", ++renderCount);
   else
      printf("Input callback  (%d):\t", ++inputCount);
   printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );

   printf( "--- inBusNumber: %lu\n", inBusNumber );
   printf( "--- inNumberFrames: %lu\n", inNumberFrames );
   printf( "--- %x ioData\n", (unsigned) ioData );
   if( ioData )
   {
      int i=0;
      printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
      for( i=0; i<ioData->mNumberBuffers; ++i )
         printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
   }
      ----------------------------------------------------------------- */

        /* compute PaStreamCallbackTimeInfo */
        
        /* trylock, not lock: we must never block in a real-time audio
           callback. If the listener thread holds the mutex we simply keep
           using the previous snapshot of the timestamp offsets. */
        if( pthread_mutex_trylock( &stream->timingInformationMutex ) == 0 ){
                /* snapshot the ioproc copy of timing information */
                stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
                stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
                stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
                pthread_mutex_unlock( &stream->timingInformationMutex );
        }
        
        /* For timeInfo.currentTime we could calculate current time backwards from the HAL audio 
         output time to give a more accurate impression of the current timeslice but it doesn't 
         seem worth it at the moment since other PA host APIs don't do any better.
         */
        timeInfo.currentTime = HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() );
        
        /*
         For an input HAL AU, inTimeStamp is the time the samples are received from the hardware,
         for an output HAL AU inTimeStamp is the time the samples are sent to the hardware. 
         PA expresses timestamps in terms of when the samples enter the ADC or leave the DAC
         so we add or subtract kAudioDevicePropertyLatency below.
         */
        
        /* FIXME: not sure what to do below if the host timestamps aren't valid (kAudioTimeStampHostTimeValid isn't set)
         Could ask on CA mailing list if it is possible for it not to be set. If so, could probably grab a now timestamp
         at the top and compute from there (modulo scheduling jitter) or ask on mailing list for other options. */
        
        if( isRender )
        {
                if( stream->inputUnit ) /* full duplex */
                {
                        if( stream->inputUnit == stream->outputUnit ) /* full duplex AUHAL IOProc */
                        {
                // Ross and Phil agreed that the following calculation is correct based on an email from Jeff Moore:
                // http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
                // Basically the difference between the Apple output timestamp and the PA timestamp is kAudioDevicePropertyLatency.
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
                                 timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
                        }
                        else /* full duplex with ring-buffer from a separate input AUHAL ioproc */
                        {
                                /* FIXME: take the ring buffer latency into account */
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
                                timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
                        }
                }
                else /* output only */
                {
                        timeInfo.inputBufferAdcTime = 0;
                        timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
                }
        }
        else /* input only */
        {
                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - stream->timestampOffsetInputDevice_ioProcCopy; 
                timeInfo.outputBufferDacTime = 0;
        }
        
   //printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );

   if( isRender && stream->inputUnit == stream->outputUnit
                && !stream->inputSRConverter )
   {
      /* --------- Full Duplex, One Device, no SR Conversion -------
       *
       * This is the lowest latency case, and also the simplest.
       * Input data and output data are available at the same time.
       * we do not use the input SR converter or the input ring buffer.
       *
       */
      OSStatus err = 0;
       unsigned long frames;
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;

      /* -- start processing -- */
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                    &timeInfo,
                                    stream->xrunFlags );
      stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback, which calls the xrunCallback function. It should be in the same thread as the main audio callback, but the apple docs just use the word "usually" so it may be possible to lose an xrun notification, if that callback happens here.

      /* -- compute frames. do some checks -- */
      assert( ioData->mNumberBuffers == 1 );
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );

      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
      /* -- copy and process input data -- */
      err= AudioUnitRender(stream->inputUnit,
                    ioActionFlags,
                    inTimeStamp,
                    INPUT_ELEMENT,
                    inNumberFrames,
                    &stream->inputAudioBufferList );
      if(err != noErr)
      {
        goto stop_stream;
      }

      PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                          0,
                          stream->inputAudioBufferList.mBuffers[0].mData,
                          stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
      /* -- Copy and process output data -- */
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                        0,
                                        ioData->mBuffers[0].mData,
                                        ioData->mBuffers[0].mNumberChannels);
      /* -- complete processing -- */
      framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
   }
   else if( isRender )
   {
      /* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
       *       -- OR Simplex Output
       *
       * This case handles output data as in the full duplex case,
       * and, if there is input data, reads it off the ring buffer 
       * and into the PA buffer processor. If sample rate conversion
       * is required on input, that is done here as well.
       */
       unsigned long frames;
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;

      /* Sometimes, when stopping a duplex stream we get erroneous
         xrun flags, so if this is our last run, clear the flags. */
      int xrunFlags = stream->xrunFlags;
/*
      if( xrunFlags & paInputUnderflow )
         printf( "input underflow.\n" );
      if( xrunFlags & paInputOverflow )
         printf( "input overflow.\n" );
*/
      if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
         xrunFlags = 0;

      /* -- start processing -- */
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                    &timeInfo,
                                    xrunFlags );
      stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */

      /* -- Copy and process output data -- */
      assert( ioData->mNumberBuffers == 1 );
      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                     0,
                                     ioData->mBuffers[0].mData,
                                     ioData->mBuffers[0].mNumberChannels);

      /* -- copy and process input data, and complete processing -- */
      if( stream->inputUnit ) {
         const int flsz = sizeof( float );
         /* Here, we read the data out of the ring buffer, through the
            audio converter. */
         int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
         long bytesPerFrame = flsz * inChan;
          
         if( stream->inputSRConverter )
         {
               OSStatus err;
               UInt32 size;
               /* VLA sized for one callback's worth of input at the output rate. */
               float data[ inChan * frames ];
               size = sizeof( data );
               err = AudioConverterFillBuffer( 
                             stream->inputSRConverter,
                             ringBufferIOProc,
                             &stream->inputRingBuffer,
                             &size,
                             (void *)&data );
               if( err == RING_BUFFER_EMPTY )
               { /* the ring buffer callback underflowed */
                  err = 0;
                  /* zero the portion the converter could not fill */
                  bzero( ((char *)data) + size, sizeof(data)-size );
                  /* The ring buffer can underflow normally when the stream is stopping.
                   * So only report an error if the stream is active. */
                  if( stream->state == ACTIVE )
                  {
                      stream->xrunFlags |= paInputUnderflow;
                  }
               }
               ERR( err );
               if(err != noErr)
               {
                 goto stop_stream;
               }

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
         }
         else
         {
            /* Without the AudioConverter is actually a bit more complex
               because we have to do a little buffer processing that the
               AudioConverter would otherwise handle for us. */
            void *data1, *data2;
            ring_buffer_size_t size1, size2;
            ring_buffer_size_t framesReadable = PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
                                             frames,
                                             &data1, &size1,
                                             &data2, &size2 );
            if( size1 == frames ) {
               /* simplest case: all in first buffer */
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
            } else if( framesReadable < frames ) {
                
                long sizeBytes1 = size1 * bytesPerFrame;
                long sizeBytes2 = size2 * bytesPerFrame;
               /*we underflowed. take what data we can, zero the rest.*/
               unsigned char data[ frames * bytesPerFrame ];
               if( size1 > 0 )
               {   
                   memcpy( data, data1, sizeBytes1 );
               }
               if( size2 > 0 )
               {
                   memcpy( data+sizeBytes1, data2, sizeBytes2 );
               }
               bzero( data+sizeBytes1+sizeBytes2, (frames*bytesPerFrame) - sizeBytes1 - sizeBytes2 );

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
                                                  framesReadable );
               /* flag underflow */
               stream->xrunFlags |= paInputUnderflow;
            } else {
               /*we got all the data, but split between buffers*/
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), size1 );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor), size2 );
               PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data2,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, framesReadable );
            }
         }
      } else {
         /* simplex output: no input side to process */
         framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
      }

   }
   else
   {
      /* ------------------ Input
       *
       * First, we read off the audio data and put it in the ring buffer.
       * if this is an input-only stream, we need to process it more,
       * otherwise, we let the output case deal with it.
       */
      OSStatus err = 0;
      int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
      /* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
      /* -10874 is kAudioUnitErr_TooManyFramesToProcess — presumably the
         halving retry works around devices offering more frames than the
         AU will render; TODO confirm against CoreAudio error list. */
      do {
         err= AudioUnitRender(stream->inputUnit,
                 ioActionFlags,
                 inTimeStamp,
                 INPUT_ELEMENT,
                 inNumberFrames,
                 &stream->inputAudioBufferList );
         if( err == -10874 )
            inNumberFrames /= 2;
      } while( err == -10874 && inNumberFrames > 1 );
      ERR( err );
      if(err != noErr)
      {
          goto stop_stream;
      }

      if( stream->inputSRConverter || stream->outputUnit )
      {
         /* If this is duplex or we use a converter, put the data
            into the ring buffer. */
          ring_buffer_size_t framesWritten = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
                                            stream->inputAudioBufferList.mBuffers[0].mData,
                                            inNumberFrames );
         if( framesWritten != inNumberFrames )
         {
             stream->xrunFlags |= paInputOverflow ;
         }
      }
      else
      {
         /* for simplex input w/o SR conversion,
            just pop the data into the buffer processor.*/
         PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                              &timeInfo,
                              stream->xrunFlags );
         stream->xrunFlags = 0;

         PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
         PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                             0,
                             stream->inputAudioBufferList.mBuffers[0].mData,
                             chan );
         framesProcessed =
              PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                          &callbackResult );
      }
      if( !stream->outputUnit && stream->inputSRConverter )
      {
         /* ------------------ Simplex Input w/ SR Conversion
          *
          * if this is a simplex input stream, we need to read off the buffer,
          * do our sample rate conversion and pass the results to the buffer
          * processor.
          * The logic here is complicated somewhat by the fact that we don't
          * know how much data is available, so we loop on reasonably sized
          * chunks, and let the BufferProcessor deal with the rest.
          *
          */
         /* This might be too big or small depending on SR conversion. */
         float data[ chan * inNumberFrames ];
         OSStatus err;
         do
         { /* Run the buffer processor until we are out of data. */
            UInt32 size;
            long f;

            size = sizeof( data );
            err = AudioConverterFillBuffer( 
                          stream->inputSRConverter,
                          ringBufferIOProc,
                          &stream->inputRingBuffer,
                          &size,
                          (void *)data );
            if( err != RING_BUFFER_EMPTY )
               ERR( err );
            if( err != noErr && err != RING_BUFFER_EMPTY )
            {
                goto stop_stream;
            }

            /* size is a byte count on return; convert to frames */
            f = size / ( chan * sizeof(float) );
            PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
            if( f )
            {
               PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                             &timeInfo,
                                             stream->xrunFlags );
               stream->xrunFlags = 0;

               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                0,
                                data,
                                chan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
            }
         } while( callbackResult == paContinue && !err );
      }
   }

    // Should we return successfully or fall through to stopping the stream?
    if( callbackResult == paContinue )
    {
        PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
        return noErr;
    }

stop_stream:
    stream->state = CALLBACK_STOPPED ;
    if( stream->outputUnit )
        AudioOutputUnitStop(stream->outputUnit);
    if( stream->inputUnit )
        AudioOutputUnitStop(stream->inputUnit);

    PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
    return noErr;
}
2629

    
2630
/*
    When CloseStream() is called, the multi-api layer ensures that
    the stream has already been stopped or aborted.
*/
static PaError CloseStream( PaStream* s )
{
    /* This may be called from a failed OpenStream.
       Therefore, each piece of info is treated seperately. */
    PaError result = paNoError;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;

    VVDBUG(("CloseStream()\n"));
    VDBUG( ( "Closing stream.\n" ) );

    if( stream ) {

        /* First detach the per-device property listeners registered at open
           time, for whichever directions this stream actually used. */
        if( stream->outputUnit )
        {
            Boolean isInput = FALSE;
            CleanupDevicePropertyListeners( stream, stream->outputDevice, isInput );
        }

        if( stream->inputUnit )
        {
            Boolean isInput = TRUE;
            CleanupDevicePropertyListeners( stream, stream->inputDevice, isInput );
        }

       /* The xrun (processor-overload) listener is shared across streams on
          the same device; only remove the HAL listener when this was the
          last stream on the list. */
       if( stream->outputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->outputDevice,
                                                0,
                                                false,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
       /* Same for the input side, unless input and output share one unit
          (in which case the removal above already covered it). */
       if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->inputDevice,
                                                0,
                                                true,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
       /* Dispose the output unit only when it is distinct from the input
          unit; a shared full-duplex unit is disposed once, below, as the
          input unit. */
       if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
          AudioUnitUninitialize( stream->outputUnit );
#ifndef AUDIO_COMPONENT_FIX
          CloseComponent( stream->outputUnit );
#else
          AudioComponentInstanceDispose( stream->outputUnit );
#endif
       }
       stream->outputUnit = NULL;
       if( stream->inputUnit )
       {
          AudioUnitUninitialize( stream->inputUnit );
#ifndef AUDIO_COMPONENT_FIX
          CloseComponent( stream->inputUnit );
#else
          AudioComponentInstanceDispose( stream->inputUnit );
#endif
          stream->inputUnit = NULL;
       }
       /* Free the input-side scratch storage allocated at open time. */
       if( stream->inputRingBuffer.buffer )
          free( (void *) stream->inputRingBuffer.buffer );
       stream->inputRingBuffer.buffer = NULL;
       /*TODO: is there more that needs to be done on error
               from AudioConverterDispose?*/
       if( stream->inputSRConverter )
          ERR( AudioConverterDispose( stream->inputSRConverter ) );
       stream->inputSRConverter = NULL;
       if( stream->inputAudioBufferList.mBuffers[0].mData )
          free( stream->inputAudioBufferList.mBuffers[0].mData );
       stream->inputAudioBufferList.mBuffers[0].mData = NULL;

       /* NOTE(review): an error here returns early and leaks the stream
          struct and buffer processor — presumably acceptable because it
          only fails on misuse; confirm against destroyBlioRingBuffers. */
       result = destroyBlioRingBuffers( &stream->blio );
       if( result )
          return result;
       if( stream->bufferProcessorIsInitialized )
          PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );

       if( stream->timingInformationMutexIsInitialized )
          pthread_mutex_destroy( &stream->timingInformationMutex );

       PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
       PaUtil_FreeMemory( stream );
    }

    return result;
}
2722

    
2723
static PaError StartStream( PaStream *s )
{
    /* Reset per-run processing state, mark the stream ACTIVE, then start
       the AUHAL unit(s). Any CoreAudio failure is converted to a PaError
       via ERR() and returned immediately. */
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    OSStatus err = noErr;
    VVDBUG(("StartStream()\n"));
    VDBUG( ( "Starting stream.\n" ) );

    /*FIXME: maybe want to do this on close/abort for faster start? */
    PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
    if( stream->inputSRConverter )
    {
        err = AudioConverterReset( stream->inputSRConverter );
        if( err != noErr )
            return ERR( err );
    }

    /* -- start -- */
    stream->state = ACTIVE;
    if( stream->inputUnit )
    {
        err = AudioOutputUnitStart( stream->inputUnit );
        if( err != noErr )
            return ERR( err );
    }
    /* A shared full-duplex unit was already started above as the input
       unit; only start a distinct output unit. */
    if( stream->outputUnit && stream->outputUnit != stream->inputUnit )
    {
        err = AudioOutputUnitStart( stream->outputUnit );
        if( err != noErr )
            return ERR( err );
    }

    return paNoError;
}
2749

    
2750
// it's not clear from appl's docs that this really waits
2751
// until all data is flushed.
2752
static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
2753
{
2754
    Boolean isRunning = 1;
2755
    while( isRunning ) {
2756
       UInt32 s = sizeof( isRunning );
2757
       ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element,  &isRunning, &s );
2758
       if( err )
2759
          return err;
2760
       Pa_Sleep( 100 );
2761
    }
2762
    return noErr;
2763
}
2764

    
2765
static PaError FinishStoppingStream( PaMacCoreStream *stream )
{
    /* Common tail of StopStream()/AbortStream(): stop and reset the AUHAL
       unit(s), scrub the input ring buffer, and reset the blocking-I/O
       (blio) buffers so the stream can be restarted cleanly. */
    OSStatus result = noErr;
    PaError paErr;

#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
    /* -- stop and reset -- */
    if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
    {
       /* Full duplex on one shared unit: stop once, then wait for both the
          output (element 0) and input (element 1) sides to report not
          running before resetting each element. */
       ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
    }
    else
    {
       /* Separate units: stop, drain, and reset each one independently
          (input uses element 1, output element 0). */
       if( stream->inputUnit )
       {
          ERR_WRAP(AudioOutputUnitStop(stream->inputUnit) );
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
          ERR_WRAP(AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1));
       }
       if( stream->outputUnit )
       {
          ERR_WRAP(AudioOutputUnitStop(stream->outputUnit));
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
          ERR_WRAP(AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0));
       }
    }
    if( stream->inputRingBuffer.buffer ) {
       /* Discard any stale input and zero the storage so a restart does
          not replay old audio. */
       PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
       bzero( (void *)stream->inputRingBuffer.buffer,
              stream->inputRingBuffer.bufferSize );
       /* advance the write point a little, so we are reading from the
          middle of the buffer. We'll need extra at the end because
          testing has shown that this helps. */
       if( stream->outputUnit )
          PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
                                              stream->inputRingBuffer.bufferSize
                                              / RING_BUFFER_ADVANCE_DENOMINATOR );
    }

    stream->xrunFlags = 0;
    stream->state = STOPPED;

    paErr = resetBlioRingBuffers( &stream->blio );
    if( paErr )
       return paErr;

    VDBUG( ( "Stream Stopped.\n" ) );
    return paNoError;
#undef ERR_WRAP
}
2819

    
2820
/* Block until buffer is empty then stop the stream. */
2821
static PaError StopStream( PaStream *s )
2822
{
2823
    PaError paErr;
2824
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
2825
    VVDBUG(("StopStream()\n"));
2826

    
2827
    /* Tell WriteStream to stop filling the buffer. */
2828
    stream->state = STOPPING;
2829

    
2830
    if( stream->userOutChan > 0 ) /* Does this stream do output? */
2831
    {
2832
        size_t maxHostFrames = MAX( stream->inputFramesPerBuffer, stream->outputFramesPerBuffer );
2833
        VDBUG( ("Waiting for write buffer to be drained.\n") );
2834
        paErr = waitUntilBlioWriteBufferIsEmpty( &stream->blio, stream->sampleRate,
2835
                                                maxHostFrames );
2836
        VDBUG( ( "waitUntilBlioWriteBufferIsEmpty returned %d\n", paErr ) );
2837
    }
2838
    return FinishStoppingStream( stream );
2839
}
2840

    
2841
/* Immediately stop the stream. */
2842
static PaError AbortStream( PaStream *s )
2843
{
2844
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
2845
    VDBUG( ( "AbortStream()\n" ) );
2846
    stream->state = STOPPING;
2847
    return FinishStoppingStream( stream );
2848
}
2849

    
2850

    
2851
static PaError IsStreamStopped( PaStream *s )
{
    /* Report 1 only in the fully-stopped state; ACTIVE, STOPPING, and
       callback-stopped states all report 0. */
    PaMacCoreStream *macStream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamStopped()\n"));

    if( macStream->state == STOPPED )
        return 1;
    return 0;
}
2858

    
2859

    
2860
static PaError IsStreamActive( PaStream *s )
{
    /* A stream that is still draining (STOPPING) counts as active, in
       addition to one that is running normally (ACTIVE). */
    PaMacCoreStream *macStream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamActive()\n"));
    return macStream->state == ACTIVE || macStream->state == STOPPING;
}
2866

    
2867

    
2868
static double GetStreamCpuLoad( PaStream* s )
{
    /* Thin forwarder to the common CPU-load measurer that the render
       callbacks update on every buffer. */
    PaMacCoreStream *macStream = (PaMacCoreStream*)s;
    VVDBUG(("GetStreamCpuLoad()\n"));

    return PaUtil_GetCpuLoad( &macStream->cpuLoadMeasurer );
}