To check out this repository, hg clone the following URL, or open it in EasyMercurial or your preferred Mercurial client; an example command is shown below.
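For example, from a terminal (the placeholder below stands in for the clone URL shown on this page, which is not reproduced here):

    hg clone <repository-url> sv-dependency-builds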

The primary repository for this project is hosted at https://github.com/sonic-visualiser/sv-dependency-builds. This repository is a read-only copy that is updated automatically every hour.


src/portaudio_20161030_catalina_patch/src/hostapi/coreaudio/pa_mac_core.c (revision 166:cbd6d7e562c7)


/*
 * Implementation of the PortAudio API for Apple AUHAL
 *
 * PortAudio Portable Real-Time Audio Library
 * Latest Version at: http://www.portaudio.com
 *
 * Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
 * Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
 *
 * Dominic's code was based on code by Phil Burk, Darren Gibbs,
 * Gord Peters, Stephane Letz, and Greg Pfiel.
 *
 * The following people also deserve acknowledgements:
 *
 * Olivier Tristan for feedback and testing
 * Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
 * interface.
 *
 *
 * Based on the Open Source API proposed by Ross Bencina
 * Copyright (c) 1999-2002 Ross Bencina, Phil Burk
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * The text above constitutes the entire PortAudio license; however,
 * the PortAudio community also makes the following non-binding requests:
 *
 * Any person wishing to distribute modifications to the Software is
 * requested to send the modifications to the original developer so that
 * they can be incorporated into the canonical version. It is also
 * requested that these non-binding requests be included along with the
 * license above.
 */

/**
 @file pa_mac_core
 @ingroup hostapi_src
 @author Bjorn Roche
 @brief AUHAL implementation of PortAudio
*/

/* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
 * PaMacCore_SetError() will do this.
 */

#include "pa_mac_core_internal.h"

#include <string.h> /* strlen(), memcmp() etc. */
#include <libkern/OSAtomic.h>

#include "pa_mac_core.h"
#include "pa_mac_core_utilities.h"
#include "pa_mac_core_blocking.h"


#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */

/* This is a reasonable size for a small buffer based on experience. */
#define PA_MAC_SMALL_BUFFER_SIZE    (64)

/* prototypes for functions declared in this file */
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );

/*
 * Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
 * with the requested flags and initializes channel map.
 */
void PaMacCore_SetupStreamInfo(  PaMacCoreStreamInfo *data, const unsigned long flags )
{
   bzero( data, sizeof( PaMacCoreStreamInfo ) );
   data->size = sizeof( PaMacCoreStreamInfo );
   data->hostApiType = paCoreAudio;
   data->version = 0x01;
   data->flags = flags;
   data->channelMap = NULL;
   data->channelMapSize = 0;
}

/*
 * Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
 */
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
{
   data->channelMap = channelMap;
   data->channelMapSize = channelMapSize;
}
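
/* Example (editorial sketch, not part of the upstream source): a host application
 * would typically fill in a PaMacCoreStreamInfo with the helpers above and pass it
 * to PortAudio through the hostApiSpecificStreamInfo field of PaStreamParameters
 * before calling Pa_OpenStream(). The channel map values are illustrative only.
 *
 *     PaMacCoreStreamInfo info;
 *     const SInt32 map[2] = { 1, 0 };   // e.g. swap the two output channels
 *     PaMacCore_SetupStreamInfo( &info, paMacCorePlayNice );
 *     PaMacCore_SetupChannelMap( &info, map, 2 );
 *     outputParameters.hostApiSpecificStreamInfo = &info;   // outputParameters is a PaStreamParameters
 */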
static char *channelName = NULL;
static int channelNameSize = 0;
static bool ensureChannelNameSize( int size )
{
   if( size >= channelNameSize ) {
      free( channelName );
      channelName = (char *) malloc( ( channelNameSize = size ) + 1 );
      if( !channelName ) {
         channelNameSize = 0;
         return false;
      }
   }
   return true;
}
/*
 * Function declared in pa_mac_core.h. Retrieves channel names.
 */
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
{
    struct PaUtilHostApiRepresentation *hostApi;
    PaError err;
    OSStatus error;
    err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
    assert(err == paNoError);
    if( err != paNoError )
        return NULL;
    PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
    AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
    CFStringRef nameRef;

    /* First try with CFString */
    UInt32 size = sizeof(nameRef);
    error = AudioDeviceGetProperty( hostApiDevice,
                                    channelIndex + 1,
                                    input,
                                    kAudioDevicePropertyChannelNameCFString,
                                    &size,
                                    &nameRef );
    if( error )
    {
        /* try the C String */
        size = 0;
        error = AudioDeviceGetPropertyInfo( hostApiDevice,
                                            channelIndex + 1,
                                            input,
                                            kAudioDevicePropertyChannelName,
                                            &size,
                                            NULL);
        if( !error )
        {
            if( !ensureChannelNameSize( size ) )
                return NULL;

            error = AudioDeviceGetProperty( hostApiDevice,
                                            channelIndex + 1,
                                            input,
                                            kAudioDevicePropertyChannelName,
                                            &size,
                                            channelName );

            if( !error )
                return channelName;
        }

        /* as a last-ditch effort, we use the device name and append the channel number. */
        nameRef = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%s: %d"), hostApi->deviceInfos[device]->name, channelIndex + 1 );

        size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
        if( !ensureChannelNameSize( size ) )
        {
            CFRelease( nameRef );
            return NULL;
        }
        CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
        CFRelease( nameRef );
    }
    else
    {
        size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
        if( !ensureChannelNameSize( size ) )
        {
            CFRelease( nameRef );
            return NULL;
        }
        CFStringGetCString( nameRef, channelName, size+1, kCFStringEncodingUTF8 );
        CFRelease( nameRef );
    }

    return channelName;
}
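
/* Example (editorial sketch): querying the name of the first input channel of a
 * device, where `dev` is assumed to be a PortAudio device index belonging to this
 * host API:
 *
 *     const char *name = PaMacCore_GetChannelName( dev, 0, true );
 *
 * The returned pointer refers to the shared static buffer above, so it is only
 * valid until the next call to this function.
 */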

PaError PaMacCore_GetBufferSizeRange( PaDeviceIndex device,
                                      long *minBufferSizeFrames, long *maxBufferSizeFrames )
{
    PaError result;
    PaUtilHostApiRepresentation *hostApi;

    result = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );

    if( result == paNoError )
    {
        PaDeviceIndex hostApiDeviceIndex;
        result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDeviceIndex, device, hostApi );
        if( result == paNoError )
        {
            PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
            AudioDeviceID macCoreDeviceId = macCoreHostApi->devIds[hostApiDeviceIndex];
            AudioValueRange audioRange;
            UInt32 propSize = sizeof( audioRange );

            // return the size range for the output scope unless we only have inputs
            Boolean isInput = 0;
            if( macCoreHostApi->inheritedHostApiRep.deviceInfos[hostApiDeviceIndex]->maxOutputChannels == 0 )
                isInput = 1;

            result = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );

            *minBufferSizeFrames = audioRange.mMinimum;
            *maxBufferSizeFrames = audioRange.mMaximum;
        }
    }

    return result;
}
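
/* Example (editorial sketch): querying the supported host buffer size range for a
 * device before choosing a framesPerBuffer value; `dev` is assumed to be a valid
 * PaDeviceIndex for this host API:
 *
 *     long minFrames = 0, maxFrames = 0;
 *     if( PaMacCore_GetBufferSizeRange( dev, &minFrames, &maxFrames ) == paNoError )
 *         printf( "buffer size range: %ld - %ld frames\n", minFrames, maxFrames );
 */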

AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamInputHandle()\n"));

    return ( stream->inputDevice );
}

AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("PaMacCore_GetStreamOutputHandle()\n"));

    return ( stream->outputDevice );
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#define RING_BUFFER_ADVANCE_DENOMINATOR (4)

static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate );
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
                           PaStream** s,
                           const PaStreamParameters *inputParameters,
                           const PaStreamParameters *outputParameters,
                           double sampleRate,
                           unsigned long framesPerBuffer,
                           PaStreamFlags streamFlags,
                           PaStreamCallback *streamCallback,
                           void *userData );
static PaError CloseStream( PaStream* stream );
static PaError StartStream( PaStream *stream );
static PaError StopStream( PaStream *stream );
static PaError AbortStream( PaStream *stream );
static PaError IsStreamStopped( PaStream *s );
static PaError IsStreamActive( PaStream *stream );
static PaTime GetStreamTime( PaStream *stream );
static OSStatus AudioIOProc( void *inRefCon,
                             AudioUnitRenderActionFlags *ioActionFlags,
                             const AudioTimeStamp *inTimeStamp,
                             UInt32 inBusNumber,
                             UInt32 inNumberFrames,
                             AudioBufferList *ioData );
static double GetStreamCpuLoad( PaStream* stream );

static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput);

static PaError OpenAndSetupOneAudioUnit(
                                   const PaMacCoreStream *stream,
                                   const PaStreamParameters *inStreamParams,
                                   const PaStreamParameters *outStreamParams,
                                   const UInt32 requestedFramesPerBuffer,
                                   UInt32 *actualInputFramesPerBuffer,
                                   UInt32 *actualOutputFramesPerBuffer,
                                   const PaMacAUHAL *auhalHostApi,
#ifndef AUDIO_COMPONENT_FIX
                                   AudioUnit *audioUnit,
#else
                                   AudioComponentInstance *audioUnit,
#endif
                                   AudioConverterRef *srConverter,
                                   AudioDeviceID *audioDevice,
                                   const double sampleRate,
                                   void *refCon );

/* for setting errors. */
#define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
    PaUtil_SetLastHostErrorInfo( paCoreAudio, errorCode, errorText )

/*
 * Callback called when starting or stopping a stream.
 */
static void startStopCallback(
   void *               inRefCon,
#ifndef AUDIO_COMPONENT_FIX
   AudioUnit            ci,
#else
   AudioComponentInstance            ci,
#endif
   AudioUnitPropertyID  inID,
   AudioUnitScope       inScope,
   AudioUnitElement     inElement )
{
   PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
   UInt32 isRunning;
   UInt32 size = sizeof( isRunning );
   OSStatus err;
   err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
   assert( !err );
   if( err )
      isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
   if( isRunning )
      return; //We are only interested in when we are stopping
   // -- if we are using 2 I/O units, we only need one notification!
   if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
      return;
   PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
   if( stream->state == STOPPING )
      stream->state = STOPPED ;
   if( sfc )
      sfc( stream->streamRepresentation.userData );
}


/*currently, this is only used in initialization, but it might be modified
  to be used when the list of devices changes.*/
static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
{
    UInt32 size;
    UInt32 propsize;
    VVDBUG(("gatherDeviceInfo()\n"));
    /* -- free any previous allocations -- */
    if( auhalHostApi->devIds )
        PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
    auhalHostApi->devIds = NULL;

    /* -- figure out how many devices there are -- */
    AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  NULL );
    auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );

    VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );

    /* -- copy the device IDs -- */
    auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
                             auhalHostApi->allocations,
                             propsize );
    if( !auhalHostApi->devIds )
        return paInsufficientMemory;
    AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
                                  &propsize,
                                  auhalHostApi->devIds );
#ifdef MAC_CORE_VERBOSE_DEBUG
    {
       int i;
       for( i=0; i<auhalHostApi->devCount; ++i )
          printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
    }
#endif

    size = sizeof(AudioDeviceID);
    auhalHostApi->defaultIn  = kAudioDeviceUnknown;
    auhalHostApi->defaultOut = kAudioDeviceUnknown;

    /* determine the default device. */
    /* I am not sure how these calls to AudioHardwareGetProperty()
       could fail, but in case they do, we use the first available
       device as the default. */
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
                     &size,
                     &auhalHostApi->defaultIn) ) {
       int i;
       auhalHostApi->defaultIn  = kAudioDeviceUnknown;
       VDBUG(("Failed to get default input device from OS."));
       VDBUG((" I will substitute the first available input Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], TRUE ) )
             if( devInfo.maxInputChannels ) {
                auhalHostApi->defaultIn = auhalHostApi->devIds[i];
                break;
             }
       }
    }
    if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
                     &size,
                     &auhalHostApi->defaultOut) ) {
       int i;
       auhalHostApi->defaultIn  = kAudioDeviceUnknown;
       VDBUG(("Failed to get default output device from OS."));
       VDBUG((" I will substitute the first available output Device."));
       for( i=0; i<auhalHostApi->devCount; ++i ) {
          PaDeviceInfo devInfo;
          if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
                                   auhalHostApi->devIds[i], FALSE ) )
             if( devInfo.maxOutputChannels ) {
                auhalHostApi->defaultOut = auhalHostApi->devIds[i];
                break;
             }
       }
    }

    VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn  ) );
    VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );

    return paNoError;
}

/* =================================================================================================== */
/**
 * @internal
 * @brief Clip the desired size against the allowed IO buffer size range for the device.
 */
static PaError ClipToDeviceBufferSize( AudioDeviceID macCoreDeviceId,
                                       int isInput, UInt32 desiredSize, UInt32 *allowedSize )
{
    UInt32 resultSize = desiredSize;
    AudioValueRange audioRange;
    UInt32 propSize = sizeof( audioRange );
    PaError err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
    resultSize = MAX( resultSize, audioRange.mMinimum );
    resultSize = MIN( resultSize, audioRange.mMaximum );
    *allowedSize = resultSize;
    return err;
}
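
/* For instance, with PA_MAC_SMALL_BUFFER_SIZE (64) and a device whose reported
 * kAudioDevicePropertyBufferFrameSizeRange is [128, 4096], the MAX/MIN pair above
 * clips the request up to 128 frames; a request of 8192 would be clipped down to
 * 4096. (Figures are illustrative, not taken from any particular device.) */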

/* =================================================================================================== */
#if 0
static void DumpDeviceProperties( AudioDeviceID macCoreDeviceId,
                          int isInput )
{
    PaError err;
    int i;
    UInt32 propSize;
    UInt32 deviceLatency;
    UInt32 streamLatency;
    UInt32 bufferFrames;
    UInt32 safetyOffset;
    AudioStreamID streamIDs[128];

    printf("\n======= latency query : macCoreDeviceId = %d, isInput %d =======\n", (int)macCoreDeviceId, isInput );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &bufferFrames));
    printf("kAudioDevicePropertyBufferFrameSize: err = %d, propSize = %d, value = %d\n", err, propSize, bufferFrames );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
    printf("kAudioDevicePropertySafetyOffset: err = %d, propSize = %d, value = %d\n", err, propSize, safetyOffset );

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
    printf("kAudioDevicePropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, deviceLatency );

    AudioValueRange audioRange;
    propSize = sizeof( audioRange );
    err = WARNING(AudioDeviceGetProperty( macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSizeRange, &propSize, &audioRange ) );
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, minimum = %g\n", err, propSize, audioRange.mMinimum);
    printf("kAudioDevicePropertyBufferFrameSizeRange: err = %d, propSize = %u, maximum = %g\n", err, propSize, audioRange.mMaximum );

    /* Get the streams from the device and query their latency. */
    propSize = sizeof(streamIDs);
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
    int numStreams = propSize / sizeof(AudioStreamID);
    for( i=0; i<numStreams; i++ )
    {
        printf("Stream #%d = %d---------------------- \n", i, streamIDs[i] );

        propSize = sizeof(UInt32);
        err  = WARNING(AudioStreamGetProperty(streamIDs[i], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
        printf("  kAudioStreamPropertyLatency: err = %d, propSize = %d, value = %d\n", err, propSize, streamLatency );
    }
}
#endif

/* =================================================================================================== */
/**
 * @internal
 * Calculate the fixed latency from the system and the device.
 * Sum of kAudioStreamPropertyLatency +
 *        kAudioDevicePropertySafetyOffset +
 *        kAudioDevicePropertyLatency
 *
 * Some useful info from Jeff Moore on latency.
 * http://osdir.com/ml/coreaudio-api/2010-01/msg00046.html
 * http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
 */
static PaError CalculateFixedDeviceLatency( AudioDeviceID macCoreDeviceId, int isInput, UInt32 *fixedLatencyPtr )
{
    PaError err;
    UInt32 propSize;
    UInt32 deviceLatency;
    UInt32 streamLatency;
    UInt32 safetyOffset;
    AudioStreamID streamIDs[1];

    // To get stream latency we have to get a streamID from the device.
    // We are only going to look at the first stream so only fetch one stream.
    propSize = sizeof(streamIDs);
    err  = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreams, &propSize, &streamIDs[0]));
    if( err != paNoError ) goto error;
    if( propSize == sizeof(AudioStreamID) )
    {
        propSize = sizeof(UInt32);
        err  = WARNING(AudioStreamGetProperty(streamIDs[0], 0, kAudioStreamPropertyLatency, &propSize, &streamLatency));
    }

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertySafetyOffset, &propSize, &safetyOffset));
    if( err != paNoError ) goto error;

    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &deviceLatency));
    if( err != paNoError ) goto error;

    *fixedLatencyPtr = deviceLatency + streamLatency + safetyOffset;
    return err;
error:
    return err;
}
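
/* Worked example (editorial, figures are illustrative): a device reporting a
 * stream latency of 0 frames, a safety offset of 64 frames and a device latency
 * of 32 frames yields a fixed latency of 96 frames, i.e. roughly 2.2 ms at a
 * 44100 Hz sample rate. */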

/* =================================================================================================== */
static PaError CalculateDefaultDeviceLatencies( AudioDeviceID macCoreDeviceId,
                                               int isInput, UInt32 *lowLatencyFramesPtr,
                                               UInt32 *highLatencyFramesPtr )
{
    UInt32 propSize;
    UInt32 bufferFrames = 0;
    UInt32 fixedLatency = 0;
    UInt32 clippedMinBufferSize = 0;

    //DumpDeviceProperties( macCoreDeviceId, isInput );

    PaError err = CalculateFixedDeviceLatency( macCoreDeviceId, isInput, &fixedLatency );
    if( err != paNoError ) goto error;

    // For low latency use a small fixed size buffer clipped to the device range.
    err = ClipToDeviceBufferSize( macCoreDeviceId, isInput, PA_MAC_SMALL_BUFFER_SIZE, &clippedMinBufferSize );
    if( err != paNoError ) goto error;

    // For high latency use the default device buffer size.
    propSize = sizeof(UInt32);
    err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyBufferFrameSize, &propSize, &bufferFrames));
    if( err != paNoError ) goto error;

    *lowLatencyFramesPtr = fixedLatency + clippedMinBufferSize;
    *highLatencyFramesPtr = fixedLatency + bufferFrames;

    return err;
error:
    return err;
}

/* =================================================================================================== */

static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
                               PaDeviceInfo *deviceInfo,
                               AudioDeviceID macCoreDeviceId,
                               int isInput)
{
    UInt32 propSize;
    PaError err = paNoError;
    UInt32 i;
    int numChannels = 0;
    AudioBufferList *buflist = NULL;

    VVDBUG(("GetChannelInfo()\n"));

    /* Get the number of channels from the stream configuration.
       Fail if we can't get this. */

    err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
    if (err)
        return err;

    buflist = PaUtil_AllocateMemory(propSize);
    if( !buflist )
       return paInsufficientMemory;
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
    if (err)
        goto error;

    for (i = 0; i < buflist->mNumberBuffers; ++i)
        numChannels += buflist->mBuffers[i].mNumberChannels;

    if (isInput)
        deviceInfo->maxInputChannels = numChannels;
    else
        deviceInfo->maxOutputChannels = numChannels;

    if (numChannels > 0) /* do not try to retrieve the latency if there are no channels. */
    {
        /* Get the latency.  Don't fail if we can't get this. */
        /* default to something reasonable */
        deviceInfo->defaultLowInputLatency = .01;
        deviceInfo->defaultHighInputLatency = .10;
        deviceInfo->defaultLowOutputLatency = .01;
        deviceInfo->defaultHighOutputLatency = .10;
        UInt32 lowLatencyFrames = 0;
        UInt32 highLatencyFrames = 0;
        err = CalculateDefaultDeviceLatencies( macCoreDeviceId, isInput, &lowLatencyFrames, &highLatencyFrames );
        if( err == 0 )
        {
            double lowLatencySeconds = lowLatencyFrames / deviceInfo->defaultSampleRate;
            double highLatencySeconds = highLatencyFrames / deviceInfo->defaultSampleRate;
            if (isInput)
            {
                deviceInfo->defaultLowInputLatency = lowLatencySeconds;
                deviceInfo->defaultHighInputLatency = highLatencySeconds;
            }
            else
            {
                deviceInfo->defaultLowOutputLatency = lowLatencySeconds;
                deviceInfo->defaultHighOutputLatency = highLatencySeconds;
            }
        }
    }
    PaUtil_FreeMemory( buflist );
    return paNoError;
 error:
    PaUtil_FreeMemory( buflist );
    return err;
}

/* =================================================================================================== */
static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
                                     PaDeviceInfo *deviceInfo,
                                     AudioDeviceID macCoreDeviceId,
                                     PaHostApiIndex hostApiIndex )
{
    Float64 sampleRate;
    char *name;
    PaError err = paNoError;
    CFStringRef nameRef;
    UInt32 propSize;

    VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));

    memset(deviceInfo, 0, sizeof(PaDeviceInfo));

    deviceInfo->structVersion = 2;
    deviceInfo->hostApi = hostApiIndex;

    /* Get the device name using CFString */
    propSize = sizeof(nameRef);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceNameCFString, &propSize, &nameRef));
    if (err)
    {
        /* Get the device name using c string.  Fail if we can't get it. */
        err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
        if (err)
            return err;

        name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize+1);
        if ( !name )
            return paInsufficientMemory;
        err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
        if (err)
            return err;
    }
    else
    {
        /* valid CFString so we just allocate a c string big enough to contain the data */
        propSize = CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef), kCFStringEncodingUTF8);
        name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations, propSize+1);
        if ( !name )
        {
            CFRelease(nameRef);
            return paInsufficientMemory;
        }
        CFStringGetCString(nameRef, name, propSize+1, kCFStringEncodingUTF8);
        CFRelease(nameRef);
    }
    deviceInfo->name = name;

    /* Try to get the default sample rate.  Don't fail if we can't get this. */
    propSize = sizeof(Float64);
    err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
    if (err)
        deviceInfo->defaultSampleRate = 0.0;
    else
        deviceInfo->defaultSampleRate = sampleRate;

    /* Get the maximum number of input and output channels.  Fail if we can't get this. */

    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
    if (err)
        return err;

    err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
    if (err)
        return err;

    return paNoError;
}

PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
{
    PaError result = paNoError;
    int i;
    PaMacAUHAL *auhalHostApi = NULL;
    PaDeviceInfo *deviceInfoArray;
    int unixErr;

    VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));

    SInt32 major;
    SInt32 minor;
    Gestalt(gestaltSystemVersionMajor, &major);
    Gestalt(gestaltSystemVersionMinor, &minor);

    // Starting with 10.6 systems, the HAL notification thread is created internally
    if (major == 10 && minor >= 6) {
        CFRunLoopRef theRunLoop = NULL;
        AudioObjectPropertyAddress theAddress = { kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
        OSStatus osErr = AudioObjectSetPropertyData (kAudioObjectSystemObject, &theAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
        if (osErr != noErr) {
            goto error;
        }
    }

    unixErr = initializeXRunListenerList();
    if( 0 != unixErr ) {
       return UNIX_ERR(unixErr);
    }

    auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
    if( !auhalHostApi )
    {
        result = paInsufficientMemory;
        goto error;
    }

    auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
    if( !auhalHostApi->allocations )
    {
        result = paInsufficientMemory;
        goto error;
    }

    auhalHostApi->devIds = NULL;
    auhalHostApi->devCount = 0;

    /* get the info we need about the devices */
    result = gatherDeviceInfo( auhalHostApi );
    if( result != paNoError )
       goto error;

    *hostApi = &auhalHostApi->inheritedHostApiRep;
    (*hostApi)->info.structVersion = 1;
    (*hostApi)->info.type = paCoreAudio;
    (*hostApi)->info.name = "Core Audio";

    (*hostApi)->info.defaultInputDevice = paNoDevice;
    (*hostApi)->info.defaultOutputDevice = paNoDevice;

    (*hostApi)->info.deviceCount = 0;

    if( auhalHostApi->devCount > 0 )
    {
        (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
        if( !(*hostApi)->deviceInfos )
        {
            result = paInsufficientMemory;
            goto error;
        }

        /* allocate all device info structs in a contiguous block */
        deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
                auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
        if( !deviceInfoArray )
        {
            result = paInsufficientMemory;
            goto error;
        }

        for( i=0; i < auhalHostApi->devCount; ++i )
        {
            int err;
            err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
                                      auhalHostApi->devIds[i],
                                      hostApiIndex );
            if (err == paNoError)
            { /* copy some info and set the defaults */
                (*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
                    (*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
                if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
                    (*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
                (*hostApi)->info.deviceCount++;
            }
            else
            { /* there was an error. we need to shift the devices down, so we ignore this one */
                int j;
                auhalHostApi->devCount--;
                for( j=i; j<auhalHostApi->devCount; ++j )
                   auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
                i--;
            }
        }
    }

    (*hostApi)->Terminate = Terminate;
    (*hostApi)->OpenStream = OpenStream;
    (*hostApi)->IsFormatSupported = IsFormatSupported;

    PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, GetStreamCpuLoad,
                                      PaUtil_DummyRead, PaUtil_DummyWrite,
                                      PaUtil_DummyGetReadAvailable,
                                      PaUtil_DummyGetWriteAvailable );

    PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
                                      CloseStream, StartStream,
                                      StopStream, AbortStream, IsStreamStopped,
                                      IsStreamActive,
                                      GetStreamTime, PaUtil_DummyGetCpuLoad,
                                      ReadStream, WriteStream,
                                      GetStreamReadAvailable,
                                      GetStreamWriteAvailable );

    return result;

error:
    if( auhalHostApi )
    {
        if( auhalHostApi->allocations )
        {
            PaUtil_FreeAllAllocations( auhalHostApi->allocations );
            PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
        }

        PaUtil_FreeMemory( auhalHostApi );
    }
    return result;
}


static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
{
    int unixErr;

    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;

    VVDBUG(("Terminate()\n"));

    unixErr = destroyXRunListenerList();
    if( 0 != unixErr )
       UNIX_ERR(unixErr);

    /*
        IMPLEMENT ME:
            - clean up any resources not handled by the allocation group
        TODO: Double check that everything is handled by alloc group
    */

    if( auhalHostApi->allocations )
    {
        PaUtil_FreeAllAllocations( auhalHostApi->allocations );
        PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
    }

    PaUtil_FreeMemory( auhalHostApi );
}


static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
                                  const PaStreamParameters *inputParameters,
                                  const PaStreamParameters *outputParameters,
                                  double sampleRate )
{
    int inputChannelCount, outputChannelCount;
    PaSampleFormat inputSampleFormat, outputSampleFormat;

    VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
                inputParameters  ? inputParameters->channelCount  : -1,
                inputParameters  ? inputParameters->sampleFormat  : -1,
                outputParameters ? outputParameters->channelCount : -1,
                outputParameters ? outputParameters->sampleFormat : -1,
                (float) sampleRate ));

    /** These first checks are standard PA checks. We do some fancier checks
        later. */
    if( inputParameters )
    {
        inputChannelCount = inputParameters->channelCount;
        inputSampleFormat = inputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( inputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;

        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that input device can support inputChannelCount */
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
            return paInvalidChannelCount;
    }
    else
    {
        inputChannelCount = 0;
    }

    if( outputParameters )
    {
        outputChannelCount = outputParameters->channelCount;
        outputSampleFormat = outputParameters->sampleFormat;

        /* all standard sample formats are supported by the buffer adapter,
            this implementation doesn't support any custom sample formats */
        if( outputSampleFormat & paCustomFormat )
            return paSampleFormatNotSupported;

        /* unless alternate device specification is supported, reject the use of
            paUseHostApiSpecificDeviceSpecification */

        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
            return paInvalidDevice;

        /* check that output device can support outputChannelCount */
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
            return paInvalidChannelCount;

    }
    else
    {
        outputChannelCount = 0;
    }

    /* FEEDBACK */
    /*        I think the only way to check a given format SR combo is     */
    /*        to try opening it. This could be disruptive, is that Okay?   */
    /*        The alternative is to just read off available sample rates,  */
    /*        but this will not work 100% of the time (eg, a device that   */
    /*        supports N output at one rate but only N/2 at a higher rate.)*/

    /* The following code opens the device with the requested parameters to
       see if it works. */
    {
       PaError err;
       PaStream *s;
       err = OpenStream( hostApi, &s, inputParameters, outputParameters,
                           sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
       if( err != paNoError && err != paInvalidSampleRate )
          DBUG( ( "OpenStream @ %g returned: %d: %s\n",
                  (float) sampleRate, err, Pa_GetErrorText( err ) ) );
       if( err )
          return err;
       err = CloseStream( s );
       if( err ) {
          /* FEEDBACK: is this more serious? should we assert? */
          DBUG( ( "WARNING: could not close Stream. %d: %s\n",
                  err, Pa_GetErrorText( err ) ) );
       }
    }

    return paFormatIsSupported;
}
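
/* Example (editorial sketch): an application normally reaches this code through the
 * public PortAudio call, e.g.
 *
 *     PaError err = Pa_IsFormatSupported( &inputParameters, &outputParameters, 44100.0 );
 *     if( err == paFormatIsSupported ) { ... }
 *
 * Note that, as implemented above, the check works by actually opening and then
 * closing a stream with the requested parameters.
 */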

/* ================================================================================= */
static void InitializeDeviceProperties( PaMacCoreDeviceProperties *deviceProperties )
{
    memset( deviceProperties, 0, sizeof(PaMacCoreDeviceProperties) );
    deviceProperties->sampleRate = 1.0; // Better than random. Overwritten by actual values later on.
    deviceProperties->samplePeriod = 1.0 / deviceProperties->sampleRate;
}

static Float64 CalculateSoftwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    UInt32 latencyFrames = deviceProperties->bufferFrameSize + deviceProperties->deviceLatency + deviceProperties->safetyOffset;
    return latencyFrames * deviceProperties->samplePeriod; // same as dividing by sampleRate but faster
}

static Float64 CalculateHardwareLatencyFromProperties( PaMacCoreStream *stream, PaMacCoreDeviceProperties *deviceProperties )
{
    return deviceProperties->deviceLatency * deviceProperties->samplePeriod; // same as dividing by sampleRate but faster
}

/* Calculate values used to convert Apple timestamps into PA timestamps
 * from the device properties. The final results of this calculation
 * will be used in the audio callback function.
 */
static void UpdateTimeStampOffsets( PaMacCoreStream *stream )
{
    Float64 inputSoftwareLatency = 0.0;
    Float64 inputHardwareLatency = 0.0;
    Float64 outputSoftwareLatency = 0.0;
    Float64 outputHardwareLatency = 0.0;

    if( stream->inputUnit != NULL )
    {
        inputSoftwareLatency = CalculateSoftwareLatencyFromProperties( stream, &stream->inputProperties );
        inputHardwareLatency = CalculateHardwareLatencyFromProperties( stream, &stream->inputProperties );
    }
    if( stream->outputUnit != NULL )
    {
        outputSoftwareLatency = CalculateSoftwareLatencyFromProperties( stream, &stream->outputProperties );
        outputHardwareLatency = CalculateHardwareLatencyFromProperties( stream, &stream->outputProperties );
    }

    /* We only need a mutex around setting these variables as a group. */
    pthread_mutex_lock( &stream->timingInformationMutex );
    stream->timestampOffsetCombined = inputSoftwareLatency + outputSoftwareLatency;
    stream->timestampOffsetInputDevice = inputHardwareLatency;
    stream->timestampOffsetOutputDevice = outputHardwareLatency;
    pthread_mutex_unlock( &stream->timingInformationMutex );
}
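
/* Worked example (editorial, figures are illustrative): with bufferFrameSize = 512,
 * deviceLatency = 32 and safetyOffset = 64 at a 44100 Hz sample rate, the software
 * latency computed above is (512 + 32 + 64) / 44100, roughly 13.8 ms, and the
 * hardware portion is 32 / 44100, roughly 0.7 ms. */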
1046

    
1047
/* ================================================================================= */
1048

    
1049
/* can be used to update from nominal or actual sample rate */
1050
static OSStatus UpdateSampleRateFromDeviceProperty( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID sampleRatePropertyID )
1051
{
1052
    PaMacCoreDeviceProperties * deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;
1053
        
1054
        Float64 sampleRate = 0.0;
1055
        UInt32 propSize = sizeof(Float64);
1056
    OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, sampleRatePropertyID, &propSize, &sampleRate);
1057
        if( (osErr == noErr) && (sampleRate > 1000.0) ) /* avoid divide by zero if there's an error */
1058
        {
1059
        deviceProperties->sampleRate = sampleRate;
1060
        deviceProperties->samplePeriod = 1.0 / sampleRate;
1061
    }
1062
    return osErr;
1063
}
1064

    
1065
static OSStatus AudioDevicePropertyActualSampleRateListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
1066
{
1067
        PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
1068
    
1069
    // Make sure the callback is operating on a stream that is still valid!
1070
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );
1071

    
1072
        OSStatus osErr = UpdateSampleRateFromDeviceProperty( stream, inDevice, isInput, kAudioDevicePropertyActualSampleRate );
1073
    if( osErr == noErr )
1074
    {
1075
        UpdateTimeStampOffsets( stream );
1076
    }
1077
    return osErr;
1078
}
1079

    
1080
/* ================================================================================= */
1081
static OSStatus QueryUInt32DeviceProperty( AudioDeviceID deviceID, Boolean isInput, AudioDevicePropertyID propertyID, UInt32 *outValue )
1082
{
1083
        UInt32 propertyValue = 0;
1084
        UInt32 propertySize = sizeof(UInt32);
1085
        OSStatus osErr = AudioDeviceGetProperty( deviceID, 0, isInput, propertyID, &propertySize, &propertyValue);
1086
        if( osErr == noErr )
1087
        {
1088
        *outValue = propertyValue;
1089
        }
1090
    return osErr;
1091
}
1092

    
1093
static OSStatus AudioDevicePropertyGenericListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
1094
{
1095
    OSStatus osErr = noErr;
1096
        PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
1097
    
1098
    // Make sure the callback is operating on a stream that is still valid!
1099
    assert( stream->streamRepresentation.magic == PA_STREAM_MAGIC );
1100
    
1101
    PaMacCoreDeviceProperties *deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;
1102
    UInt32 *valuePtr = NULL;
1103
    switch( inPropertyID )
1104
    {
1105
        case kAudioDevicePropertySafetyOffset:
1106
            valuePtr = &deviceProperties->safetyOffset;
1107
            break;
1108
                        
1109
        case kAudioDevicePropertyLatency:
1110
            valuePtr = &deviceProperties->deviceLatency;
1111
            break;
1112
            
1113
        case kAudioDevicePropertyBufferFrameSize:
1114
            valuePtr = &deviceProperties->bufferFrameSize;
1115
            break;            
1116
    }
1117
    if( valuePtr != NULL )
1118
    {
1119
        osErr = QueryUInt32DeviceProperty( inDevice, isInput, inPropertyID, valuePtr );
1120
        if( osErr == noErr )
1121
        {
1122
            UpdateTimeStampOffsets( stream );
1123
        }
1124
    }
1125
    return osErr;
1126
}
1127

    
1128
/* ================================================================================= */
1129
/*
1130
 * Setup listeners in case device properties change during the run. */
1131
static OSStatus SetupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
1132
{
1133
    OSStatus osErr = noErr;
1134
    PaMacCoreDeviceProperties *deviceProperties = isInput ? &stream->inputProperties : &stream->outputProperties;
1135
    
1136
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
1137
                                           kAudioDevicePropertyLatency, &deviceProperties->deviceLatency )) != noErr ) return osErr;
1138
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
1139
                                           kAudioDevicePropertyBufferFrameSize, &deviceProperties->bufferFrameSize )) != noErr ) return osErr;
1140
    if( (osErr = QueryUInt32DeviceProperty( deviceID, isInput,
1141
                                           kAudioDevicePropertySafetyOffset, &deviceProperties->safetyOffset )) != noErr ) return osErr;
1142
    
1143
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate, 
1144
                                   AudioDevicePropertyActualSampleRateListenerProc, stream );
1145
    
1146
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioStreamPropertyLatency, 
1147
                                   AudioDevicePropertyGenericListenerProc, stream );
1148
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize, 
1149
                                   AudioDevicePropertyGenericListenerProc, stream );
1150
    AudioDeviceAddPropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset, 
1151
                                   AudioDevicePropertyGenericListenerProc, stream );
1152
    
1153
    return osErr;
1154
}
1155

    
1156
static void CleanupDevicePropertyListeners( PaMacCoreStream *stream, AudioDeviceID deviceID, Boolean isInput )
1157
{    
1158
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyActualSampleRate, 
1159
                                   AudioDevicePropertyActualSampleRateListenerProc );
1160
    
1161
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyLatency, 
1162
                                   AudioDevicePropertyGenericListenerProc );        
1163
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertyBufferFrameSize, 
1164
                                   AudioDevicePropertyGenericListenerProc );
1165
    AudioDeviceRemovePropertyListener( deviceID, 0, isInput, kAudioDevicePropertySafetyOffset, 
1166
                                   AudioDevicePropertyGenericListenerProc );
1167
}
1168

    
1169
/* ================================================================================= */
1170
static PaError OpenAndSetupOneAudioUnit(
1171
                                   const PaMacCoreStream *stream,
1172
                                   const PaStreamParameters *inStreamParams,
1173
                                   const PaStreamParameters *outStreamParams,
1174
                                   const UInt32 requestedFramesPerBuffer,
1175
                                   UInt32 *actualInputFramesPerBuffer,
1176
                                   UInt32 *actualOutputFramesPerBuffer,
1177
                                   const PaMacAUHAL *auhalHostApi,
1178
#ifndef AUDIO_COMPONENT_FIX
1179
                                   AudioUnit *audioUnit,
1180
#else
1181
                                   AudioComponentInstance *audioUnit,
1182
#endif
1183
                                   AudioConverterRef *srConverter,
1184
                                   AudioDeviceID *audioDevice,
1185
                                   const double sampleRate,
1186
                                   void *refCon )
1187
{
1188
#ifndef AUDIO_COMPONENT_FIX
1189
    ComponentDescription desc;
1190
    Component comp;
1191
#else
1192
    AudioComponentDescription desc;
1193
    AudioComponent comp;
1194
#endif
1195
    /*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
1196
    AudioStreamBasicDescription desiredFormat;
1197
    OSStatus result = noErr;
1198
    PaError paResult = paNoError;
1199
    int line = 0;
1200
    UInt32 callbackKey;
1201
    AURenderCallbackStruct rcbs;
1202
    unsigned long macInputStreamFlags  = paMacCorePlayNice;
1203
    unsigned long macOutputStreamFlags = paMacCorePlayNice;
1204
    SInt32 const *inChannelMap = NULL;
1205
    SInt32 const *outChannelMap = NULL;
1206
    unsigned long inChannelMapSize = 0;
1207
    unsigned long outChannelMapSize = 0;
1208

    
1209
    VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
1210
                inStreamParams  ? inStreamParams->channelCount  : -1,
1211
                inStreamParams  ? inStreamParams->sampleFormat  : -1,
1212
                outStreamParams ? outStreamParams->channelCount : -1,
1213
                outStreamParams ? outStreamParams->sampleFormat : -1,
1214
                requestedFramesPerBuffer ));
1215

    
1216
    /* -- handle the degenerate case  -- */
1217
    if( !inStreamParams && !outStreamParams ) {
1218
       *audioUnit = NULL;
1219
       *audioDevice = kAudioDeviceUnknown;
1220
       return paNoError;
1221
    }
1222

    
1223
    /* -- get the user's api specific info, if they set any -- */
1224
    if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
1225
    {
1226
       macInputStreamFlags=
1227
            ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1228
                  ->flags;
1229
       inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1230
                  ->channelMap;
1231
       inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
1232
                  ->channelMapSize;
1233
    }
1234
    if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
1235
    {
1236
       macOutputStreamFlags=
1237
            ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1238
                  ->flags;
1239
       outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1240
                  ->channelMap;
1241
       outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
1242
                  ->channelMapSize; 
1243
    }
1244
    /* Override user's flags here, if desired for testing. */
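    /* For reference, a client selects these flags before opening the stream,
       roughly as sketched below (illustrative only, not part of this file's
       logic; see pa_mac_core.h for the helpers and flag values):

           PaMacCoreStreamInfo info;
           PaMacCore_SetupStreamInfo( &info, paMacCorePlayNice
                                             | paMacCoreChangeDeviceParameters );
           myStreamParameters.hostApiSpecificStreamInfo = &info;
    */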
    /*
1247
     * The HAL AU is a Mac OS style "component".
1248
 * The first few steps deal with that.
1249
     * Later steps work on a combination of Mac OS
1250
     * components and the slightly lower level
1251
     * HAL.
1252
     */
1253

    
1254
    /* -- describe the output type AudioUnit -- */
1255
    /*  Note: for the default AudioUnit, we could use the
1256
     *  componentSubType value kAudioUnitSubType_DefaultOutput;
1257
     *  but I don't think that's relevant here.
1258
     */
1259
    desc.componentType         = kAudioUnitType_Output;
1260
    desc.componentSubType      = kAudioUnitSubType_HALOutput;
1261
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1262
    desc.componentFlags        = 0;
1263
    desc.componentFlagsMask    = 0;
1264

    
1265
#ifdef AUDIO_COMPONENT_FIX
1266
    DBUG( ( "Using AUDIO_COMPONENT_FIX logic." ) );
1267
#endif
1268

    
1269
    /* -- find the component -- */
1270
#ifndef AUDIO_COMPONENT_FIX
1271
    comp = FindNextComponent( NULL, &desc );
1272
#else
1273
    comp = AudioComponentFindNext( NULL, &desc );
1274
#endif
1275
    if( !comp )
1276
    {
1277
       DBUG( ( "AUHAL component not found." ) );
1278
       *audioUnit = NULL;
1279
       *audioDevice = kAudioDeviceUnknown;
1280
       return paUnanticipatedHostError;
1281
    }
1282
    /* -- open it -- */
1283
#ifndef AUDIO_COMPONENT_FIX
1284
                result = OpenAComponent(comp, audioUnit);
1285
#else
1286
                result = AudioComponentInstanceNew(comp, audioUnit);
1287
#endif
1288
    if( result )
1289
    {
1290
       DBUG( ( "Failed to open AUHAL component." ) );
1291
       *audioUnit = NULL;
1292
       *audioDevice = kAudioDeviceUnknown;
1293
       return ERR( result );
1294
    }
1295
    /* -- prepare a little error handling logic / hackery -- */
1296
#define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
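/* ERR_WRAP relies on the local 'result' and 'line' variables and the 'error:'
   label below: on any non-noErr status it records the failing source line and
   jumps to the cleanup path, which reports it via PaMacCore_SetError(). */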
    /* -- if there is input, we have to explicitly enable input -- */
1299
    if( inStreamParams )
1300
    {
1301
       UInt32 enableIO = 1;
1302
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1303
                 kAudioOutputUnitProperty_EnableIO,
1304
                 kAudioUnitScope_Input,
1305
                 INPUT_ELEMENT,
1306
                 &enableIO,
1307
                 sizeof(enableIO) ) );
1308
    }
1309
    /* -- if there is no output, we must explicitly disable output -- */
1310
    if( !outStreamParams )
1311
    {
1312
       UInt32 enableIO = 0;
1313
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1314
                 kAudioOutputUnitProperty_EnableIO,
1315
                 kAudioUnitScope_Output,
1316
                 OUTPUT_ELEMENT,
1317
                 &enableIO,
1318
                 sizeof(enableIO) ) );
1319
    }
1320

    
1321
    /* -- set the devices -- */
1322
    /* make sure input and output are the same device if we are doing input and
1323
       output. */
1324
    if( inStreamParams && outStreamParams )
1325
    {
1326
       assert( outStreamParams->device == inStreamParams->device );
1327
    }
1328
    if( inStreamParams )
1329
    {
1330
       *audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
1331
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1332
                    kAudioOutputUnitProperty_CurrentDevice,
1333
                    kAudioUnitScope_Global,
1334
                    INPUT_ELEMENT,
1335
                    audioDevice,
1336
                    sizeof(AudioDeviceID) ) );
1337
    }
1338
    if( outStreamParams && outStreamParams != inStreamParams )
1339
    {
1340
       *audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
1341
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1342
                    kAudioOutputUnitProperty_CurrentDevice,
1343
                    kAudioUnitScope_Global,
1344
                    OUTPUT_ELEMENT,
1345
                    audioDevice,
1346
                    sizeof(AudioDeviceID) ) );
1347
    }
1348
    /* -- add listener for dropouts -- */
1349
    result = AudioDeviceAddPropertyListener( *audioDevice,
1350
                                             0,
1351
                                             outStreamParams ? false : true,
1352
                                             kAudioDeviceProcessorOverload,
1353
                                             xrunCallback,
1354
                                             addToXRunListenerList( (void *)stream ) ) ;
1355
    if( result == kAudioHardwareIllegalOperationError ) {
1356
       // -- already registered, we're good
1357
    } else {
1358
       // -- not already registered, just check for errors
1359
       ERR_WRAP( result );
1360
    }
1361
    /* -- listen for stream start and stop -- */
1362
    ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
1363
                                            kAudioOutputUnitProperty_IsRunning,
1364
                                            startStopCallback,
1365
                                            (void *)stream ) );
1366

    
1367
    /* -- set format -- */
1368
    bzero( &desiredFormat, sizeof(desiredFormat) );
1369
    desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1370
    desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1371
    desiredFormat.mFramesPerPacket  = 1;
1372
    desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1373

    
1374
    result = 0;
1375
    /*  set device format first, but only touch the device if the user asked */
1376
    if( inStreamParams ) {
1377
       /*The callback never calls back if we don't set the FPB */
1378
       /*This seems weird, because I would think setting anything on the device
         would be disruptive.*/
1380
       paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
1381
                                          requestedFramesPerBuffer,
1382
                                          actualInputFramesPerBuffer );
1383
       if( paResult ) goto error;
1384
       if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
1385
          bool requireExact;
1386
          requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
1387
          paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
1388
                                                 requireExact, sampleRate );
1389
          if( paResult ) goto error;
1390
       }
1391
       if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
1392
          *actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
1393
    }
1394
    if( outStreamParams && !inStreamParams ) {
1395
       /*The callback never calls back if we don't set the FPB */
1396
       /*This seems weird, because I would think setting anything on the device
         would be disruptive.*/
1398
       paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
1399
                                          requestedFramesPerBuffer,
1400
                                          actualOutputFramesPerBuffer );
1401
       if( paResult ) goto error;
1402
       if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
1403
          bool requireExact;
1404
          requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
1405
          paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1406
                                                 requireExact, sampleRate );
1407
          if( paResult ) goto error;
1408
       }
1409
    }
1410

    
1411
    /* -- set the quality of the output converter -- */
1412
    if( outStreamParams ) {
1413
       UInt32 value = kAudioConverterQuality_Max;
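       /* The 0x0700 bits of the stream-info flags select the converter
          quality; the case comments below give the corresponding
          paMacCore_ConversionQuality* values. */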
       switch( macOutputStreamFlags & 0x0700 ) {
1415
       case 0x0100: /*paMacCore_ConversionQualityMin:*/
1416
          value=kRenderQuality_Min;
1417
          break;
1418
       case 0x0200: /*paMacCore_ConversionQualityLow:*/
1419
          value=kRenderQuality_Low;
1420
          break;
1421
       case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1422
          value=kRenderQuality_Medium;
1423
          break;
1424
       case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1425
          value=kRenderQuality_High;
1426
          break;
1427
       }
1428
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1429
                    kAudioUnitProperty_RenderQuality,
1430
                    kAudioUnitScope_Global,
1431
                    OUTPUT_ELEMENT,
1432
                    &value,
1433
                    sizeof(value) ) );
1434
    }
1435
    /* now set the format on the Audio Units. */
1436
    if( outStreamParams )
1437
    {
1438
       desiredFormat.mSampleRate    =sampleRate;
1439
       desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1440
       desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1441
       desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1442
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1443
                            kAudioUnitProperty_StreamFormat,
1444
                            kAudioUnitScope_Input,
1445
                            OUTPUT_ELEMENT,
1446
                            &desiredFormat,
1447
                            sizeof(AudioStreamBasicDescription) ) );
1448
    }
1449
    if( inStreamParams )
1450
    {
1451
       AudioStreamBasicDescription sourceFormat;
1452
       UInt32 size = sizeof( AudioStreamBasicDescription );
1453

    
1454
       /* keep the sample rate of the device, or we confuse AUHAL */
1455
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1456
                            kAudioUnitProperty_StreamFormat,
1457
                            kAudioUnitScope_Input,
1458
                            INPUT_ELEMENT,
1459
                            &sourceFormat,
1460
                            &size ) );
1461
       desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1462
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1463
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1464
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1465
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1466
                            kAudioUnitProperty_StreamFormat,
1467
                            kAudioUnitScope_Output,
1468
                            INPUT_ELEMENT,
1469
                            &desiredFormat,
1470
                            sizeof(AudioStreamBasicDescription) ) );
1471
    }
1472
    /* set the maximumFramesPerSlice */
1473
    /* not doing this causes real problems
1474
       (eg. the callback might not be called). The idea of setting both this
1475
       and the frames per buffer on the device is that we'll be most likely
1476
       to actually get the frame size we requested in the callback with the
1477
       minimum latency. */
1478
    if( outStreamParams ) {
1479
       UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1480
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1481
                            kAudioUnitProperty_MaximumFramesPerSlice,
1482
                            kAudioUnitScope_Input,
1483
                            OUTPUT_ELEMENT,
1484
                            actualOutputFramesPerBuffer,
1485
                            sizeof(*actualOutputFramesPerBuffer) ) );
1486
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1487
                            kAudioUnitProperty_MaximumFramesPerSlice,
1488
                            kAudioUnitScope_Global,
1489
                            OUTPUT_ELEMENT,
1490
                            actualOutputFramesPerBuffer,
1491
                            &size ) );
1492
    }
1493
    if( inStreamParams ) {
1494
       /*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1495
       ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1496
                            kAudioUnitProperty_MaximumFramesPerSlice,
1497
                            kAudioUnitScope_Output,
1498
                            INPUT_ELEMENT,
1499
                            actualInputFramesPerBuffer,
1500
                            sizeof(*actualInputFramesPerBuffer) ) );
1501
/* Don't know why this causes problems
1502
       ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1503
                            kAudioUnitProperty_MaximumFramesPerSlice,
1504
                            kAudioUnitScope_Global, //Output,
1505
                            INPUT_ELEMENT,
1506
                            actualInputFramesPerBuffer,
1507
                            &size ) );
1508
*/
1509
    }
1510

    
1511
    /* -- if we have input, we may need to setup an SR converter -- */
1512
    /* even if we got the sample rate we asked for, we need to do
1513
       the conversion in case another program changes the underlying SR. */
1514
    /* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1515
    if( inStreamParams ) {
1516
       AudioStreamBasicDescription desiredFormat;
1517
       AudioStreamBasicDescription sourceFormat;
1518
       UInt32 sourceSize = sizeof( sourceFormat );
1519
       bzero( &desiredFormat, sizeof(desiredFormat) );
1520
       desiredFormat.mSampleRate       = sampleRate;
1521
       desiredFormat.mFormatID         = kAudioFormatLinearPCM ;
1522
       desiredFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
1523
       desiredFormat.mFramesPerPacket  = 1;
1524
       desiredFormat.mBitsPerChannel   = sizeof( float ) * 8;
1525
       desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1526
       desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1527
       desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1528

    
1529
       /* get the source format */
1530
       ERR_WRAP( AudioUnitGetProperty(
1531
                         *audioUnit,
1532
                         kAudioUnitProperty_StreamFormat,
1533
                         kAudioUnitScope_Output,
1534
                         INPUT_ELEMENT,
1535
                         &sourceFormat,
1536
                         &sourceSize ) );
1537

    
1538
       if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1539
       {
1540
          UInt32 value = kAudioConverterQuality_Max;
1541
          switch( macInputStreamFlags & 0x0700 ) {
1542
          case 0x0100: /*paMacCore_ConversionQualityMin:*/
1543
             value=kAudioConverterQuality_Min;
1544
             break;
1545
          case 0x0200: /*paMacCore_ConversionQualityLow:*/
1546
             value=kAudioConverterQuality_Low;
1547
             break;
1548
          case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1549
             value=kAudioConverterQuality_Medium;
1550
             break;
1551
          case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1552
             value=kAudioConverterQuality_High;
1553
             break;
1554
          }
1555
          VDBUG(( "Creating sample rate converter for input"
1556
                  " to convert from %g to %g\n",
1557
                  (float)sourceFormat.mSampleRate,
1558
                  (float)desiredFormat.mSampleRate ) );
1559
          /* create our converter */
1560
          ERR_WRAP( AudioConverterNew( 
1561
                             &sourceFormat,
1562
                             &desiredFormat,
1563
                             srConverter ) );
1564
          /* Set quality */
1565
          ERR_WRAP( AudioConverterSetProperty(
1566
                             *srConverter,
1567
                             kAudioConverterSampleRateConverterQuality,
1568
                             sizeof( value ),
1569
                             &value ) );
1570
       }
1571
    }
1572
    /* -- set IOProc (callback) -- */
1573
    callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1574
                                  : kAudioOutputUnitProperty_SetInputCallback ;
1575
    rcbs.inputProc = AudioIOProc;
1576
    rcbs.inputProcRefCon = refCon;
1577
    ERR_WRAP( AudioUnitSetProperty(
1578
                               *audioUnit,
1579
                               callbackKey,
1580
                               kAudioUnitScope_Output,
1581
                               outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1582
                               &rcbs,
1583
                               sizeof(rcbs)) );
1584

    
1585
    if( inStreamParams && outStreamParams && *srConverter )
1586
           ERR_WRAP( AudioUnitSetProperty(
1587
                               *audioUnit,
1588
                               kAudioOutputUnitProperty_SetInputCallback,
1589
                               kAudioUnitScope_Output,
1590
                               INPUT_ELEMENT,
1591
                               &rcbs,
1592
                               sizeof(rcbs)) );
1593

    
1594
    /* channel mapping. */
1595
    if(inChannelMap)
1596
    {
1597
        UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1598

    
1599
        //for each channel of desired input, map the channel from
1600
        //the device's output channel.
1601
        ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1602
                                kAudioOutputUnitProperty_ChannelMap,
1603
                                kAudioUnitScope_Output,
1604
                                INPUT_ELEMENT,
1605
                                inChannelMap,
1606
                                mapSize));
1607
    }
1608
    if(outChannelMap)
1609
    {
1610
        UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1611

    
1612
        //for each channel of desired output, map the channel from
1613
        //the device's output channel.
1614
        ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1615
                                kAudioOutputUnitProperty_ChannelMap,
1616
                                kAudioUnitScope_Output,
1617
                                OUTPUT_ELEMENT,
1618
                                outChannelMap,
1619
                                mapSize));
1620
    }
1621
    /* initialize the audio unit */
1622
    ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1623

    
1624
    if( inStreamParams && outStreamParams )
1625
    {
1626
        VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1627
    }
1628
    else if( inStreamParams )
1629
    {
1630
        VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1631
    }
1632
    else if( outStreamParams )
1633
    {
1634
        VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1635
    }
1636
    return paNoError;
1637
#undef ERR_WRAP
1638

    
1639
    error:
1640
#ifndef AUDIO_COMPONENT_FIX
1641
       CloseComponent( *audioUnit );
1642
#else
1643
       AudioComponentInstanceDispose( *audioUnit );
1644
#endif
1645
       *audioUnit = NULL;
1646
       if( result )
1647
          return PaMacCore_SetError( result, line, 1 );
1648
       return paResult;
1649
}
1650

    
1651
/* =================================================================================================== */
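/*
 * Derive a host buffer size (in frames) from the suggested latencies:
 * subtract the fixed device latency from each suggested latency, take the
 * larger of the input and output results, round up to a multiple of the
 * user's requested frames per buffer when one was given, and finally clip
 * the value to the buffer size range the device supports.
 */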
static UInt32 CalculateOptimalBufferSize( PaMacAUHAL *auhalHostApi,
1654
                                  const PaStreamParameters *inputParameters,
1655
                                  const PaStreamParameters *outputParameters,
1656
                                  UInt32 fixedInputLatency,
1657
                                  UInt32 fixedOutputLatency,
1658
                                  double sampleRate,
1659
                                  UInt32 requestedFramesPerBuffer )
1660
{
1661
    UInt32 resultBufferSizeFrames = 0;  
1662
    // Use maximum of suggested input and output latencies.
1663
    if( inputParameters )
1664
    {
1665
        UInt32 suggestedLatencyFrames = inputParameters->suggestedLatency * sampleRate;
1666
        // Calculate a buffer size assuming we are double buffered.
1667
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedInputLatency;
1668
        // Prevent negative latency.
1669
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );       
1670
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1671
    }
1672
    if( outputParameters )
1673
    {        
1674
        UInt32 suggestedLatencyFrames = outputParameters->suggestedLatency * sampleRate;
1675
        SInt32 variableLatencyFrames = suggestedLatencyFrames - fixedOutputLatency;
1676
        variableLatencyFrames = MAX( variableLatencyFrames, 0 );
1677
        resultBufferSizeFrames = MAX( resultBufferSizeFrames, (UInt32) variableLatencyFrames );
1678
    }
1679
    
1680
    // can't have zero frames. code to round up to next user buffer requires non-zero
1681
    resultBufferSizeFrames = MAX( resultBufferSizeFrames, 1 );
1682
    
1683
    if( requestedFramesPerBuffer != paFramesPerBufferUnspecified )
1684
    {
1685
        // make host buffer the next highest integer multiple of user frames per buffer
1686
        UInt32 n = (resultBufferSizeFrames + requestedFramesPerBuffer - 1) / requestedFramesPerBuffer;
1687
        resultBufferSizeFrames = n * requestedFramesPerBuffer;
1688

    
1689
        
1690
        // FIXME: really we should be searching for a multiple of requestedFramesPerBuffer
1691
        // that is >= suggested latency and also fits within device buffer min/max
1692
        
1693
    }else{
1694
            VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1695
            resultBufferSizeFrames ) );
1696
    }
1697
    
1698
    // Clip to the capabilities of the device.
1699
    if( inputParameters )
1700
    {
1701
        ClipToDeviceBufferSize( auhalHostApi->devIds[inputParameters->device],
1702
                               true, // In the old code isInput was false!
1703
                               resultBufferSizeFrames, &resultBufferSizeFrames );
1704
    }
1705
    if( outputParameters )
1706
    {
1707
        ClipToDeviceBufferSize( auhalHostApi->devIds[outputParameters->device],
1708
                               false, resultBufferSizeFrames, &resultBufferSizeFrames );
1709
    }
1710
    VDBUG(("After querying hardware, setting block size to %ld.\n", resultBufferSizeFrames));
1711

    
1712
    return resultBufferSizeFrames;
1713
}
1714

    
1715
/* =================================================================================================== */
/* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
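/*
 * OpenStream outline: validate the stream parameters, allocate and zero the
 * stream structure, choose a host buffer size, open one AUHAL unit (full
 * duplex on a single device) or up to two units (separate devices or
 * simplex), allocate the input AudioBufferList and ring buffer where needed,
 * set up the blocking-I/O buffers and the PortAudio buffer processor,
 * compute the reported latencies, and install the device property listeners
 * used for timestamp and latency bookkeeping.
 */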
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1718
                           PaStream** s,
1719
                           const PaStreamParameters *inputParameters,
1720
                           const PaStreamParameters *outputParameters,
1721
                           double sampleRate,
1722
                           unsigned long requestedFramesPerBuffer,
1723
                           PaStreamFlags streamFlags,
1724
                           PaStreamCallback *streamCallback,
1725
                           void *userData )
1726
{
1727
    PaError result = paNoError;
1728
    PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1729
    PaMacCoreStream *stream = 0;
1730
    int inputChannelCount, outputChannelCount;
1731
    PaSampleFormat inputSampleFormat, outputSampleFormat;
1732
    PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1733
    UInt32 fixedInputLatency = 0;
1734
    UInt32 fixedOutputLatency = 0;
1735
    // Accumulate contributions to latency in these variables.
1736
    UInt32 inputLatencyFrames = 0;
1737
    UInt32 outputLatencyFrames = 0;
1738
    UInt32 suggestedLatencyFramesPerBuffer = requestedFramesPerBuffer;
1739
    
1740
    VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1741
                inputParameters  ? inputParameters->channelCount  : -1,
1742
                inputParameters  ? inputParameters->sampleFormat  : -1,
1743
                outputParameters ? outputParameters->channelCount : -1,
1744
                outputParameters ? outputParameters->sampleFormat : -1,
1745
                (float) sampleRate,
1746
                requestedFramesPerBuffer ));
1747
    VDBUG( ("Opening Stream.\n") );
1748
        
1749
    /* These first few bits of code are from paSkeleton with few modifications. */
1750
    if( inputParameters )
1751
    {
1752
        inputChannelCount = inputParameters->channelCount;
1753
        inputSampleFormat = inputParameters->sampleFormat;
1754

    
1755
                /* @todo Blocking read/write on Mac is not yet supported. */
1756
                if( !streamCallback && inputSampleFormat & paNonInterleaved )
1757
                {
1758
                        return paSampleFormatNotSupported;
1759
                }
1760
                
1761
        /* unless alternate device specification is supported, reject the use of
1762
            paUseHostApiSpecificDeviceSpecification */
1763

    
1764
        if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1765
            return paInvalidDevice;
1766

    
1767
        /* check that input device can support inputChannelCount */
1768
        if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1769
            return paInvalidChannelCount;
1770

    
1771
        /* Host supports interleaved float32 */
1772
        hostInputSampleFormat = paFloat32;
1773
    }
1774
    else
1775
    {
1776
        inputChannelCount = 0;
1777
        inputSampleFormat = hostInputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1778
    }
1779

    
1780
    if( outputParameters )
1781
    {
1782
        outputChannelCount = outputParameters->channelCount;
1783
        outputSampleFormat = outputParameters->sampleFormat;
1784
        
1785
                /* @todo Blocking read/write on Mac is not yet supported. */
1786
                if( !streamCallback && outputSampleFormat & paNonInterleaved )
1787
                {
1788
                        return paSampleFormatNotSupported;
1789
                }
1790
                
1791
        /* unless alternate device specification is supported, reject the use of
1792
            paUseHostApiSpecificDeviceSpecification */
1793

    
1794
        if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1795
            return paInvalidDevice;
1796

    
1797
        /* check that output device can support outputChannelCount */
1798
        if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1799
            return paInvalidChannelCount;
1800

    
1801
        /* Host supports interleaved float32 */
1802
        hostOutputSampleFormat = paFloat32;
1803
    }
1804
    else
1805
    {
1806
        outputChannelCount = 0;
1807
        outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1808
    }
1809

    
1810
    /* validate platform specific flags */
1811
    if( (streamFlags & paPlatformSpecificFlags) != 0 )
1812
        return paInvalidFlag; /* unexpected platform specific flag */
1813

    
1814
    stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1815
    if( !stream )
1816
    {
1817
        result = paInsufficientMemory;
1818
        goto error;
1819
    }
1820

    
1821
    /* If we fail after this point, we may be left in a bad state, with
       some data structures set up and others not. So the first thing we
       do is initialize everything, so that if we fail we know what hasn't
       been touched.
     */
1826
    bzero( stream, sizeof( PaMacCoreStream ) );
1827
    
1828
    /*
1829
    stream->blio.inputRingBuffer.buffer = NULL;
1830
    stream->blio.outputRingBuffer.buffer = NULL;
1831
    stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1832
    stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1833
    stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1834
    stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1835
    */
1836

    
1837
    /* assert( streamCallback ) ; */ /* only callback mode is implemented */
1838
    if( streamCallback )
1839
    {
1840
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1841
                                        &auhalHostApi->callbackStreamInterface,
1842
                                        streamCallback, userData );
1843
    }
1844
    else
1845
    {
1846
        PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1847
                                        &auhalHostApi->blockingStreamInterface,
1848
                                        BlioCallback, &stream->blio );
1849
    }
1850

    
1851
    PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1852

    
1853
    
1854
    if( inputParameters )
1855
    {
1856
        CalculateFixedDeviceLatency( auhalHostApi->devIds[inputParameters->device], true, &fixedInputLatency );
1857
        inputLatencyFrames += fixedInputLatency;
1858
    }
1859
    if( outputParameters )
1860
    {        
1861
        CalculateFixedDeviceLatency( auhalHostApi->devIds[outputParameters->device], false, &fixedOutputLatency );
1862
        outputLatencyFrames += fixedOutputLatency;
1863

    
1864
    }
1865
    
1866
    suggestedLatencyFramesPerBuffer = CalculateOptimalBufferSize( auhalHostApi, inputParameters, outputParameters,
1867
                                                                 fixedInputLatency, fixedOutputLatency,
1868
                                                                 sampleRate, requestedFramesPerBuffer );
1869
    if( requestedFramesPerBuffer == paFramesPerBufferUnspecified )
1870
        {
1871
        requestedFramesPerBuffer = suggestedLatencyFramesPerBuffer;
1872
    }
1873

    
1874
    /* -- Now we actually open and setup streams. -- */
1875
    if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1876
    { /* full duplex. One device. */
1877
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1878
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1879
       result = OpenAndSetupOneAudioUnit( stream,
1880
                                          inputParameters,
1881
                                          outputParameters,
1882
                                          suggestedLatencyFramesPerBuffer,
1883
                                          &inputFramesPerBuffer,
1884
                                          &outputFramesPerBuffer,
1885
                                          auhalHostApi,
1886
                                          &(stream->inputUnit),
1887
                                          &(stream->inputSRConverter),
1888
                                          &(stream->inputDevice),
1889
                                          sampleRate,
1890
                                          stream );
1891
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1892
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1893
       stream->outputUnit = stream->inputUnit;
1894
       stream->outputDevice = stream->inputDevice;
1895
       if( result != paNoError )
1896
           goto error;
1897
    }
1898
    else
1899
    { /* full duplex, different devices OR simplex */
1900
       UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1901
       UInt32 inputFramesPerBuffer  = (UInt32) stream->inputFramesPerBuffer;
1902
       result = OpenAndSetupOneAudioUnit( stream,
1903
                                          NULL,
1904
                                          outputParameters,
1905
                                          suggestedLatencyFramesPerBuffer,
1906
                                          NULL,
1907
                                          &outputFramesPerBuffer,
1908
                                          auhalHostApi,
1909
                                          &(stream->outputUnit),
1910
                                          NULL,
1911
                                          &(stream->outputDevice),
1912
                                          sampleRate,
1913
                                          stream );
1914
       if( result != paNoError )
1915
           goto error;
1916
       result = OpenAndSetupOneAudioUnit( stream,
1917
                                          inputParameters,
1918
                                          NULL,
1919
                                          suggestedLatencyFramesPerBuffer,
1920
                                          &inputFramesPerBuffer,
1921
                                          NULL,
1922
                                          auhalHostApi,
1923
                                          &(stream->inputUnit),
1924
                                          &(stream->inputSRConverter),
1925
                                          &(stream->inputDevice),
1926
                                          sampleRate,
1927
                                          stream );
1928
       if( result != paNoError )
1929
           goto error;
1930
       stream->inputFramesPerBuffer = inputFramesPerBuffer;
1931
       stream->outputFramesPerBuffer = outputFramesPerBuffer;
1932
    }
1933
    
1934
    inputLatencyFrames += stream->inputFramesPerBuffer;
1935
    outputLatencyFrames += stream->outputFramesPerBuffer;
1936
    
1937
    if( stream->inputUnit ) {
1938
       const size_t szfl = sizeof(float);
1939
       /* setup the AudioBufferList used for input */
1940
       bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1941
       stream->inputAudioBufferList.mNumberBuffers = 1;
1942
       stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1943
                 = inputChannelCount;
1944
       stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1945
                 = stream->inputFramesPerBuffer*inputChannelCount*szfl;
1946
       stream->inputAudioBufferList.mBuffers[0].mData
1947
                 = (float *) calloc(
1948
                               stream->inputFramesPerBuffer*inputChannelCount,
1949
                               szfl );
1950
       if( !stream->inputAudioBufferList.mBuffers[0].mData )
1951
       {
1952
          result = paInsufficientMemory;
1953
          goto error;
1954
       }
1955
        
1956
       /*
1957
        * If input and output devs are different or we are doing SR conversion,
1958
        * we also need a ring buffer to store input data while waiting for
1959
        * output data.
1960
        */
1961
       if( (stream->outputUnit && (stream->inputUnit != stream->outputUnit))
1962
           || stream->inputSRConverter )
1963
       {
1964
          /* May want the ringSize or initial position in
1965
             ring buffer to depend somewhat on sample rate change */
1966

    
1967
          void *data;
1968
          long ringSize;
1969

    
1970
          ringSize = computeRingBufferSize( inputParameters,
1971
                                            outputParameters,
1972
                                            stream->inputFramesPerBuffer,
1973
                                            stream->outputFramesPerBuffer,
1974
                                            sampleRate );
1975
          /*ringSize <<= 4; *//*16x bigger, for testing */
1976

    
1977

    
1978
          /*now, we need to allocate memory for the ring buffer*/
1979
          data = calloc( ringSize, szfl*inputParameters->channelCount );
1980
          if( !data )
1981
          {
1982
             result = paInsufficientMemory;
1983
             goto error;
1984
          }
1985

    
1986
          /* now we can initialize the ring buffer */
1987
          result = PaUtil_InitializeRingBuffer( &stream->inputRingBuffer, szfl*inputParameters->channelCount, ringSize, data );
1988
          if( result != 0 )
1989
          {
1990
              /* The only reason this should fail is if ringSize is not a power of 2, which we do not anticipate happening. */
1991
              result = paUnanticipatedHostError;
1992
              free(data);
1993
              goto error;
1994
          }
1995

    
1996
          /* advance the write index a little, so that reads effectively start
             from the middle of the buffer */
1998
          if( stream->outputUnit )
1999
             PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize / RING_BUFFER_ADVANCE_DENOMINATOR );
2000
           
2001
           // Just adds to input latency between input device and PA full duplex callback.
2002
           inputLatencyFrames += ringSize;
2003
       }
2004
    }
2005

    
2006
    /* -- initialize Blio Buffer Processors -- */
2007
    if( !streamCallback )
2008
    {
2009
       long ringSize;
2010

    
2011
       ringSize = computeRingBufferSize( inputParameters,
2012
                                         outputParameters,
2013
                                         stream->inputFramesPerBuffer,
2014
                                         stream->outputFramesPerBuffer,
2015
                                         sampleRate );
2016
       result = initializeBlioRingBuffers( &stream->blio,
2017
              inputParameters ? inputParameters->sampleFormat : 0,
2018
              outputParameters ? outputParameters->sampleFormat : 0,
2019
              ringSize,
2020
              inputParameters ? inputChannelCount : 0,
2021
              outputParameters ? outputChannelCount : 0 ) ;
2022
       if( result != paNoError )
2023
          goto error;
2024
        
2025
        inputLatencyFrames += ringSize;
2026
        outputLatencyFrames += ringSize;
2027
        
2028
    }
2029

    
2030
    /* -- initialize Buffer Processor -- */
2031
    {
2032
       unsigned long maxHostFrames = stream->inputFramesPerBuffer;
2033
       if( stream->outputFramesPerBuffer > maxHostFrames )
2034
          maxHostFrames = stream->outputFramesPerBuffer;
2035
       result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
2036
                 inputChannelCount, inputSampleFormat,
2037
                 hostInputSampleFormat,
2038
                 outputChannelCount, outputSampleFormat,
2039
                 hostOutputSampleFormat,
2040
                 sampleRate,
2041
                 streamFlags,
2042
                 requestedFramesPerBuffer,
2043
                 /* If sample rate conversion takes place, the buffer size
2044
                    will not be known. */
2045
                 maxHostFrames,
2046
                 stream->inputSRConverter
2047
                              ? paUtilUnknownHostBufferSize
2048
                              : paUtilBoundedHostBufferSize,
2049
                 streamCallback ? streamCallback : BlioCallback,
2050
                 streamCallback ? userData : &stream->blio );
2051
       if( result != paNoError )
2052
           goto error;
2053
    }
2054
    stream->bufferProcessorIsInitialized = TRUE;
2055

    
2056
    // Calculate actual latency from the sum of individual latencies.
2057
    if( inputParameters ) 
2058
    {
2059
        inputLatencyFrames += PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor);
2060
        stream->streamRepresentation.streamInfo.inputLatency = inputLatencyFrames / sampleRate;
2061
    }
2062
    else
2063
    {
2064
        stream->streamRepresentation.streamInfo.inputLatency = 0.0;
2065
    }
2066
    
2067
    if( outputParameters ) 
2068
    {
2069
        outputLatencyFrames += PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor);
2070
        stream->streamRepresentation.streamInfo.outputLatency = outputLatencyFrames / sampleRate;
2071
    }
2072
    else
2073
    {
2074
        stream->streamRepresentation.streamInfo.outputLatency = 0.0;
2075
    }
2076
    
2077
    stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
2078

    
2079
    stream->sampleRate = sampleRate;
2080
    
2081
    stream->userInChan  = inputChannelCount;
2082
    stream->userOutChan = outputChannelCount;
2083

    
2084
    // Setup property listeners for timestamp and latency calculations.
2085
        pthread_mutex_init( &stream->timingInformationMutex, NULL );
2086
        stream->timingInformationMutexIsInitialized = 1;
2087
    InitializeDeviceProperties( &stream->inputProperties );     // zeros the struct. doesn't actually init it to useful values
2088
    InitializeDeviceProperties( &stream->outputProperties );    // zeros the struct. doesn't actually init it to useful values
2089
        if( stream->outputUnit )
2090
    {
2091
        Boolean isInput = FALSE;
2092
        
2093
        // Start with the current values for the device properties.
2094
        // Init with nominal sample rate. Use actual sample rate where available
2095
        
2096
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2097
                stream, stream->outputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2098
        if( result )
2099
            goto error; /* fail if we can't even get a nominal device sample rate */
2100
        
2101
        UpdateSampleRateFromDeviceProperty( stream, stream->outputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2102
        
2103
        SetupDevicePropertyListeners( stream, stream->outputDevice, isInput );
2104
    }
2105
        if( stream->inputUnit )
2106
    {
2107
        Boolean isInput = TRUE;
2108
       
2109
        // as above
2110
        result = ERR( UpdateSampleRateFromDeviceProperty( 
2111
                stream, stream->inputDevice, isInput, kAudioDevicePropertyNominalSampleRate )  );
2112
        if( result )
2113
            goto error;
2114
        
2115
        UpdateSampleRateFromDeviceProperty( stream, stream->inputDevice, isInput, kAudioDevicePropertyActualSampleRate );
2116
        
2117
        SetupDevicePropertyListeners( stream, stream->inputDevice, isInput );
2118
        }
2119
    UpdateTimeStampOffsets( stream );
2120
    // Setup timestamp copies to be used by audio callback.
2121
    stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
2122
    stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
2123
    stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
2124

    
2125
    stream->state = STOPPED;
2126
    stream->xrunFlags = 0;
2127

    
2128
    *s = (PaStream*)stream;
2129

    
2130
    return result;
2131

    
2132
error:
2133
    CloseStream( stream );
2134
    return result;
2135
}
2136

    
2137

    
2138
#define HOST_TIME_TO_PA_TIME( x ) ( AudioConvertHostTimeToNanos( (x) ) * 1.0E-09) /* convert to nanoseconds and then to seconds */
2139

    
2140
PaTime GetStreamTime( PaStream *s )
2141
{
2142
        return HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() ); 
2143
}
2144

    
#define RING_BUFFER_EMPTY (1000)
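/* Private status returned by ringBufferIOProc when the input ring buffer has
   no data; the caller treats it as an input underflow rather than an error. */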
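/*
 * AudioConverter input callback: hands the converter the largest contiguous
 * region currently readable from the input ring buffer (sizes converted
 * between bytes and ring buffer elements), advances the read index, and
 * returns RING_BUFFER_EMPTY when nothing is available.
 */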
static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter, 
2148
                             UInt32*ioDataSize, 
2149
                             void** outData, 
2150
                             void*inUserData )
2151
{
2152
   void *dummyData;
2153
   ring_buffer_size_t dummySize;
2154
   PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
2155

    
2156
   VVDBUG(("ringBufferIOProc()\n"));
2157

    
2158
   if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
2159
      *outData = NULL;
2160
      *ioDataSize = 0;
2161
      return RING_BUFFER_EMPTY;
2162
   }
2163
   assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
2164
   assert( ( (*ioDataSize) / rb->elementSizeBytes ) * rb->elementSizeBytes == (*ioDataSize) ) ;
2165
   (*ioDataSize) /= rb->elementSizeBytes ;
2166
   PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
2167
                                    outData, (ring_buffer_size_t *)ioDataSize, 
2168
                                    &dummyData, &dummySize );
2169
   assert( *ioDataSize );
2170
   PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
2171
   (*ioDataSize) *= rb->elementSizeBytes ;
2172

    
2173
   return noErr;
2174
}
2175

    
2176
/*
2177
 * Called by the AudioUnit API to process audio from the sound card.
2178
 * This is where the magic happens.
2179
 */
2180
/* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestions for cleaning it up, I'm all ears. */
2181
static OSStatus AudioIOProc( void *inRefCon,
2182
                               AudioUnitRenderActionFlags *ioActionFlags,
2183
                               const AudioTimeStamp *inTimeStamp,
2184
                               UInt32 inBusNumber,
2185
                               UInt32 inNumberFrames,
2186
                               AudioBufferList *ioData )
2187
{
2188
   unsigned long framesProcessed     = 0;
2189
   PaStreamCallbackTimeInfo timeInfo = {0,0,0};
2190
   PaMacCoreStream *stream           = (PaMacCoreStream*)inRefCon;
2191
   const bool isRender               = inBusNumber == OUTPUT_ELEMENT;
2192
   int callbackResult                = paContinue ;
2193
   double hostTimeStampInPaTime      = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime);
2194
    
2195
   VVDBUG(("AudioIOProc()\n"));
2196

    
2197
   PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
2198
    
2199
   /* -----------------------------------------------------------------*\
2200
      This output may be useful for debugging,
      but printing during the callback is a bad enough idea that
      it is not enabled by enabling the usual debugging calls.
2203
   \* -----------------------------------------------------------------*/
2204
   /*
2205
   static int renderCount = 0;
2206
   static int inputCount = 0;
2207
   printf( "-------------------  starting reder/input\n" );
2208
   if( isRender )
2209
      printf("Render callback (%d):\t", ++renderCount);
2210
   else
2211
      printf("Input callback  (%d):\t", ++inputCount);
2212
   printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );
2213

2214
   printf( "--- inBusNumber: %lu\n", inBusNumber );
2215
   printf( "--- inNumberFrames: %lu\n", inNumberFrames );
2216
   printf( "--- %x ioData\n", (unsigned) ioData );
2217
   if( ioData )
2218
   {
2219
      int i=0;
2220
      printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
2221
      for( i=0; i<ioData->mNumberBuffers; ++i )
2222
         printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
2223
   }
2224
      ----------------------------------------------------------------- */
2225

    
2226
        /* compute PaStreamCallbackTimeInfo */
2227
        
2228
        if( pthread_mutex_trylock( &stream->timingInformationMutex ) == 0 ){
2229
                /* snapshot the ioproc copy of timing information */
2230
                stream->timestampOffsetCombined_ioProcCopy = stream->timestampOffsetCombined;
2231
                stream->timestampOffsetInputDevice_ioProcCopy = stream->timestampOffsetInputDevice;
2232
                stream->timestampOffsetOutputDevice_ioProcCopy = stream->timestampOffsetOutputDevice;
2233
                pthread_mutex_unlock( &stream->timingInformationMutex );
2234
        }
2235
        
2236
        /* For timeInfo.currentTime we could calculate current time backwards from the HAL audio 
2237
         output time to give a more accurate impression of the current timeslice but it doesn't 
2238
         seem worth it at the moment since other PA host APIs don't do any better.
2239
         */
2240
        timeInfo.currentTime = HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() );
2241
        
2242
        /*
2243
         For an input HAL AU, inTimeStamp is the time the samples are received from the hardware,
2244
         for an output HAL AU inTimeStamp is the time the samples are sent to the hardware. 
2245
         PA expresses timestamps in terms of when the samples enter the ADC or leave the DAC
2246
         so we add or subtract kAudioDevicePropertyLatency below.
2247
         */
2248
        
2249
        /* FIXME: not sure what to do below if the host timestamps aren't valid (kAudioTimeStampHostTimeValid isn't set)
2250
         Could ask on CA mailing list if it is possible for it not to be set. If so, could probably grab a now timestamp
2251
         at the top and compute from there (modulo scheduling jitter) or ask on mailing list for other options. */
2252
        
2253
        if( isRender )
2254
        {
2255
                if( stream->inputUnit ) /* full duplex */
2256
                {
2257
                        if( stream->inputUnit == stream->outputUnit ) /* full duplex AUHAL IOProc */
2258
                        {
2259
                // Ross and Phil agreed that the following calculation is correct based on an email from Jeff Moore:
2260
                // http://osdir.com/ml/coreaudio-api/2009-07/msg00140.html
2261
                // Basically the difference between the Apple output timestamp and the PA timestamp is kAudioDevicePropertyLatency.
2262
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
2263
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
2264
                                 timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2265
                        }
2266
                        else /* full duplex with ring-buffer from a separate input AUHAL ioproc */
2267
                        {
2268
                                /* FIXME: take the ring buffer latency into account */
2269
                                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - 
2270
                    (stream->timestampOffsetCombined_ioProcCopy + stream->timestampOffsetInputDevice_ioProcCopy);
2271
                                timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2272
                        }
2273
                }
2274
                else /* output only */
2275
                {
2276
                        timeInfo.inputBufferAdcTime = 0;
2277
                        timeInfo.outputBufferDacTime = hostTimeStampInPaTime + stream->timestampOffsetOutputDevice_ioProcCopy;
2278
                }
2279
        }
2280
        else /* input only */
2281
        {
2282
                timeInfo.inputBufferAdcTime = hostTimeStampInPaTime - stream->timestampOffsetInputDevice_ioProcCopy; 
2283
                timeInfo.outputBufferDacTime = 0;
2284
        }
2285
        
2286
   //printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );
2287

    
2288
   if( isRender && stream->inputUnit == stream->outputUnit
2289
                && !stream->inputSRConverter )
2290
   {
2291
      /* --------- Full Duplex, One Device, no SR Conversion -------
2292
       *
2293
       * This is the lowest latency case, and also the simplest.
2294
       * Input data and output data are available at the same time.
2295
       * we do not use the input SR converter or the input ring buffer.
2296
       *
2297
       */
2298
      OSStatus err = 0;
2299
       unsigned long frames;
2300
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
2301

    
2302
      /* -- start processing -- */
2303
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2304
                                    &timeInfo,
2305
                                    stream->xrunFlags );
2306
      stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback, which calls the xrunCallback function. It should be in the same thread as the main audio callback, but the Apple docs just use the word "usually" so it may be possible to lose an xrun notification, if that callback happens here.
2307

    
2308
      /* -- compute frames. do some checks -- */
2309
      assert( ioData->mNumberBuffers == 1 );
2310
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
2311

    
2312
      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
2313
      /* -- copy and process input data -- */
      err= AudioUnitRender(stream->inputUnit,
                    ioActionFlags,
                    inTimeStamp,
                    INPUT_ELEMENT,
                    inNumberFrames,
                    &stream->inputAudioBufferList );
      if(err != noErr)
      {
        goto stop_stream;
      }

      PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                          0,
                          stream->inputAudioBufferList.mBuffers[0].mData,
                          stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
      /* -- Copy and process output data -- */
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                        0,
                                        ioData->mBuffers[0].mData,
                                        ioData->mBuffers[0].mNumberChannels);
      /* -- complete processing -- */
      framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
   }
   else if( isRender )
   {
      /* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
       *       -- OR Simplex Output
       *
       * This case handles output data as in the full duplex case,
       * and, if there is input data, reads it off the ring buffer
       * and into the PA buffer processor. If sample rate conversion
       * is required on input, that is done here as well.
       */
       unsigned long frames;
       long bytesPerFrame = sizeof( float ) * ioData->mBuffers[0].mNumberChannels;

      /* Sometimes, when stopping a duplex stream we get erroneous
         xrun flags, so if this is our last run, clear the flags. */
      int xrunFlags = stream->xrunFlags;
/*
      if( xrunFlags & paInputUnderflow )
         printf( "input underflow.\n" );
      if( xrunFlags & paInputOverflow )
         printf( "input overflow.\n" );
*/
      if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
         xrunFlags = 0;

      /* -- start processing -- */
      PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                    &timeInfo,
                                    xrunFlags );
      stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */

      /* -- Copy and process output data -- */
      assert( ioData->mNumberBuffers == 1 );
      frames = ioData->mBuffers[0].mDataByteSize / bytesPerFrame;
      assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
      PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
      PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
                                     0,
                                     ioData->mBuffers[0].mData,
                                     ioData->mBuffers[0].mNumberChannels);

      /* -- copy and process input data, and complete processing -- */
      if( stream->inputUnit ) {
         const int flsz = sizeof( float );
         /* Here, we read the data out of the ring buffer, through the
            audio converter. */
         int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
         long bytesPerFrame = flsz * inChan;
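
         /* Two ways to drain the buffered input: with a sample rate converter,
            AudioConverterFillBuffer() pulls data out of the ring buffer through
            ringBufferIOProc; without one, the ring buffer's read regions are
            handed to the buffer processor directly. */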
         if( stream->inputSRConverter )
         {
               OSStatus err;
               UInt32 size;
               float data[ inChan * frames ];
               size = sizeof( data );
               err = AudioConverterFillBuffer(
                             stream->inputSRConverter,
                             ringBufferIOProc,
                             &stream->inputRingBuffer,
                             &size,
                             (void *)&data );
               if( err == RING_BUFFER_EMPTY )
               { /* the ring buffer callback underflowed */
                  err = 0;
                  bzero( ((char *)data) + size, sizeof(data)-size );
                  /* The ring buffer can underflow normally when the stream is stopping.
                   * So only report an error if the stream is active. */
                  if( stream->state == ACTIVE )
                  {
                      stream->xrunFlags |= paInputUnderflow;
                  }
               }
               ERR( err );
               if(err != noErr)
               {
                 goto stop_stream;
               }

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
         }
         else
         {
            /* Without the AudioConverter this is actually a bit more complex,
               because we have to do a little buffer processing that the
               AudioConverter would otherwise handle for us. */
            void *data1, *data2;
            ring_buffer_size_t size1, size2;
            ring_buffer_size_t framesReadable = PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
                                             frames,
                                             &data1, &size1,
                                             &data2, &size2 );
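            /* Three cases follow: all of the requested frames are contiguous in
               the first region; fewer frames than requested are available
               (input underflow); or all the data is there but split across the
               ring buffer's wrap point. */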
            if( size1 == frames ) {
               /* simplest case: all in first buffer */
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
            } else if( framesReadable < frames ) {

                long sizeBytes1 = size1 * bytesPerFrame;
                long sizeBytes2 = size2 * bytesPerFrame;
               /* we underflowed. take what data we can, zero the rest. */
               unsigned char data[ frames * bytesPerFrame ];
               if( size1 > 0 )
               {
                   memcpy( data, data1, sizeBytes1 );
               }
               if( size2 > 0 )
               {
                   memcpy( data+sizeBytes1, data2, sizeBytes2 );
               }
               bzero( data+sizeBytes1+sizeBytes2, (frames*bytesPerFrame) - sizeBytes1 - sizeBytes2 );

               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
                                                  framesReadable );
               /* flag underflow */
               stream->xrunFlags |= paInputUnderflow;
            } else {
               /* we got all the data, but split between buffers */
               PaUtil_SetInputFrameCount( &(stream->bufferProcessor), size1 );
               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data1,
                                   inChan );
               PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor), size2 );
               PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
                                   0,
                                   data2,
                                   inChan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
               PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, framesReadable );
            }
         }
      } else {
         framesProcessed =
                 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                             &callbackResult );
      }

   }
   else
   {
      /* ------------------ Input
       *
       * First, we read off the audio data and put it in the ring buffer.
       * If this is an input-only stream, we need to process it more;
       * otherwise, we let the output case deal with it.
       */
      OSStatus err = 0;
      int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
      /* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
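      /* -10874 is kAudioUnitErr_TooManyFramesToProcess: if the device delivers
         more frames than AUHAL will accept in a single call, halve the request
         and try again. */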
      do {
         err= AudioUnitRender(stream->inputUnit,
                 ioActionFlags,
                 inTimeStamp,
                 INPUT_ELEMENT,
                 inNumberFrames,
                 &stream->inputAudioBufferList );
         if( err == -10874 )
            inNumberFrames /= 2;
      } while( err == -10874 && inNumberFrames > 1 );
      ERR( err );
      if(err != noErr)
      {
          goto stop_stream;
      }

      if( stream->inputSRConverter || stream->outputUnit )
      {
         /* If this is duplex or we use a converter, put the data
            into the ring buffer. */
          ring_buffer_size_t framesWritten = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
                                            stream->inputAudioBufferList.mBuffers[0].mData,
                                            inNumberFrames );
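         /* A short write means the ring buffer was full, i.e. the consumer side
            has fallen behind; record it as an input overflow. */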
         if( framesWritten != inNumberFrames )
         {
             stream->xrunFlags |= paInputOverflow ;
         }
      }
      else
      {
         /* for simplex input w/o SR conversion,
            just pop the data into the buffer processor.*/
         PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                              &timeInfo,
                              stream->xrunFlags );
         stream->xrunFlags = 0;

         PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
         PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                             0,
                             stream->inputAudioBufferList.mBuffers[0].mData,
                             chan );
         framesProcessed =
              PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                          &callbackResult );
      }
      if( !stream->outputUnit && stream->inputSRConverter )
      {
         /* ------------------ Simplex Input w/ SR Conversion
          *
          * If this is a simplex input stream, we need to read off the buffer,
          * do our sample rate conversion and pass the results to the buffer
          * processor.
          * The logic here is complicated somewhat by the fact that we don't
          * know how much data is available, so we loop on reasonably sized
          * chunks, and let the BufferProcessor deal with the rest.
          *
          */
         /* This might be too big or small depending on SR conversion. */
         float data[ chan * inNumberFrames ];
         OSStatus err;
         do
         { /* Run the buffer processor until we are out of data. */
            UInt32 size;
            long f;

            size = sizeof( data );
            err = AudioConverterFillBuffer(
                          stream->inputSRConverter,
                          ringBufferIOProc,
                          &stream->inputRingBuffer,
                          &size,
                          (void *)data );
            if( err != RING_BUFFER_EMPTY )
               ERR( err );
            if( err != noErr && err != RING_BUFFER_EMPTY )
            {
                goto stop_stream;
            }

            f = size / ( chan * sizeof(float) );
            PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
            if( f )
            {
               PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
                                             &timeInfo,
                                             stream->xrunFlags );
               stream->xrunFlags = 0;

               PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
                                0,
                                data,
                                chan );
               framesProcessed =
                    PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
                                                &callbackResult );
            }
         } while( callbackResult == paContinue && !err );
      }
   }

    // Should we return successfully or fall through to stopping the stream?
    if( callbackResult == paContinue )
    {
        PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
        return noErr;
    }

stop_stream:
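    /* The callback itself is shutting the stream down, either because the user
       callback returned something other than paContinue or because a host
       error sent us here; note that fact and stop the audio units. */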
    stream->state = CALLBACK_STOPPED ;
    if( stream->outputUnit )
        AudioOutputUnitStop(stream->outputUnit);
    if( stream->inputUnit )
        AudioOutputUnitStop(stream->inputUnit);

    PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
    return noErr;
}

/*
    When CloseStream() is called, the multi-api layer ensures that
    the stream has already been stopped or aborted.
*/
static PaError CloseStream( PaStream* s )
{
    /* This may be called from a failed OpenStream.
       Therefore, each piece of info is treated separately. */
    PaError result = paNoError;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;

    VVDBUG(("CloseStream()\n"));
    VDBUG( ( "Closing stream.\n" ) );

    if( stream ) {

        if( stream->outputUnit )
        {
            Boolean isInput = FALSE;
            CleanupDevicePropertyListeners( stream, stream->outputDevice, isInput );
        }

        if( stream->inputUnit )
        {
            Boolean isInput = TRUE;
            CleanupDevicePropertyListeners( stream, stream->inputDevice, isInput );
        }

       if( stream->outputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->outputDevice,
                                                0,
                                                false,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
       if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
          int count = removeFromXRunListenerList( stream );
          if( count == 0 )
             AudioDeviceRemovePropertyListener( stream->inputDevice,
                                                0,
                                                true,
                                                kAudioDeviceProcessorOverload,
                                                xrunCallback );
       }
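       /* Dispose of the AUHAL instances. When AUDIO_COMPONENT_FIX is defined
          the newer AudioComponent API (AudioComponentInstanceDispose) is used
          in place of the deprecated Component Manager call (CloseComponent). */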
       if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
          AudioUnitUninitialize( stream->outputUnit );
#ifndef AUDIO_COMPONENT_FIX
          CloseComponent( stream->outputUnit );
#else
          AudioComponentInstanceDispose( stream->outputUnit );
#endif
       }
       stream->outputUnit = NULL;
       if( stream->inputUnit )
       {
          AudioUnitUninitialize( stream->inputUnit );
#ifndef AUDIO_COMPONENT_FIX
          CloseComponent( stream->inputUnit );
#else
          AudioComponentInstanceDispose( stream->inputUnit );
#endif
          stream->inputUnit = NULL;
       }
       if( stream->inputRingBuffer.buffer )
          free( (void *) stream->inputRingBuffer.buffer );
       stream->inputRingBuffer.buffer = NULL;
       /*TODO: is there more that needs to be done on error
               from AudioConverterDispose?*/
       if( stream->inputSRConverter )
          ERR( AudioConverterDispose( stream->inputSRConverter ) );
       stream->inputSRConverter = NULL;
       if( stream->inputAudioBufferList.mBuffers[0].mData )
          free( stream->inputAudioBufferList.mBuffers[0].mData );
       stream->inputAudioBufferList.mBuffers[0].mData = NULL;

       result = destroyBlioRingBuffers( &stream->blio );
       if( result )
          return result;
       if( stream->bufferProcessorIsInitialized )
          PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );

       if( stream->timingInformationMutexIsInitialized )
          pthread_mutex_destroy( &stream->timingInformationMutex );

       PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
       PaUtil_FreeMemory( stream );
    }

    return result;
}

static PaError StartStream( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    OSStatus result = noErr;
    VVDBUG(("StartStream()\n"));
    VDBUG( ( "Starting stream.\n" ) );

#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)

    /*FIXME: maybe want to do this on close/abort for faster start? */
    PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
    if(  stream->inputSRConverter )
       ERR_WRAP( AudioConverterReset( stream->inputSRConverter ) );

    /* -- start -- */
    stream->state = ACTIVE;
    if( stream->inputUnit ) {
       ERR_WRAP( AudioOutputUnitStart(stream->inputUnit) );
    }
    if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
       ERR_WRAP( AudioOutputUnitStart(stream->outputUnit) );
    }

    return paNoError;
#undef ERR_WRAP
}

// It's not clear from Apple's docs that this really waits
// until all data is flushed.
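// It polls kAudioOutputUnitProperty_IsRunning roughly every 100 ms and returns
// once the unit reports that it is no longer running.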
2756
static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
2757
{
2758
    Boolean isRunning = 1;
2759
    while( isRunning ) {
2760
       UInt32 s = sizeof( isRunning );
2761
       ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element,  &isRunning, &s );
2762
       if( err )
2763
          return err;
2764
       Pa_Sleep( 100 );
2765
    }
2766
    return noErr;
2767
}
2768

    
2769
static PaError FinishStoppingStream( PaMacCoreStream *stream )
2770
{
2771
    OSStatus result = noErr;
2772
    PaError paErr;
2773

    
2774
#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2775
    /* -- stop and reset -- */
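    /* When input and output share a single AUHAL instance, stop it once and
       wait for both elements (1 = input, 0 = output) to report not running;
       otherwise each unit is stopped, waited on, and reset separately. */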
    if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
    {
       ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
       ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
       ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
    }
    else
    {
       if( stream->inputUnit )
       {
          ERR_WRAP(AudioOutputUnitStop(stream->inputUnit) );
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
          ERR_WRAP(AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1));
       }
       if( stream->outputUnit )
       {
          ERR_WRAP(AudioOutputUnitStop(stream->outputUnit));
          ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
          ERR_WRAP(AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0));
       }
    }
    if( stream->inputRingBuffer.buffer ) {
       PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
       bzero( (void *)stream->inputRingBuffer.buffer,
              stream->inputRingBuffer.bufferSize );
       /* advance the write point a little, so we are reading from the
          middle of the buffer. We'll need extra at the end because
          testing has shown that this helps. */
       if( stream->outputUnit )
          PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
                                              stream->inputRingBuffer.bufferSize
                                              / RING_BUFFER_ADVANCE_DENOMINATOR );
    }

    stream->xrunFlags = 0;
    stream->state = STOPPED;

    paErr = resetBlioRingBuffers( &stream->blio );
    if( paErr )
       return paErr;

    VDBUG( ( "Stream Stopped.\n" ) );
    return paNoError;
#undef ERR_WRAP
}

/* Block until buffer is empty then stop the stream. */
static PaError StopStream( PaStream *s )
{
    PaError paErr;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("StopStream()\n"));

    /* Tell WriteStream to stop filling the buffer. */
    stream->state = STOPPING;

    if( stream->userOutChan > 0 ) /* Does this stream do output? */
    {
        size_t maxHostFrames = MAX( stream->inputFramesPerBuffer, stream->outputFramesPerBuffer );
        VDBUG( ("Waiting for write buffer to be drained.\n") );
        paErr = waitUntilBlioWriteBufferIsEmpty( &stream->blio, stream->sampleRate,
                                                maxHostFrames );
        VDBUG( ( "waitUntilBlioWriteBufferIsEmpty returned %d\n", paErr ) );
    }
    return FinishStoppingStream( stream );
}

/* Immediately stop the stream. */
static PaError AbortStream( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VDBUG( ( "AbortStream()\n" ) );
    stream->state = STOPPING;
    return FinishStoppingStream( stream );
}


static PaError IsStreamStopped( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamStopped()\n"));

    return stream->state == STOPPED ? 1 : 0;
}


static PaError IsStreamActive( PaStream *s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("IsStreamActive()\n"));
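    /* A stream in the STOPPING state still counts as active: StopStream() sets
       that state before the blocking-write buffer has drained and the audio
       units have actually been stopped. */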
    return ( stream->state == ACTIVE || stream->state == STOPPING );
}


static double GetStreamCpuLoad( PaStream* s )
{
    PaMacCoreStream *stream = (PaMacCoreStream*)s;
    VVDBUG(("GetStreamCpuLoad()\n"));

    return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer );
}