2
* Implementation of the PortAudio API for Apple AUHAL
4
* PortAudio Portable Real-Time Audio Library
5
* Latest Version at: http://www.portaudio.com
7
* Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
8
* Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
10
* Dominic's code was based on code by Phil Burk, Darren Gibbs,
11
* Gord Peters, Stephane Letz, and Greg Pfiel.
13
* The following people also deserve acknowledgements:
15
* Olivier Tristan for feedback and testing
16
* Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
20
* Based on the Open Source API proposed by Ross Bencina
21
* Copyright (c) 1999-2002 Ross Bencina, Phil Burk
23
* Permission is hereby granted, free of charge, to any person obtaining
24
* a copy of this software and associated documentation files
25
* (the "Software"), to deal in the Software without restriction,
26
* including without limitation the rights to use, copy, modify, merge,
27
* publish, distribute, sublicense, and/or sell copies of the Software,
28
* and to permit persons to whom the Software is furnished to do so,
29
* subject to the following conditions:
31
* The above copyright notice and this permission notice shall be
32
* included in all copies or substantial portions of the Software.
34
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
38
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
39
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
44
* The text above constitutes the entire PortAudio license; however,
45
* the PortAudio community also makes the following non-binding requests:
47
* Any person wishing to distribute modifications to the Software is
48
* requested to send the modifications to the original developer so that
49
* they can be incorporated into the canonical version. It is also
50
* requested that these non-binding requests be included along with the
58
@brief AUHAL implementation of PortAudio
61
/* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
62
* PaMacCore_SetError() will do this.
65
#include "pa_mac_core_internal.h"
67
#include <string.h> /* strlen(), memcmp() etc. */
68
#include <libkern/OSAtomic.h>
70
#include "pa_mac_core.h"
71
#include "pa_mac_core_utilities.h"
72
#include "pa_mac_core_blocking.h"
78
#endif /* __cplusplus */
80
/* prototypes for functions declared in this file */
82
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );
85
* Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
86
* with the requested flags and initializes channel map.
88
/**
 * Initialize a PaMacCoreStreamInfo (declared in pa_mac_core.h) for use as
 * hostApiSpecificStreamInfo: zero the struct, stamp its size and host-API
 * type, and clear the channel map.
 * NOTE(review): this extraction is missing interleaved source lines (braces,
 * the `flags` assignment); the bare numeric lines are artifacts of the
 * original file's line numbering and are preserved untouched.
 */
void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, const unsigned long flags )
90
/* start from a clean slate before filling in individual fields */
bzero( data, sizeof( PaMacCoreStreamInfo ) );
91
data->size = sizeof( PaMacCoreStreamInfo );
92
data->hostApiType = paCoreAudio;
95
/* no channel map until PaMacCore_SetupChannelMap() installs one */
data->channelMap = NULL;
96
data->channelMapSize = 0;
100
* Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
102
/**
 * Install a user-supplied channel map on a PaMacCoreStreamInfo (declared in
 * pa_mac_core.h).  Only the pointer and its length are stored — no copy is
 * made, so the caller must keep channelMap alive for the stream's lifetime.
 */
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
104
data->channelMap = channelMap;
105
data->channelMapSize = channelMapSize;
107
/* Scratch buffer shared by PaMacCore_GetChannelName(); grown on demand.
 * Not thread-safe: file-scope mutable state. */
static char *channelName = NULL;
108
static int channelNameSize = 0;
109
/* Grow the scratch buffer so it can hold at least `size` bytes plus a
 * terminator.  NOTE(review): a free() of the previous buffer is not visible
 * in this extraction — confirm against the upstream source that the old
 * allocation is released before the new malloc. */
static bool ensureChannelNameSize( int size )
111
if( size >= channelNameSize ) {
113
/* assignment inside the call: channelNameSize is updated to `size` */
channelName = (char *) malloc( ( channelNameSize = size ) + 1 );
122
* Function declared in pa_mac_core.h. Retrieves channel names.
124
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
126
struct PaUtilHostApiRepresentation *hostApi;
129
err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
130
assert(err == paNoError);
131
if( err != paNoError )
133
PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
134
AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
138
error = AudioDeviceGetPropertyInfo( hostApiDevice,
141
kAudioDevicePropertyChannelName,
147
bool isDeviceName = false;
148
size = sizeof( name );
149
error = AudioDeviceGetProperty( hostApiDevice,
152
kAudioDevicePropertyChannelNameCFString,
155
if( error ) { //as a last-ditch effort, get the device name. Later we'll append the channel number.
156
size = sizeof( name );
157
error = AudioDeviceGetProperty( hostApiDevice,
160
kAudioDevicePropertyDeviceNameCFString,
168
name = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%@: %d"), name, channelIndex + 1 );
171
CFIndex length = CFStringGetLength(name);
172
while( ensureChannelNameSize( length * sizeof(UniChar) + 1 ) ) {
173
if( CFStringGetCString( name, channelName, channelNameSize, kCFStringEncodingUTF8 ) ) {
187
//continue with C string:
188
if( !ensureChannelNameSize( size ) )
191
error = AudioDeviceGetProperty( hostApiDevice,
194
kAudioDevicePropertyChannelName,
209
/**
 * Return the AudioDeviceID backing the input side of an open PaStream.
 * NOTE(review): the debug trace prints "...GetStreamInputHandle()" although
 * the function is named ...GetStreamInputDevice() — harmless, but
 * inconsistent.
 */
AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
211
PaMacCoreStream *stream = (PaMacCoreStream*)s;
212
VVDBUG(("PaMacCore_GetStreamInputHandle()\n"));
214
return ( stream->inputDevice );
217
/**
 * Return the AudioDeviceID backing the output side of an open PaStream.
 * NOTE(review): the debug trace prints "...GetStreamOutputHandle()" although
 * the function is named ...GetStreamOutputDevice() — harmless, but
 * inconsistent.
 */
AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
219
PaMacCoreStream *stream = (PaMacCoreStream*)s;
220
VVDBUG(("PaMacCore_GetStreamOutputHandle()\n"));
222
return ( stream->outputDevice );
227
#endif /* __cplusplus */
229
#define RING_BUFFER_ADVANCE_DENOMINATOR (4)
231
static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
232
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
233
const PaStreamParameters *inputParameters,
234
const PaStreamParameters *outputParameters,
236
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
238
const PaStreamParameters *inputParameters,
239
const PaStreamParameters *outputParameters,
241
unsigned long framesPerBuffer,
242
PaStreamFlags streamFlags,
243
PaStreamCallback *streamCallback,
245
static PaError CloseStream( PaStream* stream );
246
static PaError StartStream( PaStream *stream );
247
static PaError StopStream( PaStream *stream );
248
static PaError AbortStream( PaStream *stream );
249
static PaError IsStreamStopped( PaStream *s );
250
static PaError IsStreamActive( PaStream *stream );
251
static PaTime GetStreamTime( PaStream *stream );
252
static void setStreamStartTime( PaStream *stream );
253
static OSStatus AudioIOProc( void *inRefCon,
254
AudioUnitRenderActionFlags *ioActionFlags,
255
const AudioTimeStamp *inTimeStamp,
257
UInt32 inNumberFrames,
258
AudioBufferList *ioData );
259
static double GetStreamCpuLoad( PaStream* stream );
261
static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
262
PaDeviceInfo *deviceInfo,
263
AudioDeviceID macCoreDeviceId,
266
static PaError OpenAndSetupOneAudioUnit(
267
const PaMacCoreStream *stream,
268
const PaStreamParameters *inStreamParams,
269
const PaStreamParameters *outStreamParams,
270
const UInt32 requestedFramesPerBuffer,
271
UInt32 *actualInputFramesPerBuffer,
272
UInt32 *actualOutputFramesPerBuffer,
273
const PaMacAUHAL *auhalHostApi,
274
AudioUnit *audioUnit,
275
AudioConverterRef *srConverter,
276
AudioDeviceID *audioDevice,
277
const double sampleRate,
280
/* for setting errors. */
281
#define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
282
PaUtil_SetLastHostErrorInfo( paInDevelopment, errorCode, errorText )
285
* Callback called when starting or stopping a stream.
287
/**
 * AudioUnit property listener for kAudioOutputUnitProperty_IsRunning,
 * invoked when an AU starts or stops.  When the unit stops and the stream
 * is in state STOPPING, transitions it to STOPPED and fires the user's
 * streamFinishedCallback (if any).
 * NOTE(review): the parameter list and several declarations (`ci`, `err`,
 * `isRunning`) are missing from this extraction; the bare numeric lines are
 * line-numbering artifacts and are preserved untouched.
 */
static void startStopCallback(
290
AudioUnitPropertyID inID,
291
AudioUnitScope inScope,
292
AudioUnitElement inElement )
294
PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
296
UInt32 size = sizeof( isRunning );
298
err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
301
/* on error, pretend the unit is not running: there is no channel to
 * report the failure to the user */
isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
303
return; //We are only interested in when we are stopping
304
// -- if we are using 2 I/O units, we only need one notification!
305
if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
307
PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
308
if( stream->state == STOPPING )
309
stream->state = STOPPED ;
311
/* tell the client the stream has finished */
sfc( stream->streamRepresentation.userData );
315
/*currently, this is only used in initialization, but it might be modified
316
to be used when the list of devices changes.*/
317
/**
 * Enumerate the CoreAudio devices into auhalHostApi->devIds and determine
 * the default input and output devices, falling back to the first device
 * with suitable channels when the OS query fails.
 * Returns paInsufficientMemory if the device-ID array cannot be allocated.
 * NOTE(review): this extraction is missing interleaved source lines
 * (braces, some declarations); the bare numeric lines are artifacts of the
 * original file's line numbering and are preserved untouched.
 */
static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
321
VVDBUG(("gatherDeviceInfo()\n"));
322
/* -- free any previous allocations -- */
323
if( auhalHostApi->devIds )
324
PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
325
auhalHostApi->devIds = NULL;
327
/* -- figure out how many devices there are -- */
328
AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
331
auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );
333
VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );
335
/* -- copy the device IDs -- */
336
auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
337
auhalHostApi->allocations,
339
if( !auhalHostApi->devIds )
340
return paInsufficientMemory;
341
AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
343
auhalHostApi->devIds );
344
#ifdef MAC_CORE_VERBOSE_DEBUG
347
for( i=0; i<auhalHostApi->devCount; ++i )
348
printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
352
size = sizeof(AudioDeviceID);
353
/* invalidate both defaults before querying the OS */
auhalHostApi->defaultIn = kAudioDeviceUnknown;
354
auhalHostApi->defaultOut = kAudioDeviceUnknown;
356
/* determine the default device. */
357
/* I am not sure how these calls to AudioHardwareGetProperty()
358
could fail, but in case they do, we use the first available
359
device as the default. */
360
if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
362
&auhalHostApi->defaultIn) ) {
364
auhalHostApi->defaultIn = kAudioDeviceUnknown;
365
VDBUG(("Failed to get default input device from OS."));
366
VDBUG((" I will substitute the first available input Device."));
367
for( i=0; i<auhalHostApi->devCount; ++i ) {
368
PaDeviceInfo devInfo;
369
if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
370
auhalHostApi->devIds[i], TRUE ) )
371
if( devInfo.maxInputChannels ) {
372
auhalHostApi->defaultIn = auhalHostApi->devIds[i];
377
if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
379
&auhalHostApi->defaultOut) ) {
381
/* BUGFIX: this fallback previously reset defaultIn, clobbering the
 * input default chosen above.  It is the OUTPUT default that must be
 * invalidated here, mirroring the input branch. */
auhalHostApi->defaultOut = kAudioDeviceUnknown;
382
VDBUG(("Failed to get default output device from OS."));
383
VDBUG((" I will substitute the first available output Device."));
384
for( i=0; i<auhalHostApi->devCount; ++i ) {
385
PaDeviceInfo devInfo;
386
if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
387
auhalHostApi->devIds[i], FALSE ) )
388
if( devInfo.maxOutputChannels ) {
389
auhalHostApi->defaultOut = auhalHostApi->devIds[i];
395
VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn ) );
396
VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );
401
/**
 * Query a device's stream configuration to count its input or output
 * channels (isInput selects the direction), store the count in deviceInfo,
 * and — when the device has channels — derive default low/high latencies
 * from kAudioDevicePropertyLatency.
 * NOTE(review): this extraction is missing interleaved source lines
 * (braces, the `error:` label, some declarations); the bare numeric lines
 * are line-numbering artifacts and are preserved untouched.
 */
static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
402
PaDeviceInfo *deviceInfo,
403
AudioDeviceID macCoreDeviceId,
407
PaError err = paNoError;
410
AudioBufferList *buflist = NULL;
413
VVDBUG(("GetChannelInfo()\n"));
415
/* Get the number of channels from the stream configuration.
416
Fail if we can't get this. */
418
err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
422
buflist = PaUtil_AllocateMemory(propSize);
424
return paInsufficientMemory;
425
err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
429
/* sum channels across all buffers in the device's buffer list */
for (i = 0; i < buflist->mNumberBuffers; ++i)
430
numChannels += buflist->mBuffers[i].mNumberChannels;
433
deviceInfo->maxInputChannels = numChannels;
435
deviceInfo->maxOutputChannels = numChannels;
437
if (numChannels > 0) /* do not try to retrieve the latency if there is no channels. */
439
/* Get the latency. Don't fail if we can't get this. */
440
/* default to something reasonable */
441
deviceInfo->defaultLowInputLatency = .01;
442
deviceInfo->defaultHighInputLatency = .10;
443
deviceInfo->defaultLowOutputLatency = .01;
444
deviceInfo->defaultHighOutputLatency = .10;
445
propSize = sizeof(UInt32);
446
err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &frameLatency));
450
* This code was arrived at by trial and error, and some extensive, but not exhaustive
451
* testing. Sebastien Beaulieu <seb@plogue.com> has suggested using
452
* kAudioDevicePropertyLatency + kAudioDevicePropertySafetyOffset + buffer size instead.
453
* At the time this code was written, many users were reporting dropouts with audio
454
* programs that probably used this formula. This was probably
455
* around 10.4.4, and the problem is probably fixed now. So perhaps
456
* his formula should be reviewed and used.
458
double secondLatency = frameLatency / deviceInfo->defaultSampleRate;
461
deviceInfo->defaultLowInputLatency = 3 * secondLatency;
462
deviceInfo->defaultHighInputLatency = 3 * 10 * secondLatency;
466
deviceInfo->defaultLowOutputLatency = 3 * secondLatency;
467
deviceInfo->defaultHighOutputLatency = 3 * 10 * secondLatency;
471
/* NOTE(review): the two frees below presumably belong to the success
 * path and the (missing) `error:` path respectively — confirm against
 * the upstream source. */
PaUtil_FreeMemory( buflist );
474
PaUtil_FreeMemory( buflist );
478
/**
 * Populate a PaDeviceInfo for one CoreAudio device: name, default sample
 * rate, and channel counts (via GetChannelInfo).  Fails with
 * paInsufficientMemory if the name buffer cannot be allocated.
 * NOTE(review): this extraction is missing interleaved source lines
 * (braces, some declarations); the bare numeric lines are artifacts of the
 * original file's line numbering and are preserved untouched.
 */
static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
479
PaDeviceInfo *deviceInfo,
480
AudioDeviceID macCoreDeviceId,
481
PaHostApiIndex hostApiIndex )
485
PaError err = paNoError;
488
VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));
490
/* BUGFIX: was sizeof(deviceInfo), which is the size of the POINTER
 * (4/8 bytes), leaving most of the struct uninitialized; we must zero
 * the whole pointed-to PaDeviceInfo. */
memset(deviceInfo, 0, sizeof(*deviceInfo));
492
deviceInfo->structVersion = 2;
493
deviceInfo->hostApi = hostApiIndex;
495
/* Get the device name. Fail if we can't get it. */
496
err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
500
name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize);
502
return paInsufficientMemory;
503
err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
506
deviceInfo->name = name;
508
/* Try to get the default sample rate. Don't fail if we can't get this. */
509
propSize = sizeof(Float64);
510
err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
512
deviceInfo->defaultSampleRate = 0.0;
514
deviceInfo->defaultSampleRate = sampleRate;
516
/* Get the maximum number of input and output channels. Fail if we can't get this. */
518
err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
522
err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
529
PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
531
PaError result = paNoError;
533
PaMacAUHAL *auhalHostApi;
534
PaDeviceInfo *deviceInfoArray;
537
VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));
539
unixErr = initializeXRunListenerList();
541
return UNIX_ERR(unixErr);
544
auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
547
result = paInsufficientMemory;
551
auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
552
if( !auhalHostApi->allocations )
554
result = paInsufficientMemory;
558
auhalHostApi->devIds = NULL;
559
auhalHostApi->devCount = 0;
561
/* get the info we need about the devices */
562
result = gatherDeviceInfo( auhalHostApi );
563
if( result != paNoError )
566
*hostApi = &auhalHostApi->inheritedHostApiRep;
567
(*hostApi)->info.structVersion = 1;
568
(*hostApi)->info.type = paCoreAudio;
569
(*hostApi)->info.name = "Core Audio";
571
(*hostApi)->info.defaultInputDevice = paNoDevice;
572
(*hostApi)->info.defaultOutputDevice = paNoDevice;
574
(*hostApi)->info.deviceCount = 0;
576
if( auhalHostApi->devCount > 0 )
578
(*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
579
auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
580
if( !(*hostApi)->deviceInfos )
582
result = paInsufficientMemory;
586
/* allocate all device info structs in a contiguous block */
587
deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
588
auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
589
if( !deviceInfoArray )
591
result = paInsufficientMemory;
595
for( i=0; i < auhalHostApi->devCount; ++i )
598
err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
599
auhalHostApi->devIds[i],
601
if (err == paNoError)
602
{ /* copy some info and set the defaults */
603
(*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
604
if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
605
(*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
606
if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
607
(*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
608
(*hostApi)->info.deviceCount++;
611
{ /* there was an error. we need to shift the devices down, so we ignore this one */
613
auhalHostApi->devCount--;
614
for( j=i; j<auhalHostApi->devCount; ++j )
615
auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
621
(*hostApi)->Terminate = Terminate;
622
(*hostApi)->OpenStream = OpenStream;
623
(*hostApi)->IsFormatSupported = IsFormatSupported;
625
PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
626
CloseStream, StartStream,
627
StopStream, AbortStream, IsStreamStopped,
629
GetStreamTime, GetStreamCpuLoad,
630
PaUtil_DummyRead, PaUtil_DummyWrite,
631
PaUtil_DummyGetReadAvailable,
632
PaUtil_DummyGetWriteAvailable );
634
PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
635
CloseStream, StartStream,
636
StopStream, AbortStream, IsStreamStopped,
638
GetStreamTime, PaUtil_DummyGetCpuLoad,
639
ReadStream, WriteStream,
640
GetStreamReadAvailable,
641
GetStreamWriteAvailable );
648
if( auhalHostApi->allocations )
650
PaUtil_FreeAllAllocations( auhalHostApi->allocations );
651
PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
654
PaUtil_FreeMemory( auhalHostApi );
660
/**
 * Host-API teardown: destroy the xrun-listener list, release everything
 * owned by the allocation group, then free the host-API struct itself.
 * NOTE(review): the two stray text lines below ("- clean up ..." / "TODO:
 * ...") are the interior of a comment whose delimiters were lost in this
 * extraction; the bare numeric lines are line-numbering artifacts.  All are
 * preserved untouched.
 */
static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
664
PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
666
VVDBUG(("Terminate()\n"));
668
unixErr = destroyXRunListenerList();
674
- clean up any resources not handled by the allocation group
675
TODO: Double check that everything is handled by alloc group
678
if( auhalHostApi->allocations )
680
PaUtil_FreeAllAllocations( auhalHostApi->allocations );
681
PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
684
PaUtil_FreeMemory( auhalHostApi );
688
/**
 * Standard PortAudio format check: validate channel counts and sample
 * formats against the device info, then verify the sample-rate/format
 * combination by actually opening (and immediately closing) a stream.
 * Returns paFormatIsSupported on success or a standard PaError code.
 * NOTE(review): this extraction is missing interleaved source lines
 * (braces, some comment terminators); the bare numeric lines are artifacts
 * of the original file's line numbering and are preserved untouched.
 */
static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
689
const PaStreamParameters *inputParameters,
690
const PaStreamParameters *outputParameters,
693
int inputChannelCount, outputChannelCount;
694
PaSampleFormat inputSampleFormat, outputSampleFormat;
696
VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
697
inputParameters ? inputParameters->channelCount : -1,
698
inputParameters ? inputParameters->sampleFormat : -1,
699
outputParameters ? outputParameters->channelCount : -1,
700
outputParameters ? outputParameters->sampleFormat : -1,
701
(float) sampleRate ));
703
/** These first checks are standard PA checks. We do some fancier checks
705
if( inputParameters )
707
inputChannelCount = inputParameters->channelCount;
708
inputSampleFormat = inputParameters->sampleFormat;
710
/* all standard sample formats are supported by the buffer adapter,
711
this implementation doesn't support any custom sample formats */
712
if( inputSampleFormat & paCustomFormat )
713
return paSampleFormatNotSupported;
715
/* unless alternate device specification is supported, reject the use of
716
paUseHostApiSpecificDeviceSpecification */
718
if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
719
return paInvalidDevice;
721
/* check that input device can support inputChannelCount */
722
if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
723
return paInvalidChannelCount;
727
inputChannelCount = 0;
730
if( outputParameters )
732
outputChannelCount = outputParameters->channelCount;
733
outputSampleFormat = outputParameters->sampleFormat;
735
/* all standard sample formats are supported by the buffer adapter,
736
this implementation doesn't support any custom sample formats */
737
if( outputSampleFormat & paCustomFormat )
738
return paSampleFormatNotSupported;
740
/* unless alternate device specification is supported, reject the use of
741
paUseHostApiSpecificDeviceSpecification */
743
if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
744
return paInvalidDevice;
746
/* check that output device can support outputChannelCount */
747
if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
748
return paInvalidChannelCount;
753
outputChannelCount = 0;
757
/* I think the only way to check a given format SR combo is */
758
/* to try opening it. This could be disruptive, is that Okay? */
759
/* The alternative is to just read off available sample rates, */
760
/* but this will not work 100% of the time (eg, a device that */
761
/* supports N output at one rate but only N/2 at a higher rate.)*/
763
/* The following code opens the device with the requested parameters to
768
err = OpenStream( hostApi, &s, inputParameters, outputParameters,
769
/* (PaStreamCallback *)1 is presumably a dummy non-NULL callback so the
 * callback-stream path is exercised without a real callback — confirm */
sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
770
if( err != paNoError && err != paInvalidSampleRate )
771
DBUG( ( "OpenStream @ %g returned: %d: %s\n",
772
(float) sampleRate, err, Pa_GetErrorText( err ) ) );
775
err = CloseStream( s );
777
/* FEEDBACK: is this more serious? should we assert? */
778
DBUG( ( "WARNING: could not close Stream. %d: %s\n",
779
err, Pa_GetErrorText( err ) ) );
783
return paFormatIsSupported;
786
static PaError OpenAndSetupOneAudioUnit(
787
const PaMacCoreStream *stream,
788
const PaStreamParameters *inStreamParams,
789
const PaStreamParameters *outStreamParams,
790
const UInt32 requestedFramesPerBuffer,
791
UInt32 *actualInputFramesPerBuffer,
792
UInt32 *actualOutputFramesPerBuffer,
793
const PaMacAUHAL *auhalHostApi,
794
AudioUnit *audioUnit,
795
AudioConverterRef *srConverter,
796
AudioDeviceID *audioDevice,
797
const double sampleRate,
800
ComponentDescription desc;
802
/*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
803
AudioStreamBasicDescription desiredFormat;
804
OSStatus result = noErr;
805
PaError paResult = paNoError;
808
AURenderCallbackStruct rcbs;
809
unsigned long macInputStreamFlags = paMacCorePlayNice;
810
unsigned long macOutputStreamFlags = paMacCorePlayNice;
811
SInt32 const *inChannelMap = NULL;
812
SInt32 const *outChannelMap = NULL;
813
unsigned long inChannelMapSize = 0;
814
unsigned long outChannelMapSize = 0;
816
VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
817
inStreamParams ? inStreamParams->channelCount : -1,
818
inStreamParams ? inStreamParams->sampleFormat : -1,
819
outStreamParams ? outStreamParams->channelCount : -1,
820
outStreamParams ? outStreamParams->sampleFormat : -1,
821
requestedFramesPerBuffer ));
823
/* -- handle the degenerate case -- */
824
if( !inStreamParams && !outStreamParams ) {
826
*audioDevice = kAudioDeviceUnknown;
830
/* -- get the user's api specific info, if they set any -- */
831
if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
834
((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
836
inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
838
inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
841
if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
843
macOutputStreamFlags=
844
((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
846
outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
848
outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
851
/* Override user's flags here, if desired for testing. */
854
* The HAL AU is a Mac OS style "component".
855
* the first few steps deal with that.
856
* Later steps work on a combination of Mac OS
857
* components and the slightly lower level
861
/* -- describe the output type AudioUnit -- */
862
/* Note: for the default AudioUnit, we could use the
863
* componentSubType value kAudioUnitSubType_DefaultOutput;
864
* but I don't think that's relevant here.
866
desc.componentType = kAudioUnitType_Output;
867
desc.componentSubType = kAudioUnitSubType_HALOutput;
868
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
869
desc.componentFlags = 0;
870
desc.componentFlagsMask = 0;
871
/* -- find the component -- */
872
comp = FindNextComponent( NULL, &desc );
875
DBUG( ( "AUHAL component not found." ) );
877
*audioDevice = kAudioDeviceUnknown;
878
return paUnanticipatedHostError;
881
result = OpenAComponent( comp, audioUnit );
884
DBUG( ( "Failed to open AUHAL component." ) );
886
*audioDevice = kAudioDeviceUnknown;
887
return ERR( result );
889
/* -- prepare a little error handling logic / hackery -- */
890
#define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
892
/* -- if there is input, we have to explicitly enable input -- */
896
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
897
kAudioOutputUnitProperty_EnableIO,
898
kAudioUnitScope_Input,
901
sizeof(enableIO) ) );
903
/* -- if there is no output, we must explicitly disable output -- */
904
if( !outStreamParams )
907
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
908
kAudioOutputUnitProperty_EnableIO,
909
kAudioUnitScope_Output,
912
sizeof(enableIO) ) );
915
/* -- set the devices -- */
916
/* make sure input and output are the same device if we are doing input and
918
if( inStreamParams && outStreamParams )
920
assert( outStreamParams->device == inStreamParams->device );
924
*audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
925
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
926
kAudioOutputUnitProperty_CurrentDevice,
927
kAudioUnitScope_Global,
930
sizeof(AudioDeviceID) ) );
932
if( outStreamParams && outStreamParams != inStreamParams )
934
*audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
935
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
936
kAudioOutputUnitProperty_CurrentDevice,
937
kAudioUnitScope_Global,
940
sizeof(AudioDeviceID) ) );
942
/* -- add listener for dropouts -- */
943
result = AudioDeviceAddPropertyListener( *audioDevice,
945
outStreamParams ? false : true,
946
kAudioDeviceProcessorOverload,
948
addToXRunListenerList( (void *)stream ) ) ;
949
if( result == kAudioHardwareIllegalOperationError ) {
950
// -- already registered, we're good
952
// -- not already registered, just check for errors
955
/* -- listen for stream start and stop -- */
956
ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
957
kAudioOutputUnitProperty_IsRunning,
961
/* -- set format -- */
962
bzero( &desiredFormat, sizeof(desiredFormat) );
963
desiredFormat.mFormatID = kAudioFormatLinearPCM ;
964
desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
965
desiredFormat.mFramesPerPacket = 1;
966
desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
969
/* set device format first, but only touch the device if the user asked */
970
if( inStreamParams ) {
971
/*The callback never calls back if we don't set the FPB */
972
/*This seems wierd, because I would think setting anything on the device
973
would be disruptive.*/
974
paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
975
requestedFramesPerBuffer,
976
actualInputFramesPerBuffer );
977
if( paResult ) goto error;
978
if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
980
requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
981
paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
982
requireExact, sampleRate );
983
if( paResult ) goto error;
985
if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
986
*actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
988
if( outStreamParams && !inStreamParams ) {
989
/*The callback never calls back if we don't set the FPB */
990
/*This seems wierd, because I would think setting anything on the device
991
would be disruptive.*/
992
paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
993
requestedFramesPerBuffer,
994
actualOutputFramesPerBuffer );
995
if( paResult ) goto error;
996
if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
998
requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
999
paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1000
requireExact, sampleRate );
1001
if( paResult ) goto error;
1005
/* -- set the quality of the output converter -- */
1006
if( outStreamParams ) {
1007
UInt32 value = kAudioConverterQuality_Max;
1008
switch( macOutputStreamFlags & 0x0700 ) {
1009
case 0x0100: /*paMacCore_ConversionQualityMin:*/
1010
value=kRenderQuality_Min;
1012
case 0x0200: /*paMacCore_ConversionQualityLow:*/
1013
value=kRenderQuality_Low;
1015
case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1016
value=kRenderQuality_Medium;
1018
case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1019
value=kRenderQuality_High;
1022
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1023
kAudioUnitProperty_RenderQuality,
1024
kAudioUnitScope_Global,
1029
/* now set the format on the Audio Units. */
1030
if( outStreamParams )
1032
desiredFormat.mSampleRate =sampleRate;
1033
desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1034
desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1035
desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1036
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1037
kAudioUnitProperty_StreamFormat,
1038
kAudioUnitScope_Input,
1041
sizeof(AudioStreamBasicDescription) ) );
1043
if( inStreamParams )
1045
AudioStreamBasicDescription sourceFormat;
1046
UInt32 size = sizeof( AudioStreamBasicDescription );
1048
/* keep the sample rate of the device, or we confuse AUHAL */
1049
ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1050
kAudioUnitProperty_StreamFormat,
1051
kAudioUnitScope_Input,
1055
desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1056
desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1057
desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1058
desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1059
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1060
kAudioUnitProperty_StreamFormat,
1061
kAudioUnitScope_Output,
1064
sizeof(AudioStreamBasicDescription) ) );
1066
/* set the maximumFramesPerSlice */
1067
/* not doing this causes real problems
1068
(eg. the callback might not be called). The idea of setting both this
1069
and the frames per buffer on the device is that we'll be most likely
1070
to actually get the frame size we requested in the callback with the
1072
if( outStreamParams ) {
1073
UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1074
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1075
kAudioUnitProperty_MaximumFramesPerSlice,
1076
kAudioUnitScope_Input,
1078
actualOutputFramesPerBuffer,
1079
sizeof(*actualOutputFramesPerBuffer) ) );
1080
ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1081
kAudioUnitProperty_MaximumFramesPerSlice,
1082
kAudioUnitScope_Global,
1084
actualOutputFramesPerBuffer,
1087
if( inStreamParams ) {
1088
/*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1089
ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1090
kAudioUnitProperty_MaximumFramesPerSlice,
1091
kAudioUnitScope_Output,
1093
actualInputFramesPerBuffer,
1094
sizeof(*actualInputFramesPerBuffer) ) );
1095
/* Don't know why this causes problems
1096
ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1097
kAudioUnitProperty_MaximumFramesPerSlice,
1098
kAudioUnitScope_Global, //Output,
1100
actualInputFramesPerBuffer,
1105
/* -- if we have input, we may need to setup an SR converter -- */
1106
/* even if we got the sample rate we asked for, we need to do
1107
the conversion in case another program changes the underlying SR. */
1108
/* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1109
if( inStreamParams ) {
1110
AudioStreamBasicDescription desiredFormat;
1111
AudioStreamBasicDescription sourceFormat;
1112
UInt32 sourceSize = sizeof( sourceFormat );
1113
bzero( &desiredFormat, sizeof(desiredFormat) );
1114
desiredFormat.mSampleRate = sampleRate;
1115
desiredFormat.mFormatID = kAudioFormatLinearPCM ;
1116
desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
1117
desiredFormat.mFramesPerPacket = 1;
1118
desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
1119
desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1120
desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1121
desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1123
/* get the source format */
1124
ERR_WRAP( AudioUnitGetProperty(
1126
kAudioUnitProperty_StreamFormat,
1127
kAudioUnitScope_Output,
1132
if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1134
UInt32 value = kAudioConverterQuality_Max;
1135
switch( macInputStreamFlags & 0x0700 ) {
1136
case 0x0100: /*paMacCore_ConversionQualityMin:*/
1137
value=kAudioConverterQuality_Min;
1139
case 0x0200: /*paMacCore_ConversionQualityLow:*/
1140
value=kAudioConverterQuality_Low;
1142
case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1143
value=kAudioConverterQuality_Medium;
1145
case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1146
value=kAudioConverterQuality_High;
1149
VDBUG(( "Creating sample rate converter for input"
1150
" to convert from %g to %g\n",
1151
(float)sourceFormat.mSampleRate,
1152
(float)desiredFormat.mSampleRate ) );
1153
/* create our converter */
1154
ERR_WRAP( AudioConverterNew(
1159
ERR_WRAP( AudioConverterSetProperty(
1161
kAudioConverterSampleRateConverterQuality,
1166
/* -- set IOProc (callback) -- */
1167
callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1168
: kAudioOutputUnitProperty_SetInputCallback ;
1169
rcbs.inputProc = AudioIOProc;
1170
rcbs.inputProcRefCon = refCon;
1171
ERR_WRAP( AudioUnitSetProperty(
1174
kAudioUnitScope_Output,
1175
outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1179
if( inStreamParams && outStreamParams && *srConverter )
1180
ERR_WRAP( AudioUnitSetProperty(
1182
kAudioOutputUnitProperty_SetInputCallback,
1183
kAudioUnitScope_Output,
1188
/* channel mapping. */
1191
UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1193
//for each channel of desired input, map the channel from
1194
//the device's output channel.
1195
ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1196
kAudioOutputUnitProperty_ChannelMap,
1197
kAudioUnitScope_Output,
1204
UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1206
//for each channel of desired output, map the channel from
1207
//the device's output channel.
1208
ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1209
kAudioOutputUnitProperty_ChannelMap,
1210
kAudioUnitScope_Output,
1215
/* initialize the audio unit */
1216
ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1218
if( inStreamParams && outStreamParams )
1219
VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1220
else if( inStreamParams )
1221
VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1222
else if( outStreamParams )
1223
VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1228
CloseComponent( *audioUnit );
1231
return PaMacCore_SetError( result, line, 1 );
1235
/* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
1236
static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1238
const PaStreamParameters *inputParameters,
1239
const PaStreamParameters *outputParameters,
1241
unsigned long framesPerBuffer,
1242
PaStreamFlags streamFlags,
1243
PaStreamCallback *streamCallback,
1246
PaError result = paNoError;
1247
PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1248
PaMacCoreStream *stream = 0;
1249
int inputChannelCount, outputChannelCount;
1250
PaSampleFormat inputSampleFormat, outputSampleFormat;
1251
PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1252
VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1253
inputParameters ? inputParameters->channelCount : -1,
1254
inputParameters ? inputParameters->sampleFormat : -1,
1255
outputParameters ? outputParameters->channelCount : -1,
1256
outputParameters ? outputParameters->sampleFormat : -1,
1259
VDBUG( ("Opening Stream.\n") );
1261
/*These first few bits of code are from paSkeleton with few modifications.*/
1262
if( inputParameters )
1264
inputChannelCount = inputParameters->channelCount;
1265
inputSampleFormat = inputParameters->sampleFormat;
1267
/* unless alternate device specification is supported, reject the use of
1268
paUseHostApiSpecificDeviceSpecification */
1270
if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1271
return paInvalidDevice;
1273
/* check that input device can support inputChannelCount */
1274
if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1275
return paInvalidChannelCount;
1277
/* Host supports interleaved float32 */
1278
hostInputSampleFormat = paFloat32;
1282
inputChannelCount = 0;
1283
inputSampleFormat = hostInputSampleFormat = paFloat32; /* Surpress 'uninitialised var' warnings. */
1286
if( outputParameters )
1288
outputChannelCount = outputParameters->channelCount;
1289
outputSampleFormat = outputParameters->sampleFormat;
1291
/* unless alternate device specification is supported, reject the use of
1292
paUseHostApiSpecificDeviceSpecification */
1294
if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1295
return paInvalidDevice;
1297
/* check that output device can support inputChannelCount */
1298
if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1299
return paInvalidChannelCount;
1301
/* Host supports interleaved float32 */
1302
hostOutputSampleFormat = paFloat32;
1306
outputChannelCount = 0;
1307
outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Surpress 'uninitialized var' warnings. */
1310
/* validate platform specific flags */
1311
if( (streamFlags & paPlatformSpecificFlags) != 0 )
1312
return paInvalidFlag; /* unexpected platform specific flag */
1314
stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1317
result = paInsufficientMemory;
1321
/* If we fail after this point, we my be left in a bad state, with
1322
some data structures setup and others not. So, first thing we
1323
do is initialize everything so that if we fail, we know what hasn't
1327
stream->inputAudioBufferList.mBuffers[0].mData = NULL;
1328
stream->inputRingBuffer.buffer = NULL;
1329
bzero( &stream->blio, sizeof( PaMacBlio ) );
1331
stream->blio.inputRingBuffer.buffer = NULL;
1332
stream->blio.outputRingBuffer.buffer = NULL;
1333
stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1334
stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1335
stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1336
stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1338
stream->inputSRConverter = NULL;
1339
stream->inputUnit = NULL;
1340
stream->outputUnit = NULL;
1341
stream->inputFramesPerBuffer = 0;
1342
stream->outputFramesPerBuffer = 0;
1343
stream->bufferProcessorIsInitialized = FALSE;
1345
/* assert( streamCallback ) ; */ /* only callback mode is implemented */
1346
if( streamCallback )
1348
PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1349
&auhalHostApi->callbackStreamInterface,
1350
streamCallback, userData );
1354
PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1355
&auhalHostApi->blockingStreamInterface,
1356
BlioCallback, &stream->blio );
1359
PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1361
/* -- handle paFramesPerBufferUnspecified -- */
1362
if( framesPerBuffer == paFramesPerBufferUnspecified ) {
1363
long requested = 64;
1364
if( inputParameters )
1365
requested = MAX( requested, inputParameters->suggestedLatency * sampleRate / 2 );
1366
if( outputParameters )
1367
requested = MAX( requested, outputParameters->suggestedLatency *sampleRate / 2 );
1368
VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1370
if( requested <= 64 ) {
1371
/*requested a realtively low latency. make sure this is in range of devices */
1372
/*try to get the device's min natural buffer size and use that (but no smaller than 64).*/
1373
AudioValueRange audioRange;
1374
UInt32 size = sizeof( audioRange );
1375
if( inputParameters ) {
1376
WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1379
kAudioDevicePropertyBufferFrameSizeRange,
1380
&size, &audioRange ) );
1382
requested = MAX( requested, audioRange.mMinimum );
1384
size = sizeof( audioRange );
1385
if( outputParameters ) {
1386
WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1389
kAudioDevicePropertyBufferFrameSizeRange,
1390
&size, &audioRange ) );
1392
requested = MAX( requested, audioRange.mMinimum );
1395
/* requested a realtively high latency. make sure this is in range of devices */
1396
/*try to get the device's max natural buffer size and use that (but no larger than 1024).*/
1397
AudioValueRange audioRange;
1398
UInt32 size = sizeof( audioRange );
1399
requested = MIN( requested, 1024 );
1400
if( inputParameters ) {
1401
WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1404
kAudioDevicePropertyBufferFrameSizeRange,
1405
&size, &audioRange ) );
1407
requested = MIN( requested, audioRange.mMaximum );
1409
size = sizeof( audioRange );
1410
if( outputParameters ) {
1411
WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1414
kAudioDevicePropertyBufferFrameSizeRange,
1415
&size, &audioRange ) );
1417
requested = MIN( requested, audioRange.mMaximum );
1420
/* -- double check ranges -- */
1421
if( requested > 1024 ) requested = 1024;
1422
if( requested < 64 ) requested = 64;
1423
VDBUG(("After querying hardware, setting block size to %ld.\n", requested));
1424
framesPerBuffer = requested;
1427
/* -- Now we actually open and setup streams. -- */
1428
if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1429
{ /* full duplex. One device. */
1430
UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1431
UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1432
result = OpenAndSetupOneAudioUnit( stream,
1436
&inputFramesPerBuffer,
1437
&outputFramesPerBuffer,
1439
&(stream->inputUnit),
1440
&(stream->inputSRConverter),
1441
&(stream->inputDevice),
1444
stream->inputFramesPerBuffer = inputFramesPerBuffer;
1445
stream->outputFramesPerBuffer = outputFramesPerBuffer;
1446
stream->outputUnit = stream->inputUnit;
1447
stream->outputDevice = stream->inputDevice;
1448
if( result != paNoError )
1452
{ /* full duplex, different devices OR simplex */
1453
UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1454
UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1455
result = OpenAndSetupOneAudioUnit( stream,
1460
&outputFramesPerBuffer,
1462
&(stream->outputUnit),
1464
&(stream->outputDevice),
1467
if( result != paNoError )
1469
result = OpenAndSetupOneAudioUnit( stream,
1473
&inputFramesPerBuffer,
1476
&(stream->inputUnit),
1477
&(stream->inputSRConverter),
1478
&(stream->inputDevice),
1481
if( result != paNoError )
1483
stream->inputFramesPerBuffer = inputFramesPerBuffer;
1484
stream->outputFramesPerBuffer = outputFramesPerBuffer;
1487
if( stream->inputUnit ) {
1488
const size_t szfl = sizeof(float);
1489
/* setup the AudioBufferList used for input */
1490
bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1491
stream->inputAudioBufferList.mNumberBuffers = 1;
1492
stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1493
= inputChannelCount;
1494
stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1495
= stream->inputFramesPerBuffer*inputChannelCount*szfl;
1496
stream->inputAudioBufferList.mBuffers[0].mData
1498
stream->inputFramesPerBuffer*inputChannelCount,
1500
if( !stream->inputAudioBufferList.mBuffers[0].mData )
1502
result = paInsufficientMemory;
1507
* If input and output devs are different or we are doing SR conversion,
1509
* ring buffer to store inpt data while waiting for output
1512
if( (stream->outputUnit && stream->inputUnit != stream->outputUnit)
1513
|| stream->inputSRConverter )
1515
/* May want the ringSize ot initial position in
1516
ring buffer to depend somewhat on sample rate change */
1521
ringSize = computeRingBufferSize( inputParameters,
1523
stream->inputFramesPerBuffer,
1524
stream->outputFramesPerBuffer,
1526
/*ringSize <<= 4; *//*16x bigger, for testing */
1529
/*now, we need to allocate memory for the ring buffer*/
1530
data = calloc( ringSize, szfl );
1533
result = paInsufficientMemory;
1537
/* now we can initialize the ring buffer */
1538
//FIXME: element size whould probably be szfl*inputchan
1539
// but that will require some work all over the
1540
// place to patch up. szfl may be sufficient and would
1541
// be way easier to handle, but it seems clear from the
1542
// discussion that buffer processor compatibility
1543
// requires szfl*inputchan.
1544
// See revision 1346 and discussion:
1545
// http://techweb.rfa.org/pipermail/portaudio/2008-February/008295.html
1546
PaUtil_InitializeRingBuffer( &stream->inputRingBuffer,
1547
1, ringSize*szfl, data ) ;
1548
/* advance the read point a little, so we are reading from the
1549
middle of the buffer */
1550
if( stream->outputUnit )
1551
PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize*szfl / RING_BUFFER_ADVANCE_DENOMINATOR );
1555
/* -- initialize Blio Buffer Processors -- */
1556
if( !streamCallback )
1560
ringSize = computeRingBufferSize( inputParameters,
1562
stream->inputFramesPerBuffer,
1563
stream->outputFramesPerBuffer,
1565
result = initializeBlioRingBuffers( &stream->blio,
1566
inputParameters?inputParameters->sampleFormat:0 ,
1567
outputParameters?outputParameters->sampleFormat:0 ,
1568
MAX(stream->inputFramesPerBuffer,stream->outputFramesPerBuffer),
1570
inputParameters?inputChannelCount:0 ,
1571
outputParameters?outputChannelCount:0 ) ;
1572
if( result != paNoError )
1576
/* -- initialize Buffer Processor -- */
1578
unsigned long maxHostFrames = stream->inputFramesPerBuffer;
1579
if( stream->outputFramesPerBuffer > maxHostFrames )
1580
maxHostFrames = stream->outputFramesPerBuffer;
1581
result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
1582
inputChannelCount, inputSampleFormat,
1583
hostInputSampleFormat,
1584
outputChannelCount, outputSampleFormat,
1585
hostOutputSampleFormat,
1589
/* If sample rate conversion takes place, the buffer size
1590
will not be known. */
1592
stream->inputSRConverter
1593
? paUtilUnknownHostBufferSize
1594
: paUtilBoundedHostBufferSize,
1595
streamCallback ? streamCallback : BlioCallback,
1596
streamCallback ? userData : &stream->blio );
1597
if( result != paNoError )
1600
stream->bufferProcessorIsInitialized = TRUE;
1603
IMPLEMENT ME: initialise the following fields with estimated or actual
1605
I think this is okay the way it is br 12/1/05
1606
maybe need to change input latency estimate if IO devs differ
1608
stream->streamRepresentation.streamInfo.inputLatency =
1609
PaUtil_GetBufferProcessorInputLatency(&stream->bufferProcessor)/sampleRate;
1610
stream->streamRepresentation.streamInfo.outputLatency =
1611
PaUtil_GetBufferProcessorOutputLatency(&stream->bufferProcessor)/sampleRate;
1612
stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
1614
stream->sampleRate = sampleRate;
1615
stream->outDeviceSampleRate = 0;
1616
if( stream->outputUnit ) {
1618
UInt32 size = sizeof( rate );
1619
result = ERR( AudioDeviceGetProperty( stream->outputDevice,
1622
kAudioDevicePropertyNominalSampleRate,
1626
stream->outDeviceSampleRate = rate;
1628
stream->inDeviceSampleRate = 0;
1629
if( stream->inputUnit ) {
1631
UInt32 size = sizeof( rate );
1632
result = ERR( AudioDeviceGetProperty( stream->inputDevice,
1635
kAudioDevicePropertyNominalSampleRate,
1639
stream->inDeviceSampleRate = rate;
1641
stream->userInChan = inputChannelCount;
1642
stream->userOutChan = outputChannelCount;
1644
stream->isTimeSet = FALSE;
1645
stream->state = STOPPED;
1646
stream->xrunFlags = 0;
1648
*s = (PaStream*)stream;
1653
CloseStream( stream );
1657
PaTime GetStreamTime( PaStream *s )
1659
/* FIXME: I am not at all sure this timing info stuff is right.
1660
patest_sine_time reports negative latencies, which is wierd.*/
1661
PaMacCoreStream *stream = (PaMacCoreStream*)s;
1662
AudioTimeStamp timeStamp;
1664
VVDBUG(("GetStreamTime()\n"));
1666
if ( !stream->isTimeSet )
1669
if ( stream->outputDevice ) {
1670
AudioDeviceGetCurrentTime( stream->outputDevice, &timeStamp);
1671
return (PaTime)(timeStamp.mSampleTime - stream->startTime.mSampleTime)/stream->outDeviceSampleRate;
1672
} else if ( stream->inputDevice ) {
1673
AudioDeviceGetCurrentTime( stream->inputDevice, &timeStamp);
1674
return (PaTime)(timeStamp.mSampleTime - stream->startTime.mSampleTime)/stream->inDeviceSampleRate;
1680
static void setStreamStartTime( PaStream *stream )
1682
/* FIXME: I am not at all sure this timing info stuff is right.
1683
patest_sine_time reports negative latencies, which is wierd.*/
1684
PaMacCoreStream *s = (PaMacCoreStream *) stream;
1685
VVDBUG(("setStreamStartTime()\n"));
1686
if( s->outputDevice )
1687
AudioDeviceGetCurrentTime( s->outputDevice, &s->startTime);
1688
else if( s->inputDevice )
1689
AudioDeviceGetCurrentTime( s->inputDevice, &s->startTime);
1691
bzero( &s->startTime, sizeof( s->startTime ) );
1693
//FIXME: we need a memory barier here
1695
s->isTimeSet = TRUE;
1699
static PaTime TimeStampToSecs(PaMacCoreStream *stream, const AudioTimeStamp* timeStamp)
1701
VVDBUG(("TimeStampToSecs()\n"));
1702
//printf( "ATS: %lu, %g, %g\n", timeStamp->mFlags, timeStamp->mSampleTime, timeStamp->mRateScalar );
1703
if (timeStamp->mFlags & kAudioTimeStampSampleTimeValid)
1704
return (timeStamp->mSampleTime / stream->sampleRate);
1709
#define RING_BUFFER_EMPTY (1000)
1711
static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter,
1717
ring_buffer_size_t dummySize;
1718
PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
1720
VVDBUG(("ringBufferIOProc()\n"));
1722
if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
1725
return RING_BUFFER_EMPTY;
1727
assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
1728
PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
1729
outData, (ring_buffer_size_t *)ioDataSize,
1730
&dummyData, &dummySize );
1732
assert( *ioDataSize );
1733
PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
1739
* Called by the AudioUnit API to process audio from the sound card.
1740
* This is where the magic happens.
1742
/* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestinos for cleaning it up, I'm all ears. */
1743
static OSStatus AudioIOProc( void *inRefCon,
1744
AudioUnitRenderActionFlags *ioActionFlags,
1745
const AudioTimeStamp *inTimeStamp,
1747
UInt32 inNumberFrames,
1748
AudioBufferList *ioData )
1750
unsigned long framesProcessed = 0;
1751
PaStreamCallbackTimeInfo timeInfo = {0,0,0};
1752
PaMacCoreStream *stream = (PaMacCoreStream*)inRefCon;
1753
const bool isRender = inBusNumber == OUTPUT_ELEMENT;
1754
int callbackResult = paContinue ;
1756
VVDBUG(("AudioIOProc()\n"));
1758
PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
1760
/* -----------------------------------------------------------------*\
1761
This output may be useful for debugging,
1762
But printing durring the callback is a bad enough idea that
1763
this is not enabled by enableing the usual debugging calls.
1764
\* -----------------------------------------------------------------*/
1766
static int renderCount = 0;
1767
static int inputCount = 0;
1768
printf( "------------------- starting reder/input\n" );
1770
printf("Render callback (%d):\t", ++renderCount);
1772
printf("Input callback (%d):\t", ++inputCount);
1773
printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );
1775
printf( "--- inBusNumber: %lu\n", inBusNumber );
1776
printf( "--- inNumberFrames: %lu\n", inNumberFrames );
1777
printf( "--- %x ioData\n", (unsigned) ioData );
1781
printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
1782
for( i=0; i<ioData->mNumberBuffers; ++i )
1783
printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
1785
----------------------------------------------------------------- */
1787
if( !stream->isTimeSet )
1788
setStreamStartTime( stream );
1791
AudioTimeStamp currentTime;
1792
timeInfo.outputBufferDacTime = TimeStampToSecs(stream, inTimeStamp);
1793
AudioDeviceGetCurrentTime(stream->outputDevice, ¤tTime);
1794
timeInfo.currentTime = TimeStampToSecs(stream, ¤tTime);
1796
if( isRender && stream->inputUnit == stream->outputUnit )
1797
timeInfo.inputBufferAdcTime = TimeStampToSecs(stream, inTimeStamp);
1799
AudioTimeStamp currentTime;
1800
timeInfo.inputBufferAdcTime = TimeStampToSecs(stream, inTimeStamp);
1801
AudioDeviceGetCurrentTime(stream->inputDevice, ¤tTime);
1802
timeInfo.currentTime = TimeStampToSecs(stream, ¤tTime);
1805
//printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );
1807
if( isRender && stream->inputUnit == stream->outputUnit
1808
&& !stream->inputSRConverter )
1810
/* --------- Full Duplex, One Device, no SR Conversion -------
1812
* This is the lowest latency case, and also the simplest.
1813
* Input data and output data are available at the same time.
1814
* we do not use the input SR converter or the input ring buffer.
1818
unsigned long frames;
1820
/* -- start processing -- */
1821
PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
1823
stream->xrunFlags );
1824
stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback, which calls the xrunCallback function. It should be in the same thread as the main audio callback, but the apple docs just use the word "usually" so it may be possible to loose an xrun notification, if that callback happens here.
1826
/* -- compute frames. do some checks -- */
1827
assert( ioData->mNumberBuffers == 1 );
1828
assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
1829
frames = ioData->mBuffers[0].mDataByteSize;
1830
frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
1831
/* -- copy and process input data -- */
1832
err= AudioUnitRender(stream->inputUnit,
1837
&stream->inputAudioBufferList );
1838
/* FEEDBACK: I'm not sure what to do when this call fails. There's nothing in the PA API to
1839
* do about failures in the callback system. */
1842
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1843
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1845
stream->inputAudioBufferList.mBuffers[0].mData,
1846
stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
1847
/* -- Copy and process output data -- */
1848
PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
1849
PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
1851
ioData->mBuffers[0].mData,
1852
ioData->mBuffers[0].mNumberChannels);
1853
/* -- complete processing -- */
1855
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1860
/* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
1861
* -- OR Simplex Output
1863
* This case handles output data as in the full duplex case,
1864
* and, if there is input data, reads it off the ring buffer
1865
* and into the PA buffer processor. If sample rate conversion
1866
* is required on input, that is done here as well.
1868
unsigned long frames;
1870
/* Sometimes, when stopping a duplex stream we get erroneous
1871
xrun flags, so if this is our last run, clear the flags. */
1872
int xrunFlags = stream->xrunFlags;
1874
if( xrunFlags & paInputUnderflow )
1875
printf( "input underflow.\n" );
1876
if( xrunFlags & paInputOverflow )
1877
printf( "input overflow.\n" );
1879
if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
1882
/* -- start processing -- */
1883
PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
1886
stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */
1888
/* -- Copy and process output data -- */
1889
assert( ioData->mNumberBuffers == 1 );
1890
frames = ioData->mBuffers[0].mDataByteSize;
1891
frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
1892
assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
1893
PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
1894
PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
1896
ioData->mBuffers[0].mData,
1897
ioData->mBuffers[0].mNumberChannels);
1899
/* -- copy and process input data, and complete processing -- */
1900
if( stream->inputUnit ) {
1901
const int flsz = sizeof( float );
1902
/* Here, we read the data out of the ring buffer, through the
1904
int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
1905
if( stream->inputSRConverter )
1909
float data[ inChan * frames ];
1910
size = sizeof( data );
1911
err = AudioConverterFillBuffer(
1912
stream->inputSRConverter,
1914
&stream->inputRingBuffer,
1917
if( err == RING_BUFFER_EMPTY )
1918
{ /*the ring buffer callback underflowed */
1920
bzero( ((char *)data) + size, sizeof(data)-size );
1921
stream->xrunFlags |= paInputUnderflow;
1926
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1927
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1932
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1937
/* Without the AudioConverter is actually a bit more complex
1938
because we have to do a little buffer processing that the
1939
AudioConverter would otherwise handle for us. */
1940
void *data1, *data2;
1941
ring_buffer_size_t size1, size2;
1942
PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
1946
if( size1 / ( flsz * inChan ) == frames ) {
1947
/* simplest case: all in first buffer */
1948
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1949
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1954
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1956
PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
1957
} else if( ( size1 + size2 ) / ( flsz * inChan ) < frames ) {
1958
/*we underflowed. take what data we can, zero the rest.*/
1959
unsigned char data[frames*inChan*flsz];
1961
memcpy( data, data1, size1 );
1963
memcpy( data+size1, data2, size2 );
1964
bzero( data+size1+size2, frames*flsz*inChan - size1 - size2 );
1966
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1967
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1972
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1974
PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
1976
/* flag underflow */
1977
stream->xrunFlags |= paInputUnderflow;
1979
/*we got all the data, but split between buffers*/
1980
PaUtil_SetInputFrameCount( &(stream->bufferProcessor),
1981
size1 / ( flsz * inChan ) );
1982
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1986
PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor),
1987
size2 / ( flsz * inChan ) );
1988
PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
1993
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1995
PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1+size2 );
2000
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2007
/* ------------------ Input
2009
* First, we read off the audio data and put it in the ring buffer.
2010
* if this is an input-only stream, we need to process it more,
2011
* otherwise, we let the output case deal with it.
2014
int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
2015
/* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
2017
err= AudioUnitRender(stream->inputUnit,
2022
&stream->inputAudioBufferList );
2024
inNumberFrames /= 2;
2025
} while( err == -10874 && inNumberFrames > 1 );
2026
/* FEEDBACK: I'm not sure what to do when this call fails */
2029
if( stream->inputSRConverter || stream->outputUnit )
2031
/* If this is duplex or we use a converter, put the data
2032
into the ring buffer. */
2033
long bytesIn, bytesOut;
2034
bytesIn = sizeof( float ) * inNumberFrames * chan;
2035
bytesOut = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
2036
stream->inputAudioBufferList.mBuffers[0].mData,
2038
if( bytesIn != bytesOut )
2039
stream->xrunFlags |= paInputOverflow ;
2043
/* for simplex input w/o SR conversion,
2044
just pop the data into the buffer processor.*/
2045
PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2047
stream->xrunFlags );
2048
stream->xrunFlags = 0;
2050
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
2051
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2053
stream->inputAudioBufferList.mBuffers[0].mData,
2056
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2059
if( !stream->outputUnit && stream->inputSRConverter )
2061
/* ------------------ Simplex Input w/ SR Conversion
2063
* if this is a simplex input stream, we need to read off the buffer,
2064
* do our sample rate conversion and pass the results to the buffer
2066
* The logic here is complicated somewhat by the fact that we don't
2067
* know how much data is available, so we loop on reasonably sized
2068
* chunks, and let the BufferProcessor deal with the rest.
2071
/*This might be too big or small depending on SR conversion*/
2072
float data[ chan * inNumberFrames ];
2075
{ /*Run the buffer processor until we are out of data*/
2079
size = sizeof( data );
2080
err = AudioConverterFillBuffer(
2081
stream->inputSRConverter,
2083
&stream->inputRingBuffer,
2086
if( err != RING_BUFFER_EMPTY )
2088
assert( err == 0 || err == RING_BUFFER_EMPTY );
2090
f = size / ( chan * sizeof(float) );
2091
PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
2094
PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2096
stream->xrunFlags );
2097
stream->xrunFlags = 0;
2099
PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2104
PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2107
} while( callbackResult == paContinue && !err );
2111
switch( callbackResult )
2113
case paContinue: break;
2116
stream->isTimeSet = FALSE;
2117
stream->state = CALLBACK_STOPPED ;
2118
if( stream->outputUnit )
2119
AudioOutputUnitStop(stream->outputUnit);
2120
if( stream->inputUnit )
2121
AudioOutputUnitStop(stream->inputUnit);
2125
PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
2131
When CloseStream() is called, the multi-api layer ensures that
2132
the stream has already been stopped or aborted.
2134
static PaError CloseStream( PaStream* s )
2136
/* This may be called from a failed OpenStream.
2137
Therefore, each piece of info is treated seperately. */
2138
PaError result = paNoError;
2139
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2141
VVDBUG(("CloseStream()\n"));
2142
VDBUG( ( "Closing stream.\n" ) );
2145
if( stream->outputUnit ) {
2146
int count = removeFromXRunListenerList( stream );
2148
AudioDeviceRemovePropertyListener( stream->outputDevice,
2151
kAudioDeviceProcessorOverload,
2154
if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
2155
int count = removeFromXRunListenerList( stream );
2157
AudioDeviceRemovePropertyListener( stream->inputDevice,
2160
kAudioDeviceProcessorOverload,
2163
if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2164
AudioUnitUninitialize( stream->outputUnit );
2165
CloseComponent( stream->outputUnit );
2167
stream->outputUnit = NULL;
2168
if( stream->inputUnit )
2170
AudioUnitUninitialize( stream->inputUnit );
2171
CloseComponent( stream->inputUnit );
2172
stream->inputUnit = NULL;
2174
if( stream->inputRingBuffer.buffer )
2175
free( (void *) stream->inputRingBuffer.buffer );
2176
stream->inputRingBuffer.buffer = NULL;
2177
/*TODO: is there more that needs to be done on error
2178
from AudioConverterDispose?*/
2179
if( stream->inputSRConverter )
2180
ERR( AudioConverterDispose( stream->inputSRConverter ) );
2181
stream->inputSRConverter = NULL;
2182
if( stream->inputAudioBufferList.mBuffers[0].mData )
2183
free( stream->inputAudioBufferList.mBuffers[0].mData );
2184
stream->inputAudioBufferList.mBuffers[0].mData = NULL;
2186
result = destroyBlioRingBuffers( &stream->blio );
2189
if( stream->bufferProcessorIsInitialized )
2190
PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );
2191
PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
2192
PaUtil_FreeMemory( stream );
2198
static PaError StartStream( PaStream *s )
2200
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2201
OSStatus result = noErr;
2202
VVDBUG(("StartStream()\n"));
2203
VDBUG( ( "Starting stream.\n" ) );
2205
#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2207
/*FIXME: maybe want to do this on close/abort for faster start? */
2208
PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
2209
if( stream->inputSRConverter )
2210
ERR_WRAP( AudioConverterReset( stream->inputSRConverter ) );
2213
stream->state = ACTIVE;
2214
if( stream->inputUnit ) {
2215
ERR_WRAP( AudioOutputUnitStart(stream->inputUnit) );
2217
if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2218
ERR_WRAP( AudioOutputUnitStart(stream->outputUnit) );
2221
//setStreamStartTime( stream );
2222
//stream->isTimeSet = TRUE;
2228
// it's not clear from appl's docs that this really waits
2229
// until all data is flushed.
2230
static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
2232
Boolean isRunning = 1;
2233
while( isRunning ) {
2234
UInt32 s = sizeof( isRunning );
2235
ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element, &isRunning, &s );
2243
static PaError StopStream( PaStream *s )
2245
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2246
OSStatus result = noErr;
2248
VVDBUG(("StopStream()\n"));
2250
VDBUG( ("Waiting for BLIO.\n") );
2251
waitUntilBlioWriteBufferIsFlushed( &stream->blio );
2252
VDBUG( ( "Stopping stream.\n" ) );
2254
stream->isTimeSet = FALSE;
2255
stream->state = STOPPING;
2257
#define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2258
/* -- stop and reset -- */
2259
if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
2261
ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
2262
ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
2263
ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2264
ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
2265
ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
2269
if( stream->inputUnit )
2271
ERR_WRAP(AudioOutputUnitStop(stream->inputUnit) );
2272
ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2273
ERR_WRAP(AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1));
2275
if( stream->outputUnit )
2277
ERR_WRAP(AudioOutputUnitStop(stream->outputUnit));
2278
ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
2279
ERR_WRAP(AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0));
2282
if( stream->inputRingBuffer.buffer ) {
2283
PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
2284
bzero( (void *)stream->inputRingBuffer.buffer,
2285
stream->inputRingBuffer.bufferSize );
2286
/* advance the write point a little, so we are reading from the
2287
middle of the buffer. We'll need extra at the end because
2288
testing has shown that this helps. */
2289
if( stream->outputUnit )
2290
PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
2291
stream->inputRingBuffer.bufferSize
2292
/ RING_BUFFER_ADVANCE_DENOMINATOR );
2295
stream->xrunFlags = 0;
2296
stream->state = STOPPED;
2298
paErr = resetBlioRingBuffers( &stream->blio );
2303
//stream->isTimeSet = FALSE;
2306
VDBUG( ( "Stream Stopped.\n" ) );
2311
static PaError AbortStream( PaStream *s )
2313
VVDBUG(("AbortStream()->StopStream()\n"));
2314
VDBUG( ( "Aborting stream.\n" ) );
2315
/* We have nothing faster than StopStream. */
2316
return StopStream(s);
2320
static PaError IsStreamStopped( PaStream *s )
2322
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2323
VVDBUG(("IsStreamStopped()\n"));
2325
return stream->state == STOPPED ? 1 : 0;
2329
static PaError IsStreamActive( PaStream *s )
2331
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2332
VVDBUG(("IsStreamActive()\n"));
2333
return ( stream->state == ACTIVE || stream->state == STOPPING );
2337
static double GetStreamCpuLoad( PaStream* s )
2339
PaMacCoreStream *stream = (PaMacCoreStream*)s;
2340
VVDBUG(("GetStreamCpuLoad()\n"));
2342
return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer );