/[pcsx2_0.9.7]/trunk/3rdparty/portaudio/src/hostapi/coreaudio/pa_mac_core.c
Revision 273
Fri Nov 12 01:10:22 2010 UTC by william
File MIME type: text/plain
File size: 102189 byte(s)
Auto Commited Import of: pcsx2-0.9.7-DEBUG (upstream: v0.9.7.4013 local: v0.9.7.197-latest) in ./trunk
1 /*
2 * Implementation of the PortAudio API for Apple AUHAL
3 *
4 * PortAudio Portable Real-Time Audio Library
5 * Latest Version at: http://www.portaudio.com
6 *
7 * Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
8 * Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
9 *
10 * Dominic's code was based on code by Phil Burk, Darren Gibbs,
11 * Gord Peters, Stephane Letz, and Greg Pfiel.
12 *
13 * The following people also deserve acknowledgements:
14 *
15 * Olivier Tristan for feedback and testing
16 * Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
17 * interface.
18 *
19 *
20 * Based on the Open Source API proposed by Ross Bencina
21 * Copyright (c) 1999-2002 Ross Bencina, Phil Burk
22 *
23 * Permission is hereby granted, free of charge, to any person obtaining
24 * a copy of this software and associated documentation files
25 * (the "Software"), to deal in the Software without restriction,
26 * including without limitation the rights to use, copy, modify, merge,
27 * publish, distribute, sublicense, and/or sell copies of the Software,
28 * and to permit persons to whom the Software is furnished to do so,
29 * subject to the following conditions:
30 *
31 * The above copyright notice and this permission notice shall be
32 * included in all copies or substantial portions of the Software.
33 *
34 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
38 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
39 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */
42
43 /*
44 * The text above constitutes the entire PortAudio license; however,
45 * the PortAudio community also makes the following non-binding requests:
46 *
47 * Any person wishing to distribute modifications to the Software is
48 * requested to send the modifications to the original developer so that
49 * they can be incorporated into the canonical version. It is also
50 * requested that these non-binding requests be included along with the
51 * license above.
52 */
53
54 /**
55 @file pa_mac_core
56 @ingroup hostapi_src
57 @author Bjorn Roche
58 @brief AUHAL implementation of PortAudio
59 */
60
61 /* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
62 * PaMacCore_SetError() will do this.
63 */
64
65 #include "pa_mac_core_internal.h"
66
67 #include <string.h> /* strlen(), memcmp() etc. */
68 #include <libkern/OSAtomic.h>
69
70 #include "pa_mac_core.h"
71 #include "pa_mac_core_utilities.h"
72 #include "pa_mac_core_blocking.h"
73
74
75 #ifdef __cplusplus
76 extern "C"
77 {
78 #endif /* __cplusplus */
79
80 /* prototypes for functions declared in this file */
81
82 PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );
83
84 /*
85 * Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
86 * with the requested flags and initializes channel map.
87 */
88 void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, const unsigned long flags )
89 {
90 bzero( data, sizeof( PaMacCoreStreamInfo ) );
91 data->size = sizeof( PaMacCoreStreamInfo );
92 data->hostApiType = paCoreAudio;
93 data->version = 0x01;
94 data->flags = flags;
95 data->channelMap = NULL;
96 data->channelMapSize = 0;
97 }
98
99 /*
100 * Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
101 */
102 void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
103 {
104 data->channelMap = channelMap;
105 data->channelMapSize = channelMapSize;
106 }
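/*
 * Usage sketch (illustrative, not part of this implementation): a client
 * typically fills a PaMacCoreStreamInfo with the two helpers above and points
 * PaStreamParameters.hostApiSpecificStreamInfo at it before opening a stream.
 * Apart from the PaMacCore_* helpers and the paMacCorePlayNice flag, the names
 * below are assumptions made only for the example. Note that the channel map
 * array must stay valid for the life of the stream, since only the pointer is
 * stored.
 *
 *   PaMacCoreStreamInfo info;
 *   static const SInt32 map[2] = { -1, 0 };  // device channel 0 silent, stream channel 0 -> device channel 1
 *   PaStreamParameters outParams;
 *
 *   PaMacCore_SetupStreamInfo( &info, paMacCorePlayNice );
 *   PaMacCore_SetupChannelMap( &info, map, 2 );
 *   outParams.hostApiSpecificStreamInfo = &info;
 *   // ...fill the remaining PaStreamParameters fields, then call Pa_OpenStream()...
 */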
107 static char *channelName = NULL;
108 static int channelNameSize = 0;
109 static bool ensureChannelNameSize( int size )
110 {
111 if( size >= channelNameSize ) {
112 free( channelName );
113 channelName = (char *) malloc( ( channelNameSize = size ) + 1 );
114 if( !channelName ) {
115 channelNameSize = 0;
116 return false;
117 }
118 }
119 return true;
120 }
121 /*
122 * Function declared in pa_mac_core.h. Retrieves channel names.
123 */
124 const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
125 {
126 struct PaUtilHostApiRepresentation *hostApi;
127 PaError err;
128 OSStatus error;
129 err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
130 assert(err == paNoError);
131 if( err != paNoError )
132 return NULL;
133 PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
134 AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
135
136 UInt32 size = 0;
137
138 error = AudioDeviceGetPropertyInfo( hostApiDevice,
139 channelIndex + 1,
140 input,
141 kAudioDevicePropertyChannelName,
142 &size,
143 NULL );
144 if( error ) {
145 //try the CFString
146 CFStringRef name;
147 bool isDeviceName = false;
148 size = sizeof( name );
149 error = AudioDeviceGetProperty( hostApiDevice,
150 channelIndex + 1,
151 input,
152 kAudioDevicePropertyChannelNameCFString,
153 &size,
154 &name );
155 if( error ) { //as a last-ditch effort, get the device name. Later we'll append the channel number.
156 size = sizeof( name );
157 error = AudioDeviceGetProperty( hostApiDevice,
158 channelIndex + 1,
159 input,
160 kAudioDevicePropertyDeviceNameCFString,
161 &size,
162 &name );
163 if( error )
164 return NULL;
165 isDeviceName = true;
166 }
167 if( isDeviceName ) {
168 name = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%@: %d"), name, channelIndex + 1 );
169 }
170
171 CFIndex length = CFStringGetLength(name);
172 while( ensureChannelNameSize( length * sizeof(UniChar) + 1 ) ) {
173 if( CFStringGetCString( name, channelName, channelNameSize, kCFStringEncodingUTF8 ) ) {
174 if( isDeviceName )
175 CFRelease( name );
176 return channelName;
177 }
178 if( length == 0 )
179 ++length;
180 length *= 2;
181 }
182 if( isDeviceName )
183 CFRelease( name );
184 return NULL;
185 }
186
187 //continue with C string:
188 if( !ensureChannelNameSize( size ) )
189 return NULL;
190
191 error = AudioDeviceGetProperty( hostApiDevice,
192 channelIndex + 1,
193 input,
194 kAudioDevicePropertyChannelName,
195 &size,
196 channelName );
197
198 if( error ) {
199 ERR( error );
200 return NULL;
201 }
202 return channelName;
203 }
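/*
 * Usage sketch (illustrative only): channel names are usually listed by walking
 * the channel count reported by Pa_GetDeviceInfo(). The example assumes the
 * PortAudio device index lines up with this host API's device list (true when
 * Core Audio is the only host API). The returned string lives in a single
 * static buffer, so copy it before the next call if it must be kept.
 *
 *   const PaDeviceInfo *di = Pa_GetDeviceInfo( device );
 *   int c;
 *   for( c = 0; di && c < di->maxInputChannels; ++c ) {
 *       const char *n = PaMacCore_GetChannelName( device, c, true );
 *       printf( "input channel %d: %s\n", c, n ? n : "(unknown)" );
 *   }
 */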
204
205
206
207
208
209 AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
210 {
211 PaMacCoreStream *stream = (PaMacCoreStream*)s;
212 VVDBUG(("PaMacCore_GetStreamInputHandle()\n"));
213
214 return ( stream->inputDevice );
215 }
216
217 AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
218 {
219 PaMacCoreStream *stream = (PaMacCoreStream*)s;
220 VVDBUG(("PaMacCore_GetStreamOutputHandle()\n"));
221
222 return ( stream->outputDevice );
223 }
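/*
 * Usage sketch (illustrative only): once Pa_OpenStream() has succeeded, the two
 * accessors above expose the underlying Core Audio device IDs, which can then
 * be handed to AudioDeviceGetProperty()-style calls that PortAudio itself does
 * not wrap. "paStream" is assumed to have been opened elsewhere.
 *
 *   AudioDeviceID outDev = PaMacCore_GetStreamOutputDevice( paStream );
 *   AudioDeviceID inDev  = PaMacCore_GetStreamInputDevice( paStream );
 */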
224
225 #ifdef __cplusplus
226 }
227 #endif /* __cplusplus */
228
229 #define RING_BUFFER_ADVANCE_DENOMINATOR (4)
230
231 static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
232 static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
233 const PaStreamParameters *inputParameters,
234 const PaStreamParameters *outputParameters,
235 double sampleRate );
236 static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
237 PaStream** s,
238 const PaStreamParameters *inputParameters,
239 const PaStreamParameters *outputParameters,
240 double sampleRate,
241 unsigned long framesPerBuffer,
242 PaStreamFlags streamFlags,
243 PaStreamCallback *streamCallback,
244 void *userData );
245 static PaError CloseStream( PaStream* stream );
246 static PaError StartStream( PaStream *stream );
247 static PaError StopStream( PaStream *stream );
248 static PaError AbortStream( PaStream *stream );
249 static PaError IsStreamStopped( PaStream *s );
250 static PaError IsStreamActive( PaStream *stream );
251 static PaTime GetStreamTime( PaStream *stream );
252 static OSStatus AudioIOProc( void *inRefCon,
253 AudioUnitRenderActionFlags *ioActionFlags,
254 const AudioTimeStamp *inTimeStamp,
255 UInt32 inBusNumber,
256 UInt32 inNumberFrames,
257 AudioBufferList *ioData );
258 static double GetStreamCpuLoad( PaStream* stream );
259
260 static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
261 PaDeviceInfo *deviceInfo,
262 AudioDeviceID macCoreDeviceId,
263 int isInput);
264
265 static PaError OpenAndSetupOneAudioUnit(
266 const PaMacCoreStream *stream,
267 const PaStreamParameters *inStreamParams,
268 const PaStreamParameters *outStreamParams,
269 const UInt32 requestedFramesPerBuffer,
270 UInt32 *actualInputFramesPerBuffer,
271 UInt32 *actualOutputFramesPerBuffer,
272 const PaMacAUHAL *auhalHostApi,
273 AudioUnit *audioUnit,
274 AudioConverterRef *srConverter,
275 AudioDeviceID *audioDevice,
276 const double sampleRate,
277 void *refCon );
278
279 /* for setting errors. */
280 #define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
281 PaUtil_SetLastHostErrorInfo( paInDevelopment, errorCode, errorText )
282
283 /*
284 * Callback called when starting or stopping a stream.
285 */
286 static void startStopCallback(
287 void * inRefCon,
288 AudioUnit ci,
289 AudioUnitPropertyID inID,
290 AudioUnitScope inScope,
291 AudioUnitElement inElement )
292 {
293 PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
294 UInt32 isRunning;
295 UInt32 size = sizeof( isRunning );
296 OSStatus err;
297 err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
298 assert( !err );
299 if( err )
300 isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
301 if( isRunning )
302 return; //We are only interested in when we are stopping
303 // -- if we are using 2 I/O units, we only need one notification!
304 if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
305 return;
306 PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
307 if( stream->state == STOPPING )
308 stream->state = STOPPED ;
309 if( sfc )
310 sfc( stream->streamRepresentation.userData );
311 }
312
313
314 /*currently, this is only used in initialization, but it might be modified
315 to be used when the list of devices changes.*/
316 static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
317 {
318 UInt32 size;
319 UInt32 propsize;
320 VVDBUG(("gatherDeviceInfo()\n"));
321 /* -- free any previous allocations -- */
322 if( auhalHostApi->devIds )
323 PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
324 auhalHostApi->devIds = NULL;
325
326 /* -- figure out how many devices there are -- */
327 AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
328 &propsize,
329 NULL );
330 auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );
331
332 VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );
333
334 /* -- copy the device IDs -- */
335 auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
336 auhalHostApi->allocations,
337 propsize );
338 if( !auhalHostApi->devIds )
339 return paInsufficientMemory;
340 AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
341 &propsize,
342 auhalHostApi->devIds );
343 #ifdef MAC_CORE_VERBOSE_DEBUG
344 {
345 int i;
346 for( i=0; i<auhalHostApi->devCount; ++i )
347 printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
348 }
349 #endif
350
351 size = sizeof(AudioDeviceID);
352 auhalHostApi->defaultIn = kAudioDeviceUnknown;
353 auhalHostApi->defaultOut = kAudioDeviceUnknown;
354
355 /* determine the default device. */
356 /* I am not sure how these calls to AudioHardwareGetProperty()
357 could fail, but in case they do, we use the first available
358 device as the default. */
359 if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
360 &size,
361 &auhalHostApi->defaultIn) ) {
362 int i;
363 auhalHostApi->defaultIn = kAudioDeviceUnknown;
364 VDBUG(("Failed to get default input device from OS."));
365 VDBUG((" I will substitute the first available input Device."));
366 for( i=0; i<auhalHostApi->devCount; ++i ) {
367 PaDeviceInfo devInfo;
368 if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
369 auhalHostApi->devIds[i], TRUE ) )
370 if( devInfo.maxInputChannels ) {
371 auhalHostApi->defaultIn = auhalHostApi->devIds[i];
372 break;
373 }
374 }
375 }
376 if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
377 &size,
378 &auhalHostApi->defaultOut) ) {
379 int i;
380 auhalHostApi->defaultOut = kAudioDeviceUnknown;
381 VDBUG(("Failed to get default output device from OS."));
382 VDBUG((" I will substitute the first available output Device."));
383 for( i=0; i<auhalHostApi->devCount; ++i ) {
384 PaDeviceInfo devInfo;
385 if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
386 auhalHostApi->devIds[i], FALSE ) )
387 if( devInfo.maxOutputChannels ) {
388 auhalHostApi->defaultOut = auhalHostApi->devIds[i];
389 break;
390 }
391 }
392 }
393
394 VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn ) );
395 VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );
396
397 return paNoError;
398 }
399
400 static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
401 PaDeviceInfo *deviceInfo,
402 AudioDeviceID macCoreDeviceId,
403 int isInput)
404 {
405 UInt32 propSize;
406 PaError err = paNoError;
407 UInt32 i;
408 int numChannels = 0;
409 AudioBufferList *buflist = NULL;
410 UInt32 frameLatency;
411
412 VVDBUG(("GetChannelInfo()\n"));
413
414 /* Get the number of channels from the stream configuration.
415 Fail if we can't get this. */
416
417 err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
418 if (err)
419 return err;
420
421 buflist = PaUtil_AllocateMemory(propSize);
422 if( !buflist )
423 return paInsufficientMemory;
424 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
425 if (err)
426 goto error;
427
428 for (i = 0; i < buflist->mNumberBuffers; ++i)
429 numChannels += buflist->mBuffers[i].mNumberChannels;
430
431 if (isInput)
432 deviceInfo->maxInputChannels = numChannels;
433 else
434 deviceInfo->maxOutputChannels = numChannels;
435
436 if (numChannels > 0) /* do not try to retrieve the latency if there are no channels. */
437 {
438 /* Get the latency. Don't fail if we can't get this. */
439 /* default to something reasonable */
440 deviceInfo->defaultLowInputLatency = .01;
441 deviceInfo->defaultHighInputLatency = .10;
442 deviceInfo->defaultLowOutputLatency = .01;
443 deviceInfo->defaultHighOutputLatency = .10;
444 propSize = sizeof(UInt32);
445 err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &frameLatency));
446 if (!err)
447 {
448 /** FEEDBACK:
449 * This code was arrived at by trial and error, and some extensive, but not exhaustive
450 * testing. Sebastien Beaulieu <seb@plogue.com> has suggested using
451 * kAudioDevicePropertyLatency + kAudioDevicePropertySafetyOffset + buffer size instead.
452 * At the time this code was written, many users were reporting dropouts with audio
453 * programs that probably used this formula. This was probably
454 * around 10.4.4, and the problem is probably fixed now. So perhaps
455 * his formula should be reviewed and used.
456 * */
457 double secondLatency = frameLatency / deviceInfo->defaultSampleRate;
458 if (isInput)
459 {
460 deviceInfo->defaultLowInputLatency = 3 * secondLatency;
461 deviceInfo->defaultHighInputLatency = 3 * 10 * secondLatency;
462 }
463 else
464 {
465 deviceInfo->defaultLowOutputLatency = 3 * secondLatency;
466 deviceInfo->defaultHighOutputLatency = 3 * 10 * secondLatency;
467 }
468 }
469 }
470 PaUtil_FreeMemory( buflist );
471 return paNoError;
472 error:
473 PaUtil_FreeMemory( buflist );
474 return err;
475 }
476
477 static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
478 PaDeviceInfo *deviceInfo,
479 AudioDeviceID macCoreDeviceId,
480 PaHostApiIndex hostApiIndex )
481 {
482 Float64 sampleRate;
483 char *name;
484 PaError err = paNoError;
485 UInt32 propSize;
486
487 VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));
488
489 memset(deviceInfo, 0, sizeof(*deviceInfo));
490
491 deviceInfo->structVersion = 2;
492 deviceInfo->hostApi = hostApiIndex;
493
494 /* Get the device name. Fail if we can't get it. */
495 err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
496 if (err)
497 return err;
498
499 name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize);
500 if ( !name )
501 return paInsufficientMemory;
502 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
503 if (err)
504 return err;
505 deviceInfo->name = name;
506
507 /* Try to get the default sample rate. Don't fail if we can't get this. */
508 propSize = sizeof(Float64);
509 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
510 if (err)
511 deviceInfo->defaultSampleRate = 0.0;
512 else
513 deviceInfo->defaultSampleRate = sampleRate;
514
515 /* Get the maximum number of input and output channels. Fail if we can't get this. */
516
517 err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
518 if (err)
519 return err;
520
521 err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
522 if (err)
523 return err;
524
525 return paNoError;
526 }
527
528 PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
529 {
530 PaError result = paNoError;
531 int i;
532 PaMacAUHAL *auhalHostApi = NULL;
533 PaDeviceInfo *deviceInfoArray;
534 int unixErr;
535
536 VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));
537
538 SInt32 major;
539 SInt32 minor;
540 Gestalt(gestaltSystemVersionMajor, &major);
541 Gestalt(gestaltSystemVersionMinor, &minor);
542
543 // Starting with 10.6 systems, the HAL notification thread is created internally
544 if (major == 10 && minor >= 6) {
545 CFRunLoopRef theRunLoop = NULL;
546 AudioObjectPropertyAddress theAddress = { kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
547 OSStatus osErr = AudioObjectSetPropertyData (kAudioObjectSystemObject, &theAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
548 if (osErr != noErr) {
549 goto error;
550 }
551 }
552
553 unixErr = initializeXRunListenerList();
554 if( 0 != unixErr ) {
555 return UNIX_ERR(unixErr);
556 }
557
558 auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
559 if( !auhalHostApi )
560 {
561 result = paInsufficientMemory;
562 goto error;
563 }
564
565 auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
566 if( !auhalHostApi->allocations )
567 {
568 result = paInsufficientMemory;
569 goto error;
570 }
571
572 auhalHostApi->devIds = NULL;
573 auhalHostApi->devCount = 0;
574
575 /* get the info we need about the devices */
576 result = gatherDeviceInfo( auhalHostApi );
577 if( result != paNoError )
578 goto error;
579
580 *hostApi = &auhalHostApi->inheritedHostApiRep;
581 (*hostApi)->info.structVersion = 1;
582 (*hostApi)->info.type = paCoreAudio;
583 (*hostApi)->info.name = "Core Audio";
584
585 (*hostApi)->info.defaultInputDevice = paNoDevice;
586 (*hostApi)->info.defaultOutputDevice = paNoDevice;
587
588 (*hostApi)->info.deviceCount = 0;
589
590 if( auhalHostApi->devCount > 0 )
591 {
592 (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
593 auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
594 if( !(*hostApi)->deviceInfos )
595 {
596 result = paInsufficientMemory;
597 goto error;
598 }
599
600 /* allocate all device info structs in a contiguous block */
601 deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
602 auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
603 if( !deviceInfoArray )
604 {
605 result = paInsufficientMemory;
606 goto error;
607 }
608
609 for( i=0; i < auhalHostApi->devCount; ++i )
610 {
611 int err;
612 err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
613 auhalHostApi->devIds[i],
614 hostApiIndex );
615 if (err == paNoError)
616 { /* copy some info and set the defaults */
617 (*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
618 if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
619 (*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
620 if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
621 (*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
622 (*hostApi)->info.deviceCount++;
623 }
624 else
625 { /* there was an error. we need to shift the devices down, so we ignore this one */
626 int j;
627 auhalHostApi->devCount--;
628 for( j=i; j<auhalHostApi->devCount; ++j )
629 auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
630 i--;
631 }
632 }
633 }
634
635 (*hostApi)->Terminate = Terminate;
636 (*hostApi)->OpenStream = OpenStream;
637 (*hostApi)->IsFormatSupported = IsFormatSupported;
638
639 PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
640 CloseStream, StartStream,
641 StopStream, AbortStream, IsStreamStopped,
642 IsStreamActive,
643 GetStreamTime, GetStreamCpuLoad,
644 PaUtil_DummyRead, PaUtil_DummyWrite,
645 PaUtil_DummyGetReadAvailable,
646 PaUtil_DummyGetWriteAvailable );
647
648 PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
649 CloseStream, StartStream,
650 StopStream, AbortStream, IsStreamStopped,
651 IsStreamActive,
652 GetStreamTime, PaUtil_DummyGetCpuLoad,
653 ReadStream, WriteStream,
654 GetStreamReadAvailable,
655 GetStreamWriteAvailable );
656
657 return result;
658
659 error:
660 if( auhalHostApi )
661 {
662 if( auhalHostApi->allocations )
663 {
664 PaUtil_FreeAllAllocations( auhalHostApi->allocations );
665 PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
666 }
667
668 PaUtil_FreeMemory( auhalHostApi );
669 }
670 return result;
671 }
672
673
674 static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
675 {
676 int unixErr;
677
678 PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
679
680 VVDBUG(("Terminate()\n"));
681
682 unixErr = destroyXRunListenerList();
683 if( 0 != unixErr )
684 UNIX_ERR(unixErr);
685
686 /*
687 IMPLEMENT ME:
688 - clean up any resources not handled by the allocation group
689 TODO: Double check that everything is handled by alloc group
690 */
691
692 if( auhalHostApi->allocations )
693 {
694 PaUtil_FreeAllAllocations( auhalHostApi->allocations );
695 PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
696 }
697
698 PaUtil_FreeMemory( auhalHostApi );
699 }
700
701
702 static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
703 const PaStreamParameters *inputParameters,
704 const PaStreamParameters *outputParameters,
705 double sampleRate )
706 {
707 int inputChannelCount, outputChannelCount;
708 PaSampleFormat inputSampleFormat, outputSampleFormat;
709
710 VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
711 inputParameters ? inputParameters->channelCount : -1,
712 inputParameters ? inputParameters->sampleFormat : -1,
713 outputParameters ? outputParameters->channelCount : -1,
714 outputParameters ? outputParameters->sampleFormat : -1,
715 (float) sampleRate ));
716
717 /** These first checks are standard PA checks. We do some fancier checks
718 later. */
719 if( inputParameters )
720 {
721 inputChannelCount = inputParameters->channelCount;
722 inputSampleFormat = inputParameters->sampleFormat;
723
724 /* all standard sample formats are supported by the buffer adapter,
725 this implementation doesn't support any custom sample formats */
726 if( inputSampleFormat & paCustomFormat )
727 return paSampleFormatNotSupported;
728
729 /* unless alternate device specification is supported, reject the use of
730 paUseHostApiSpecificDeviceSpecification */
731
732 if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
733 return paInvalidDevice;
734
735 /* check that input device can support inputChannelCount */
736 if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
737 return paInvalidChannelCount;
738 }
739 else
740 {
741 inputChannelCount = 0;
742 }
743
744 if( outputParameters )
745 {
746 outputChannelCount = outputParameters->channelCount;
747 outputSampleFormat = outputParameters->sampleFormat;
748
749 /* all standard sample formats are supported by the buffer adapter,
750 this implementation doesn't support any custom sample formats */
751 if( outputSampleFormat & paCustomFormat )
752 return paSampleFormatNotSupported;
753
754 /* unless alternate device specification is supported, reject the use of
755 paUseHostApiSpecificDeviceSpecification */
756
757 if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
758 return paInvalidDevice;
759
760 /* check that output device can support outputChannelCount */
761 if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
762 return paInvalidChannelCount;
763
764 }
765 else
766 {
767 outputChannelCount = 0;
768 }
769
770 /* FEEDBACK */
771 /* I think the only way to check a given format SR combo is */
772 /* to try opening it. This could be disruptive, is that Okay? */
773 /* The alternative is to just read off available sample rates, */
774 /* but this will not work 100% of the time (eg, a device that */
775 /* supports N output at one rate but only N/2 at a higher rate.)*/
776
777 /* The following code opens the device with the requested parameters to
778 see if it works. */
779 {
780 PaError err;
781 PaStream *s;
782 err = OpenStream( hostApi, &s, inputParameters, outputParameters,
783 sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
784 if( err != paNoError && err != paInvalidSampleRate )
785 DBUG( ( "OpenStream @ %g returned: %d: %s\n",
786 (float) sampleRate, err, Pa_GetErrorText( err ) ) );
787 if( err )
788 return err;
789 err = CloseStream( s );
790 if( err ) {
791 /* FEEDBACK: is this more serious? should we assert? */
792 DBUG( ( "WARNING: could not close Stream. %d: %s\n",
793 err, Pa_GetErrorText( err ) ) );
794 }
795 }
796
797 return paFormatIsSupported;
798 }
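/*
 * Client-side sketch (illustrative only): since this backend answers the query
 * by actually opening and closing a stream, callers normally probe support
 * through the portable API rather than guessing from the device info alone.
 * Assumes Pa_Initialize() has already been called.
 *
 *   PaStreamParameters out;
 *   out.device = Pa_GetDefaultOutputDevice();
 *   out.channelCount = 2;
 *   out.sampleFormat = paFloat32;
 *   out.suggestedLatency = Pa_GetDeviceInfo( out.device )->defaultLowOutputLatency;
 *   out.hostApiSpecificStreamInfo = NULL;
 *   if( Pa_IsFormatSupported( NULL, &out, 48000.0 ) == paFormatIsSupported )
 *       ;  // 48 kHz stereo float output can be opened on this device
 */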
799
800
801 static void UpdateReciprocalOfActualOutputSampleRateFromDeviceProperty( PaMacCoreStream *stream )
802 {
803 /* FIXME: not sure if this should be the sample rate of the output device or the output unit */
804 Float64 actualOutputSampleRate = stream->outDeviceSampleRate;
805 UInt32 propSize = sizeof(Float64);
806 OSStatus osErr = AudioDeviceGetProperty( stream->outputDevice, 0, /* isInput = */ FALSE, kAudioDevicePropertyActualSampleRate, &propSize, &actualOutputSampleRate);
807 if( osErr != noErr || actualOutputSampleRate < .01 ) // avoid divide by zero if there's an error
808 actualOutputSampleRate = stream->outDeviceSampleRate;
809
810 stream->recipricalOfActualOutputSampleRate = 1. / actualOutputSampleRate;
811 }
812
813 static OSStatus AudioDevicePropertyActualSampleRateListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
814 {
815 PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
816
817 pthread_mutex_lock( &stream->timingInformationMutex );
818 UpdateReciprocalOfActualOutputSampleRateFromDeviceProperty( stream );
819 pthread_mutex_unlock( &stream->timingInformationMutex );
820
821 return noErr;
822 }
823
824 static void UpdateOutputLatencySamplesFromDeviceProperty( PaMacCoreStream *stream )
825 {
826 UInt32 deviceOutputLatencySamples = 0;
827 UInt32 propSize = sizeof(UInt32);
828 OSStatus osErr = AudioDeviceGetProperty( stream->outputDevice, 0, /* isInput= */ FALSE, kAudioDevicePropertyLatency, &propSize, &deviceOutputLatencySamples);
829 if( osErr != noErr )
830 deviceOutputLatencySamples = 0;
831
832 stream->deviceOutputLatencySamples = deviceOutputLatencySamples;
833 }
834
835 static OSStatus AudioDevicePropertyOutputLatencySamplesListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
836 {
837 PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
838
839 pthread_mutex_lock( &stream->timingInformationMutex );
840 UpdateOutputLatencySamplesFromDeviceProperty( stream );
841 pthread_mutex_unlock( &stream->timingInformationMutex );
842
843 return noErr;
844 }
845
846 static void UpdateInputLatencySamplesFromDeviceProperty( PaMacCoreStream *stream )
847 {
848 UInt32 deviceInputLatencySamples = 0;
849 UInt32 propSize = sizeof(UInt32);
850 OSStatus osErr = AudioDeviceGetProperty( stream->inputDevice, 0, /* isInput= */ TRUE, kAudioDevicePropertyLatency, &propSize, &deviceInputLatencySamples);
851 if( osErr != noErr )
852 deviceInputLatencySamples = 0;
853
854 stream->deviceInputLatencySamples = deviceInputLatencySamples;
855 }
856
857 static OSStatus AudioDevicePropertyInputLatencySamplesListenerProc( AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput, AudioDevicePropertyID inPropertyID, void *inClientData )
858 {
859 PaMacCoreStream *stream = (PaMacCoreStream*)inClientData;
860
861 pthread_mutex_lock( &stream->timingInformationMutex );
862 UpdateInputLatencySamplesFromDeviceProperty( stream );
863 pthread_mutex_unlock( &stream->timingInformationMutex );
864
865 return noErr;
866 }
867
868
869 static PaError OpenAndSetupOneAudioUnit(
870 const PaMacCoreStream *stream,
871 const PaStreamParameters *inStreamParams,
872 const PaStreamParameters *outStreamParams,
873 const UInt32 requestedFramesPerBuffer,
874 UInt32 *actualInputFramesPerBuffer,
875 UInt32 *actualOutputFramesPerBuffer,
876 const PaMacAUHAL *auhalHostApi,
877 AudioUnit *audioUnit,
878 AudioConverterRef *srConverter,
879 AudioDeviceID *audioDevice,
880 const double sampleRate,
881 void *refCon )
882 {
883 ComponentDescription desc;
884 Component comp;
885 /*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
886 AudioStreamBasicDescription desiredFormat;
887 OSStatus result = noErr;
888 PaError paResult = paNoError;
889 int line = 0;
890 UInt32 callbackKey;
891 AURenderCallbackStruct rcbs;
892 unsigned long macInputStreamFlags = paMacCorePlayNice;
893 unsigned long macOutputStreamFlags = paMacCorePlayNice;
894 SInt32 const *inChannelMap = NULL;
895 SInt32 const *outChannelMap = NULL;
896 unsigned long inChannelMapSize = 0;
897 unsigned long outChannelMapSize = 0;
898
899 VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
900 inStreamParams ? inStreamParams->channelCount : -1,
901 inStreamParams ? inStreamParams->sampleFormat : -1,
902 outStreamParams ? outStreamParams->channelCount : -1,
903 outStreamParams ? outStreamParams->sampleFormat : -1,
904 requestedFramesPerBuffer ));
905
906 /* -- handle the degenerate case -- */
907 if( !inStreamParams && !outStreamParams ) {
908 *audioUnit = NULL;
909 *audioDevice = kAudioDeviceUnknown;
910 return paNoError;
911 }
912
913 /* -- get the user's api specific info, if they set any -- */
914 if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
915 {
916 macInputStreamFlags=
917 ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
918 ->flags;
919 inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
920 ->channelMap;
921 inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
922 ->channelMapSize;
923 }
924 if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
925 {
926 macOutputStreamFlags=
927 ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
928 ->flags;
929 outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
930 ->channelMap;
931 outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
932 ->channelMapSize;
933 }
934 /* Override user's flags here, if desired for testing. */
935
936 /*
937 * The HAL AU is a Mac OS style "component".
938 * the first few steps deal with that.
939 * Later steps work on a combination of Mac OS
940 * components and the slightly lower level
941 * HAL.
942 */
943
944 /* -- describe the output type AudioUnit -- */
945 /* Note: for the default AudioUnit, we could use the
946 * componentSubType value kAudioUnitSubType_DefaultOutput;
947 * but I don't think that's relevant here.
948 */
949 desc.componentType = kAudioUnitType_Output;
950 desc.componentSubType = kAudioUnitSubType_HALOutput;
951 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
952 desc.componentFlags = 0;
953 desc.componentFlagsMask = 0;
954 /* -- find the component -- */
955 comp = FindNextComponent( NULL, &desc );
956 if( !comp )
957 {
958 DBUG( ( "AUHAL component not found." ) );
959 *audioUnit = NULL;
960 *audioDevice = kAudioDeviceUnknown;
961 return paUnanticipatedHostError;
962 }
963 /* -- open it -- */
964 result = OpenAComponent( comp, audioUnit );
965 if( result )
966 {
967 DBUG( ( "Failed to open AUHAL component." ) );
968 *audioUnit = NULL;
969 *audioDevice = kAudioDeviceUnknown;
970 return ERR( result );
971 }
972 /* -- prepare a little error handling logic / hackery -- */
973 #define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
974
975 /* -- if there is input, we have to explicitly enable input -- */
976 if( inStreamParams )
977 {
978 UInt32 enableIO = 1;
979 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
980 kAudioOutputUnitProperty_EnableIO,
981 kAudioUnitScope_Input,
982 INPUT_ELEMENT,
983 &enableIO,
984 sizeof(enableIO) ) );
985 }
986 /* -- if there is no output, we must explicitly disable output -- */
987 if( !outStreamParams )
988 {
989 UInt32 enableIO = 0;
990 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
991 kAudioOutputUnitProperty_EnableIO,
992 kAudioUnitScope_Output,
993 OUTPUT_ELEMENT,
994 &enableIO,
995 sizeof(enableIO) ) );
996 }
997
998 /* -- set the devices -- */
999 /* make sure input and output are the same device if we are doing input and
1000 output. */
1001 if( inStreamParams && outStreamParams )
1002 {
1003 assert( outStreamParams->device == inStreamParams->device );
1004 }
1005 if( inStreamParams )
1006 {
1007 *audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
1008 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1009 kAudioOutputUnitProperty_CurrentDevice,
1010 kAudioUnitScope_Global,
1011 INPUT_ELEMENT,
1012 audioDevice,
1013 sizeof(AudioDeviceID) ) );
1014 }
1015 if( outStreamParams && outStreamParams != inStreamParams )
1016 {
1017 *audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
1018 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1019 kAudioOutputUnitProperty_CurrentDevice,
1020 kAudioUnitScope_Global,
1021 OUTPUT_ELEMENT,
1022 audioDevice,
1023 sizeof(AudioDeviceID) ) );
1024 }
1025 /* -- add listener for dropouts -- */
1026 result = AudioDeviceAddPropertyListener( *audioDevice,
1027 0,
1028 outStreamParams ? false : true,
1029 kAudioDeviceProcessorOverload,
1030 xrunCallback,
1031 addToXRunListenerList( (void *)stream ) ) ;
1032 if( result == kAudioHardwareIllegalOperationError ) {
1033 // -- already registered, we're good
1034 } else {
1035 // -- not already registered, just check for errors
1036 ERR_WRAP( result );
1037 }
1038 /* -- listen for stream start and stop -- */
1039 ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
1040 kAudioOutputUnitProperty_IsRunning,
1041 startStopCallback,
1042 (void *)stream ) );
1043
1044 /* -- set format -- */
1045 bzero( &desiredFormat, sizeof(desiredFormat) );
1046 desiredFormat.mFormatID = kAudioFormatLinearPCM ;
1047 desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
1048 desiredFormat.mFramesPerPacket = 1;
1049 desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
1050
1051 result = 0;
1052 /* set device format first, but only touch the device if the user asked */
1053 if( inStreamParams ) {
1054 /*The callback never calls back if we don't set the FPB */
1055 /*This seems weird, because I would think setting anything on the device
1056 would be disruptive.*/
1057 paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
1058 requestedFramesPerBuffer,
1059 actualInputFramesPerBuffer );
1060 if( paResult ) goto error;
1061 if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
1062 bool requireExact;
1063 requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
1064 paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
1065 requireExact, sampleRate );
1066 if( paResult ) goto error;
1067 }
1068 if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
1069 *actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
1070 }
1071 if( outStreamParams && !inStreamParams ) {
1072 /*The callback never calls back if we don't set the FPB */
1073 /*This seems weird, because I would think setting anything on the device
1074 would be disruptive.*/
1075 paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
1076 requestedFramesPerBuffer,
1077 actualOutputFramesPerBuffer );
1078 if( paResult ) goto error;
1079 if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
1080 bool requireExact;
1081 requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
1082 paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1083 requireExact, sampleRate );
1084 if( paResult ) goto error;
1085 }
1086 }
1087
1088 /* -- set the quality of the output converter -- */
1089 if( outStreamParams ) {
1090 UInt32 value = kAudioConverterQuality_Max;
1091 switch( macOutputStreamFlags & 0x0700 ) {
1092 case 0x0100: /*paMacCore_ConversionQualityMin:*/
1093 value=kRenderQuality_Min;
1094 break;
1095 case 0x0200: /*paMacCore_ConversionQualityLow:*/
1096 value=kRenderQuality_Low;
1097 break;
1098 case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1099 value=kRenderQuality_Medium;
1100 break;
1101 case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1102 value=kRenderQuality_High;
1103 break;
1104 }
1105 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1106 kAudioUnitProperty_RenderQuality,
1107 kAudioUnitScope_Global,
1108 OUTPUT_ELEMENT,
1109 &value,
1110 sizeof(value) ) );
1111 }
1112 /* now set the format on the Audio Units. */
1113 if( outStreamParams )
1114 {
1115 desiredFormat.mSampleRate =sampleRate;
1116 desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1117 desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1118 desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1119 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1120 kAudioUnitProperty_StreamFormat,
1121 kAudioUnitScope_Input,
1122 OUTPUT_ELEMENT,
1123 &desiredFormat,
1124 sizeof(AudioStreamBasicDescription) ) );
1125 }
1126 if( inStreamParams )
1127 {
1128 AudioStreamBasicDescription sourceFormat;
1129 UInt32 size = sizeof( AudioStreamBasicDescription );
1130
1131 /* keep the sample rate of the device, or we confuse AUHAL */
1132 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1133 kAudioUnitProperty_StreamFormat,
1134 kAudioUnitScope_Input,
1135 INPUT_ELEMENT,
1136 &sourceFormat,
1137 &size ) );
1138 desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1139 desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1140 desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1141 desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1142 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1143 kAudioUnitProperty_StreamFormat,
1144 kAudioUnitScope_Output,
1145 INPUT_ELEMENT,
1146 &desiredFormat,
1147 sizeof(AudioStreamBasicDescription) ) );
1148 }
1149 /* set the maximumFramesPerSlice */
1150 /* not doing this causes real problems
1151 (eg. the callback might not be called). The idea of setting both this
1152 and the frames per buffer on the device is that we'll be most likely
1153 to actually get the frame size we requested in the callback with the
1154 minimum latency. */
1155 if( outStreamParams ) {
1156 UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1157 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1158 kAudioUnitProperty_MaximumFramesPerSlice,
1159 kAudioUnitScope_Input,
1160 OUTPUT_ELEMENT,
1161 actualOutputFramesPerBuffer,
1162 sizeof(*actualOutputFramesPerBuffer) ) );
1163 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1164 kAudioUnitProperty_MaximumFramesPerSlice,
1165 kAudioUnitScope_Global,
1166 OUTPUT_ELEMENT,
1167 actualOutputFramesPerBuffer,
1168 &size ) );
1169 }
1170 if( inStreamParams ) {
1171 /*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1172 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1173 kAudioUnitProperty_MaximumFramesPerSlice,
1174 kAudioUnitScope_Output,
1175 INPUT_ELEMENT,
1176 actualInputFramesPerBuffer,
1177 sizeof(*actualInputFramesPerBuffer) ) );
1178 /* Don't know why this causes problems
1179 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1180 kAudioUnitProperty_MaximumFramesPerSlice,
1181 kAudioUnitScope_Global, //Output,
1182 INPUT_ELEMENT,
1183 actualInputFramesPerBuffer,
1184 &size ) );
1185 */
1186 }
1187
1188 /* -- if we have input, we may need to setup an SR converter -- */
1189 /* even if we got the sample rate we asked for, we need to do
1190 the conversion in case another program changes the underlying SR. */
1191 /* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1192 if( inStreamParams ) {
1193 AudioStreamBasicDescription desiredFormat;
1194 AudioStreamBasicDescription sourceFormat;
1195 UInt32 sourceSize = sizeof( sourceFormat );
1196 bzero( &desiredFormat, sizeof(desiredFormat) );
1197 desiredFormat.mSampleRate = sampleRate;
1198 desiredFormat.mFormatID = kAudioFormatLinearPCM ;
1199 desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
1200 desiredFormat.mFramesPerPacket = 1;
1201 desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
1202 desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1203 desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1204 desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1205
1206 /* get the source format */
1207 ERR_WRAP( AudioUnitGetProperty(
1208 *audioUnit,
1209 kAudioUnitProperty_StreamFormat,
1210 kAudioUnitScope_Output,
1211 INPUT_ELEMENT,
1212 &sourceFormat,
1213 &sourceSize ) );
1214
1215 if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1216 {
1217 UInt32 value = kAudioConverterQuality_Max;
1218 switch( macInputStreamFlags & 0x0700 ) {
1219 case 0x0100: /*paMacCore_ConversionQualityMin:*/
1220 value=kAudioConverterQuality_Min;
1221 break;
1222 case 0x0200: /*paMacCore_ConversionQualityLow:*/
1223 value=kAudioConverterQuality_Low;
1224 break;
1225 case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1226 value=kAudioConverterQuality_Medium;
1227 break;
1228 case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1229 value=kAudioConverterQuality_High;
1230 break;
1231 }
1232 VDBUG(( "Creating sample rate converter for input"
1233 " to convert from %g to %g\n",
1234 (float)sourceFormat.mSampleRate,
1235 (float)desiredFormat.mSampleRate ) );
1236 /* create our converter */
1237 ERR_WRAP( AudioConverterNew(
1238 &sourceFormat,
1239 &desiredFormat,
1240 srConverter ) );
1241 /* Set quality */
1242 ERR_WRAP( AudioConverterSetProperty(
1243 *srConverter,
1244 kAudioConverterSampleRateConverterQuality,
1245 sizeof( value ),
1246 &value ) );
1247 }
1248 }
1249 /* -- set IOProc (callback) -- */
1250 callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1251 : kAudioOutputUnitProperty_SetInputCallback ;
1252 rcbs.inputProc = AudioIOProc;
1253 rcbs.inputProcRefCon = refCon;
1254 ERR_WRAP( AudioUnitSetProperty(
1255 *audioUnit,
1256 callbackKey,
1257 kAudioUnitScope_Output,
1258 outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1259 &rcbs,
1260 sizeof(rcbs)) );
1261
1262 if( inStreamParams && outStreamParams && *srConverter )
1263 ERR_WRAP( AudioUnitSetProperty(
1264 *audioUnit,
1265 kAudioOutputUnitProperty_SetInputCallback,
1266 kAudioUnitScope_Output,
1267 INPUT_ELEMENT,
1268 &rcbs,
1269 sizeof(rcbs)) );
1270
1271 /* channel mapping. */
1272 if(inChannelMap)
1273 {
1274 UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1275
1276 //for each channel of desired input, map the channel from
1277 //the device's output channel.
1278 ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1279 kAudioOutputUnitProperty_ChannelMap,
1280 kAudioUnitScope_Output,
1281 INPUT_ELEMENT,
1282 inChannelMap,
1283 mapSize));
1284 }
1285 if(outChannelMap)
1286 {
1287 UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1288
1289 //for each channel of desired output, map the channel from
1290 //the device's output channel.
1291 ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1292 kAudioOutputUnitProperty_ChannelMap,
1293 kAudioUnitScope_Output,
1294 OUTPUT_ELEMENT,
1295 outChannelMap,
1296 mapSize));
1297 }
1298 /* initialize the audio unit */
1299 ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1300
1301 if( inStreamParams && outStreamParams )
1302 VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1303 else if( inStreamParams )
1304 VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1305 else if( outStreamParams )
1306 VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1307 return paNoError;
1308 #undef ERR_WRAP
1309
1310 error:
1311 CloseComponent( *audioUnit );
1312 *audioUnit = NULL;
1313 if( result )
1314 return PaMacCore_SetError( result, line, 1 );
1315 return paResult;
1316 }
1317
1318 /* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
1319 static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1320 PaStream** s,
1321 const PaStreamParameters *inputParameters,
1322 const PaStreamParameters *outputParameters,
1323 double sampleRate,
1324 unsigned long framesPerBuffer,
1325 PaStreamFlags streamFlags,
1326 PaStreamCallback *streamCallback,
1327 void *userData )
1328 {
1329 PaError result = paNoError;
1330 PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1331 PaMacCoreStream *stream = 0;
1332 int inputChannelCount, outputChannelCount;
1333 PaSampleFormat inputSampleFormat, outputSampleFormat;
1334 PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1335 VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1336 inputParameters ? inputParameters->channelCount : -1,
1337 inputParameters ? inputParameters->sampleFormat : -1,
1338 outputParameters ? outputParameters->channelCount : -1,
1339 outputParameters ? outputParameters->sampleFormat : -1,
1340 (float) sampleRate,
1341 framesPerBuffer ));
1342 VDBUG( ("Opening Stream.\n") );
1343
1344 /*These first few bits of code are from paSkeleton with few modifications.*/
1345 if( inputParameters )
1346 {
1347 inputChannelCount = inputParameters->channelCount;
1348 inputSampleFormat = inputParameters->sampleFormat;
1349
1350 /* unless alternate device specification is supported, reject the use of
1351 paUseHostApiSpecificDeviceSpecification */
1352
1353 if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1354 return paInvalidDevice;
1355
1356 /* check that input device can support inputChannelCount */
1357 if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1358 return paInvalidChannelCount;
1359
1360 /* Host supports interleaved float32 */
1361 hostInputSampleFormat = paFloat32;
1362 }
1363 else
1364 {
1365 inputChannelCount = 0;
1366 inputSampleFormat = hostInputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1367 }
1368
1369 if( outputParameters )
1370 {
1371 outputChannelCount = outputParameters->channelCount;
1372 outputSampleFormat = outputParameters->sampleFormat;
1373
1374 /* unless alternate device specification is supported, reject the use of
1375 paUseHostApiSpecificDeviceSpecification */
1376
1377 if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1378 return paInvalidDevice;
1379
1380 /* check that output device can support outputChannelCount */
1381 if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1382 return paInvalidChannelCount;
1383
1384 /* Host supports interleaved float32 */
1385 hostOutputSampleFormat = paFloat32;
1386 }
1387 else
1388 {
1389 outputChannelCount = 0;
1390 outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1391 }
1392
1393 /* validate platform specific flags */
1394 if( (streamFlags & paPlatformSpecificFlags) != 0 )
1395 return paInvalidFlag; /* unexpected platform specific flag */
1396
1397 stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1398 if( !stream )
1399 {
1400 result = paInsufficientMemory;
1401 goto error;
1402 }
1403
1404 /* If we fail after this point, we may be left in a bad state, with
1405 some data structures set up and others not. So, the first thing we
1406 do is initialize everything so that if we fail, we know what hasn't
1407 been touched.
1408 */
1409
1410 stream->inputAudioBufferList.mBuffers[0].mData = NULL;
1411 stream->inputRingBuffer.buffer = NULL;
1412 bzero( &stream->blio, sizeof( PaMacBlio ) );
1413 /*
1414 stream->blio.inputRingBuffer.buffer = NULL;
1415 stream->blio.outputRingBuffer.buffer = NULL;
1416 stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1417 stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1418 stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1419 stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1420 */
1421 stream->inputSRConverter = NULL;
1422 stream->inputUnit = NULL;
1423 stream->outputUnit = NULL;
1424 stream->inputFramesPerBuffer = 0;
1425 stream->outputFramesPerBuffer = 0;
1426 stream->bufferProcessorIsInitialized = FALSE;
1427 stream->timingInformationMutexIsInitialized = 0;
1428
1429 /* assert( streamCallback ) ; */ /* only callback mode is implemented */
1430 if( streamCallback )
1431 {
1432 PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1433 &auhalHostApi->callbackStreamInterface,
1434 streamCallback, userData );
1435 }
1436 else
1437 {
1438 PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1439 &auhalHostApi->blockingStreamInterface,
1440 BlioCallback, &stream->blio );
1441 }
1442
1443 PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1444
1445 /* -- handle paFramesPerBufferUnspecified -- */
1446 if( framesPerBuffer == paFramesPerBufferUnspecified ) {
1447 long requested = 64;
1448 if( inputParameters )
1449 requested = MAX( requested, inputParameters->suggestedLatency * sampleRate / 2 );
1450 if( outputParameters )
1451 requested = MAX( requested, outputParameters->suggestedLatency *sampleRate / 2 );
1452 VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1453 requested ) );
1454 if( requested <= 64 ) {
1455 /*requested a relatively low latency. Make sure this is in range of the devices. */
1456 /*try to get the device's min natural buffer size and use that (but no smaller than 64).*/
1457 AudioValueRange audioRange;
1458 UInt32 size = sizeof( audioRange );
1459 if( inputParameters ) {
1460 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1461 0,
1462 false,
1463 kAudioDevicePropertyBufferFrameSizeRange,
1464 &size, &audioRange ) );
1465 if( result )
1466 requested = MAX( requested, audioRange.mMinimum );
1467 }
1468 size = sizeof( audioRange );
1469 if( outputParameters ) {
1470 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1471 0,
1472 false,
1473 kAudioDevicePropertyBufferFrameSizeRange,
1474 &size, &audioRange ) );
1475 if( result )
1476 requested = MAX( requested, audioRange.mMinimum );
1477 }
1478 } else {
1479 /* requested a relatively high latency. Make sure this is in range of the devices. */
1480 /*try to get the device's max natural buffer size and use that (but no larger than 1024).*/
1481 AudioValueRange audioRange;
1482 UInt32 size = sizeof( audioRange );
1483 requested = MIN( requested, 1024 );
1484 if( inputParameters ) {
1485 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1486 0,
1487 false,
1488 kAudioDevicePropertyBufferFrameSizeRange,
1489 &size, &audioRange ) );
1490 if( result )
1491 requested = MIN( requested, audioRange.mMaximum );
1492 }
1493 size = sizeof( audioRange );
1494 if( outputParameters ) {
1495 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1496 0,
1497 false,
1498 kAudioDevicePropertyBufferFrameSizeRange,
1499 &size, &audioRange ) );
1500 if( result )
1501 requested = MIN( requested, audioRange.mMaximum );
1502 }
1503 }
1504 /* -- double check ranges -- */
1505 if( requested > 1024 ) requested = 1024;
1506 if( requested < 64 ) requested = 64;
1507 VDBUG(("After querying hardware, setting block size to %ld.\n", requested));
1508 framesPerBuffer = requested;
1509 }
1510
1511 /* -- Now we actually open and setup streams. -- */
1512 if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1513 { /* full duplex. One device. */
1514 UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1515 UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1516 result = OpenAndSetupOneAudioUnit( stream,
1517 inputParameters,
1518 outputParameters,
1519 framesPerBuffer,
1520 &inputFramesPerBuffer,
1521 &outputFramesPerBuffer,
1522 auhalHostApi,
1523 &(stream->inputUnit),
1524 &(stream->inputSRConverter),
1525 &(stream->inputDevice),
1526 sampleRate,
1527 stream );
1528 stream->inputFramesPerBuffer = inputFramesPerBuffer;
1529 stream->outputFramesPerBuffer = outputFramesPerBuffer;
1530 stream->outputUnit = stream->inputUnit;
1531 stream->outputDevice = stream->inputDevice;
1532 if( result != paNoError )
1533 goto error;
1534 }
1535 else
1536 { /* full duplex, different devices OR simplex */
1537 UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1538 UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1539 result = OpenAndSetupOneAudioUnit( stream,
1540 NULL,
1541 outputParameters,
1542 framesPerBuffer,
1543 NULL,
1544 &outputFramesPerBuffer,
1545 auhalHostApi,
1546 &(stream->outputUnit),
1547 NULL,
1548 &(stream->outputDevice),
1549 sampleRate,
1550 stream );
1551 if( result != paNoError )
1552 goto error;
1553 result = OpenAndSetupOneAudioUnit( stream,
1554 inputParameters,
1555 NULL,
1556 framesPerBuffer,
1557 &inputFramesPerBuffer,
1558 NULL,
1559 auhalHostApi,
1560 &(stream->inputUnit),
1561 &(stream->inputSRConverter),
1562 &(stream->inputDevice),
1563 sampleRate,
1564 stream );
1565 if( result != paNoError )
1566 goto error;
1567 stream->inputFramesPerBuffer = inputFramesPerBuffer;
1568 stream->outputFramesPerBuffer = outputFramesPerBuffer;
1569 }
1570
1571 if( stream->inputUnit ) {
1572 const size_t szfl = sizeof(float);
1573 /* setup the AudioBufferList used for input */
1574 bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1575 stream->inputAudioBufferList.mNumberBuffers = 1;
1576 stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1577 = inputChannelCount;
1578 stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1579 = stream->inputFramesPerBuffer*inputChannelCount*szfl;
1580 stream->inputAudioBufferList.mBuffers[0].mData
1581 = (float *) calloc(
1582 stream->inputFramesPerBuffer*inputChannelCount,
1583 szfl );
1584 if( !stream->inputAudioBufferList.mBuffers[0].mData )
1585 {
1586 result = paInsufficientMemory;
1587 goto error;
1588 }
1589
1590 /*
1591 * If input and output devs are different or we are doing SR conversion,
1592 * we also need a
1593 * ring buffer to store input data while waiting for output
1594 * data.
1595 */
1596 if( (stream->outputUnit && stream->inputUnit != stream->outputUnit)
1597 || stream->inputSRConverter )
1598 {
1599 /* May want the ringSize or initial position in
1600 ring buffer to depend somewhat on sample rate change */
1601
1602 void *data;
1603 long ringSize;
1604
1605 ringSize = computeRingBufferSize( inputParameters,
1606 outputParameters,
1607 stream->inputFramesPerBuffer,
1608 stream->outputFramesPerBuffer,
1609 sampleRate );
1610 /*ringSize <<= 4; *//*16x bigger, for testing */
1611
1612
1613 /*now, we need to allocate memory for the ring buffer*/
1614 data = calloc( ringSize, szfl );
1615 if( !data )
1616 {
1617 result = paInsufficientMemory;
1618 goto error;
1619 }
1620
1621 /* now we can initialize the ring buffer */
1622 //FIXME: element size should probably be szfl*inputchan
1623 // but that will require some work all over the
1624 // place to patch up. szfl may be sufficient and would
1625 // be way easier to handle, but it seems clear from the
1626 // discussion that buffer processor compatibility
1627 // requires szfl*inputchan.
1628 // See revision 1346 and discussion:
1629 // http://techweb.rfa.org/pipermail/portaudio/2008-February/008295.html
1630 PaUtil_InitializeRingBuffer( &stream->inputRingBuffer,
1631 1, ringSize*szfl, data ) ;
1632 /* advance the write index a little, so we are reading from the
1633 middle of the buffer */
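/* (the data was calloc'd above, so this pre-fill just gives the output
   side some silence to read before real input arrives) */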
1634 if( stream->outputUnit )
1635 PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize*szfl / RING_BUFFER_ADVANCE_DENOMINATOR );
1636 }
1637 }
1638
1639 /* -- initialize Blio Buffer Processors -- */
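/* BLIO ("blocking I/O") ring buffers back the Pa_ReadStream()/Pa_WriteStream()
   interface: when the client supplies no callback, BlioCallback (registered
   with the buffer processor below) moves audio through these buffers instead. */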
1640 if( !streamCallback )
1641 {
1642 long ringSize;
1643
1644 ringSize = computeRingBufferSize( inputParameters,
1645 outputParameters,
1646 stream->inputFramesPerBuffer,
1647 stream->outputFramesPerBuffer,
1648 sampleRate );
1649 result = initializeBlioRingBuffers( &stream->blio,
1650 inputParameters?inputParameters->sampleFormat:0 ,
1651 outputParameters?outputParameters->sampleFormat:0 ,
1652 MAX(stream->inputFramesPerBuffer,stream->outputFramesPerBuffer),
1653 ringSize,
1654 inputParameters?inputChannelCount:0 ,
1655 outputParameters?outputChannelCount:0 ) ;
1656 if( result != paNoError )
1657 goto error;
1658 }
1659
1660 /* -- initialize Buffer Processor -- */
1661 {
1662 unsigned long maxHostFrames = stream->inputFramesPerBuffer;
1663 if( stream->outputFramesPerBuffer > maxHostFrames )
1664 maxHostFrames = stream->outputFramesPerBuffer;
1665 result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
1666 inputChannelCount, inputSampleFormat,
1667 hostInputSampleFormat,
1668 outputChannelCount, outputSampleFormat,
1669 hostOutputSampleFormat,
1670 sampleRate,
1671 streamFlags,
1672 framesPerBuffer,
1673 /* If sample rate conversion takes place, the buffer size
1674 will not be known. */
1675 maxHostFrames,
1676 stream->inputSRConverter
1677 ? paUtilUnknownHostBufferSize
1678 : paUtilBoundedHostBufferSize,
1679 streamCallback ? streamCallback : BlioCallback,
1680 streamCallback ? userData : &stream->blio );
1681 if( result != paNoError )
1682 goto error;
1683 }
1684 stream->bufferProcessorIsInitialized = TRUE;
1685
1686 /*
1687 IMPLEMENT ME: initialise the following fields with estimated or actual
1688 values.
1689 I think this is okay the way it is. -- br 12/1/05
1690 Maybe we need to change the input latency estimate if the I/O devices differ.
1691 */
1692 stream->streamRepresentation.streamInfo.inputLatency =
1693 PaUtil_GetBufferProcessorInputLatency(&stream->bufferProcessor)/sampleRate;
1694 stream->streamRepresentation.streamInfo.outputLatency =
1695 PaUtil_GetBufferProcessorOutputLatency(&stream->bufferProcessor)/sampleRate;
1696 stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
1697
1698 stream->sampleRate = sampleRate;
1699 stream->outDeviceSampleRate = 0;
1700 if( stream->outputUnit ) {
1701 Float64 rate;
1702 UInt32 size = sizeof( rate );
1703 result = ERR( AudioDeviceGetProperty( stream->outputDevice,
1704 0,
1705 FALSE,
1706 kAudioDevicePropertyNominalSampleRate,
1707 &size, &rate ) );
1708 if( result )
1709 goto error;
1710 stream->outDeviceSampleRate = rate;
1711 }
1712 stream->inDeviceSampleRate = 0;
1713 if( stream->inputUnit ) {
1714 Float64 rate;
1715 UInt32 size = sizeof( rate );
1716 result = ERR( AudioDeviceGetProperty( stream->inputDevice,
1717 0,
1718 TRUE,
1719 kAudioDevicePropertyNominalSampleRate,
1720 &size, &rate ) );
1721 if( result )
1722 goto error;
1723 stream->inDeviceSampleRate = rate;
1724 }
1725 stream->userInChan = inputChannelCount;
1726 stream->userOutChan = outputChannelCount;
1727
1728 pthread_mutex_init( &stream->timingInformationMutex, NULL );
1729 stream->timingInformationMutexIsInitialized = 1;
1730
1731 if( stream->outputUnit ) {
1732 UpdateReciprocalOfActualOutputSampleRateFromDeviceProperty( stream );
1733 stream->recipricalOfActualOutputSampleRate_ioProcCopy = stream->recipricalOfActualOutputSampleRate;
1734
1735 AudioDeviceAddPropertyListener( stream->outputDevice, 0, /* isInput = */ FALSE, kAudioDevicePropertyActualSampleRate,
1736 AudioDevicePropertyActualSampleRateListenerProc, stream );
1737
1738 UpdateOutputLatencySamplesFromDeviceProperty( stream );
1739 stream->deviceOutputLatencySamples_ioProcCopy = stream->deviceOutputLatencySamples;
1740
1741 AudioDeviceAddPropertyListener( stream->outputDevice, 0, /* isInput = */ FALSE, kAudioDevicePropertyLatency,
1742 AudioDevicePropertyOutputLatencySamplesListenerProc, stream );
1743
1744 }else{
1745 stream->recipricalOfActualOutputSampleRate = 1.;
1746 stream->recipricalOfActualOutputSampleRate_ioProcCopy = 0.;
1747 stream->deviceOutputLatencySamples_ioProcCopy = 0;
1748 }
1749
1750 if( stream->inputUnit ) {
1751 UpdateInputLatencySamplesFromDeviceProperty( stream );
1752 stream->deviceInputLatencySamples_ioProcCopy = stream->deviceInputLatencySamples;
1753
1754 AudioDeviceAddPropertyListener( stream->inputDevice, 0, /* isInput = */ TRUE, kAudioDevicePropertyLatency,
1755 AudioDevicePropertyInputLatencySamplesListenerProc, stream );
1756 }else{
1757 stream->deviceInputLatencySamples = 0;
1758 stream->deviceInputLatencySamples_ioProcCopy = 0;
1759 }
1760
1761 stream->state = STOPPED;
1762 stream->xrunFlags = 0;
1763
1764 *s = (PaStream*)stream;
1765
1766 return result;
1767
1768 error:
1769 CloseStream( stream );
1770 return result;
1771 }
1772
1773
1774 #define HOST_TIME_TO_PA_TIME( x ) ( AudioConvertHostTimeToNanos( (x) ) * 1.0E-09) /* convert to nanoseconds and then to seconds */
1775
1776 PaTime GetStreamTime( PaStream *s )
1777 {
1778 return HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() );
1779 }
1780
1781 #define RING_BUFFER_EMPTY (1000)
1782
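/* Input data proc handed to AudioConverterFillBuffer(): it hands back the
   first contiguous readable region (up to the requested size) from the
   PortAudio ring buffer and advances the read index past it, or returns
   RING_BUFFER_EMPTY when nothing is available so callers can detect underflow. */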
1783 static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter,
1784 UInt32*ioDataSize,
1785 void** outData,
1786 void*inUserData )
1787 {
1788 void *dummyData;
1789 ring_buffer_size_t dummySize;
1790 PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
1791
1792 VVDBUG(("ringBufferIOProc()\n"));
1793
1794 if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
1795 *outData = NULL;
1796 *ioDataSize = 0;
1797 return RING_BUFFER_EMPTY;
1798 }
1799 assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
1800 PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
1801 outData, (ring_buffer_size_t *)ioDataSize,
1802 &dummyData, &dummySize );
1803
1804 assert( *ioDataSize );
1805 PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
1806
1807 return noErr;
1808 }
1809
1810 /*
1811 * Called by the AudioUnit API to process audio from the sound card.
1812 * This is where the magic happens.
1813 */
1814 /* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestions for cleaning it up, I'm all ears. */
1815 static OSStatus AudioIOProc( void *inRefCon,
1816 AudioUnitRenderActionFlags *ioActionFlags,
1817 const AudioTimeStamp *inTimeStamp,
1818 UInt32 inBusNumber,
1819 UInt32 inNumberFrames,
1820 AudioBufferList *ioData )
1821 {
1822 unsigned long framesProcessed = 0;
1823 PaStreamCallbackTimeInfo timeInfo = {0,0,0};
1824 PaMacCoreStream *stream = (PaMacCoreStream*)inRefCon;
1825 const bool isRender = inBusNumber == OUTPUT_ELEMENT;
1826 int callbackResult = paContinue ;
1827
1828 VVDBUG(("AudioIOProc()\n"));
1829
1830 PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
1831
1832 /* -----------------------------------------------------------------*\
1833 This output may be useful for debugging,
1834 but printing during the callback is a bad enough idea that
1835 it is not enabled by enabling the usual debugging calls.
1836 \* -----------------------------------------------------------------*/
1837 /*
1838 static int renderCount = 0;
1839 static int inputCount = 0;
1840 printf( "------------------- starting reder/input\n" );
1841 if( isRender )
1842 printf("Render callback (%d):\t", ++renderCount);
1843 else
1844 printf("Input callback (%d):\t", ++inputCount);
1845 printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );
1846
1847 printf( "--- inBusNumber: %lu\n", inBusNumber );
1848 printf( "--- inNumberFrames: %lu\n", inNumberFrames );
1849 printf( "--- %x ioData\n", (unsigned) ioData );
1850 if( ioData )
1851 {
1852 int i=0;
1853 printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
1854 for( i=0; i<ioData->mNumberBuffers; ++i )
1855 printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
1856 }
1857 ----------------------------------------------------------------- */
1858
1859 /* compute PaStreamCallbackTimeInfo */
1860
1861 if( pthread_mutex_trylock( &stream->timingInformationMutex ) == 0 ){
1862 /* snapshot the ioproc copy of timing information */
1863 stream->deviceOutputLatencySamples_ioProcCopy = stream->deviceOutputLatencySamples;
1864 stream->recipricalOfActualOutputSampleRate_ioProcCopy = stream->recipricalOfActualOutputSampleRate;
1865 stream->deviceInputLatencySamples_ioProcCopy = stream->deviceInputLatencySamples;
1866 pthread_mutex_unlock( &stream->timingInformationMutex );
1867 }
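/* trylock, not lock: if another thread currently holds the mutex we simply
   keep the previous ioProc copies rather than block the real-time audio thread. */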
1868
1869 /* For timeInfo.currentTime we could calculate current time backwards from the HAL audio
1870 output time to give a more accurate impression of the current timeslice but it doesn't
1871 seem worth it at the moment since other PA host APIs don't do any better.
1872 */
1873 timeInfo.currentTime = HOST_TIME_TO_PA_TIME( AudioGetCurrentHostTime() );
1874
1875 /*
1876 For an input HAL AU, inTimeStamp is the time the samples are received from the hardware,
1877 for an output HAL AU inTimeStamp is the time the samples are sent to the hardware.
1878 PA expresses timestamps in terms of when the samples enter the ADC or leave the DAC
1879 so we add or subtract kAudioDevicePropertyLatency below.
1880 */
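/* In seconds: inputBufferAdcTime  = hostTime - inputLatencySamples  / sampleRate
               outputBufferDacTime = hostTime + outputLatencySamples / sampleRate
   (the code below multiplies by the cached reciprocal of the output sample rate;
   see the FIXMEs about whether the input rate should be used instead). */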
1881
1882 /* FIXME: not sure what to do below if the host timestamps aren't valid (kAudioTimeStampHostTimeValid isn't set)
1883 Could ask on CA mailing list if it is possible for it not to be set. If so, could probably grab a now timestamp
1884 at the top and compute from there (modulo scheduling jitter) or ask on mailing list for other options. */
1885
1886 if( isRender )
1887 {
1888 if( stream->inputUnit ) /* full duplex */
1889 {
1890 if( stream->inputUnit == stream->outputUnit ) /* full duplex AUHAL IOProc */
1891 {
1892 /* FIXME: review. i'm not sure this computation of inputBufferAdcTime is correct for a full-duplex AUHAL */
1893 timeInfo.inputBufferAdcTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1894 - stream->deviceInputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy; // FIXME should be using input sample rate here?
1895 timeInfo.outputBufferDacTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1896 + stream->deviceOutputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy;
1897 }
1898 else /* full duplex with ring-buffer from a separate input AUHAL ioproc */
1899 {
1900 /* FIXME: review. this computation of inputBufferAdcTime is definitely wrong since it doesn't take the ring buffer latency into account */
1901 timeInfo.inputBufferAdcTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1902 - stream->deviceInputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy; // FIXME should be using input sample rate here?
1903 timeInfo.outputBufferDacTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1904 + stream->deviceOutputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy;
1905 }
1906 }
1907 else /* output only */
1908 {
1909 timeInfo.inputBufferAdcTime = 0;
1910 timeInfo.outputBufferDacTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1911 + stream->deviceOutputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy;
1912 }
1913 }
1914 else /* input only */
1915 {
1916 timeInfo.inputBufferAdcTime = HOST_TIME_TO_PA_TIME(inTimeStamp->mHostTime)
1917 - stream->deviceInputLatencySamples_ioProcCopy * stream->recipricalOfActualOutputSampleRate_ioProcCopy; // FIXME should be using input sample rate here?
1918 timeInfo.outputBufferDacTime = 0;
1919 }
1920
1921 //printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );
1922
1923 if( isRender && stream->inputUnit == stream->outputUnit
1924 && !stream->inputSRConverter )
1925 {
1926 /* --------- Full Duplex, One Device, no SR Conversion -------
1927 *
1928 * This is the lowest latency case, and also the simplest.
1929 * Input data and output data are available at the same time.
1930 * We do not use the input SR converter or the input ring buffer.
1931 *
1932 */
1933 OSStatus err = 0;
1934 unsigned long frames;
1935
1936 /* -- start processing -- */
1937 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
1938 &timeInfo,
1939 stream->xrunFlags );
1940 stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback, which calls the xrunCallback function. It should be in the same thread as the main audio callback, but the Apple docs just use the word "usually", so it may be possible to lose an xrun notification if that callback happens here.
1941
1942 /* -- compute frames. do some checks -- */
1943 assert( ioData->mNumberBuffers == 1 );
1944 assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
1945 frames = ioData->mBuffers[0].mDataByteSize;
1946 frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
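/* e.g. a 4096-byte host buffer of interleaved stereo float32 audio is
   4096 / (4 bytes * 2 channels) = 512 frames */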
1947 /* -- copy and process input data -- */
1948 err= AudioUnitRender(stream->inputUnit,
1949 ioActionFlags,
1950 inTimeStamp,
1951 INPUT_ELEMENT,
1952 inNumberFrames,
1953 &stream->inputAudioBufferList );
1954 /* FEEDBACK: I'm not sure what to do when this call fails. There's nothing in the PA API to
1955 * do about failures in the callback system. */
1956 assert( !err );
1957
1958 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1959 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1960 0,
1961 stream->inputAudioBufferList.mBuffers[0].mData,
1962 stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
1963 /* -- Copy and process output data -- */
1964 PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
1965 PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
1966 0,
1967 ioData->mBuffers[0].mData,
1968 ioData->mBuffers[0].mNumberChannels);
1969 /* -- complete processing -- */
1970 framesProcessed =
1971 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1972 &callbackResult );
1973 }
1974 else if( isRender )
1975 {
1976 /* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
1977 * -- OR Simplex Output
1978 *
1979 * This case handles output data as in the full duplex case,
1980 * and, if there is input data, reads it off the ring buffer
1981 * and into the PA buffer processor. If sample rate conversion
1982 * is required on input, that is done here as well.
1983 */
1984 unsigned long frames;
1985
1986 /* Sometimes, when stopping a duplex stream we get erroneous
1987 xrun flags, so if this is our last run, clear the flags. */
1988 int xrunFlags = stream->xrunFlags;
1989 /*
1990 if( xrunFlags & paInputUnderflow )
1991 printf( "input underflow.\n" );
1992 if( xrunFlags & paInputOverflow )
1993 printf( "input overflow.\n" );
1994 */
1995 if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
1996 xrunFlags = 0;
1997
1998 /* -- start processing -- */
1999 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2000 &timeInfo,
2001 xrunFlags );
2002 stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */
2003
2004 /* -- Copy and process output data -- */
2005 assert( ioData->mNumberBuffers == 1 );
2006 frames = ioData->mBuffers[0].mDataByteSize;
2007 frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
2008 assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
2009 PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
2010 PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
2011 0,
2012 ioData->mBuffers[0].mData,
2013 ioData->mBuffers[0].mNumberChannels);
2014
2015 /* -- copy and process input data, and complete processing -- */
2016 if( stream->inputUnit ) {
2017 const int flsz = sizeof( float );
2018 /* Here, we read the data out of the ring buffer, through the
2019 audio converter. */
2020 int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
2021 if( stream->inputSRConverter )
2022 {
2023 OSStatus err;
2024 UInt32 size;
2025 float data[ inChan * frames ];
2026 size = sizeof( data );
2027 err = AudioConverterFillBuffer(
2028 stream->inputSRConverter,
2029 ringBufferIOProc,
2030 &stream->inputRingBuffer,
2031 &size,
2032 (void *)&data );
2033 if( err == RING_BUFFER_EMPTY )
2034 { /*the ring buffer callback underflowed */
2035 err = 0;
2036 bzero( ((char *)data) + size, sizeof(data)-size );
2037 stream->xrunFlags |= paInputUnderflow;
2038 }
2039 ERR( err );
2040 assert( !err );
2041
2042 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
2043 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2044 0,
2045 data,
2046 inChan );
2047 framesProcessed =
2048 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2049 &callbackResult );
2050 }
2051 else
2052 {
2053 /* Without the AudioConverter, this is actually a bit more complex
2054 because we have to do a little buffer processing that the
2055 AudioConverter would otherwise handle for us. */
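/* Three cases follow: all requested data sits in the first contiguous
   region; the ring buffer underflows (copy what we have, zero the rest);
   or the data wraps and is split across the two regions. */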
2056 void *data1, *data2;
2057 ring_buffer_size_t size1, size2;
2058 PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
2059 inChan*frames*flsz,
2060 &data1, &size1,
2061 &data2, &size2 );
2062 if( size1 / ( flsz * inChan ) == frames ) {
2063 /* simplest case: all in first buffer */
2064 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
2065 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2066 0,
2067 data1,
2068 inChan );
2069 framesProcessed =
2070 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2071 &callbackResult );
2072 PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
2073 } else if( ( size1 + size2 ) / ( flsz * inChan ) < frames ) {
2074 /*we underflowed. take what data we can, zero the rest.*/
2075 unsigned char data[frames*inChan*flsz];
2076 if( size1 )
2077 memcpy( data, data1, size1 );
2078 if( size2 )
2079 memcpy( data+size1, data2, size2 );
2080 bzero( data+size1+size2, frames*flsz*inChan - size1 - size2 );
2081
2082 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
2083 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2084 0,
2085 data,
2086 inChan );
2087 framesProcessed =
2088 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2089 &callbackResult );
2090 PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
2091 size1+size2 );
2092 /* flag underflow */
2093 stream->xrunFlags |= paInputUnderflow;
2094 } else {
2095 /*we got all the data, but split between buffers*/
2096 PaUtil_SetInputFrameCount( &(stream->bufferProcessor),
2097 size1 / ( flsz * inChan ) );
2098 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2099 0,
2100 data1,
2101 inChan );
2102 PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor),
2103 size2 / ( flsz * inChan ) );
2104 PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
2105 0,
2106 data2,
2107 inChan );
2108 framesProcessed =
2109 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2110 &callbackResult );
2111 PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1+size2 );
2112 }
2113 }
2114 } else {
2115 framesProcessed =
2116 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2117 &callbackResult );
2118 }
2119
2120 }
2121 else
2122 {
2123 /* ------------------ Input
2124 *
2125 * First, we read off the audio data and put it in the ring buffer.
2126 * If this is an input-only stream, we need to process it further;
2127 * otherwise, we let the output case deal with it.
2128 */
2129 OSStatus err = 0;
2130 int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
2131 /* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
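/* -10874 is kAudioUnitErr_TooManyFramesToProcess: retry with half as many frames. */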
2132 do {
2133 err= AudioUnitRender(stream->inputUnit,
2134 ioActionFlags,
2135 inTimeStamp,
2136 INPUT_ELEMENT,
2137 inNumberFrames,
2138 &stream->inputAudioBufferList );
2139 if( err == -10874 )
2140 inNumberFrames /= 2;
2141 } while( err == -10874 && inNumberFrames > 1 );
2142 /* FEEDBACK: I'm not sure what to do when this call fails */
2143 ERR( err );
2144 assert( !err );
2145 if( stream->inputSRConverter || stream->outputUnit )
2146 {
2147 /* If this is duplex or we use a converter, put the data
2148 into the ring buffer. */
2149 long bytesIn, bytesOut;
2150 bytesIn = sizeof( float ) * inNumberFrames * chan;
2151 bytesOut = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
2152 stream->inputAudioBufferList.mBuffers[0].mData,
2153 bytesIn );
2154 if( bytesIn != bytesOut )
2155 stream->xrunFlags |= paInputOverflow ;
2156 }
2157 else
2158 {
2159 /* for simplex input w/o SR conversion,
2160 just pop the data into the buffer processor.*/
2161 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2162 &timeInfo,
2163 stream->xrunFlags );
2164 stream->xrunFlags = 0;
2165
2166 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
2167 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2168 0,
2169 stream->inputAudioBufferList.mBuffers[0].mData,
2170 chan );
2171 framesProcessed =
2172 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2173 &callbackResult );
2174 }
2175 if( !stream->outputUnit && stream->inputSRConverter )
2176 {
2177 /* ------------------ Simplex Input w/ SR Conversion
2178 *
2179 * If this is a simplex input stream, we need to read off the buffer,
2180 * do our sample rate conversion and pass the results to the buffer
2181 * processor.
2182 * The logic here is complicated somewhat by the fact that we don't
2183 * know how much data is available, so we loop on reasonably sized
2184 * chunks, and let the BufferProcessor deal with the rest.
2185 *
2186 */
2187 /*This might be too big or small depending on SR conversion*/
2188 float data[ chan * inNumberFrames ];
2189 OSStatus err;
2190 do
2191 { /*Run the buffer processor until we are out of data*/
2192 UInt32 size;
2193 long f;
2194
2195 size = sizeof( data );
2196 err = AudioConverterFillBuffer(
2197 stream->inputSRConverter,
2198 ringBufferIOProc,
2199 &stream->inputRingBuffer,
2200 &size,
2201 (void *)data );
2202 if( err != RING_BUFFER_EMPTY )
2203 ERR( err );
2204 assert( err == 0 || err == RING_BUFFER_EMPTY );
2205
2206 f = size / ( chan * sizeof(float) );
2207 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
2208 if( f )
2209 {
2210 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2211 &timeInfo,
2212 stream->xrunFlags );
2213 stream->xrunFlags = 0;
2214
2215 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2216 0,
2217 data,
2218 chan );
2219 framesProcessed =
2220 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2221 &callbackResult );
2222 }
2223 } while( callbackResult == paContinue && !err );
2224 }
2225 }
2226
2227 switch( callbackResult )
2228 {
2229 case paContinue: break;
2230 case paComplete:
2231 case paAbort:
2232 stream->state = CALLBACK_STOPPED ;
2233 if( stream->outputUnit )
2234 AudioOutputUnitStop(stream->outputUnit);
2235 if( stream->inputUnit )
2236 AudioOutputUnitStop(stream->inputUnit);
2237 break;
2238 }
2239
2240 PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
2241 return noErr;
2242 }
2243
2244
2245 /*
2246 When CloseStream() is called, the multi-api layer ensures that
2247 the stream has already been stopped or aborted.
2248 */
2249 static PaError CloseStream( PaStream* s )
2250 {
2251 /* This may be called from a failed OpenStream.
2252 Therefore, each piece of info is treated separately.
2253 PaError result = paNoError;
2254 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2255
2256 VVDBUG(("CloseStream()\n"));
2257 VDBUG( ( "Closing stream.\n" ) );
2258
2259 if( stream ) {
2260
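/* Teardown order: property listeners first, then the AUHAL units, the input
   ring buffer, the SR converter, the input AudioBufferList data, the BLIO
   buffers, the buffer processor, the timing mutex, and finally the stream
   representation and the stream structure itself. */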
2261 if( stream->outputUnit ) {
2262 AudioDeviceRemovePropertyListener( stream->outputDevice, 0, /* isInput = */ FALSE, kAudioDevicePropertyActualSampleRate,
2263 AudioDevicePropertyActualSampleRateListenerProc );
2264 AudioDeviceRemovePropertyListener( stream->outputDevice, 0, /* isInput = */ FALSE, kAudioDevicePropertyLatency,
2265 AudioDevicePropertyOutputLatencySamplesListenerProc );
2266 }
2267
2268 if( stream->inputUnit ) {
2269 AudioDeviceRemovePropertyListener( stream->inputDevice, 0, /* isInput = */ TRUE, kAudioDevicePropertyLatency,
2270 AudioDevicePropertyInputLatencySamplesListenerProc );
2271 }
2272
2273 if( stream->outputUnit ) {
2274 int count = removeFromXRunListenerList( stream );
2275 if( count == 0 )
2276 AudioDeviceRemovePropertyListener( stream->outputDevice,
2277 0,
2278 false,
2279 kAudioDeviceProcessorOverload,
2280 xrunCallback );
2281 }
2282 if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
2283 int count = removeFromXRunListenerList( stream );
2284 if( count == 0 )
2285 AudioDeviceRemovePropertyListener( stream->inputDevice,
2286 0,
2287 true,
2288 kAudioDeviceProcessorOverload,
2289 xrunCallback );
2290 }
2291 if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2292 AudioUnitUninitialize( stream->outputUnit );
2293 CloseComponent( stream->outputUnit );
2294 }
2295 stream->outputUnit = NULL;
2296 if( stream->inputUnit )
2297 {
2298 AudioUnitUninitialize( stream->inputUnit );
2299 CloseComponent( stream->inputUnit );
2300 stream->inputUnit = NULL;
2301 }
2302 if( stream->inputRingBuffer.buffer )
2303 free( (void *) stream->inputRingBuffer.buffer );
2304 stream->inputRingBuffer.buffer = NULL;
2305 /*TODO: is there more that needs to be done on error
2306 from AudioConverterDispose?*/
2307 if( stream->inputSRConverter )
2308 ERR( AudioConverterDispose( stream->inputSRConverter ) );
2309 stream->inputSRConverter = NULL;
2310 if( stream->inputAudioBufferList.mBuffers[0].mData )
2311 free( stream->inputAudioBufferList.mBuffers[0].mData );
2312 stream->inputAudioBufferList.mBuffers[0].mData = NULL;
2313
2314 result = destroyBlioRingBuffers( &stream->blio );
2315 if( result )
2316 return result;
2317 if( stream->bufferProcessorIsInitialized )
2318 PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );
2319
2320 if( stream->timingInformationMutexIsInitialized )
2321 pthread_mutex_destroy( &stream->timingInformationMutex );
2322
2323 PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
2324 PaUtil_FreeMemory( stream );
2325 }
2326
2327 return result;
2328 }
2329
2330 static PaError StartStream( PaStream *s )
2331 {
2332 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2333 OSStatus result = noErr;
2334 VVDBUG(("StartStream()\n"));
2335 VDBUG( ( "Starting stream.\n" ) );
2336
2337 #define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2338
2339 /*FIXME: maybe want to do this on close/abort for faster start? */
2340 PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
2341 if( stream->inputSRConverter )
2342 ERR_WRAP( AudioConverterReset( stream->inputSRConverter ) );
2343
2344 /* -- start -- */
2345 stream->state = ACTIVE;
2346 if( stream->inputUnit ) {
2347 ERR_WRAP( AudioOutputUnitStart(stream->inputUnit) );
2348 }
2349 if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2350 ERR_WRAP( AudioOutputUnitStart(stream->outputUnit) );
2351 }
2352
2353 return paNoError;
2354 #undef ERR_WRAP
2355 }
2356
2357 // it's not clear from Apple's docs that this really waits
2358 // until all data is flushed.
2359 static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
2360 {
2361 Boolean isRunning = 1;
2362 while( isRunning ) {
2363 UInt32 s = sizeof( isRunning );
2364 ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element, &isRunning, &s );
2365 if( err )
2366 return err;
2367 Pa_Sleep( 100 );
2368 }
2369 return noErr;
2370 }
2371
2372 static PaError StopStream( PaStream *s )
2373 {
2374 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2375 OSStatus result = noErr;
2376 PaError paErr;
2377 VVDBUG(("StopStream()\n"));
2378
2379 VDBUG( ("Waiting for BLIO.\n") );
2380 waitUntilBlioWriteBufferIsFlushed( &stream->blio );
2381 VDBUG( ( "Stopping stream.\n" ) );
2382
2383 stream->state = STOPPING;
2384
2385 #define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2386 /* -- stop and reset -- */
2387 if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
2388 {
2389 ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
2390 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
2391 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2392 ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
2393 ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
2394 }
2395 else
2396 {
2397 if( stream->inputUnit )
2398 {
2399 ERR_WRAP(AudioOutputUnitStop(stream->inputUnit) );
2400 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2401 ERR_WRAP(AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1));
2402 }
2403 if( stream->outputUnit )
2404 {
2405 ERR_WRAP(AudioOutputUnitStop(stream->outputUnit));
2406 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
2407 ERR_WRAP(AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0));
2408 }
2409 }
2410 if( stream->inputRingBuffer.buffer ) {
2411 PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
2412 bzero( (void *)stream->inputRingBuffer.buffer,
2413 stream->inputRingBuffer.bufferSize );
2414 /* advance the write point a little, so we are reading from the
2415 middle of the buffer. We leave this extra headroom because
2416 testing has shown that it helps. */
2417 if( stream->outputUnit )
2418 PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
2419 stream->inputRingBuffer.bufferSize
2420 / RING_BUFFER_ADVANCE_DENOMINATOR );
2421 }
2422
2423 stream->xrunFlags = 0;
2424 stream->state = STOPPED;
2425
2426 paErr = resetBlioRingBuffers( &stream->blio );
2427 if( paErr )
2428 return paErr;
2429
2430 VDBUG( ( "Stream Stopped.\n" ) );
2431 return paNoError;
2432 #undef ERR_WRAP
2433 }
2434
2435 static PaError AbortStream( PaStream *s )
2436 {
2437 VVDBUG(("AbortStream()->StopStream()\n"));
2438 VDBUG( ( "Aborting stream.\n" ) );
2439 /* We have nothing faster than StopStream. */
2440 return StopStream(s);
2441 }
2442
2443
2444 static PaError IsStreamStopped( PaStream *s )
2445 {
2446 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2447 VVDBUG(("IsStreamStopped()\n"));
2448
2449 return stream->state == STOPPED ? 1 : 0;
2450 }
2451
2452
2453 static PaError IsStreamActive( PaStream *s )
2454 {
2455 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2456 VVDBUG(("IsStreamActive()\n"));
2457 return ( stream->state == ACTIVE || stream->state == STOPPING );
2458 }
2459
2460
2461 static double GetStreamCpuLoad( PaStream* s )
2462 {
2463 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2464 VVDBUG(("GetStreamCpuLoad()\n"));
2465
2466 return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer );
2467 }
