
Contents of /branch/r3113_0.9.7_beta/3rdparty/portaudio/src/hostapi/coreaudio/pa_mac_core.c



Revision 32 - Tue Sep 7 03:29:01 2010 UTC by william
File MIME type: text/plain
File size: 96005 byte(s)
branching from upstream revision (http://pcsx2.googlecode.com/svn/trunk): r3113 to
https://svn.netsolutions.dnsalias.com/websvn/ps2/pcsx2/pcsx2_0.9.7/branch/r3113_0.9.7_beta
1 /*
2 * Implementation of the PortAudio API for Apple AUHAL
3 *
4 * PortAudio Portable Real-Time Audio Library
5 * Latest Version at: http://www.portaudio.com
6 *
7 * Written by Bjorn Roche of XO Audio LLC, from PA skeleton code.
8 * Portions copied from code by Dominic Mazzoni (who wrote a HAL implementation)
9 *
10 * Dominic's code was based on code by Phil Burk, Darren Gibbs,
11 * Gord Peters, Stephane Letz, and Greg Pfiel.
12 *
13 * The following people also deserve acknowledgements:
14 *
15 * Olivier Tristan for feedback and testing
16 * Glenn Zelniker and Z-Systems engineering for sponsoring the Blocking I/O
17 * interface.
18 *
19 *
20 * Based on the Open Source API proposed by Ross Bencina
21 * Copyright (c) 1999-2002 Ross Bencina, Phil Burk
22 *
23 * Permission is hereby granted, free of charge, to any person obtaining
24 * a copy of this software and associated documentation files
25 * (the "Software"), to deal in the Software without restriction,
26 * including without limitation the rights to use, copy, modify, merge,
27 * publish, distribute, sublicense, and/or sell copies of the Software,
28 * and to permit persons to whom the Software is furnished to do so,
29 * subject to the following conditions:
30 *
31 * The above copyright notice and this permission notice shall be
32 * included in all copies or substantial portions of the Software.
33 *
34 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
38 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
39 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41 */
42
43 /*
44 * The text above constitutes the entire PortAudio license; however,
45 * the PortAudio community also makes the following non-binding requests:
46 *
47 * Any person wishing to distribute modifications to the Software is
48 * requested to send the modifications to the original developer so that
49 * they can be incorporated into the canonical version. It is also
50 * requested that these non-binding requests be included along with the
51 * license above.
52 */
53
54 /**
55 @file pa_mac_core
56 @ingroup hostapi_src
57 @author Bjorn Roche
58 @brief AUHAL implementation of PortAudio
59 */
60
61 /* FIXME: not all error conditions call PaUtil_SetLastHostErrorInfo()
62 * PaMacCore_SetError() will do this.
63 */
64
65 #include "pa_mac_core_internal.h"
66
67 #include <string.h> /* strlen(), memcmp() etc. */
68 #include <libkern/OSAtomic.h>
69
70 #include "pa_mac_core.h"
71 #include "pa_mac_core_utilities.h"
72 #include "pa_mac_core_blocking.h"
73
74
75 #ifdef __cplusplus
76 extern "C"
77 {
78 #endif /* __cplusplus */
79
80 /* prototypes for functions declared in this file */
81
82 PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex index );
83
84 /*
85 * Function declared in pa_mac_core.h. Sets up a PaMacCoreStreamInfoStruct
86 * with the requested flags and initializes channel map.
87 */
88 void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, const unsigned long flags )
89 {
90 bzero( data, sizeof( PaMacCoreStreamInfo ) );
91 data->size = sizeof( PaMacCoreStreamInfo );
92 data->hostApiType = paCoreAudio;
93 data->version = 0x01;
94 data->flags = flags;
95 data->channelMap = NULL;
96 data->channelMapSize = 0;
97 }
98
99 /*
100 * Function declared in pa_mac_core.h. Adds channel mapping to a PaMacCoreStreamInfoStruct
101 */
102 void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, const unsigned long channelMapSize )
103 {
104 data->channelMap = channelMap;
105 data->channelMapSize = channelMapSize;
106 }
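/*
 * Illustrative usage sketch (not part of this file): how a client would
 * typically combine the two setup calls above with Pa_OpenStream(). The
 * callback name and the parameter values below are hypothetical placeholders;
 * channel-map semantics follow the AUHAL channel-map property (see
 * pa_mac_core.h).
 *
 *   #include "portaudio.h"
 *   #include "pa_mac_core.h"
 *
 *   PaMacCoreStreamInfo coreInfo;
 *   PaStreamParameters outParams;
 *   PaStream *stream;
 *
 *   PaMacCore_SetupStreamInfo( &coreInfo, paMacCorePlayNice );
 *   // optionally: SInt32 map[2] = { 0, 1 };
 *   //             PaMacCore_SetupChannelMap( &coreInfo, map, 2 );
 *
 *   outParams.device = Pa_GetDefaultOutputDevice();
 *   outParams.channelCount = 2;
 *   outParams.sampleFormat = paFloat32;
 *   outParams.suggestedLatency = 0.050;
 *   outParams.hostApiSpecificStreamInfo = &coreInfo;
 *
 *   Pa_OpenStream( &stream, NULL, &outParams, 44100.0, 512,
 *                  paNoFlag, myCallback, NULL );
 */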
107 static char *channelName = NULL;
108 static int channelNameSize = 0;
109 static bool ensureChannelNameSize( int size )
110 {
111 if( size >= channelNameSize ) {
112 free( channelName );
113 channelName = (char *) malloc( ( channelNameSize = size ) + 1 );
114 if( !channelName ) {
115 channelNameSize = 0;
116 return false;
117 }
118 }
119 return true;
120 }
121 /*
122 * Function declared in pa_mac_core.h. Retrieves channel names.
123 */
124 const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input )
125 {
126 struct PaUtilHostApiRepresentation *hostApi;
127 PaError err;
128 OSStatus error;
129 err = PaUtil_GetHostApiRepresentation( &hostApi, paCoreAudio );
130 assert(err == paNoError);
131 if( err != paNoError )
132 return NULL;
133 PaMacAUHAL *macCoreHostApi = (PaMacAUHAL*)hostApi;
134 AudioDeviceID hostApiDevice = macCoreHostApi->devIds[device];
135
136 UInt32 size = 0;
137
138 error = AudioDeviceGetPropertyInfo( hostApiDevice,
139 channelIndex + 1,
140 input,
141 kAudioDevicePropertyChannelName,
142 &size,
143 NULL );
144 if( error ) {
145 //try the CFString
146 CFStringRef name;
147 bool isDeviceName = false;
148 size = sizeof( name );
149 error = AudioDeviceGetProperty( hostApiDevice,
150 channelIndex + 1,
151 input,
152 kAudioDevicePropertyChannelNameCFString,
153 &size,
154 &name );
155 if( error ) { //as a last-ditch effort, get the device name. Later we'll append the channel number.
156 size = sizeof( name );
157 error = AudioDeviceGetProperty( hostApiDevice,
158 channelIndex + 1,
159 input,
160 kAudioDevicePropertyDeviceNameCFString,
161 &size,
162 &name );
163 if( error )
164 return NULL;
165 isDeviceName = true;
166 }
167 if( isDeviceName ) {
168 name = CFStringCreateWithFormat( NULL, NULL, CFSTR( "%@: %d"), name, channelIndex + 1 );
169 }
170
171 CFIndex length = CFStringGetLength(name);
172 while( ensureChannelNameSize( length * sizeof(UniChar) + 1 ) ) {
173 if( CFStringGetCString( name, channelName, channelNameSize, kCFStringEncodingUTF8 ) ) {
174 if( isDeviceName )
175 CFRelease( name );
176 return channelName;
177 }
178 if( length == 0 )
179 ++length;
180 length *= 2;
181 }
182 if( isDeviceName )
183 CFRelease( name );
184 return NULL;
185 }
186
187 //continue with C string:
188 if( !ensureChannelNameSize( size ) )
189 return NULL;
190
191 error = AudioDeviceGetProperty( hostApiDevice,
192 channelIndex + 1,
193 input,
194 kAudioDevicePropertyChannelName,
195 &size,
196 channelName );
197
198 if( error ) {
199 ERR( error );
200 return NULL;
201 }
202 return channelName;
203 }
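/*
 * Illustrative sketch (assumption: the 'device' argument is the global
 * PortAudio device index, as suggested by pa_mac_core.h): printing the
 * channel names of the default input device using the function above.
 *
 *   #include <stdio.h>
 *   #include "portaudio.h"
 *   #include "pa_mac_core.h"
 *
 *   PaDeviceIndex dev = Pa_GetDefaultInputDevice();
 *   const PaDeviceInfo *info = Pa_GetDeviceInfo( dev );
 *   int c;
 *   for( c = 0; info && c < info->maxInputChannels; ++c ) {
 *       const char *nm = PaMacCore_GetChannelName( dev, c, 1 ); // 1 = input
 *       printf( "input channel %d: %s\n", c, nm ? nm : "(unknown)" );
 *   }
 */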
204
205
206
207
208
209 AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s )
210 {
211 PaMacCoreStream *stream = (PaMacCoreStream*)s;
212 VVDBUG(("PaMacCore_GetStreamInputHandle()\n"));
213
214 return ( stream->inputDevice );
215 }
216
217 AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s )
218 {
219 PaMacCoreStream *stream = (PaMacCoreStream*)s;
220 VVDBUG(("PaMacCore_GetStreamOutputHandle()\n"));
221
222 return ( stream->outputDevice );
223 }
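/*
 * Illustrative sketch: once a stream has been opened on this host API, the
 * two accessors above expose the underlying CoreAudio device IDs, e.g.:
 *
 *   AudioDeviceID outDev = PaMacCore_GetStreamOutputDevice( stream );
 *   // outDev can then be used with CoreAudio calls such as
 *   // AudioDeviceGetProperty() for host-specific queries.
 */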
224
225 #ifdef __cplusplus
226 }
227 #endif /* __cplusplus */
228
229 #define RING_BUFFER_ADVANCE_DENOMINATOR (4)
230
231 static void Terminate( struct PaUtilHostApiRepresentation *hostApi );
232 static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
233 const PaStreamParameters *inputParameters,
234 const PaStreamParameters *outputParameters,
235 double sampleRate );
236 static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
237 PaStream** s,
238 const PaStreamParameters *inputParameters,
239 const PaStreamParameters *outputParameters,
240 double sampleRate,
241 unsigned long framesPerBuffer,
242 PaStreamFlags streamFlags,
243 PaStreamCallback *streamCallback,
244 void *userData );
245 static PaError CloseStream( PaStream* stream );
246 static PaError StartStream( PaStream *stream );
247 static PaError StopStream( PaStream *stream );
248 static PaError AbortStream( PaStream *stream );
249 static PaError IsStreamStopped( PaStream *s );
250 static PaError IsStreamActive( PaStream *stream );
251 static PaTime GetStreamTime( PaStream *stream );
252 static void setStreamStartTime( PaStream *stream );
253 static OSStatus AudioIOProc( void *inRefCon,
254 AudioUnitRenderActionFlags *ioActionFlags,
255 const AudioTimeStamp *inTimeStamp,
256 UInt32 inBusNumber,
257 UInt32 inNumberFrames,
258 AudioBufferList *ioData );
259 static double GetStreamCpuLoad( PaStream* stream );
260
261 static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
262 PaDeviceInfo *deviceInfo,
263 AudioDeviceID macCoreDeviceId,
264 int isInput);
265
266 static PaError OpenAndSetupOneAudioUnit(
267 const PaMacCoreStream *stream,
268 const PaStreamParameters *inStreamParams,
269 const PaStreamParameters *outStreamParams,
270 const UInt32 requestedFramesPerBuffer,
271 UInt32 *actualInputFramesPerBuffer,
272 UInt32 *actualOutputFramesPerBuffer,
273 const PaMacAUHAL *auhalHostApi,
274 AudioUnit *audioUnit,
275 AudioConverterRef *srConverter,
276 AudioDeviceID *audioDevice,
277 const double sampleRate,
278 void *refCon );
279
280 /* for setting errors. */
281 #define PA_AUHAL_SET_LAST_HOST_ERROR( errorCode, errorText ) \
282 PaUtil_SetLastHostErrorInfo( paInDevelopment, errorCode, errorText )
283
284 /*
285 * Callback called when starting or stopping a stream.
286 */
287 static void startStopCallback(
288 void * inRefCon,
289 AudioUnit ci,
290 AudioUnitPropertyID inID,
291 AudioUnitScope inScope,
292 AudioUnitElement inElement )
293 {
294 PaMacCoreStream *stream = (PaMacCoreStream *) inRefCon;
295 UInt32 isRunning;
296 UInt32 size = sizeof( isRunning );
297 OSStatus err;
298 err = AudioUnitGetProperty( ci, kAudioOutputUnitProperty_IsRunning, inScope, inElement, &isRunning, &size );
299 assert( !err );
300 if( err )
301 isRunning = false; //it's very unclear what to do in case of error here. There's no real way to notify the user, and crashing seems unreasonable.
302 if( isRunning )
303 return; //We are only interested in when we are stopping
304 // -- if we are using 2 I/O units, we only need one notification!
305 if( stream->inputUnit && stream->outputUnit && stream->inputUnit != stream->outputUnit && ci == stream->inputUnit )
306 return;
307 PaStreamFinishedCallback *sfc = stream->streamRepresentation.streamFinishedCallback;
308 if( stream->state == STOPPING )
309 stream->state = STOPPED ;
310 if( sfc )
311 sfc( stream->streamRepresentation.userData );
312 }
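/*
 * Illustrative sketch: the streamFinishedCallback consulted above is the one
 * a client registers through the standard PortAudio call below (the names
 * 'onFinished' and 'stream' are hypothetical placeholders).
 *
 *   static void onFinished( void *userData )
 *   {
 *       printf( "stream finished\n" );
 *   }
 *
 *   // after Pa_OpenStream() has succeeded:
 *   Pa_SetStreamFinishedCallback( stream, onFinished );
 */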
313
314
315 /*currently, this is only used in initialization, but it might be modified
316 to be used when the list of devices changes.*/
317 static PaError gatherDeviceInfo(PaMacAUHAL *auhalHostApi)
318 {
319 UInt32 size;
320 UInt32 propsize;
321 VVDBUG(("gatherDeviceInfo()\n"));
322 /* -- free any previous allocations -- */
323 if( auhalHostApi->devIds )
324 PaUtil_GroupFreeMemory(auhalHostApi->allocations, auhalHostApi->devIds);
325 auhalHostApi->devIds = NULL;
326
327 /* -- figure out how many devices there are -- */
328 AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices,
329 &propsize,
330 NULL );
331 auhalHostApi->devCount = propsize / sizeof( AudioDeviceID );
332
333 VDBUG( ( "Found %ld device(s).\n", auhalHostApi->devCount ) );
334
335 /* -- copy the device IDs -- */
336 auhalHostApi->devIds = (AudioDeviceID *)PaUtil_GroupAllocateMemory(
337 auhalHostApi->allocations,
338 propsize );
339 if( !auhalHostApi->devIds )
340 return paInsufficientMemory;
341 AudioHardwareGetProperty( kAudioHardwarePropertyDevices,
342 &propsize,
343 auhalHostApi->devIds );
344 #ifdef MAC_CORE_VERBOSE_DEBUG
345 {
346 int i;
347 for( i=0; i<auhalHostApi->devCount; ++i )
348 printf( "Device %d\t: %ld\n", i, auhalHostApi->devIds[i] );
349 }
350 #endif
351
352 size = sizeof(AudioDeviceID);
353 auhalHostApi->defaultIn = kAudioDeviceUnknown;
354 auhalHostApi->defaultOut = kAudioDeviceUnknown;
355
356 /* determine the default device. */
357 /* I am not sure how these calls to AudioHardwareGetProperty()
358 could fail, but in case they do, we use the first available
359 device as the default. */
360 if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
361 &size,
362 &auhalHostApi->defaultIn) ) {
363 int i;
364 auhalHostApi->defaultIn = kAudioDeviceUnknown;
365 VDBUG(("Failed to get default input device from OS."));
366 VDBUG((" I will substitute the first available input Device."));
367 for( i=0; i<auhalHostApi->devCount; ++i ) {
368 PaDeviceInfo devInfo;
369 if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
370 auhalHostApi->devIds[i], TRUE ) )
371 if( devInfo.maxInputChannels ) {
372 auhalHostApi->defaultIn = auhalHostApi->devIds[i];
373 break;
374 }
375 }
376 }
377 if( 0 != AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice,
378 &size,
379 &auhalHostApi->defaultOut) ) {
380 int i;
381 auhalHostApi->defaultOut = kAudioDeviceUnknown;
382 VDBUG(("Failed to get default output device from OS."));
383 VDBUG((" I will substitute the first available output Device."));
384 for( i=0; i<auhalHostApi->devCount; ++i ) {
385 PaDeviceInfo devInfo;
386 if( 0 != GetChannelInfo( auhalHostApi, &devInfo,
387 auhalHostApi->devIds[i], FALSE ) )
388 if( devInfo.maxOutputChannels ) {
389 auhalHostApi->defaultOut = auhalHostApi->devIds[i];
390 break;
391 }
392 }
393 }
394
395 VDBUG( ( "Default in : %ld\n", auhalHostApi->defaultIn ) );
396 VDBUG( ( "Default out: %ld\n", auhalHostApi->defaultOut ) );
397
398 return paNoError;
399 }
400
401 static PaError GetChannelInfo( PaMacAUHAL *auhalHostApi,
402 PaDeviceInfo *deviceInfo,
403 AudioDeviceID macCoreDeviceId,
404 int isInput)
405 {
406 UInt32 propSize;
407 PaError err = paNoError;
408 UInt32 i;
409 int numChannels = 0;
410 AudioBufferList *buflist = NULL;
411 UInt32 frameLatency;
412
413 VVDBUG(("GetChannelInfo()\n"));
414
415 /* Get the number of channels from the stream configuration.
416 Fail if we can't get this. */
417
418 err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, NULL));
419 if (err)
420 return err;
421
422 buflist = PaUtil_AllocateMemory(propSize);
423 if( !buflist )
424 return paInsufficientMemory;
425 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyStreamConfiguration, &propSize, buflist));
426 if (err)
427 goto error;
428
429 for (i = 0; i < buflist->mNumberBuffers; ++i)
430 numChannels += buflist->mBuffers[i].mNumberChannels;
431
432 if (isInput)
433 deviceInfo->maxInputChannels = numChannels;
434 else
435 deviceInfo->maxOutputChannels = numChannels;
436
437 if (numChannels > 0) /* do not try to retrieve the latency if there are no channels. */
438 {
439 /* Get the latency. Don't fail if we can't get this. */
440 /* default to something reasonable */
441 deviceInfo->defaultLowInputLatency = .01;
442 deviceInfo->defaultHighInputLatency = .10;
443 deviceInfo->defaultLowOutputLatency = .01;
444 deviceInfo->defaultHighOutputLatency = .10;
445 propSize = sizeof(UInt32);
446 err = WARNING(AudioDeviceGetProperty(macCoreDeviceId, 0, isInput, kAudioDevicePropertyLatency, &propSize, &frameLatency));
447 if (!err)
448 {
449 /** FEEDBACK:
450 * This code was arrived at by trial and error, and some extensive, but not exhaustive
451 * testing. Sebastien Beaulieu <seb@plogue.com> has suggested using
452 * kAudioDevicePropertyLatency + kAudioDevicePropertySafetyOffset + buffer size instead.
453 * At the time this code was written, many users were reporting dropouts with audio
454 * programs that probably used this formula. This was probably
455 * around 10.4.4, and the problem is probably fixed now. So perhaps
456 * his formula should be reviewed and used.
457 * */
458 double secondLatency = frameLatency / deviceInfo->defaultSampleRate;
459 if (isInput)
460 {
461 deviceInfo->defaultLowInputLatency = 3 * secondLatency;
462 deviceInfo->defaultHighInputLatency = 3 * 10 * secondLatency;
463 }
464 else
465 {
466 deviceInfo->defaultLowOutputLatency = 3 * secondLatency;
467 deviceInfo->defaultHighOutputLatency = 3 * 10 * secondLatency;
468 }
469 }
470 }
471 PaUtil_FreeMemory( buflist );
472 return paNoError;
473 error:
474 PaUtil_FreeMemory( buflist );
475 return err;
476 }
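/*
 * Worked example of the latency heuristic in GetChannelInfo() above
 * (illustrative numbers): a device reporting kAudioDevicePropertyLatency of
 * 512 frames at a default sample rate of 44100 Hz gives
 * secondLatency = 512 / 44100 ~= 0.0116 s, hence
 * defaultLowInputLatency  ~= 3  * 0.0116 ~= 35 ms and
 * defaultHighInputLatency ~= 30 * 0.0116 ~= 348 ms.
 */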
477
478 static PaError InitializeDeviceInfo( PaMacAUHAL *auhalHostApi,
479 PaDeviceInfo *deviceInfo,
480 AudioDeviceID macCoreDeviceId,
481 PaHostApiIndex hostApiIndex )
482 {
483 Float64 sampleRate;
484 char *name;
485 PaError err = paNoError;
486 UInt32 propSize;
487
488 VVDBUG(("InitializeDeviceInfo(): macCoreDeviceId=%ld\n", macCoreDeviceId));
489
490 memset(deviceInfo, 0, sizeof(PaDeviceInfo)); /* zero the whole struct, not just a pointer's worth */
491
492 deviceInfo->structVersion = 2;
493 deviceInfo->hostApi = hostApiIndex;
494
495 /* Get the device name. Fail if we can't get it. */
496 err = ERR(AudioDeviceGetPropertyInfo(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, NULL));
497 if (err)
498 return err;
499
500 name = PaUtil_GroupAllocateMemory(auhalHostApi->allocations,propSize);
501 if ( !name )
502 return paInsufficientMemory;
503 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyDeviceName, &propSize, name));
504 if (err)
505 return err;
506 deviceInfo->name = name;
507
508 /* Try to get the default sample rate. Don't fail if we can't get this. */
509 propSize = sizeof(Float64);
510 err = ERR(AudioDeviceGetProperty(macCoreDeviceId, 0, 0, kAudioDevicePropertyNominalSampleRate, &propSize, &sampleRate));
511 if (err)
512 deviceInfo->defaultSampleRate = 0.0;
513 else
514 deviceInfo->defaultSampleRate = sampleRate;
515
516 /* Get the maximum number of input and output channels. Fail if we can't get this. */
517
518 err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 1);
519 if (err)
520 return err;
521
522 err = GetChannelInfo(auhalHostApi, deviceInfo, macCoreDeviceId, 0);
523 if (err)
524 return err;
525
526 return paNoError;
527 }
528
529 PaError PaMacCore_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex )
530 {
531 PaError result = paNoError;
532 int i;
533 PaMacAUHAL *auhalHostApi = NULL;
534 PaDeviceInfo *deviceInfoArray;
535 int unixErr;
536
537 VVDBUG(("PaMacCore_Initialize(): hostApiIndex=%d\n", hostApiIndex));
538
539 SInt32 major;
540 SInt32 minor;
541 Gestalt(gestaltSystemVersionMajor, &major);
542 Gestalt(gestaltSystemVersionMinor, &minor);
543
544 // Starting with 10.6 systems, the HAL notification thread is created internally
545 if (major == 10 && minor >= 6) {
546 CFRunLoopRef theRunLoop = NULL;
547 AudioObjectPropertyAddress theAddress = { kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
548 OSStatus osErr = AudioObjectSetPropertyData (kAudioObjectSystemObject, &theAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
549 if (osErr != noErr) {
550 goto error;
551 }
552 }
553
554 unixErr = initializeXRunListenerList();
555 if( 0 != unixErr ) {
556 return UNIX_ERR(unixErr);
557 }
558
559 auhalHostApi = (PaMacAUHAL*)PaUtil_AllocateMemory( sizeof(PaMacAUHAL) );
560 if( !auhalHostApi )
561 {
562 result = paInsufficientMemory;
563 goto error;
564 }
565
566 auhalHostApi->allocations = PaUtil_CreateAllocationGroup();
567 if( !auhalHostApi->allocations )
568 {
569 result = paInsufficientMemory;
570 goto error;
571 }
572
573 auhalHostApi->devIds = NULL;
574 auhalHostApi->devCount = 0;
575
576 /* get the info we need about the devices */
577 result = gatherDeviceInfo( auhalHostApi );
578 if( result != paNoError )
579 goto error;
580
581 *hostApi = &auhalHostApi->inheritedHostApiRep;
582 (*hostApi)->info.structVersion = 1;
583 (*hostApi)->info.type = paCoreAudio;
584 (*hostApi)->info.name = "Core Audio";
585
586 (*hostApi)->info.defaultInputDevice = paNoDevice;
587 (*hostApi)->info.defaultOutputDevice = paNoDevice;
588
589 (*hostApi)->info.deviceCount = 0;
590
591 if( auhalHostApi->devCount > 0 )
592 {
593 (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory(
594 auhalHostApi->allocations, sizeof(PaDeviceInfo*) * auhalHostApi->devCount);
595 if( !(*hostApi)->deviceInfos )
596 {
597 result = paInsufficientMemory;
598 goto error;
599 }
600
601 /* allocate all device info structs in a contiguous block */
602 deviceInfoArray = (PaDeviceInfo*)PaUtil_GroupAllocateMemory(
603 auhalHostApi->allocations, sizeof(PaDeviceInfo) * auhalHostApi->devCount );
604 if( !deviceInfoArray )
605 {
606 result = paInsufficientMemory;
607 goto error;
608 }
609
610 for( i=0; i < auhalHostApi->devCount; ++i )
611 {
612 int err;
613 err = InitializeDeviceInfo( auhalHostApi, &deviceInfoArray[i],
614 auhalHostApi->devIds[i],
615 hostApiIndex );
616 if (err == paNoError)
617 { /* copy some info and set the defaults */
618 (*hostApi)->deviceInfos[(*hostApi)->info.deviceCount] = &deviceInfoArray[i];
619 if (auhalHostApi->devIds[i] == auhalHostApi->defaultIn)
620 (*hostApi)->info.defaultInputDevice = (*hostApi)->info.deviceCount;
621 if (auhalHostApi->devIds[i] == auhalHostApi->defaultOut)
622 (*hostApi)->info.defaultOutputDevice = (*hostApi)->info.deviceCount;
623 (*hostApi)->info.deviceCount++;
624 }
625 else
626 { /* there was an error. we need to shift the devices down, so we ignore this one */
627 int j;
628 auhalHostApi->devCount--;
629 for( j=i; j<auhalHostApi->devCount; ++j )
630 auhalHostApi->devIds[j] = auhalHostApi->devIds[j+1];
631 i--;
632 }
633 }
634 }
635
636 (*hostApi)->Terminate = Terminate;
637 (*hostApi)->OpenStream = OpenStream;
638 (*hostApi)->IsFormatSupported = IsFormatSupported;
639
640 PaUtil_InitializeStreamInterface( &auhalHostApi->callbackStreamInterface,
641 CloseStream, StartStream,
642 StopStream, AbortStream, IsStreamStopped,
643 IsStreamActive,
644 GetStreamTime, GetStreamCpuLoad,
645 PaUtil_DummyRead, PaUtil_DummyWrite,
646 PaUtil_DummyGetReadAvailable,
647 PaUtil_DummyGetWriteAvailable );
648
649 PaUtil_InitializeStreamInterface( &auhalHostApi->blockingStreamInterface,
650 CloseStream, StartStream,
651 StopStream, AbortStream, IsStreamStopped,
652 IsStreamActive,
653 GetStreamTime, PaUtil_DummyGetCpuLoad,
654 ReadStream, WriteStream,
655 GetStreamReadAvailable,
656 GetStreamWriteAvailable );
657
658 return result;
659
660 error:
661 if( auhalHostApi )
662 {
663 if( auhalHostApi->allocations )
664 {
665 PaUtil_FreeAllAllocations( auhalHostApi->allocations );
666 PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
667 }
668
669 PaUtil_FreeMemory( auhalHostApi );
670 }
671 return result;
672 }
673
674
675 static void Terminate( struct PaUtilHostApiRepresentation *hostApi )
676 {
677 int unixErr;
678
679 PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
680
681 VVDBUG(("Terminate()\n"));
682
683 unixErr = destroyXRunListenerList();
684 if( 0 != unixErr )
685 UNIX_ERR(unixErr);
686
687 /*
688 IMPLEMENT ME:
689 - clean up any resources not handled by the allocation group
690 TODO: Double check that everything is handled by alloc group
691 */
692
693 if( auhalHostApi->allocations )
694 {
695 PaUtil_FreeAllAllocations( auhalHostApi->allocations );
696 PaUtil_DestroyAllocationGroup( auhalHostApi->allocations );
697 }
698
699 PaUtil_FreeMemory( auhalHostApi );
700 }
701
702
703 static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi,
704 const PaStreamParameters *inputParameters,
705 const PaStreamParameters *outputParameters,
706 double sampleRate )
707 {
708 int inputChannelCount, outputChannelCount;
709 PaSampleFormat inputSampleFormat, outputSampleFormat;
710
711 VVDBUG(("IsFormatSupported(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld sampleRate=%g\n",
712 inputParameters ? inputParameters->channelCount : -1,
713 inputParameters ? inputParameters->sampleFormat : -1,
714 outputParameters ? outputParameters->channelCount : -1,
715 outputParameters ? outputParameters->sampleFormat : -1,
716 (float) sampleRate ));
717
718 /** These first checks are standard PA checks. We do some fancier checks
719 later. */
720 if( inputParameters )
721 {
722 inputChannelCount = inputParameters->channelCount;
723 inputSampleFormat = inputParameters->sampleFormat;
724
725 /* all standard sample formats are supported by the buffer adapter,
726 this implementation doesn't support any custom sample formats */
727 if( inputSampleFormat & paCustomFormat )
728 return paSampleFormatNotSupported;
729
730 /* unless alternate device specification is supported, reject the use of
731 paUseHostApiSpecificDeviceSpecification */
732
733 if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
734 return paInvalidDevice;
735
736 /* check that input device can support inputChannelCount */
737 if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
738 return paInvalidChannelCount;
739 }
740 else
741 {
742 inputChannelCount = 0;
743 }
744
745 if( outputParameters )
746 {
747 outputChannelCount = outputParameters->channelCount;
748 outputSampleFormat = outputParameters->sampleFormat;
749
750 /* all standard sample formats are supported by the buffer adapter,
751 this implementation doesn't support any custom sample formats */
752 if( outputSampleFormat & paCustomFormat )
753 return paSampleFormatNotSupported;
754
755 /* unless alternate device specification is supported, reject the use of
756 paUseHostApiSpecificDeviceSpecification */
757
758 if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
759 return paInvalidDevice;
760
761 /* check that output device can support outputChannelCount */
762 if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
763 return paInvalidChannelCount;
764
765 }
766 else
767 {
768 outputChannelCount = 0;
769 }
770
771 /* FEEDBACK */
772 /* I think the only way to check a given format SR combo is */
773 /* to try opening it. This could be disruptive, is that Okay? */
774 /* The alternative is to just read off available sample rates, */
775 /* but this will not work 100% of the time (e.g., a device that */
776 /* supports N output at one rate but only N/2 at a higher rate.)*/
777
778 /* The following code opens the device with the requested parameters to
779 see if it works. */
780 {
781 PaError err;
782 PaStream *s;
783 err = OpenStream( hostApi, &s, inputParameters, outputParameters,
784 sampleRate, 1024, 0, (PaStreamCallback *)1, NULL );
785 if( err != paNoError && err != paInvalidSampleRate )
786 DBUG( ( "OpenStream @ %g returned: %d: %s\n",
787 (float) sampleRate, err, Pa_GetErrorText( err ) ) );
788 if( err )
789 return err;
790 err = CloseStream( s );
791 if( err ) {
792 /* FEEDBACK: is this more serious? should we assert? */
793 DBUG( ( "WARNING: could not close Stream. %d: %s\n",
794 err, Pa_GetErrorText( err ) ) );
795 }
796 }
797
798 return paFormatIsSupported;
799 }
800
801 static PaError OpenAndSetupOneAudioUnit(
802 const PaMacCoreStream *stream,
803 const PaStreamParameters *inStreamParams,
804 const PaStreamParameters *outStreamParams,
805 const UInt32 requestedFramesPerBuffer,
806 UInt32 *actualInputFramesPerBuffer,
807 UInt32 *actualOutputFramesPerBuffer,
808 const PaMacAUHAL *auhalHostApi,
809 AudioUnit *audioUnit,
810 AudioConverterRef *srConverter,
811 AudioDeviceID *audioDevice,
812 const double sampleRate,
813 void *refCon )
814 {
815 ComponentDescription desc;
816 Component comp;
817 /*An Apple TN suggests using CAStreamBasicDescription, but that is C++*/
818 AudioStreamBasicDescription desiredFormat;
819 OSStatus result = noErr;
820 PaError paResult = paNoError;
821 int line = 0;
822 UInt32 callbackKey;
823 AURenderCallbackStruct rcbs;
824 unsigned long macInputStreamFlags = paMacCorePlayNice;
825 unsigned long macOutputStreamFlags = paMacCorePlayNice;
826 SInt32 const *inChannelMap = NULL;
827 SInt32 const *outChannelMap = NULL;
828 unsigned long inChannelMapSize = 0;
829 unsigned long outChannelMapSize = 0;
830
831 VVDBUG(("OpenAndSetupOneAudioUnit(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld, requestedFramesPerBuffer=%ld\n",
832 inStreamParams ? inStreamParams->channelCount : -1,
833 inStreamParams ? inStreamParams->sampleFormat : -1,
834 outStreamParams ? outStreamParams->channelCount : -1,
835 outStreamParams ? outStreamParams->sampleFormat : -1,
836 requestedFramesPerBuffer ));
837
838 /* -- handle the degenerate case -- */
839 if( !inStreamParams && !outStreamParams ) {
840 *audioUnit = NULL;
841 *audioDevice = kAudioDeviceUnknown;
842 return paNoError;
843 }
844
845 /* -- get the user's api specific info, if they set any -- */
846 if( inStreamParams && inStreamParams->hostApiSpecificStreamInfo )
847 {
848 macInputStreamFlags=
849 ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
850 ->flags;
851 inChannelMap = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
852 ->channelMap;
853 inChannelMapSize = ((PaMacCoreStreamInfo*)inStreamParams->hostApiSpecificStreamInfo)
854 ->channelMapSize;
855 }
856 if( outStreamParams && outStreamParams->hostApiSpecificStreamInfo )
857 {
858 macOutputStreamFlags=
859 ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
860 ->flags;
861 outChannelMap = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
862 ->channelMap;
863 outChannelMapSize = ((PaMacCoreStreamInfo*)outStreamParams->hostApiSpecificStreamInfo)
864 ->channelMapSize;
865 }
866 /* Override user's flags here, if desired for testing. */
867
868 /*
869 * The HAL AU is a Mac OS style "component".
870 * the first few steps deal with that.
871 * Later steps work on a combination of Mac OS
872 * components and the slightly lower level
873 * HAL.
874 */
875
876 /* -- describe the output type AudioUnit -- */
877 /* Note: for the default AudioUnit, we could use the
878 * componentSubType value kAudioUnitSubType_DefaultOutput;
879 * but I don't think that's relevant here.
880 */
881 desc.componentType = kAudioUnitType_Output;
882 desc.componentSubType = kAudioUnitSubType_HALOutput;
883 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
884 desc.componentFlags = 0;
885 desc.componentFlagsMask = 0;
886 /* -- find the component -- */
887 comp = FindNextComponent( NULL, &desc );
888 if( !comp )
889 {
890 DBUG( ( "AUHAL component not found." ) );
891 *audioUnit = NULL;
892 *audioDevice = kAudioDeviceUnknown;
893 return paUnanticipatedHostError;
894 }
895 /* -- open it -- */
896 result = OpenAComponent( comp, audioUnit );
897 if( result )
898 {
899 DBUG( ( "Failed to open AUHAL component." ) );
900 *audioUnit = NULL;
901 *audioDevice = kAudioDeviceUnknown;
902 return ERR( result );
903 }
904 /* -- prepare a little error handling logic / hackery -- */
905 #define ERR_WRAP(mac_err) do { result = mac_err ; line = __LINE__ ; if ( result != noErr ) goto error ; } while(0)
906
907 /* -- if there is input, we have to explicitly enable input -- */
908 if( inStreamParams )
909 {
910 UInt32 enableIO = 1;
911 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
912 kAudioOutputUnitProperty_EnableIO,
913 kAudioUnitScope_Input,
914 INPUT_ELEMENT,
915 &enableIO,
916 sizeof(enableIO) ) );
917 }
918 /* -- if there is no output, we must explicitly disable output -- */
919 if( !outStreamParams )
920 {
921 UInt32 enableIO = 0;
922 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
923 kAudioOutputUnitProperty_EnableIO,
924 kAudioUnitScope_Output,
925 OUTPUT_ELEMENT,
926 &enableIO,
927 sizeof(enableIO) ) );
928 }
929
930 /* -- set the devices -- */
931 /* make sure input and output are the same device if we are doing input and
932 output. */
933 if( inStreamParams && outStreamParams )
934 {
935 assert( outStreamParams->device == inStreamParams->device );
936 }
937 if( inStreamParams )
938 {
939 *audioDevice = auhalHostApi->devIds[inStreamParams->device] ;
940 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
941 kAudioOutputUnitProperty_CurrentDevice,
942 kAudioUnitScope_Global,
943 INPUT_ELEMENT,
944 audioDevice,
945 sizeof(AudioDeviceID) ) );
946 }
947 if( outStreamParams && outStreamParams != inStreamParams )
948 {
949 *audioDevice = auhalHostApi->devIds[outStreamParams->device] ;
950 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
951 kAudioOutputUnitProperty_CurrentDevice,
952 kAudioUnitScope_Global,
953 OUTPUT_ELEMENT,
954 audioDevice,
955 sizeof(AudioDeviceID) ) );
956 }
957 /* -- add listener for dropouts -- */
958 result = AudioDeviceAddPropertyListener( *audioDevice,
959 0,
960 outStreamParams ? false : true,
961 kAudioDeviceProcessorOverload,
962 xrunCallback,
963 addToXRunListenerList( (void *)stream ) ) ;
964 if( result == kAudioHardwareIllegalOperationError ) {
965 // -- already registered, we're good
966 } else {
967 // -- not already registered, just check for errors
968 ERR_WRAP( result );
969 }
970 /* -- listen for stream start and stop -- */
971 ERR_WRAP( AudioUnitAddPropertyListener( *audioUnit,
972 kAudioOutputUnitProperty_IsRunning,
973 startStopCallback,
974 (void *)stream ) );
975
976 /* -- set format -- */
977 bzero( &desiredFormat, sizeof(desiredFormat) );
978 desiredFormat.mFormatID = kAudioFormatLinearPCM ;
979 desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
980 desiredFormat.mFramesPerPacket = 1;
981 desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
982
983 result = 0;
984 /* set device format first, but only touch the device if the user asked */
985 if( inStreamParams ) {
986 /*The callback never calls back if we don't set the FPB */
987 /*This seems weird, because I would think setting anything on the device
988 would be disruptive.*/
989 paResult = setBestFramesPerBuffer( *audioDevice, FALSE,
990 requestedFramesPerBuffer,
991 actualInputFramesPerBuffer );
992 if( paResult ) goto error;
993 if( macInputStreamFlags & paMacCoreChangeDeviceParameters ) {
994 bool requireExact;
995 requireExact=macInputStreamFlags & paMacCoreFailIfConversionRequired;
996 paResult = setBestSampleRateForDevice( *audioDevice, FALSE,
997 requireExact, sampleRate );
998 if( paResult ) goto error;
999 }
1000 if( actualInputFramesPerBuffer && actualOutputFramesPerBuffer )
1001 *actualOutputFramesPerBuffer = *actualInputFramesPerBuffer ;
1002 }
1003 if( outStreamParams && !inStreamParams ) {
1004 /*The callback never calls back if we don't set the FPB */
1005 /*This seems weird, because I would think setting anything on the device
1006 would be disruptive.*/
1007 paResult = setBestFramesPerBuffer( *audioDevice, TRUE,
1008 requestedFramesPerBuffer,
1009 actualOutputFramesPerBuffer );
1010 if( paResult ) goto error;
1011 if( macOutputStreamFlags & paMacCoreChangeDeviceParameters ) {
1012 bool requireExact;
1013 requireExact=macOutputStreamFlags & paMacCoreFailIfConversionRequired;
1014 paResult = setBestSampleRateForDevice( *audioDevice, TRUE,
1015 requireExact, sampleRate );
1016 if( paResult ) goto error;
1017 }
1018 }
1019
1020 /* -- set the quality of the output converter -- */
1021 if( outStreamParams ) {
1022 UInt32 value = kAudioConverterQuality_Max;
1023 switch( macOutputStreamFlags & 0x0700 ) {
1024 case 0x0100: /*paMacCore_ConversionQualityMin:*/
1025 value=kRenderQuality_Min;
1026 break;
1027 case 0x0200: /*paMacCore_ConversionQualityLow:*/
1028 value=kRenderQuality_Low;
1029 break;
1030 case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1031 value=kRenderQuality_Medium;
1032 break;
1033 case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1034 value=kRenderQuality_High;
1035 break;
1036 }
1037 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1038 kAudioUnitProperty_RenderQuality,
1039 kAudioUnitScope_Global,
1040 OUTPUT_ELEMENT,
1041 &value,
1042 sizeof(value) ) );
1043 }
1044 /* now set the format on the Audio Units. */
1045 if( outStreamParams )
1046 {
1047 desiredFormat.mSampleRate =sampleRate;
1048 desiredFormat.mBytesPerPacket=sizeof(float)*outStreamParams->channelCount;
1049 desiredFormat.mBytesPerFrame =sizeof(float)*outStreamParams->channelCount;
1050 desiredFormat.mChannelsPerFrame = outStreamParams->channelCount;
1051 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1052 kAudioUnitProperty_StreamFormat,
1053 kAudioUnitScope_Input,
1054 OUTPUT_ELEMENT,
1055 &desiredFormat,
1056 sizeof(AudioStreamBasicDescription) ) );
1057 }
1058 if( inStreamParams )
1059 {
1060 AudioStreamBasicDescription sourceFormat;
1061 UInt32 size = sizeof( AudioStreamBasicDescription );
1062
1063 /* keep the sample rate of the device, or we confuse AUHAL */
1064 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1065 kAudioUnitProperty_StreamFormat,
1066 kAudioUnitScope_Input,
1067 INPUT_ELEMENT,
1068 &sourceFormat,
1069 &size ) );
1070 desiredFormat.mSampleRate = sourceFormat.mSampleRate;
1071 desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1072 desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1073 desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1074 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1075 kAudioUnitProperty_StreamFormat,
1076 kAudioUnitScope_Output,
1077 INPUT_ELEMENT,
1078 &desiredFormat,
1079 sizeof(AudioStreamBasicDescription) ) );
1080 }
1081 /* set the maximumFramesPerSlice */
1082 /* not doing this causes real problems
1083 (eg. the callback might not be called). The idea of setting both this
1084 and the frames per buffer on the device is that we'll be most likely
1085 to actually get the frame size we requested in the callback with the
1086 minimum latency. */
1087 if( outStreamParams ) {
1088 UInt32 size = sizeof( *actualOutputFramesPerBuffer );
1089 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1090 kAudioUnitProperty_MaximumFramesPerSlice,
1091 kAudioUnitScope_Input,
1092 OUTPUT_ELEMENT,
1093 actualOutputFramesPerBuffer,
1094 sizeof(*actualOutputFramesPerBuffer) ) );
1095 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1096 kAudioUnitProperty_MaximumFramesPerSlice,
1097 kAudioUnitScope_Global,
1098 OUTPUT_ELEMENT,
1099 actualOutputFramesPerBuffer,
1100 &size ) );
1101 }
1102 if( inStreamParams ) {
1103 /*UInt32 size = sizeof( *actualInputFramesPerBuffer );*/
1104 ERR_WRAP( AudioUnitSetProperty( *audioUnit,
1105 kAudioUnitProperty_MaximumFramesPerSlice,
1106 kAudioUnitScope_Output,
1107 INPUT_ELEMENT,
1108 actualInputFramesPerBuffer,
1109 sizeof(*actualInputFramesPerBuffer) ) );
1110 /* Don't know why this causes problems
1111 ERR_WRAP( AudioUnitGetProperty( *audioUnit,
1112 kAudioUnitProperty_MaximumFramesPerSlice,
1113 kAudioUnitScope_Global, //Output,
1114 INPUT_ELEMENT,
1115 actualInputFramesPerBuffer,
1116 &size ) );
1117 */
1118 }
1119
1120 /* -- if we have input, we may need to setup an SR converter -- */
1121 /* even if we got the sample rate we asked for, we need to do
1122 the conversion in case another program changes the underlying SR. */
1123 /* FIXME: I think we need to monitor stream and change the converter if the incoming format changes. */
1124 if( inStreamParams ) {
1125 AudioStreamBasicDescription desiredFormat;
1126 AudioStreamBasicDescription sourceFormat;
1127 UInt32 sourceSize = sizeof( sourceFormat );
1128 bzero( &desiredFormat, sizeof(desiredFormat) );
1129 desiredFormat.mSampleRate = sampleRate;
1130 desiredFormat.mFormatID = kAudioFormatLinearPCM ;
1131 desiredFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
1132 desiredFormat.mFramesPerPacket = 1;
1133 desiredFormat.mBitsPerChannel = sizeof( float ) * 8;
1134 desiredFormat.mBytesPerPacket=sizeof(float)*inStreamParams->channelCount;
1135 desiredFormat.mBytesPerFrame =sizeof(float)*inStreamParams->channelCount;
1136 desiredFormat.mChannelsPerFrame = inStreamParams->channelCount;
1137
1138 /* get the source format */
1139 ERR_WRAP( AudioUnitGetProperty(
1140 *audioUnit,
1141 kAudioUnitProperty_StreamFormat,
1142 kAudioUnitScope_Output,
1143 INPUT_ELEMENT,
1144 &sourceFormat,
1145 &sourceSize ) );
1146
1147 if( desiredFormat.mSampleRate != sourceFormat.mSampleRate )
1148 {
1149 UInt32 value = kAudioConverterQuality_Max;
1150 switch( macInputStreamFlags & 0x0700 ) {
1151 case 0x0100: /*paMacCore_ConversionQualityMin:*/
1152 value=kAudioConverterQuality_Min;
1153 break;
1154 case 0x0200: /*paMacCore_ConversionQualityLow:*/
1155 value=kAudioConverterQuality_Low;
1156 break;
1157 case 0x0300: /*paMacCore_ConversionQualityMedium:*/
1158 value=kAudioConverterQuality_Medium;
1159 break;
1160 case 0x0400: /*paMacCore_ConversionQualityHigh:*/
1161 value=kAudioConverterQuality_High;
1162 break;
1163 }
1164 VDBUG(( "Creating sample rate converter for input"
1165 " to convert from %g to %g\n",
1166 (float)sourceFormat.mSampleRate,
1167 (float)desiredFormat.mSampleRate ) );
1168 /* create our converter */
1169 ERR_WRAP( AudioConverterNew(
1170 &sourceFormat,
1171 &desiredFormat,
1172 srConverter ) );
1173 /* Set quality */
1174 ERR_WRAP( AudioConverterSetProperty(
1175 *srConverter,
1176 kAudioConverterSampleRateConverterQuality,
1177 sizeof( value ),
1178 &value ) );
1179 }
1180 }
1181 /* -- set IOProc (callback) -- */
1182 callbackKey = outStreamParams ? kAudioUnitProperty_SetRenderCallback
1183 : kAudioOutputUnitProperty_SetInputCallback ;
1184 rcbs.inputProc = AudioIOProc;
1185 rcbs.inputProcRefCon = refCon;
1186 ERR_WRAP( AudioUnitSetProperty(
1187 *audioUnit,
1188 callbackKey,
1189 kAudioUnitScope_Output,
1190 outStreamParams ? OUTPUT_ELEMENT : INPUT_ELEMENT,
1191 &rcbs,
1192 sizeof(rcbs)) );
1193
1194 if( inStreamParams && outStreamParams && *srConverter )
1195 ERR_WRAP( AudioUnitSetProperty(
1196 *audioUnit,
1197 kAudioOutputUnitProperty_SetInputCallback,
1198 kAudioUnitScope_Output,
1199 INPUT_ELEMENT,
1200 &rcbs,
1201 sizeof(rcbs)) );
1202
1203 /* channel mapping. */
1204 if(inChannelMap)
1205 {
1206 UInt32 mapSize = inChannelMapSize *sizeof(SInt32);
1207
1208 //for each channel of desired input, map the channel from
1209 //the device's output channel.
1210 ERR_WRAP( AudioUnitSetProperty(*audioUnit,
1211 kAudioOutputUnitProperty_ChannelMap,
1212 kAudioUnitScope_Output,
1213 INPUT_ELEMENT,
1214 inChannelMap,
1215 mapSize));
1216 }
1217 if(outChannelMap)
1218 {
1219 UInt32 mapSize = outChannelMapSize *sizeof(SInt32);
1220
1221 //for each channel of desired output, map the channel from
1222 //the device's output channel.
1223 ERR_WRAP(AudioUnitSetProperty(*audioUnit,
1224 kAudioOutputUnitProperty_ChannelMap,
1225 kAudioUnitScope_Output,
1226 OUTPUT_ELEMENT,
1227 outChannelMap,
1228 mapSize));
1229 }
1230 /* initialize the audio unit */
1231 ERR_WRAP( AudioUnitInitialize(*audioUnit) );
1232
1233 if( inStreamParams && outStreamParams )
1234 VDBUG( ("Opened device %ld for input and output.\n", *audioDevice ) );
1235 else if( inStreamParams )
1236 VDBUG( ("Opened device %ld for input.\n", *audioDevice ) );
1237 else if( outStreamParams )
1238 VDBUG( ("Opened device %ld for output.\n", *audioDevice ) );
1239 return paNoError;
1240 #undef ERR_WRAP
1241
1242 error:
1243 CloseComponent( *audioUnit );
1244 *audioUnit = NULL;
1245 if( result )
1246 return PaMacCore_SetError( result, line, 1 );
1247 return paResult;
1248 }
1249
1250 /* see pa_hostapi.h for a list of validity guarantees made about OpenStream parameters */
1251 static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi,
1252 PaStream** s,
1253 const PaStreamParameters *inputParameters,
1254 const PaStreamParameters *outputParameters,
1255 double sampleRate,
1256 unsigned long framesPerBuffer,
1257 PaStreamFlags streamFlags,
1258 PaStreamCallback *streamCallback,
1259 void *userData )
1260 {
1261 PaError result = paNoError;
1262 PaMacAUHAL *auhalHostApi = (PaMacAUHAL*)hostApi;
1263 PaMacCoreStream *stream = 0;
1264 int inputChannelCount, outputChannelCount;
1265 PaSampleFormat inputSampleFormat, outputSampleFormat;
1266 PaSampleFormat hostInputSampleFormat, hostOutputSampleFormat;
1267 VVDBUG(("OpenStream(): in chan=%d, in fmt=%ld, out chan=%d, out fmt=%ld SR=%g, FPB=%ld\n",
1268 inputParameters ? inputParameters->channelCount : -1,
1269 inputParameters ? inputParameters->sampleFormat : -1,
1270 outputParameters ? outputParameters->channelCount : -1,
1271 outputParameters ? outputParameters->sampleFormat : -1,
1272 (float) sampleRate,
1273 framesPerBuffer ));
1274 VDBUG( ("Opening Stream.\n") );
1275
1276 /*These first few bits of code are from paSkeleton with few modifications.*/
1277 if( inputParameters )
1278 {
1279 inputChannelCount = inputParameters->channelCount;
1280 inputSampleFormat = inputParameters->sampleFormat;
1281
1282 /* unless alternate device specification is supported, reject the use of
1283 paUseHostApiSpecificDeviceSpecification */
1284
1285 if( inputParameters->device == paUseHostApiSpecificDeviceSpecification )
1286 return paInvalidDevice;
1287
1288 /* check that input device can support inputChannelCount */
1289 if( inputChannelCount > hostApi->deviceInfos[ inputParameters->device ]->maxInputChannels )
1290 return paInvalidChannelCount;
1291
1292 /* Host supports interleaved float32 */
1293 hostInputSampleFormat = paFloat32;
1294 }
1295 else
1296 {
1297 inputChannelCount = 0;
1298 inputSampleFormat = hostInputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1299 }
1300
1301 if( outputParameters )
1302 {
1303 outputChannelCount = outputParameters->channelCount;
1304 outputSampleFormat = outputParameters->sampleFormat;
1305
1306 /* unless alternate device specification is supported, reject the use of
1307 paUseHostApiSpecificDeviceSpecification */
1308
1309 if( outputParameters->device == paUseHostApiSpecificDeviceSpecification )
1310 return paInvalidDevice;
1311
1312 /* check that output device can support inputChannelCount */
1313 if( outputChannelCount > hostApi->deviceInfos[ outputParameters->device ]->maxOutputChannels )
1314 return paInvalidChannelCount;
1315
1316 /* Host supports interleaved float32 */
1317 hostOutputSampleFormat = paFloat32;
1318 }
1319 else
1320 {
1321 outputChannelCount = 0;
1322 outputSampleFormat = hostOutputSampleFormat = paFloat32; /* Suppress 'uninitialized var' warnings. */
1323 }
1324
1325 /* validate platform specific flags */
1326 if( (streamFlags & paPlatformSpecificFlags) != 0 )
1327 return paInvalidFlag; /* unexpected platform specific flag */
1328
1329 stream = (PaMacCoreStream*)PaUtil_AllocateMemory( sizeof(PaMacCoreStream) );
1330 if( !stream )
1331 {
1332 result = paInsufficientMemory;
1333 goto error;
1334 }
1335
1336 /* If we fail after this point, we may be left in a bad state, with
1337 some data structures setup and others not. So, first thing we
1338 do is initialize everything so that if we fail, we know what hasn't
1339 been touched.
1340 */
1341
1342 stream->inputAudioBufferList.mBuffers[0].mData = NULL;
1343 stream->inputRingBuffer.buffer = NULL;
1344 bzero( &stream->blio, sizeof( PaMacBlio ) );
1345 /*
1346 stream->blio.inputRingBuffer.buffer = NULL;
1347 stream->blio.outputRingBuffer.buffer = NULL;
1348 stream->blio.inputSampleFormat = inputParameters?inputParameters->sampleFormat:0;
1349 stream->blio.inputSampleSize = computeSampleSizeFromFormat(stream->blio.inputSampleFormat);
1350 stream->blio.outputSampleFormat=outputParameters?outputParameters->sampleFormat:0;
1351 stream->blio.outputSampleSize = computeSampleSizeFromFormat(stream->blio.outputSampleFormat);
1352 */
1353 stream->inputSRConverter = NULL;
1354 stream->inputUnit = NULL;
1355 stream->outputUnit = NULL;
1356 stream->inputFramesPerBuffer = 0;
1357 stream->outputFramesPerBuffer = 0;
1358 stream->bufferProcessorIsInitialized = FALSE;
1359
1360 /* assert( streamCallback ) ; */ /* only callback mode is implemented */
1361 if( streamCallback )
1362 {
1363 PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1364 &auhalHostApi->callbackStreamInterface,
1365 streamCallback, userData );
1366 }
1367 else
1368 {
1369 PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation,
1370 &auhalHostApi->blockingStreamInterface,
1371 BlioCallback, &stream->blio );
1372 }
1373
1374 PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate );
1375
1376 /* -- handle paFramesPerBufferUnspecified -- */
1377 if( framesPerBuffer == paFramesPerBufferUnspecified ) {
1378 long requested = 64;
1379 if( inputParameters )
1380 requested = MAX( requested, inputParameters->suggestedLatency * sampleRate / 2 );
1381 if( outputParameters )
1382 requested = MAX( requested, outputParameters->suggestedLatency *sampleRate / 2 );
1383 VDBUG( ("Block Size unspecified. Based on Latency, the user wants a Block Size near: %ld.\n",
1384 requested ) );
1385 if( requested <= 64 ) {
1386 /*requested a relatively low latency. make sure this is in range of devices */
1387 /*try to get the device's min natural buffer size and use that (but no smaller than 64).*/
1388 AudioValueRange audioRange;
1389 UInt32 size = sizeof( audioRange );
1390 if( inputParameters ) {
1391 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1392 0,
1393 false,
1394 kAudioDevicePropertyBufferFrameSizeRange,
1395 &size, &audioRange ) );
1396 if( result )
1397 requested = MAX( requested, audioRange.mMinimum );
1398 }
1399 size = sizeof( audioRange );
1400 if( outputParameters ) {
1401 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1402 0,
1403 false,
1404 kAudioDevicePropertyBufferFrameSizeRange,
1405 &size, &audioRange ) );
1406 if( result )
1407 requested = MAX( requested, audioRange.mMinimum );
1408 }
1409 } else {
1410 /* requested a relatively high latency. make sure this is in range of devices */
1411 /*try to get the device's max natural buffer size and use that (but no larger than 1024).*/
1412 AudioValueRange audioRange;
1413 UInt32 size = sizeof( audioRange );
1414 requested = MIN( requested, 1024 );
1415 if( inputParameters ) {
1416 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[inputParameters->device],
1417 0,
1418 false,
1419 kAudioDevicePropertyBufferFrameSizeRange,
1420 &size, &audioRange ) );
1421 if( result )
1422 requested = MIN( requested, audioRange.mMaximum );
1423 }
1424 size = sizeof( audioRange );
1425 if( outputParameters ) {
1426 WARNING( result = AudioDeviceGetProperty( auhalHostApi->devIds[outputParameters->device],
1427 0,
1428 false,
1429 kAudioDevicePropertyBufferFrameSizeRange,
1430 &size, &audioRange ) );
1431 if( result )
1432 requested = MIN( requested, audioRange.mMaximum );
1433 }
1434 }
1435 /* -- double check ranges -- */
1436 if( requested > 1024 ) requested = 1024;
1437 if( requested < 64 ) requested = 64;
1438 VDBUG(("After querying hardware, setting block size to %ld.\n", requested));
1439 framesPerBuffer = requested;
1440 }
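/*
 * Worked example of the block-size heuristic above (illustrative numbers):
 * with paFramesPerBufferUnspecified and an output suggestedLatency of 0.05 s
 * at 44100 Hz, the request is 0.05 * 44100 / 2 ~= 1102 frames; since that is
 * above 64 it is clamped to 1024 and then checked against the device's
 * kAudioDevicePropertyBufferFrameSizeRange before being used as
 * framesPerBuffer.
 */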
1441
1442 /* -- Now we actually open and setup streams. -- */
1443 if( inputParameters && outputParameters && outputParameters->device == inputParameters->device )
1444 { /* full duplex. One device. */
1445 UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1446 UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1447 result = OpenAndSetupOneAudioUnit( stream,
1448 inputParameters,
1449 outputParameters,
1450 framesPerBuffer,
1451 &inputFramesPerBuffer,
1452 &outputFramesPerBuffer,
1453 auhalHostApi,
1454 &(stream->inputUnit),
1455 &(stream->inputSRConverter),
1456 &(stream->inputDevice),
1457 sampleRate,
1458 stream );
1459 stream->inputFramesPerBuffer = inputFramesPerBuffer;
1460 stream->outputFramesPerBuffer = outputFramesPerBuffer;
1461 stream->outputUnit = stream->inputUnit;
1462 stream->outputDevice = stream->inputDevice;
1463 if( result != paNoError )
1464 goto error;
1465 }
1466 else
1467 { /* full duplex, different devices OR simplex */
1468 UInt32 outputFramesPerBuffer = (UInt32) stream->outputFramesPerBuffer;
1469 UInt32 inputFramesPerBuffer = (UInt32) stream->inputFramesPerBuffer;
1470 result = OpenAndSetupOneAudioUnit( stream,
1471 NULL,
1472 outputParameters,
1473 framesPerBuffer,
1474 NULL,
1475 &outputFramesPerBuffer,
1476 auhalHostApi,
1477 &(stream->outputUnit),
1478 NULL,
1479 &(stream->outputDevice),
1480 sampleRate,
1481 stream );
1482 if( result != paNoError )
1483 goto error;
1484 result = OpenAndSetupOneAudioUnit( stream,
1485 inputParameters,
1486 NULL,
1487 framesPerBuffer,
1488 &inputFramesPerBuffer,
1489 NULL,
1490 auhalHostApi,
1491 &(stream->inputUnit),
1492 &(stream->inputSRConverter),
1493 &(stream->inputDevice),
1494 sampleRate,
1495 stream );
1496 if( result != paNoError )
1497 goto error;
1498 stream->inputFramesPerBuffer = inputFramesPerBuffer;
1499 stream->outputFramesPerBuffer = outputFramesPerBuffer;
1500 }
1501
1502 if( stream->inputUnit ) {
1503 const size_t szfl = sizeof(float);
1504 /* setup the AudioBufferList used for input */
1505 bzero( &stream->inputAudioBufferList, sizeof( AudioBufferList ) );
1506 stream->inputAudioBufferList.mNumberBuffers = 1;
1507 stream->inputAudioBufferList.mBuffers[0].mNumberChannels
1508 = inputChannelCount;
1509 stream->inputAudioBufferList.mBuffers[0].mDataByteSize
1510 = stream->inputFramesPerBuffer*inputChannelCount*szfl;
1511 stream->inputAudioBufferList.mBuffers[0].mData
1512 = (float *) calloc(
1513 stream->inputFramesPerBuffer*inputChannelCount,
1514 szfl );
1515 if( !stream->inputAudioBufferList.mBuffers[0].mData )
1516 {
1517 result = paInsufficientMemory;
1518 goto error;
1519 }
1520
1521 /*
1522 * If input and output devs are different or we are doing SR conversion,
1523 * we also need a
1524 * ring buffer to store input data while waiting for output
1525 * data.
1526 */
1527 if( (stream->outputUnit && stream->inputUnit != stream->outputUnit)
1528 || stream->inputSRConverter )
1529 {
1530 /* May want the ringSize or initial position in
1531 ring buffer to depend somewhat on sample rate change */
1532
1533 void *data;
1534 long ringSize;
1535
1536 ringSize = computeRingBufferSize( inputParameters,
1537 outputParameters,
1538 stream->inputFramesPerBuffer,
1539 stream->outputFramesPerBuffer,
1540 sampleRate );
1541 /*ringSize <<= 4; *//*16x bigger, for testing */
1542
1543
1544 /*now, we need to allocate memory for the ring buffer*/
1545 data = calloc( ringSize, szfl );
1546 if( !data )
1547 {
1548 result = paInsufficientMemory;
1549 goto error;
1550 }
1551
1552 /* now we can initialize the ring buffer */
1553 //FIXME: element size would probably be szfl*inputchan
1554 // but that will require some work all over the
1555 // place to patch up. szfl may be sufficient and would
1556 // be way easier to handle, but it seems clear from the
1557 // discussion that buffer processor compatibility
1558 // requires szfl*inputchan.
1559 // See revision 1346 and discussion:
1560 // http://techweb.rfa.org/pipermail/portaudio/2008-February/008295.html
1561 PaUtil_InitializeRingBuffer( &stream->inputRingBuffer,
1562 1, ringSize*szfl, data ) ;
1563 /* advance the read point a little, so we are reading from the
1564 middle of the buffer */
1565 if( stream->outputUnit )
1566 PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer, ringSize*szfl / RING_BUFFER_ADVANCE_DENOMINATOR );
1567 }
1568 }
1569
1570 /* -- initialize Blio Buffer Processors -- */
1571 if( !streamCallback )
1572 {
1573 long ringSize;
1574
1575 ringSize = computeRingBufferSize( inputParameters,
1576 outputParameters,
1577 stream->inputFramesPerBuffer,
1578 stream->outputFramesPerBuffer,
1579 sampleRate );
1580 result = initializeBlioRingBuffers( &stream->blio,
1581 inputParameters?inputParameters->sampleFormat:0 ,
1582 outputParameters?outputParameters->sampleFormat:0 ,
1583 MAX(stream->inputFramesPerBuffer,stream->outputFramesPerBuffer),
1584 ringSize,
1585 inputParameters?inputChannelCount:0 ,
1586 outputParameters?outputChannelCount:0 ) ;
1587 if( result != paNoError )
1588 goto error;
1589 }
1590
1591 /* -- initialize Buffer Processor -- */
1592 {
1593 unsigned long maxHostFrames = stream->inputFramesPerBuffer;
1594 if( stream->outputFramesPerBuffer > maxHostFrames )
1595 maxHostFrames = stream->outputFramesPerBuffer;
1596 result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor,
1597 inputChannelCount, inputSampleFormat,
1598 hostInputSampleFormat,
1599 outputChannelCount, outputSampleFormat,
1600 hostOutputSampleFormat,
1601 sampleRate,
1602 streamFlags,
1603 framesPerBuffer,
1604 /* If sample rate conversion takes place, the buffer size
1605 will not be known. */
1606 maxHostFrames,
1607 stream->inputSRConverter
1608 ? paUtilUnknownHostBufferSize
1609 : paUtilBoundedHostBufferSize,
1610 streamCallback ? streamCallback : BlioCallback,
1611 streamCallback ? userData : &stream->blio );
1612 if( result != paNoError )
1613 goto error;
1614 }
1615 stream->bufferProcessorIsInitialized = TRUE;
1616
1617 /*
1618 IMPLEMENT ME: initialise the following fields with estimated or actual
1619 values.
1620 I think this is okay the way it is. -br 12/1/05
1621 We may need to change the input latency estimate if the I/O devices differ.
1622 */
1623 stream->streamRepresentation.streamInfo.inputLatency =
1624 PaUtil_GetBufferProcessorInputLatency(&stream->bufferProcessor)/sampleRate;
1625 stream->streamRepresentation.streamInfo.outputLatency =
1626 PaUtil_GetBufferProcessorOutputLatency(&stream->bufferProcessor)/sampleRate;
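/* For example, if the buffer processor reports 512 frames of output
   latency and sampleRate is 44100, outputLatency works out to roughly
   512 / 44100 = 0.0116 seconds (illustrative numbers only). */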
1627 stream->streamRepresentation.streamInfo.sampleRate = sampleRate;
1628
1629 stream->sampleRate = sampleRate;
1630 stream->outDeviceSampleRate = 0;
1631 if( stream->outputUnit ) {
1632 Float64 rate;
1633 UInt32 size = sizeof( rate );
1634 result = ERR( AudioDeviceGetProperty( stream->outputDevice,
1635 0,
1636 FALSE,
1637 kAudioDevicePropertyNominalSampleRate,
1638 &size, &rate ) );
1639 if( result )
1640 goto error;
1641 stream->outDeviceSampleRate = rate;
1642 }
1643 stream->inDeviceSampleRate = 0;
1644 if( stream->inputUnit ) {
1645 Float64 rate;
1646 UInt32 size = sizeof( rate );
1647 result = ERR( AudioDeviceGetProperty( stream->inputDevice,
1648 0,
1649 TRUE,
1650 kAudioDevicePropertyNominalSampleRate,
1651 &size, &rate ) );
1652 if( result )
1653 goto error;
1654 stream->inDeviceSampleRate = rate;
1655 }
1656 stream->userInChan = inputChannelCount;
1657 stream->userOutChan = outputChannelCount;
1658
1659 stream->isTimeSet = FALSE;
1660 stream->state = STOPPED;
1661 stream->xrunFlags = 0;
1662
1663 *s = (PaStream*)stream;
1664
1665 return result;
1666
1667 error:
1668 CloseStream( stream );
1669 return result;
1670 }
1671
1672 PaTime GetStreamTime( PaStream *s )
1673 {
1674 /* FIXME: I am not at all sure this timing info stuff is right.
1675 patest_sine_time reports negative latencies, which is weird. */
1676 PaMacCoreStream *stream = (PaMacCoreStream*)s;
1677 AudioTimeStamp timeStamp;
1678
1679 VVDBUG(("GetStreamTime()\n"));
1680
1681 if ( !stream->isTimeSet )
1682 return (PaTime)0;
1683
1684 if ( stream->outputDevice ) {
1685 AudioDeviceGetCurrentTime( stream->outputDevice, &timeStamp);
1686 return (PaTime)(timeStamp.mSampleTime - stream->startTime.mSampleTime)/stream->outDeviceSampleRate;
1687 } else if ( stream->inputDevice ) {
1688 AudioDeviceGetCurrentTime( stream->inputDevice, &timeStamp);
1689 return (PaTime)(timeStamp.mSampleTime - stream->startTime.mSampleTime)/stream->inDeviceSampleRate;
1690 } else {
1691 return (PaTime)0;
1692 }
1693 }
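/* A minimal sketch of how a client sees this through the public API
   (hypothetical stream handle, illustrative only):

       PaTime now = Pa_GetStreamTime( paStream );
       // 'now' is seconds of device sample time elapsed since
       // setStreamStartTime() (below) captured startTime, or 0 before that.
*/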
1694
1695 static void setStreamStartTime( PaStream *stream )
1696 {
1697 /* FIXME: I am not at all sure this timing info stuff is right.
1698 patest_sine_time reports negative latencies, which is weird. */
1699 PaMacCoreStream *s = (PaMacCoreStream *) stream;
1700 VVDBUG(("setStreamStartTime()\n"));
1701 if( s->outputDevice )
1702 AudioDeviceGetCurrentTime( s->outputDevice, &s->startTime);
1703 else if( s->inputDevice )
1704 AudioDeviceGetCurrentTime( s->inputDevice, &s->startTime);
1705 else
1706 bzero( &s->startTime, sizeof( s->startTime ) );
1707
1708 //FIXME: we need a memory barrier here
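/* One possibility (sketch only): OSMemoryBarrier() from
   <libkern/OSAtomic.h>, which this file already includes, would order
   the startTime stores above before the isTimeSet store below. */
/* OSMemoryBarrier(); */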
1709
1710 s->isTimeSet = TRUE;
1711 }
1712
1713
1714 static PaTime TimeStampToSecs(PaMacCoreStream *stream, const AudioTimeStamp* timeStamp)
1715 {
1716 VVDBUG(("TimeStampToSecs()\n"));
1717 //printf( "ATS: %lu, %g, %g\n", timeStamp->mFlags, timeStamp->mSampleTime, timeStamp->mRateScalar );
1718 if (timeStamp->mFlags & kAudioTimeStampSampleTimeValid)
1719 return (timeStamp->mSampleTime / stream->sampleRate);
1720 else
1721 return 0;
1722 }
1723
1724 #define RING_BUFFER_EMPTY (1000)
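/* RING_BUFFER_EMPTY is a sentinel status returned by ringBufferIOProc()
   when no input is buffered; AudioConverterFillBuffer() hands it back to
   the caller, which treats it as an underflow rather than a hard error.
   The value 1000 is presumably chosen to avoid colliding with real
   OSStatus codes. */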
1725
1726 static OSStatus ringBufferIOProc( AudioConverterRef inAudioConverter,
1727 UInt32*ioDataSize,
1728 void** outData,
1729 void*inUserData )
1730 {
1731 void *dummyData;
1732 ring_buffer_size_t dummySize;
1733 PaUtilRingBuffer *rb = (PaUtilRingBuffer *) inUserData;
1734
1735 VVDBUG(("ringBufferIOProc()\n"));
1736
1737 if( PaUtil_GetRingBufferReadAvailable( rb ) == 0 ) {
1738 *outData = NULL;
1739 *ioDataSize = 0;
1740 return RING_BUFFER_EMPTY;
1741 }
1742 assert(sizeof(UInt32) == sizeof(ring_buffer_size_t));
1743 PaUtil_GetRingBufferReadRegions( rb, *ioDataSize,
1744 outData, (ring_buffer_size_t *)ioDataSize,
1745 &dummyData, &dummySize );
1746
1747 assert( *ioDataSize );
1748 PaUtil_AdvanceRingBufferReadIndex( rb, *ioDataSize );
1749
1750 return noErr;
1751 }
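/* This proc is passed to AudioConverterFillBuffer() in AudioIOProc()
   below, which invokes it whenever the sample rate converter needs more
   raw input from the ring buffer. */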
1752
1753 /*
1754 * Called by the AudioUnit API to process audio from the sound card.
1755 * This is where the magic happens.
1756 */
1757 /* FEEDBACK: there is a lot of redundant code here because of how all the cases differ. This makes it hard to maintain, so if there are suggestions for cleaning it up, I'm all ears. */
1758 static OSStatus AudioIOProc( void *inRefCon,
1759 AudioUnitRenderActionFlags *ioActionFlags,
1760 const AudioTimeStamp *inTimeStamp,
1761 UInt32 inBusNumber,
1762 UInt32 inNumberFrames,
1763 AudioBufferList *ioData )
1764 {
1765 unsigned long framesProcessed = 0;
1766 PaStreamCallbackTimeInfo timeInfo = {0,0,0};
1767 PaMacCoreStream *stream = (PaMacCoreStream*)inRefCon;
1768 const bool isRender = inBusNumber == OUTPUT_ELEMENT;
1769 int callbackResult = paContinue ;
1770
1771 VVDBUG(("AudioIOProc()\n"));
1772
1773 PaUtil_BeginCpuLoadMeasurement( &stream->cpuLoadMeasurer );
1774
1775 /* -----------------------------------------------------------------*\
1776 This output may be useful for debugging,
1777 but printing during the callback is a bad enough idea that
1778 it is not enabled by enabling the usual debugging calls.
1779 \* -----------------------------------------------------------------*/
1780 /*
1781 static int renderCount = 0;
1782 static int inputCount = 0;
1783 printf( "------------------- starting render/input\n" );
1784 if( isRender )
1785 printf("Render callback (%d):\t", ++renderCount);
1786 else
1787 printf("Input callback (%d):\t", ++inputCount);
1788 printf( "Call totals: %d (input), %d (render)\n", inputCount, renderCount );
1789
1790 printf( "--- inBusNumber: %lu\n", inBusNumber );
1791 printf( "--- inNumberFrames: %lu\n", inNumberFrames );
1792 printf( "--- %x ioData\n", (unsigned) ioData );
1793 if( ioData )
1794 {
1795 int i=0;
1796 printf( "--- ioData.mNumBuffers %lu: \n", ioData->mNumberBuffers );
1797 for( i=0; i<ioData->mNumberBuffers; ++i )
1798 printf( "--- ioData buffer %d size: %lu.\n", i, ioData->mBuffers[i].mDataByteSize );
1799 }
1800 ----------------------------------------------------------------- */
1801
1802 if( !stream->isTimeSet )
1803 setStreamStartTime( stream );
1804
1805 if( isRender ) {
1806 AudioTimeStamp currentTime;
1807 timeInfo.outputBufferDacTime = TimeStampToSecs(stream, inTimeStamp);
1808 AudioDeviceGetCurrentTime(stream->outputDevice, &currentTime);
1809 timeInfo.currentTime = TimeStampToSecs(stream, &currentTime);
1810 }
1811 if( isRender && stream->inputUnit == stream->outputUnit )
1812 timeInfo.inputBufferAdcTime = TimeStampToSecs(stream, inTimeStamp);
1813 if( !isRender ) {
1814 AudioTimeStamp currentTime;
1815 timeInfo.inputBufferAdcTime = TimeStampToSecs(stream, inTimeStamp);
1816 AudioDeviceGetCurrentTime(stream->inputDevice, &currentTime);
1817 timeInfo.currentTime = TimeStampToSecs(stream, &currentTime);
1818 }
1819
1820 //printf( "---%g, %g, %g\n", timeInfo.inputBufferAdcTime, timeInfo.currentTime, timeInfo.outputBufferDacTime );
1821
1822 if( isRender && stream->inputUnit == stream->outputUnit
1823 && !stream->inputSRConverter )
1824 {
1825 /* --------- Full Duplex, One Device, no SR Conversion -------
1826 *
1827 * This is the lowest latency case, and also the simplest.
1828 * Input data and output data are available at the same time.
1829 * We do not use the input SR converter or the input ring buffer.
1830 *
1831 */
1832 OSStatus err = 0;
1833 unsigned long frames;
1834
1835 /* -- start processing -- */
1836 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
1837 &timeInfo,
1838 stream->xrunFlags );
1839 stream->xrunFlags = 0; //FIXME: this flag also gets set outside by a callback which calls the xrunCallback function. That callback should run in the same thread as the main audio callback, but the Apple docs only say "usually", so it may be possible to lose an xrun notification if that callback happens here.
1840
1841 /* -- compute frames. do some checks -- */
1842 assert( ioData->mNumberBuffers == 1 );
1843 assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
1844 frames = ioData->mBuffers[0].mDataByteSize;
1845 frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
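/* e.g. a 4096-byte output buffer of interleaved stereo float32 holds
   4096 / (4 * 2) = 512 frames (illustrative numbers only). */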
1846 /* -- copy and process input data -- */
1847 err= AudioUnitRender(stream->inputUnit,
1848 ioActionFlags,
1849 inTimeStamp,
1850 INPUT_ELEMENT,
1851 inNumberFrames,
1852 &stream->inputAudioBufferList );
1853 /* FEEDBACK: I'm not sure what to do when this call fails. There's nothing in the PA API for
1854 * reporting failures from inside the callback system. */
1855 assert( !err );
1856
1857 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1858 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1859 0,
1860 stream->inputAudioBufferList.mBuffers[0].mData,
1861 stream->inputAudioBufferList.mBuffers[0].mNumberChannels);
1862 /* -- Copy and process output data -- */
1863 PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
1864 PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
1865 0,
1866 ioData->mBuffers[0].mData,
1867 ioData->mBuffers[0].mNumberChannels);
1868 /* -- complete processing -- */
1869 framesProcessed =
1870 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1871 &callbackResult );
1872 }
1873 else if( isRender )
1874 {
1875 /* -------- Output Side of Full Duplex (Separate Devices or SR Conversion)
1876 * -- OR Simplex Output
1877 *
1878 * This case handles output data as in the full duplex case,
1879 * and, if there is input data, reads it off the ring buffer
1880 * and into the PA buffer processor. If sample rate conversion
1881 * is required on input, that is done here as well.
1882 */
1883 unsigned long frames;
1884
1885 /* Sometimes, when stopping a duplex stream, we get erroneous
1886 xrun flags, so if this is our last run, clear the flags. */
1887 int xrunFlags = stream->xrunFlags;
1888 /*
1889 if( xrunFlags & paInputUnderflow )
1890 printf( "input underflow.\n" );
1891 if( xrunFlags & paInputOverflow )
1892 printf( "input overflow.\n" );
1893 */
1894 if( stream->state == STOPPING || stream->state == CALLBACK_STOPPED )
1895 xrunFlags = 0;
1896
1897 /* -- start processing -- */
1898 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
1899 &timeInfo,
1900 xrunFlags );
1901 stream->xrunFlags = 0; /* FEEDBACK: we only send flags to Buf Proc once */
1902
1903 /* -- Copy and process output data -- */
1904 assert( ioData->mNumberBuffers == 1 );
1905 frames = ioData->mBuffers[0].mDataByteSize;
1906 frames /= sizeof( float ) * ioData->mBuffers[0].mNumberChannels;
1907 assert( ioData->mBuffers[0].mNumberChannels == stream->userOutChan );
1908 PaUtil_SetOutputFrameCount( &(stream->bufferProcessor), frames );
1909 PaUtil_SetInterleavedOutputChannels( &(stream->bufferProcessor),
1910 0,
1911 ioData->mBuffers[0].mData,
1912 ioData->mBuffers[0].mNumberChannels);
1913
1914 /* -- copy and process input data, and complete processing -- */
1915 if( stream->inputUnit ) {
1916 const int flsz = sizeof( float );
1917 /* Here, we read the data out of the ring buffer, through the
1918 audio converter. */
1919 int inChan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels;
1920 if( stream->inputSRConverter )
1921 {
1922 OSStatus err;
1923 UInt32 size;
1924 float data[ inChan * frames ];
1925 size = sizeof( data );
1926 err = AudioConverterFillBuffer(
1927 stream->inputSRConverter,
1928 ringBufferIOProc,
1929 &stream->inputRingBuffer,
1930 &size,
1931 (void *)&data );
1932 if( err == RING_BUFFER_EMPTY )
1933 { /*the ring buffer callback underflowed */
1934 err = 0;
1935 bzero( ((char *)data) + size, sizeof(data)-size );
1936 stream->xrunFlags |= paInputUnderflow;
1937 }
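/* In the underflow case the converter produced only 'size' bytes; the
   rest of 'data' was zero-filled above, so the buffer processor below
   still sees a full block of 'frames' frames. */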
1938 ERR( err );
1939 assert( !err );
1940
1941 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1942 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1943 0,
1944 data,
1945 inChan );
1946 framesProcessed =
1947 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1948 &callbackResult );
1949 }
1950 else
1951 {
1952 /* Without the AudioConverter this is actually a bit more complex,
1953 because we have to do a little buffer processing that the
1954 AudioConverter would otherwise handle for us. */
1955 void *data1, *data2;
1956 ring_buffer_size_t size1, size2;
1957 PaUtil_GetRingBufferReadRegions( &stream->inputRingBuffer,
1958 inChan*frames*flsz,
1959 &data1, &size1,
1960 &data2, &size2 );
1961 if( size1 / ( flsz * inChan ) == frames ) {
1962 /* simplest case: all in first buffer */
1963 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1964 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1965 0,
1966 data1,
1967 inChan );
1968 framesProcessed =
1969 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1970 &callbackResult );
1971 PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1 );
1972 } else if( ( size1 + size2 ) / ( flsz * inChan ) < frames ) {
1973 /*we underflowed. take what data we can, zero the rest.*/
1974 unsigned char data[frames*inChan*flsz];
1975 if( size1 )
1976 memcpy( data, data1, size1 );
1977 if( size2 )
1978 memcpy( data+size1, data2, size2 );
1979 bzero( data+size1+size2, frames*flsz*inChan - size1 - size2 );
1980
1981 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), frames );
1982 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1983 0,
1984 data,
1985 inChan );
1986 framesProcessed =
1987 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
1988 &callbackResult );
1989 PaUtil_AdvanceRingBufferReadIndex( &stream->inputRingBuffer,
1990 size1+size2 );
1991 /* flag underflow */
1992 stream->xrunFlags |= paInputUnderflow;
1993 } else {
1994 /*we got all the data, but split between buffers*/
1995 PaUtil_SetInputFrameCount( &(stream->bufferProcessor),
1996 size1 / ( flsz * inChan ) );
1997 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
1998 0,
1999 data1,
2000 inChan );
2001 PaUtil_Set2ndInputFrameCount( &(stream->bufferProcessor),
2002 size2 / ( flsz * inChan ) );
2003 PaUtil_Set2ndInterleavedInputChannels( &(stream->bufferProcessor),
2004 0,
2005 data2,
2006 inChan );
2007 framesProcessed =
2008 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2009 &callbackResult );
2010 PaUtil_AdvanceRingBufferReadIndex(&stream->inputRingBuffer, size1+size2 );
2011 }
2012 }
2013 } else {
2014 framesProcessed =
2015 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2016 &callbackResult );
2017 }
2018
2019 }
2020 else
2021 {
2022 /* ------------------ Input
2023 *
2024 * First, we read off the audio data and put it in the ring buffer.
2025 * If this is an input-only stream, we need to process it further;
2026 * otherwise, we let the output case deal with it.
2027 */
2028 OSStatus err = 0;
2029 int chan = stream->inputAudioBufferList.mBuffers[0].mNumberChannels ;
2030 /* FIXME: looping here may not actually be necessary, but it was something I tried in testing. */
2031 do {
2032 err= AudioUnitRender(stream->inputUnit,
2033 ioActionFlags,
2034 inTimeStamp,
2035 INPUT_ELEMENT,
2036 inNumberFrames,
2037 &stream->inputAudioBufferList );
2038 if( err == -10874 )
2039 inNumberFrames /= 2;
2040 } while( err == -10874 && inNumberFrames > 1 );
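/* The magic number -10874 appears to be
   kAudioUnitErr_TooManyFramesToProcess; halving inNumberFrames and
   retrying asks the unit for a smaller render that it can satisfy. */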
2041 /* FEEDBACK: I'm not sure what to do when this call fails */
2042 ERR( err );
2043 assert( !err );
2044 if( stream->inputSRConverter || stream->outputUnit )
2045 {
2046 /* If this is duplex or we use a converter, put the data
2047 into the ring buffer. */
2048 long bytesIn, bytesOut;
2049 bytesIn = sizeof( float ) * inNumberFrames * chan;
2050 bytesOut = PaUtil_WriteRingBuffer( &stream->inputRingBuffer,
2051 stream->inputAudioBufferList.mBuffers[0].mData,
2052 bytesIn );
2053 if( bytesIn != bytesOut )
2054 stream->xrunFlags |= paInputOverflow ;
2055 }
2056 else
2057 {
2058 /* for simplex input w/o SR conversion,
2059 just pop the data into the buffer processor.*/
2060 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2061 &timeInfo,
2062 stream->xrunFlags );
2063 stream->xrunFlags = 0;
2064
2065 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), inNumberFrames);
2066 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2067 0,
2068 stream->inputAudioBufferList.mBuffers[0].mData,
2069 chan );
2070 framesProcessed =
2071 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2072 &callbackResult );
2073 }
2074 if( !stream->outputUnit && stream->inputSRConverter )
2075 {
2076 /* ------------------ Simplex Input w/ SR Conversion
2077 *
2078 * If this is a simplex input stream, we need to read off the buffer,
2079 * do our sample rate conversion and pass the results to the buffer
2080 * processor.
2081 * The logic here is complicated somewhat by the fact that we don't
2082 * know how much data is available, so we loop on reasonably sized
2083 * chunks, and let the BufferProcessor deal with the rest.
2084 *
2085 */
2086 /* This might be too big or too small depending on the SR conversion. */
2087 float data[ chan * inNumberFrames ];
2088 OSStatus err;
2089 do
2090 { /*Run the buffer processor until we are out of data*/
2091 UInt32 size;
2092 long f;
2093
2094 size = sizeof( data );
2095 err = AudioConverterFillBuffer(
2096 stream->inputSRConverter,
2097 ringBufferIOProc,
2098 &stream->inputRingBuffer,
2099 &size,
2100 (void *)data );
2101 if( err != RING_BUFFER_EMPTY )
2102 ERR( err );
2103 assert( err == 0 || err == RING_BUFFER_EMPTY );
2104
2105 f = size / ( chan * sizeof(float) );
2106 PaUtil_SetInputFrameCount( &(stream->bufferProcessor), f );
2107 if( f )
2108 {
2109 PaUtil_BeginBufferProcessing( &(stream->bufferProcessor),
2110 &timeInfo,
2111 stream->xrunFlags );
2112 stream->xrunFlags = 0;
2113
2114 PaUtil_SetInterleavedInputChannels( &(stream->bufferProcessor),
2115 0,
2116 data,
2117 chan );
2118 framesProcessed =
2119 PaUtil_EndBufferProcessing( &(stream->bufferProcessor),
2120 &callbackResult );
2121 }
2122 } while( callbackResult == paContinue && !err );
2123 }
2124 }
2125
2126 switch( callbackResult )
2127 {
2128 case paContinue: break;
2129 case paComplete:
2130 case paAbort:
2131 stream->isTimeSet = FALSE;
2132 stream->state = CALLBACK_STOPPED ;
2133 if( stream->outputUnit )
2134 AudioOutputUnitStop(stream->outputUnit);
2135 if( stream->inputUnit )
2136 AudioOutputUnitStop(stream->inputUnit);
2137 break;
2138 }
2139
2140 PaUtil_EndCpuLoadMeasurement( &stream->cpuLoadMeasurer, framesProcessed );
2141 return noErr;
2142 }
2143
2144
2145 /*
2146 When CloseStream() is called, the multi-api layer ensures that
2147 the stream has already been stopped or aborted.
2148 */
2149 static PaError CloseStream( PaStream* s )
2150 {
2151 /* This may be called from a failed OpenStream.
2152 Therefore, each piece of info is treated separately. */
2153 PaError result = paNoError;
2154 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2155
2156 VVDBUG(("CloseStream()\n"));
2157 VDBUG( ( "Closing stream.\n" ) );
2158
2159 if( stream ) {
2160 if( stream->outputUnit ) {
2161 int count = removeFromXRunListenerList( stream );
2162 if( count == 0 )
2163 AudioDeviceRemovePropertyListener( stream->outputDevice,
2164 0,
2165 false,
2166 kAudioDeviceProcessorOverload,
2167 xrunCallback );
2168 }
2169 if( stream->inputUnit && stream->outputUnit != stream->inputUnit ) {
2170 int count = removeFromXRunListenerList( stream );
2171 if( count == 0 )
2172 AudioDeviceRemovePropertyListener( stream->inputDevice,
2173 0,
2174 true,
2175 kAudioDeviceProcessorOverload,
2176 xrunCallback );
2177 }
2178 if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2179 AudioUnitUninitialize( stream->outputUnit );
2180 CloseComponent( stream->outputUnit );
2181 }
2182 stream->outputUnit = NULL;
2183 if( stream->inputUnit )
2184 {
2185 AudioUnitUninitialize( stream->inputUnit );
2186 CloseComponent( stream->inputUnit );
2187 stream->inputUnit = NULL;
2188 }
2189 if( stream->inputRingBuffer.buffer )
2190 free( (void *) stream->inputRingBuffer.buffer );
2191 stream->inputRingBuffer.buffer = NULL;
2192 /*TODO: is there more that needs to be done on error
2193 from AudioConverterDispose?*/
2194 if( stream->inputSRConverter )
2195 ERR( AudioConverterDispose( stream->inputSRConverter ) );
2196 stream->inputSRConverter = NULL;
2197 if( stream->inputAudioBufferList.mBuffers[0].mData )
2198 free( stream->inputAudioBufferList.mBuffers[0].mData );
2199 stream->inputAudioBufferList.mBuffers[0].mData = NULL;
2200
2201 result = destroyBlioRingBuffers( &stream->blio );
2202 if( result )
2203 return result;
2204 if( stream->bufferProcessorIsInitialized )
2205 PaUtil_TerminateBufferProcessor( &stream->bufferProcessor );
2206 PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation );
2207 PaUtil_FreeMemory( stream );
2208 }
2209
2210 return result;
2211 }
2212
2213 static PaError StartStream( PaStream *s )
2214 {
2215 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2216 OSStatus result = noErr;
2217 VVDBUG(("StartStream()\n"));
2218 VDBUG( ( "Starting stream.\n" ) );
2219
2220 #define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
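/* ERR_WRAP evaluates a Core Audio call and, if it fails, returns from
   this function immediately with the OSStatus translated by ERR(). */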
2221
2222 /* FIXME: we may want to do this on close/abort instead, for a faster start. */
2223 PaUtil_ResetBufferProcessor( &stream->bufferProcessor );
2224 if( stream->inputSRConverter )
2225 ERR_WRAP( AudioConverterReset( stream->inputSRConverter ) );
2226
2227 /* -- start -- */
2228 stream->state = ACTIVE;
2229 if( stream->inputUnit ) {
2230 ERR_WRAP( AudioOutputUnitStart(stream->inputUnit) );
2231 }
2232 if( stream->outputUnit && stream->outputUnit != stream->inputUnit ) {
2233 ERR_WRAP( AudioOutputUnitStart(stream->outputUnit) );
2234 }
2235
2236 //setStreamStartTime( stream );
2237 //stream->isTimeSet = TRUE;
2238
2239 return paNoError;
2240 #undef ERR_WRAP
2241 }
2242
2243 // It's not clear from Apple's docs that this really waits
2244 // until all data is flushed.
2245 static ComponentResult BlockWhileAudioUnitIsRunning( AudioUnit audioUnit, AudioUnitElement element )
2246 {
2247 Boolean isRunning = 1;
2248 while( isRunning ) {
2249 UInt32 s = sizeof( isRunning );
2250 ComponentResult err = AudioUnitGetProperty( audioUnit, kAudioOutputUnitProperty_IsRunning, kAudioUnitScope_Global, element, &isRunning, &s );
2251 if( err )
2252 return err;
2253 Pa_Sleep( 100 );
2254 }
2255 return noErr;
2256 }
2257
2258 static PaError StopStream( PaStream *s )
2259 {
2260 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2261 OSStatus result = noErr;
2262 PaError paErr;
2263 VVDBUG(("StopStream()\n"));
2264
2265 VDBUG( ("Waiting for BLIO.\n") );
2266 waitUntilBlioWriteBufferIsFlushed( &stream->blio );
2267 VDBUG( ( "Stopping stream.\n" ) );
2268
2269 stream->isTimeSet = FALSE;
2270 stream->state = STOPPING;
2271
2272 #define ERR_WRAP(mac_err) do { result = mac_err ; if ( result != noErr ) return ERR(result) ; } while(0)
2273 /* -- stop and reset -- */
2274 if( stream->inputUnit == stream->outputUnit && stream->inputUnit )
2275 {
2276 ERR_WRAP( AudioOutputUnitStop(stream->inputUnit) );
2277 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,0) );
2278 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2279 ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 1) );
2280 ERR_WRAP( AudioUnitReset(stream->inputUnit, kAudioUnitScope_Global, 0) );
2281 }
2282 else
2283 {
2284 if( stream->inputUnit )
2285 {
2286 ERR_WRAP(AudioOutputUnitStop(stream->inputUnit) );
2287 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->inputUnit,1) );
2288 ERR_WRAP(AudioUnitReset(stream->inputUnit,kAudioUnitScope_Global,1));
2289 }
2290 if( stream->outputUnit )
2291 {
2292 ERR_WRAP(AudioOutputUnitStop(stream->outputUnit));
2293 ERR_WRAP( BlockWhileAudioUnitIsRunning(stream->outputUnit,0) );
2294 ERR_WRAP(AudioUnitReset(stream->outputUnit,kAudioUnitScope_Global,0));
2295 }
2296 }
2297 if( stream->inputRingBuffer.buffer ) {
2298 PaUtil_FlushRingBuffer( &stream->inputRingBuffer );
2299 bzero( (void *)stream->inputRingBuffer.buffer,
2300 stream->inputRingBuffer.bufferSize );
2301 /* advance the write point a little, so we are reading from the
2302 middle of the buffer. We leave some extra slack at the end
2303 because testing has shown that this helps. */
2304 if( stream->outputUnit )
2305 PaUtil_AdvanceRingBufferWriteIndex( &stream->inputRingBuffer,
2306 stream->inputRingBuffer.bufferSize
2307 / RING_BUFFER_ADVANCE_DENOMINATOR );
2308 }
2309
2310 stream->xrunFlags = 0;
2311 stream->state = STOPPED;
2312
2313 paErr = resetBlioRingBuffers( &stream->blio );
2314 if( paErr )
2315 return paErr;
2316
2317 /*
2318 //stream->isTimeSet = FALSE;
2319 */
2320
2321 VDBUG( ( "Stream Stopped.\n" ) );
2322 return paNoError;
2323 #undef ERR_WRAP
2324 }
2325
2326 static PaError AbortStream( PaStream *s )
2327 {
2328 VVDBUG(("AbortStream()->StopStream()\n"));
2329 VDBUG( ( "Aborting stream.\n" ) );
2330 /* We have nothing faster than StopStream. */
2331 return StopStream(s);
2332 }
2333
2334
2335 static PaError IsStreamStopped( PaStream *s )
2336 {
2337 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2338 VVDBUG(("IsStreamStopped()\n"));
2339
2340 return stream->state == STOPPED ? 1 : 0;
2341 }
2342
2343
2344 static PaError IsStreamActive( PaStream *s )
2345 {
2346 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2347 VVDBUG(("IsStreamActive()\n"));
2348 return ( stream->state == ACTIVE || stream->state == STOPPING );
2349 }
2350
2351
2352 static double GetStreamCpuLoad( PaStream* s )
2353 {
2354 PaMacCoreStream *stream = (PaMacCoreStream*)s;
2355 VVDBUG(("GetStreamCpuLoad()\n"));
2356
2357 return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer );
2358 }
