Mercurial > audio-send
diff Alc/backends/coreaudio.c @ 0:f9476ff7637e
initial forking of open-al to create multiple listeners
| field | value |
|---|---|
| author | Robert McIntyre <rlm@mit.edu> |
| date | Tue, 25 Oct 2011 13:02:31 -0700 |
| parents | (none) |
| children | (none) |
line wrap: on
line diff
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/Alc/backends/coreaudio.c Tue Oct 25 13:02:31 2011 -0700 1.3 @@ -0,0 +1,719 @@ 1.4 +/** 1.5 + * OpenAL cross platform audio library 1.6 + * Copyright (C) 1999-2007 by authors. 1.7 + * This library is free software; you can redistribute it and/or 1.8 + * modify it under the terms of the GNU Library General Public 1.9 + * License as published by the Free Software Foundation; either 1.10 + * version 2 of the License, or (at your option) any later version. 1.11 + * 1.12 + * This library is distributed in the hope that it will be useful, 1.13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 1.14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1.15 + * Library General Public License for more details. 1.16 + * 1.17 + * You should have received a copy of the GNU Library General Public 1.18 + * License along with this library; if not, write to the 1.19 + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 1.20 + * Boston, MA 02111-1307, USA. 
1.21 + * Or go to http://www.gnu.org/copyleft/lgpl.html 1.22 + */ 1.23 + 1.24 +#include "config.h" 1.25 + 1.26 +#include <stdio.h> 1.27 +#include <stdlib.h> 1.28 +#include <string.h> 1.29 + 1.30 +#include "alMain.h" 1.31 +#include "AL/al.h" 1.32 +#include "AL/alc.h" 1.33 + 1.34 +#include <CoreServices/CoreServices.h> 1.35 +#include <unistd.h> 1.36 +#include <AudioUnit/AudioUnit.h> 1.37 +#include <AudioToolbox/AudioToolbox.h> 1.38 + 1.39 + 1.40 +typedef struct { 1.41 + AudioUnit audioUnit; 1.42 + 1.43 + ALuint frameSize; 1.44 + ALdouble sampleRateRatio; // Ratio of hardware sample rate / requested sample rate 1.45 + AudioStreamBasicDescription format; // This is the OpenAL format as a CoreAudio ASBD 1.46 + 1.47 + AudioConverterRef audioConverter; // Sample rate converter if needed 1.48 + AudioBufferList *bufferList; // Buffer for data coming from the input device 1.49 + ALCvoid *resampleBuffer; // Buffer for returned RingBuffer data when resampling 1.50 + 1.51 + RingBuffer *ring; 1.52 +} ca_data; 1.53 + 1.54 +static const ALCchar ca_device[] = "CoreAudio Default"; 1.55 + 1.56 + 1.57 +static void destroy_buffer_list(AudioBufferList* list) 1.58 +{ 1.59 + if(list) 1.60 + { 1.61 + UInt32 i; 1.62 + for(i = 0;i < list->mNumberBuffers;i++) 1.63 + free(list->mBuffers[i].mData); 1.64 + free(list); 1.65 + } 1.66 +} 1.67 + 1.68 +static AudioBufferList* allocate_buffer_list(UInt32 channelCount, UInt32 byteSize) 1.69 +{ 1.70 + AudioBufferList *list; 1.71 + 1.72 + list = calloc(1, sizeof(AudioBufferList) + sizeof(AudioBuffer)); 1.73 + if(list) 1.74 + { 1.75 + list->mNumberBuffers = 1; 1.76 + 1.77 + list->mBuffers[0].mNumberChannels = channelCount; 1.78 + list->mBuffers[0].mDataByteSize = byteSize; 1.79 + list->mBuffers[0].mData = malloc(byteSize); 1.80 + if(list->mBuffers[0].mData == NULL) 1.81 + { 1.82 + free(list); 1.83 + list = NULL; 1.84 + } 1.85 + } 1.86 + return list; 1.87 +} 1.88 + 1.89 +static OSStatus ca_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, 
const AudioTimeStamp *inTimeStamp, 1.90 + UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) 1.91 +{ 1.92 + ALCdevice *device = (ALCdevice*)inRefCon; 1.93 + ca_data *data = (ca_data*)device->ExtraData; 1.94 + 1.95 + aluMixData(device, ioData->mBuffers[0].mData, 1.96 + ioData->mBuffers[0].mDataByteSize / data->frameSize); 1.97 + 1.98 + return noErr; 1.99 +} 1.100 + 1.101 +static OSStatus ca_capture_conversion_callback(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, 1.102 + AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void* inUserData) 1.103 +{ 1.104 + ALCdevice *device = (ALCdevice*)inUserData; 1.105 + ca_data *data = (ca_data*)device->ExtraData; 1.106 + 1.107 + // Read from the ring buffer and store temporarily in a large buffer 1.108 + ReadRingBuffer(data->ring, data->resampleBuffer, (ALsizei)(*ioNumberDataPackets)); 1.109 + 1.110 + // Set the input data 1.111 + ioData->mNumberBuffers = 1; 1.112 + ioData->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame; 1.113 + ioData->mBuffers[0].mData = data->resampleBuffer; 1.114 + ioData->mBuffers[0].mDataByteSize = (*ioNumberDataPackets) * data->format.mBytesPerFrame; 1.115 + 1.116 + return noErr; 1.117 +} 1.118 + 1.119 +static OSStatus ca_capture_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, 1.120 + const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, 1.121 + UInt32 inNumberFrames, AudioBufferList *ioData) 1.122 +{ 1.123 + ALCdevice *device = (ALCdevice*)inRefCon; 1.124 + ca_data *data = (ca_data*)device->ExtraData; 1.125 + AudioUnitRenderActionFlags flags = 0; 1.126 + OSStatus err; 1.127 + 1.128 + // fill the bufferList with data from the input device 1.129 + err = AudioUnitRender(data->audioUnit, &flags, inTimeStamp, 1, inNumberFrames, data->bufferList); 1.130 + if(err != noErr) 1.131 + { 1.132 + ERR("AudioUnitRender error: %d\n", err); 1.133 + return err; 1.134 + } 1.135 + 1.136 + WriteRingBuffer(data->ring, 
data->bufferList->mBuffers[0].mData, inNumberFrames); 1.137 + 1.138 + return noErr; 1.139 +} 1.140 + 1.141 +static ALCboolean ca_open_playback(ALCdevice *device, const ALCchar *deviceName) 1.142 +{ 1.143 + ComponentDescription desc; 1.144 + Component comp; 1.145 + ca_data *data; 1.146 + OSStatus err; 1.147 + 1.148 + if(!deviceName) 1.149 + deviceName = ca_device; 1.150 + else if(strcmp(deviceName, ca_device) != 0) 1.151 + return ALC_FALSE; 1.152 + 1.153 + /* open the default output unit */ 1.154 + desc.componentType = kAudioUnitType_Output; 1.155 + desc.componentSubType = kAudioUnitSubType_DefaultOutput; 1.156 + desc.componentManufacturer = kAudioUnitManufacturer_Apple; 1.157 + desc.componentFlags = 0; 1.158 + desc.componentFlagsMask = 0; 1.159 + 1.160 + comp = FindNextComponent(NULL, &desc); 1.161 + if(comp == NULL) 1.162 + { 1.163 + ERR("FindNextComponent failed\n"); 1.164 + return ALC_FALSE; 1.165 + } 1.166 + 1.167 + data = calloc(1, sizeof(*data)); 1.168 + device->ExtraData = data; 1.169 + 1.170 + err = OpenAComponent(comp, &data->audioUnit); 1.171 + if(err != noErr) 1.172 + { 1.173 + ERR("OpenAComponent failed\n"); 1.174 + free(data); 1.175 + device->ExtraData = NULL; 1.176 + return ALC_FALSE; 1.177 + } 1.178 + 1.179 + return ALC_TRUE; 1.180 +} 1.181 + 1.182 +static void ca_close_playback(ALCdevice *device) 1.183 +{ 1.184 + ca_data *data = (ca_data*)device->ExtraData; 1.185 + 1.186 + CloseComponent(data->audioUnit); 1.187 + 1.188 + free(data); 1.189 + device->ExtraData = NULL; 1.190 +} 1.191 + 1.192 +static ALCboolean ca_reset_playback(ALCdevice *device) 1.193 +{ 1.194 + ca_data *data = (ca_data*)device->ExtraData; 1.195 + AudioStreamBasicDescription streamFormat; 1.196 + AURenderCallbackStruct input; 1.197 + OSStatus err; 1.198 + UInt32 size; 1.199 + 1.200 + /* init and start the default audio unit... 
*/ 1.201 + err = AudioUnitInitialize(data->audioUnit); 1.202 + if(err != noErr) 1.203 + { 1.204 + ERR("AudioUnitInitialize failed\n"); 1.205 + return ALC_FALSE; 1.206 + } 1.207 + 1.208 + err = AudioOutputUnitStart(data->audioUnit); 1.209 + if(err != noErr) 1.210 + { 1.211 + ERR("AudioOutputUnitStart failed\n"); 1.212 + return ALC_FALSE; 1.213 + } 1.214 + 1.215 + /* retrieve default output unit's properties (output side) */ 1.216 + size = sizeof(AudioStreamBasicDescription); 1.217 + err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size); 1.218 + if(err != noErr || size != sizeof(AudioStreamBasicDescription)) 1.219 + { 1.220 + ERR("AudioUnitGetProperty failed\n"); 1.221 + return ALC_FALSE; 1.222 + } 1.223 + 1.224 +#if 0 1.225 + TRACE("Output streamFormat of default output unit -\n"); 1.226 + TRACE(" streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket); 1.227 + TRACE(" streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame); 1.228 + TRACE(" streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel); 1.229 + TRACE(" streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket); 1.230 + TRACE(" streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame); 1.231 + TRACE(" streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate); 1.232 +#endif 1.233 + 1.234 + /* set default output unit's input side to match output side */ 1.235 + err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, size); 1.236 + if(err != noErr) 1.237 + { 1.238 + ERR("AudioUnitSetProperty failed\n"); 1.239 + return ALC_FALSE; 1.240 + } 1.241 + 1.242 + if(device->Frequency != streamFormat.mSampleRate) 1.243 + { 1.244 + if((device->Flags&DEVICE_FREQUENCY_REQUEST)) 1.245 + ERR("CoreAudio does not support changing sample rates (wanted %dhz, got %dhz)\n", device->Frequency, streamFormat.mSampleRate); 1.246 + 
device->Flags &= ~DEVICE_FREQUENCY_REQUEST; 1.247 + 1.248 + device->UpdateSize = (ALuint)((ALuint64)device->UpdateSize * 1.249 + streamFormat.mSampleRate / 1.250 + device->Frequency); 1.251 + device->Frequency = streamFormat.mSampleRate; 1.252 + } 1.253 + 1.254 + /* FIXME: How to tell what channels are what in the output device, and how 1.255 + * to specify what we're giving? eg, 6.0 vs 5.1 */ 1.256 + switch(streamFormat.mChannelsPerFrame) 1.257 + { 1.258 + case 1: 1.259 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.260 + device->FmtChans != DevFmtMono) 1.261 + { 1.262 + ERR("Failed to set %s, got Mono instead\n", DevFmtChannelsString(device->FmtChans)); 1.263 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.264 + } 1.265 + device->FmtChans = DevFmtMono; 1.266 + break; 1.267 + case 2: 1.268 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.269 + device->FmtChans != DevFmtStereo) 1.270 + { 1.271 + ERR("Failed to set %s, got Stereo instead\n", DevFmtChannelsString(device->FmtChans)); 1.272 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.273 + } 1.274 + device->FmtChans = DevFmtStereo; 1.275 + break; 1.276 + case 4: 1.277 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.278 + device->FmtChans != DevFmtQuad) 1.279 + { 1.280 + ERR("Failed to set %s, got Quad instead\n", DevFmtChannelsString(device->FmtChans)); 1.281 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.282 + } 1.283 + device->FmtChans = DevFmtQuad; 1.284 + break; 1.285 + case 6: 1.286 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.287 + device->FmtChans != DevFmtX51) 1.288 + { 1.289 + ERR("Failed to set %s, got 5.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); 1.290 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.291 + } 1.292 + device->FmtChans = DevFmtX51; 1.293 + break; 1.294 + case 7: 1.295 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.296 + device->FmtChans != DevFmtX61) 1.297 + { 1.298 + ERR("Failed to set %s, got 6.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); 
1.299 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.300 + } 1.301 + device->FmtChans = DevFmtX61; 1.302 + break; 1.303 + case 8: 1.304 + if((device->Flags&DEVICE_CHANNELS_REQUEST) && 1.305 + device->FmtChans != DevFmtX71) 1.306 + { 1.307 + ERR("Failed to set %s, got 7.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); 1.308 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.309 + } 1.310 + device->FmtChans = DevFmtX71; 1.311 + break; 1.312 + default: 1.313 + ERR("Unhandled channel count (%d), using Stereo\n", streamFormat.mChannelsPerFrame); 1.314 + device->Flags &= ~DEVICE_CHANNELS_REQUEST; 1.315 + device->FmtChans = DevFmtStereo; 1.316 + streamFormat.mChannelsPerFrame = 2; 1.317 + break; 1.318 + } 1.319 + SetDefaultWFXChannelOrder(device); 1.320 + 1.321 + /* use channel count and sample rate from the default output unit's current 1.322 + * parameters, but reset everything else */ 1.323 + streamFormat.mFramesPerPacket = 1; 1.324 + switch(device->FmtType) 1.325 + { 1.326 + case DevFmtUByte: 1.327 + device->FmtType = DevFmtByte; 1.328 + /* fall-through */ 1.329 + case DevFmtByte: 1.330 + streamFormat.mBitsPerChannel = 8; 1.331 + streamFormat.mBytesPerPacket = streamFormat.mChannelsPerFrame; 1.332 + streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame; 1.333 + break; 1.334 + case DevFmtUShort: 1.335 + case DevFmtFloat: 1.336 + device->FmtType = DevFmtShort; 1.337 + /* fall-through */ 1.338 + case DevFmtShort: 1.339 + streamFormat.mBitsPerChannel = 16; 1.340 + streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame; 1.341 + streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame; 1.342 + break; 1.343 + } 1.344 + streamFormat.mFormatID = kAudioFormatLinearPCM; 1.345 + streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 1.346 + kAudioFormatFlagsNativeEndian | 1.347 + kLinearPCMFormatFlagIsPacked; 1.348 + 1.349 + err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, 
&streamFormat, sizeof(AudioStreamBasicDescription)); 1.350 + if(err != noErr) 1.351 + { 1.352 + ERR("AudioUnitSetProperty failed\n"); 1.353 + return ALC_FALSE; 1.354 + } 1.355 + 1.356 + /* setup callback */ 1.357 + data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType); 1.358 + input.inputProc = ca_callback; 1.359 + input.inputProcRefCon = device; 1.360 + 1.361 + err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct)); 1.362 + if(err != noErr) 1.363 + { 1.364 + ERR("AudioUnitSetProperty failed\n"); 1.365 + return ALC_FALSE; 1.366 + } 1.367 + 1.368 + return ALC_TRUE; 1.369 +} 1.370 + 1.371 +static void ca_stop_playback(ALCdevice *device) 1.372 +{ 1.373 + ca_data *data = (ca_data*)device->ExtraData; 1.374 + OSStatus err; 1.375 + 1.376 + AudioOutputUnitStop(data->audioUnit); 1.377 + err = AudioUnitUninitialize(data->audioUnit); 1.378 + if(err != noErr) 1.379 + ERR("-- AudioUnitUninitialize failed.\n"); 1.380 +} 1.381 + 1.382 +static ALCboolean ca_open_capture(ALCdevice *device, const ALCchar *deviceName) 1.383 +{ 1.384 + AudioStreamBasicDescription requestedFormat; // The application requested format 1.385 + AudioStreamBasicDescription hardwareFormat; // The hardware format 1.386 + AudioStreamBasicDescription outputFormat; // The AudioUnit output format 1.387 + AURenderCallbackStruct input; 1.388 + ComponentDescription desc; 1.389 + AudioDeviceID inputDevice; 1.390 + UInt32 outputFrameCount; 1.391 + UInt32 propertySize; 1.392 + UInt32 enableIO; 1.393 + Component comp; 1.394 + ca_data *data; 1.395 + OSStatus err; 1.396 + 1.397 + desc.componentType = kAudioUnitType_Output; 1.398 + desc.componentSubType = kAudioUnitSubType_HALOutput; 1.399 + desc.componentManufacturer = kAudioUnitManufacturer_Apple; 1.400 + desc.componentFlags = 0; 1.401 + desc.componentFlagsMask = 0; 1.402 + 1.403 + // Search for component with given description 1.404 + comp = 
FindNextComponent(NULL, &desc); 1.405 + if(comp == NULL) 1.406 + { 1.407 + ERR("FindNextComponent failed\n"); 1.408 + return ALC_FALSE; 1.409 + } 1.410 + 1.411 + data = calloc(1, sizeof(*data)); 1.412 + device->ExtraData = data; 1.413 + 1.414 + // Open the component 1.415 + err = OpenAComponent(comp, &data->audioUnit); 1.416 + if(err != noErr) 1.417 + { 1.418 + ERR("OpenAComponent failed\n"); 1.419 + goto error; 1.420 + } 1.421 + 1.422 + // Turn off AudioUnit output 1.423 + enableIO = 0; 1.424 + err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint)); 1.425 + if(err != noErr) 1.426 + { 1.427 + ERR("AudioUnitSetProperty failed\n"); 1.428 + goto error; 1.429 + } 1.430 + 1.431 + // Turn on AudioUnit input 1.432 + enableIO = 1; 1.433 + err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint)); 1.434 + if(err != noErr) 1.435 + { 1.436 + ERR("AudioUnitSetProperty failed\n"); 1.437 + goto error; 1.438 + } 1.439 + 1.440 + // Get the default input device 1.441 + propertySize = sizeof(AudioDeviceID); 1.442 + err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice); 1.443 + if(err != noErr) 1.444 + { 1.445 + ERR("AudioHardwareGetProperty failed\n"); 1.446 + goto error; 1.447 + } 1.448 + 1.449 + if(inputDevice == kAudioDeviceUnknown) 1.450 + { 1.451 + ERR("No input device found\n"); 1.452 + goto error; 1.453 + } 1.454 + 1.455 + // Track the input device 1.456 + err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID)); 1.457 + if(err != noErr) 1.458 + { 1.459 + ERR("AudioUnitSetProperty failed\n"); 1.460 + goto error; 1.461 + } 1.462 + 1.463 + // set capture callback 1.464 + input.inputProc = ca_capture_callback; 1.465 + input.inputProcRefCon = device; 1.466 + 1.467 + err = 
AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct)); 1.468 + if(err != noErr) 1.469 + { 1.470 + ERR("AudioUnitSetProperty failed\n"); 1.471 + goto error; 1.472 + } 1.473 + 1.474 + // Initialize the device 1.475 + err = AudioUnitInitialize(data->audioUnit); 1.476 + if(err != noErr) 1.477 + { 1.478 + ERR("AudioUnitInitialize failed\n"); 1.479 + goto error; 1.480 + } 1.481 + 1.482 + // Get the hardware format 1.483 + propertySize = sizeof(AudioStreamBasicDescription); 1.484 + err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize); 1.485 + if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription)) 1.486 + { 1.487 + ERR("AudioUnitGetProperty failed\n"); 1.488 + goto error; 1.489 + } 1.490 + 1.491 + // Set up the requested format description 1.492 + switch(device->FmtType) 1.493 + { 1.494 + case DevFmtUByte: 1.495 + requestedFormat.mBitsPerChannel = 8; 1.496 + requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked; 1.497 + break; 1.498 + case DevFmtShort: 1.499 + requestedFormat.mBitsPerChannel = 16; 1.500 + requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked; 1.501 + break; 1.502 + case DevFmtFloat: 1.503 + requestedFormat.mBitsPerChannel = 32; 1.504 + requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked; 1.505 + break; 1.506 + case DevFmtByte: 1.507 + case DevFmtUShort: 1.508 + ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType)); 1.509 + goto error; 1.510 + } 1.511 + 1.512 + switch(device->FmtChans) 1.513 + { 1.514 + case DevFmtMono: 1.515 + requestedFormat.mChannelsPerFrame = 1; 1.516 + break; 1.517 + case DevFmtStereo: 1.518 + requestedFormat.mChannelsPerFrame = 2; 1.519 + break; 1.520 + 1.521 + case DevFmtQuad: 1.522 + case DevFmtX51: 1.523 + case DevFmtX51Side: 1.524 + case DevFmtX61: 1.525 
+ case DevFmtX71: 1.526 + ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans)); 1.527 + goto error; 1.528 + } 1.529 + 1.530 + requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8; 1.531 + requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame; 1.532 + requestedFormat.mSampleRate = device->Frequency; 1.533 + requestedFormat.mFormatID = kAudioFormatLinearPCM; 1.534 + requestedFormat.mReserved = 0; 1.535 + requestedFormat.mFramesPerPacket = 1; 1.536 + 1.537 + // save requested format description for later use 1.538 + data->format = requestedFormat; 1.539 + data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType); 1.540 + 1.541 + // Use intermediate format for sample rate conversion (outputFormat) 1.542 + // Set sample rate to the same as hardware for resampling later 1.543 + outputFormat = requestedFormat; 1.544 + outputFormat.mSampleRate = hardwareFormat.mSampleRate; 1.545 + 1.546 + // Determine sample rate ratio for resampling 1.547 + data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency; 1.548 + 1.549 + // The output format should be the requested format, but using the hardware sample rate 1.550 + // This is because the AudioUnit will automatically scale other properties, except for sample rate 1.551 + err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat)); 1.552 + if(err != noErr) 1.553 + { 1.554 + ERR("AudioUnitSetProperty failed\n"); 1.555 + goto error; 1.556 + } 1.557 + 1.558 + // Set the AudioUnit output format frame count 1.559 + outputFrameCount = device->UpdateSize * data->sampleRateRatio; 1.560 + err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount)); 1.561 + if(err != noErr) 1.562 + { 1.563 + ERR("AudioUnitSetProperty failed: %d\n", err); 1.564 + goto error; 
1.565 + } 1.566 + 1.567 + // Set up sample converter 1.568 + err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter); 1.569 + if(err != noErr) 1.570 + { 1.571 + ERR("AudioConverterNew failed: %d\n", err); 1.572 + goto error; 1.573 + } 1.574 + 1.575 + // Create a buffer for use in the resample callback 1.576 + data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio); 1.577 + 1.578 + // Allocate buffer for the AudioUnit output 1.579 + data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio); 1.580 + if(data->bufferList == NULL) 1.581 + { 1.582 + alcSetError(device, ALC_OUT_OF_MEMORY); 1.583 + goto error; 1.584 + } 1.585 + 1.586 + data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates); 1.587 + if(data->ring == NULL) 1.588 + { 1.589 + alcSetError(device, ALC_OUT_OF_MEMORY); 1.590 + goto error; 1.591 + } 1.592 + 1.593 + return ALC_TRUE; 1.594 + 1.595 +error: 1.596 + DestroyRingBuffer(data->ring); 1.597 + free(data->resampleBuffer); 1.598 + destroy_buffer_list(data->bufferList); 1.599 + 1.600 + if(data->audioConverter) 1.601 + AudioConverterDispose(data->audioConverter); 1.602 + if(data->audioUnit) 1.603 + CloseComponent(data->audioUnit); 1.604 + 1.605 + free(data); 1.606 + device->ExtraData = NULL; 1.607 + 1.608 + return ALC_FALSE; 1.609 +} 1.610 + 1.611 +static void ca_close_capture(ALCdevice *device) 1.612 +{ 1.613 + ca_data *data = (ca_data*)device->ExtraData; 1.614 + 1.615 + DestroyRingBuffer(data->ring); 1.616 + free(data->resampleBuffer); 1.617 + destroy_buffer_list(data->bufferList); 1.618 + 1.619 + AudioConverterDispose(data->audioConverter); 1.620 + CloseComponent(data->audioUnit); 1.621 + 1.622 + free(data); 1.623 + device->ExtraData = NULL; 1.624 +} 1.625 + 1.626 +static void ca_start_capture(ALCdevice *device) 1.627 +{ 1.628 + ca_data *data = 
(ca_data*)device->ExtraData; 1.629 + OSStatus err = AudioOutputUnitStart(data->audioUnit); 1.630 + if(err != noErr) 1.631 + ERR("AudioOutputUnitStart failed\n"); 1.632 +} 1.633 + 1.634 +static void ca_stop_capture(ALCdevice *device) 1.635 +{ 1.636 + ca_data *data = (ca_data*)device->ExtraData; 1.637 + OSStatus err = AudioOutputUnitStop(data->audioUnit); 1.638 + if(err != noErr) 1.639 + ERR("AudioOutputUnitStop failed\n"); 1.640 +} 1.641 + 1.642 +static ALCuint ca_available_samples(ALCdevice *device) 1.643 +{ 1.644 + ca_data *data = device->ExtraData; 1.645 + return RingBufferSize(data->ring) / data->sampleRateRatio; 1.646 +} 1.647 + 1.648 +static void ca_capture_samples(ALCdevice *device, ALCvoid *buffer, ALCuint samples) 1.649 +{ 1.650 + ca_data *data = (ca_data*)device->ExtraData; 1.651 + 1.652 + if(samples <= ca_available_samples(device)) 1.653 + { 1.654 + AudioBufferList *list; 1.655 + UInt32 frameCount; 1.656 + OSStatus err; 1.657 + 1.658 + // If no samples are requested, just return 1.659 + if(samples == 0) 1.660 + return; 1.661 + 1.662 + // Allocate a temporary AudioBufferList to use as the return resamples data 1.663 + list = alloca(sizeof(AudioBufferList) + sizeof(AudioBuffer)); 1.664 + 1.665 + // Point the resampling buffer to the capture buffer 1.666 + list->mNumberBuffers = 1; 1.667 + list->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame; 1.668 + list->mBuffers[0].mDataByteSize = samples * data->frameSize; 1.669 + list->mBuffers[0].mData = buffer; 1.670 + 1.671 + // Resample into another AudioBufferList 1.672 + frameCount = samples; 1.673 + err = AudioConverterFillComplexBuffer(data->audioConverter, ca_capture_conversion_callback, device, 1.674 + &frameCount, list, NULL); 1.675 + if(err != noErr) 1.676 + { 1.677 + ERR("AudioConverterFillComplexBuffer error: %d\n", err); 1.678 + alcSetError(device, ALC_INVALID_VALUE); 1.679 + } 1.680 + } 1.681 + else 1.682 + alcSetError(device, ALC_INVALID_VALUE); 1.683 +} 1.684 + 1.685 +static const 
BackendFuncs ca_funcs = { 1.686 + ca_open_playback, 1.687 + ca_close_playback, 1.688 + ca_reset_playback, 1.689 + ca_stop_playback, 1.690 + ca_open_capture, 1.691 + ca_close_capture, 1.692 + ca_start_capture, 1.693 + ca_stop_capture, 1.694 + ca_capture_samples, 1.695 + ca_available_samples 1.696 +}; 1.697 + 1.698 +ALCboolean alc_ca_init(BackendFuncs *func_list) 1.699 +{ 1.700 + *func_list = ca_funcs; 1.701 + return ALC_TRUE; 1.702 +} 1.703 + 1.704 +void alc_ca_deinit(void) 1.705 +{ 1.706 +} 1.707 + 1.708 +void alc_ca_probe(enum DevProbe type) 1.709 +{ 1.710 + switch(type) 1.711 + { 1.712 + case DEVICE_PROBE: 1.713 + AppendDeviceList(ca_device); 1.714 + break; 1.715 + case ALL_DEVICE_PROBE: 1.716 + AppendAllDeviceList(ca_device); 1.717 + break; 1.718 + case CAPTURE_DEVICE_PROBE: 1.719 + AppendCaptureDeviceList(ca_device); 1.720 + break; 1.721 + } 1.722 +}