Mercurial > audio-send
comparison Alc/backends/coreaudio.c @ 0:f9476ff7637e
initial forking of open-al to create multiple listeners
author | Robert McIntyre <rlm@mit.edu> |
---|---|
date | Tue, 25 Oct 2011 13:02:31 -0700 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:f9476ff7637e |
---|---|
1 /** | |
2 * OpenAL cross platform audio library | |
3 * Copyright (C) 1999-2007 by authors. | |
4 * This library is free software; you can redistribute it and/or | |
5 * modify it under the terms of the GNU Library General Public | |
6 * License as published by the Free Software Foundation; either | |
7 * version 2 of the License, or (at your option) any later version. | |
8 * | |
9 * This library is distributed in the hope that it will be useful, | |
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
12 * Library General Public License for more details. | |
13 * | |
14 * You should have received a copy of the GNU Library General Public | |
15 * License along with this library; if not, write to the | |
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
17 * Boston, MA 02111-1307, USA. | |
18 * Or go to http://www.gnu.org/copyleft/lgpl.html | |
19 */ | |
20 | |
21 #include "config.h" | |
22 | |
23 #include <stdio.h> | |
24 #include <stdlib.h> | |
25 #include <string.h> | |
26 | |
27 #include "alMain.h" | |
28 #include "AL/al.h" | |
29 #include "AL/alc.h" | |
30 | |
31 #include <CoreServices/CoreServices.h> | |
32 #include <unistd.h> | |
33 #include <AudioUnit/AudioUnit.h> | |
34 #include <AudioToolbox/AudioToolbox.h> | |
35 | |
36 | |
/* Per-device backend state, stored in ALCdevice::ExtraData for both playback
 * and capture devices. Capture-only members (converter, ring, buffers) stay
 * NULL/zero for playback devices. */
typedef struct {
    AudioUnit audioUnit;                // The output (playback) or HAL I/O (capture) unit

    ALuint frameSize;                   // Bytes per frame of the OpenAL format
    ALdouble sampleRateRatio;           // Ratio of hardware sample rate / requested sample rate
    AudioStreamBasicDescription format; // This is the OpenAL format as a CoreAudio ASBD

    AudioConverterRef audioConverter;   // Sample rate converter if needed
    AudioBufferList *bufferList;        // Buffer for data coming from the input device
    ALCvoid *resampleBuffer;            // Buffer for returned RingBuffer data when resampling

    RingBuffer *ring;                   // Holds captured frames at the hardware rate
} ca_data;

/* The single device name this backend exposes. */
static const ALCchar ca_device[] = "CoreAudio Default";
52 | |
53 | |
54 static void destroy_buffer_list(AudioBufferList* list) | |
55 { | |
56 if(list) | |
57 { | |
58 UInt32 i; | |
59 for(i = 0;i < list->mNumberBuffers;i++) | |
60 free(list->mBuffers[i].mData); | |
61 free(list); | |
62 } | |
63 } | |
64 | |
65 static AudioBufferList* allocate_buffer_list(UInt32 channelCount, UInt32 byteSize) | |
66 { | |
67 AudioBufferList *list; | |
68 | |
69 list = calloc(1, sizeof(AudioBufferList) + sizeof(AudioBuffer)); | |
70 if(list) | |
71 { | |
72 list->mNumberBuffers = 1; | |
73 | |
74 list->mBuffers[0].mNumberChannels = channelCount; | |
75 list->mBuffers[0].mDataByteSize = byteSize; | |
76 list->mBuffers[0].mData = malloc(byteSize); | |
77 if(list->mBuffers[0].mData == NULL) | |
78 { | |
79 free(list); | |
80 list = NULL; | |
81 } | |
82 } | |
83 return list; | |
84 } | |
85 | |
86 static OSStatus ca_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, | |
87 UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) | |
88 { | |
89 ALCdevice *device = (ALCdevice*)inRefCon; | |
90 ca_data *data = (ca_data*)device->ExtraData; | |
91 | |
92 aluMixData(device, ioData->mBuffers[0].mData, | |
93 ioData->mBuffers[0].mDataByteSize / data->frameSize); | |
94 | |
95 return noErr; | |
96 } | |
97 | |
98 static OSStatus ca_capture_conversion_callback(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, | |
99 AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void* inUserData) | |
100 { | |
101 ALCdevice *device = (ALCdevice*)inUserData; | |
102 ca_data *data = (ca_data*)device->ExtraData; | |
103 | |
104 // Read from the ring buffer and store temporarily in a large buffer | |
105 ReadRingBuffer(data->ring, data->resampleBuffer, (ALsizei)(*ioNumberDataPackets)); | |
106 | |
107 // Set the input data | |
108 ioData->mNumberBuffers = 1; | |
109 ioData->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame; | |
110 ioData->mBuffers[0].mData = data->resampleBuffer; | |
111 ioData->mBuffers[0].mDataByteSize = (*ioNumberDataPackets) * data->format.mBytesPerFrame; | |
112 | |
113 return noErr; | |
114 } | |
115 | |
116 static OSStatus ca_capture_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, | |
117 const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, | |
118 UInt32 inNumberFrames, AudioBufferList *ioData) | |
119 { | |
120 ALCdevice *device = (ALCdevice*)inRefCon; | |
121 ca_data *data = (ca_data*)device->ExtraData; | |
122 AudioUnitRenderActionFlags flags = 0; | |
123 OSStatus err; | |
124 | |
125 // fill the bufferList with data from the input device | |
126 err = AudioUnitRender(data->audioUnit, &flags, inTimeStamp, 1, inNumberFrames, data->bufferList); | |
127 if(err != noErr) | |
128 { | |
129 ERR("AudioUnitRender error: %d\n", err); | |
130 return err; | |
131 } | |
132 | |
133 WriteRingBuffer(data->ring, data->bufferList->mBuffers[0].mData, inNumberFrames); | |
134 | |
135 return noErr; | |
136 } | |
137 | |
138 static ALCboolean ca_open_playback(ALCdevice *device, const ALCchar *deviceName) | |
139 { | |
140 ComponentDescription desc; | |
141 Component comp; | |
142 ca_data *data; | |
143 OSStatus err; | |
144 | |
145 if(!deviceName) | |
146 deviceName = ca_device; | |
147 else if(strcmp(deviceName, ca_device) != 0) | |
148 return ALC_FALSE; | |
149 | |
150 /* open the default output unit */ | |
151 desc.componentType = kAudioUnitType_Output; | |
152 desc.componentSubType = kAudioUnitSubType_DefaultOutput; | |
153 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
154 desc.componentFlags = 0; | |
155 desc.componentFlagsMask = 0; | |
156 | |
157 comp = FindNextComponent(NULL, &desc); | |
158 if(comp == NULL) | |
159 { | |
160 ERR("FindNextComponent failed\n"); | |
161 return ALC_FALSE; | |
162 } | |
163 | |
164 data = calloc(1, sizeof(*data)); | |
165 device->ExtraData = data; | |
166 | |
167 err = OpenAComponent(comp, &data->audioUnit); | |
168 if(err != noErr) | |
169 { | |
170 ERR("OpenAComponent failed\n"); | |
171 free(data); | |
172 device->ExtraData = NULL; | |
173 return ALC_FALSE; | |
174 } | |
175 | |
176 return ALC_TRUE; | |
177 } | |
178 | |
179 static void ca_close_playback(ALCdevice *device) | |
180 { | |
181 ca_data *data = (ca_data*)device->ExtraData; | |
182 | |
183 CloseComponent(data->audioUnit); | |
184 | |
185 free(data); | |
186 device->ExtraData = NULL; | |
187 } | |
188 | |
189 static ALCboolean ca_reset_playback(ALCdevice *device) | |
190 { | |
191 ca_data *data = (ca_data*)device->ExtraData; | |
192 AudioStreamBasicDescription streamFormat; | |
193 AURenderCallbackStruct input; | |
194 OSStatus err; | |
195 UInt32 size; | |
196 | |
197 /* init and start the default audio unit... */ | |
198 err = AudioUnitInitialize(data->audioUnit); | |
199 if(err != noErr) | |
200 { | |
201 ERR("AudioUnitInitialize failed\n"); | |
202 return ALC_FALSE; | |
203 } | |
204 | |
205 err = AudioOutputUnitStart(data->audioUnit); | |
206 if(err != noErr) | |
207 { | |
208 ERR("AudioOutputUnitStart failed\n"); | |
209 return ALC_FALSE; | |
210 } | |
211 | |
212 /* retrieve default output unit's properties (output side) */ | |
213 size = sizeof(AudioStreamBasicDescription); | |
214 err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size); | |
215 if(err != noErr || size != sizeof(AudioStreamBasicDescription)) | |
216 { | |
217 ERR("AudioUnitGetProperty failed\n"); | |
218 return ALC_FALSE; | |
219 } | |
220 | |
221 #if 0 | |
222 TRACE("Output streamFormat of default output unit -\n"); | |
223 TRACE(" streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket); | |
224 TRACE(" streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame); | |
225 TRACE(" streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel); | |
226 TRACE(" streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket); | |
227 TRACE(" streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame); | |
228 TRACE(" streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate); | |
229 #endif | |
230 | |
231 /* set default output unit's input side to match output side */ | |
232 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, size); | |
233 if(err != noErr) | |
234 { | |
235 ERR("AudioUnitSetProperty failed\n"); | |
236 return ALC_FALSE; | |
237 } | |
238 | |
239 if(device->Frequency != streamFormat.mSampleRate) | |
240 { | |
241 if((device->Flags&DEVICE_FREQUENCY_REQUEST)) | |
242 ERR("CoreAudio does not support changing sample rates (wanted %dhz, got %dhz)\n", device->Frequency, streamFormat.mSampleRate); | |
243 device->Flags &= ~DEVICE_FREQUENCY_REQUEST; | |
244 | |
245 device->UpdateSize = (ALuint)((ALuint64)device->UpdateSize * | |
246 streamFormat.mSampleRate / | |
247 device->Frequency); | |
248 device->Frequency = streamFormat.mSampleRate; | |
249 } | |
250 | |
251 /* FIXME: How to tell what channels are what in the output device, and how | |
252 * to specify what we're giving? eg, 6.0 vs 5.1 */ | |
253 switch(streamFormat.mChannelsPerFrame) | |
254 { | |
255 case 1: | |
256 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
257 device->FmtChans != DevFmtMono) | |
258 { | |
259 ERR("Failed to set %s, got Mono instead\n", DevFmtChannelsString(device->FmtChans)); | |
260 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
261 } | |
262 device->FmtChans = DevFmtMono; | |
263 break; | |
264 case 2: | |
265 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
266 device->FmtChans != DevFmtStereo) | |
267 { | |
268 ERR("Failed to set %s, got Stereo instead\n", DevFmtChannelsString(device->FmtChans)); | |
269 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
270 } | |
271 device->FmtChans = DevFmtStereo; | |
272 break; | |
273 case 4: | |
274 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
275 device->FmtChans != DevFmtQuad) | |
276 { | |
277 ERR("Failed to set %s, got Quad instead\n", DevFmtChannelsString(device->FmtChans)); | |
278 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
279 } | |
280 device->FmtChans = DevFmtQuad; | |
281 break; | |
282 case 6: | |
283 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
284 device->FmtChans != DevFmtX51) | |
285 { | |
286 ERR("Failed to set %s, got 5.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); | |
287 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
288 } | |
289 device->FmtChans = DevFmtX51; | |
290 break; | |
291 case 7: | |
292 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
293 device->FmtChans != DevFmtX61) | |
294 { | |
295 ERR("Failed to set %s, got 6.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); | |
296 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
297 } | |
298 device->FmtChans = DevFmtX61; | |
299 break; | |
300 case 8: | |
301 if((device->Flags&DEVICE_CHANNELS_REQUEST) && | |
302 device->FmtChans != DevFmtX71) | |
303 { | |
304 ERR("Failed to set %s, got 7.1 Surround instead\n", DevFmtChannelsString(device->FmtChans)); | |
305 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
306 } | |
307 device->FmtChans = DevFmtX71; | |
308 break; | |
309 default: | |
310 ERR("Unhandled channel count (%d), using Stereo\n", streamFormat.mChannelsPerFrame); | |
311 device->Flags &= ~DEVICE_CHANNELS_REQUEST; | |
312 device->FmtChans = DevFmtStereo; | |
313 streamFormat.mChannelsPerFrame = 2; | |
314 break; | |
315 } | |
316 SetDefaultWFXChannelOrder(device); | |
317 | |
318 /* use channel count and sample rate from the default output unit's current | |
319 * parameters, but reset everything else */ | |
320 streamFormat.mFramesPerPacket = 1; | |
321 switch(device->FmtType) | |
322 { | |
323 case DevFmtUByte: | |
324 device->FmtType = DevFmtByte; | |
325 /* fall-through */ | |
326 case DevFmtByte: | |
327 streamFormat.mBitsPerChannel = 8; | |
328 streamFormat.mBytesPerPacket = streamFormat.mChannelsPerFrame; | |
329 streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame; | |
330 break; | |
331 case DevFmtUShort: | |
332 case DevFmtFloat: | |
333 device->FmtType = DevFmtShort; | |
334 /* fall-through */ | |
335 case DevFmtShort: | |
336 streamFormat.mBitsPerChannel = 16; | |
337 streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame; | |
338 streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame; | |
339 break; | |
340 } | |
341 streamFormat.mFormatID = kAudioFormatLinearPCM; | |
342 streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | | |
343 kAudioFormatFlagsNativeEndian | | |
344 kLinearPCMFormatFlagIsPacked; | |
345 | |
346 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); | |
347 if(err != noErr) | |
348 { | |
349 ERR("AudioUnitSetProperty failed\n"); | |
350 return ALC_FALSE; | |
351 } | |
352 | |
353 /* setup callback */ | |
354 data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType); | |
355 input.inputProc = ca_callback; | |
356 input.inputProcRefCon = device; | |
357 | |
358 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct)); | |
359 if(err != noErr) | |
360 { | |
361 ERR("AudioUnitSetProperty failed\n"); | |
362 return ALC_FALSE; | |
363 } | |
364 | |
365 return ALC_TRUE; | |
366 } | |
367 | |
368 static void ca_stop_playback(ALCdevice *device) | |
369 { | |
370 ca_data *data = (ca_data*)device->ExtraData; | |
371 OSStatus err; | |
372 | |
373 AudioOutputUnitStop(data->audioUnit); | |
374 err = AudioUnitUninitialize(data->audioUnit); | |
375 if(err != noErr) | |
376 ERR("-- AudioUnitUninitialize failed.\n"); | |
377 } | |
378 | |
379 static ALCboolean ca_open_capture(ALCdevice *device, const ALCchar *deviceName) | |
380 { | |
381 AudioStreamBasicDescription requestedFormat; // The application requested format | |
382 AudioStreamBasicDescription hardwareFormat; // The hardware format | |
383 AudioStreamBasicDescription outputFormat; // The AudioUnit output format | |
384 AURenderCallbackStruct input; | |
385 ComponentDescription desc; | |
386 AudioDeviceID inputDevice; | |
387 UInt32 outputFrameCount; | |
388 UInt32 propertySize; | |
389 UInt32 enableIO; | |
390 Component comp; | |
391 ca_data *data; | |
392 OSStatus err; | |
393 | |
394 desc.componentType = kAudioUnitType_Output; | |
395 desc.componentSubType = kAudioUnitSubType_HALOutput; | |
396 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
397 desc.componentFlags = 0; | |
398 desc.componentFlagsMask = 0; | |
399 | |
400 // Search for component with given description | |
401 comp = FindNextComponent(NULL, &desc); | |
402 if(comp == NULL) | |
403 { | |
404 ERR("FindNextComponent failed\n"); | |
405 return ALC_FALSE; | |
406 } | |
407 | |
408 data = calloc(1, sizeof(*data)); | |
409 device->ExtraData = data; | |
410 | |
411 // Open the component | |
412 err = OpenAComponent(comp, &data->audioUnit); | |
413 if(err != noErr) | |
414 { | |
415 ERR("OpenAComponent failed\n"); | |
416 goto error; | |
417 } | |
418 | |
419 // Turn off AudioUnit output | |
420 enableIO = 0; | |
421 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint)); | |
422 if(err != noErr) | |
423 { | |
424 ERR("AudioUnitSetProperty failed\n"); | |
425 goto error; | |
426 } | |
427 | |
428 // Turn on AudioUnit input | |
429 enableIO = 1; | |
430 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint)); | |
431 if(err != noErr) | |
432 { | |
433 ERR("AudioUnitSetProperty failed\n"); | |
434 goto error; | |
435 } | |
436 | |
437 // Get the default input device | |
438 propertySize = sizeof(AudioDeviceID); | |
439 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice); | |
440 if(err != noErr) | |
441 { | |
442 ERR("AudioHardwareGetProperty failed\n"); | |
443 goto error; | |
444 } | |
445 | |
446 if(inputDevice == kAudioDeviceUnknown) | |
447 { | |
448 ERR("No input device found\n"); | |
449 goto error; | |
450 } | |
451 | |
452 // Track the input device | |
453 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID)); | |
454 if(err != noErr) | |
455 { | |
456 ERR("AudioUnitSetProperty failed\n"); | |
457 goto error; | |
458 } | |
459 | |
460 // set capture callback | |
461 input.inputProc = ca_capture_callback; | |
462 input.inputProcRefCon = device; | |
463 | |
464 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct)); | |
465 if(err != noErr) | |
466 { | |
467 ERR("AudioUnitSetProperty failed\n"); | |
468 goto error; | |
469 } | |
470 | |
471 // Initialize the device | |
472 err = AudioUnitInitialize(data->audioUnit); | |
473 if(err != noErr) | |
474 { | |
475 ERR("AudioUnitInitialize failed\n"); | |
476 goto error; | |
477 } | |
478 | |
479 // Get the hardware format | |
480 propertySize = sizeof(AudioStreamBasicDescription); | |
481 err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize); | |
482 if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription)) | |
483 { | |
484 ERR("AudioUnitGetProperty failed\n"); | |
485 goto error; | |
486 } | |
487 | |
488 // Set up the requested format description | |
489 switch(device->FmtType) | |
490 { | |
491 case DevFmtUByte: | |
492 requestedFormat.mBitsPerChannel = 8; | |
493 requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked; | |
494 break; | |
495 case DevFmtShort: | |
496 requestedFormat.mBitsPerChannel = 16; | |
497 requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked; | |
498 break; | |
499 case DevFmtFloat: | |
500 requestedFormat.mBitsPerChannel = 32; | |
501 requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked; | |
502 break; | |
503 case DevFmtByte: | |
504 case DevFmtUShort: | |
505 ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType)); | |
506 goto error; | |
507 } | |
508 | |
509 switch(device->FmtChans) | |
510 { | |
511 case DevFmtMono: | |
512 requestedFormat.mChannelsPerFrame = 1; | |
513 break; | |
514 case DevFmtStereo: | |
515 requestedFormat.mChannelsPerFrame = 2; | |
516 break; | |
517 | |
518 case DevFmtQuad: | |
519 case DevFmtX51: | |
520 case DevFmtX51Side: | |
521 case DevFmtX61: | |
522 case DevFmtX71: | |
523 ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans)); | |
524 goto error; | |
525 } | |
526 | |
527 requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8; | |
528 requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame; | |
529 requestedFormat.mSampleRate = device->Frequency; | |
530 requestedFormat.mFormatID = kAudioFormatLinearPCM; | |
531 requestedFormat.mReserved = 0; | |
532 requestedFormat.mFramesPerPacket = 1; | |
533 | |
534 // save requested format description for later use | |
535 data->format = requestedFormat; | |
536 data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType); | |
537 | |
538 // Use intermediate format for sample rate conversion (outputFormat) | |
539 // Set sample rate to the same as hardware for resampling later | |
540 outputFormat = requestedFormat; | |
541 outputFormat.mSampleRate = hardwareFormat.mSampleRate; | |
542 | |
543 // Determine sample rate ratio for resampling | |
544 data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency; | |
545 | |
546 // The output format should be the requested format, but using the hardware sample rate | |
547 // This is because the AudioUnit will automatically scale other properties, except for sample rate | |
548 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat)); | |
549 if(err != noErr) | |
550 { | |
551 ERR("AudioUnitSetProperty failed\n"); | |
552 goto error; | |
553 } | |
554 | |
555 // Set the AudioUnit output format frame count | |
556 outputFrameCount = device->UpdateSize * data->sampleRateRatio; | |
557 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount)); | |
558 if(err != noErr) | |
559 { | |
560 ERR("AudioUnitSetProperty failed: %d\n", err); | |
561 goto error; | |
562 } | |
563 | |
564 // Set up sample converter | |
565 err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter); | |
566 if(err != noErr) | |
567 { | |
568 ERR("AudioConverterNew failed: %d\n", err); | |
569 goto error; | |
570 } | |
571 | |
572 // Create a buffer for use in the resample callback | |
573 data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio); | |
574 | |
575 // Allocate buffer for the AudioUnit output | |
576 data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio); | |
577 if(data->bufferList == NULL) | |
578 { | |
579 alcSetError(device, ALC_OUT_OF_MEMORY); | |
580 goto error; | |
581 } | |
582 | |
583 data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates); | |
584 if(data->ring == NULL) | |
585 { | |
586 alcSetError(device, ALC_OUT_OF_MEMORY); | |
587 goto error; | |
588 } | |
589 | |
590 return ALC_TRUE; | |
591 | |
592 error: | |
593 DestroyRingBuffer(data->ring); | |
594 free(data->resampleBuffer); | |
595 destroy_buffer_list(data->bufferList); | |
596 | |
597 if(data->audioConverter) | |
598 AudioConverterDispose(data->audioConverter); | |
599 if(data->audioUnit) | |
600 CloseComponent(data->audioUnit); | |
601 | |
602 free(data); | |
603 device->ExtraData = NULL; | |
604 | |
605 return ALC_FALSE; | |
606 } | |
607 | |
608 static void ca_close_capture(ALCdevice *device) | |
609 { | |
610 ca_data *data = (ca_data*)device->ExtraData; | |
611 | |
612 DestroyRingBuffer(data->ring); | |
613 free(data->resampleBuffer); | |
614 destroy_buffer_list(data->bufferList); | |
615 | |
616 AudioConverterDispose(data->audioConverter); | |
617 CloseComponent(data->audioUnit); | |
618 | |
619 free(data); | |
620 device->ExtraData = NULL; | |
621 } | |
622 | |
623 static void ca_start_capture(ALCdevice *device) | |
624 { | |
625 ca_data *data = (ca_data*)device->ExtraData; | |
626 OSStatus err = AudioOutputUnitStart(data->audioUnit); | |
627 if(err != noErr) | |
628 ERR("AudioOutputUnitStart failed\n"); | |
629 } | |
630 | |
631 static void ca_stop_capture(ALCdevice *device) | |
632 { | |
633 ca_data *data = (ca_data*)device->ExtraData; | |
634 OSStatus err = AudioOutputUnitStop(data->audioUnit); | |
635 if(err != noErr) | |
636 ERR("AudioOutputUnitStop failed\n"); | |
637 } | |
638 | |
639 static ALCuint ca_available_samples(ALCdevice *device) | |
640 { | |
641 ca_data *data = device->ExtraData; | |
642 return RingBufferSize(data->ring) / data->sampleRateRatio; | |
643 } | |
644 | |
645 static void ca_capture_samples(ALCdevice *device, ALCvoid *buffer, ALCuint samples) | |
646 { | |
647 ca_data *data = (ca_data*)device->ExtraData; | |
648 | |
649 if(samples <= ca_available_samples(device)) | |
650 { | |
651 AudioBufferList *list; | |
652 UInt32 frameCount; | |
653 OSStatus err; | |
654 | |
655 // If no samples are requested, just return | |
656 if(samples == 0) | |
657 return; | |
658 | |
659 // Allocate a temporary AudioBufferList to use as the return resamples data | |
660 list = alloca(sizeof(AudioBufferList) + sizeof(AudioBuffer)); | |
661 | |
662 // Point the resampling buffer to the capture buffer | |
663 list->mNumberBuffers = 1; | |
664 list->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame; | |
665 list->mBuffers[0].mDataByteSize = samples * data->frameSize; | |
666 list->mBuffers[0].mData = buffer; | |
667 | |
668 // Resample into another AudioBufferList | |
669 frameCount = samples; | |
670 err = AudioConverterFillComplexBuffer(data->audioConverter, ca_capture_conversion_callback, device, | |
671 &frameCount, list, NULL); | |
672 if(err != noErr) | |
673 { | |
674 ERR("AudioConverterFillComplexBuffer error: %d\n", err); | |
675 alcSetError(device, ALC_INVALID_VALUE); | |
676 } | |
677 } | |
678 else | |
679 alcSetError(device, ALC_INVALID_VALUE); | |
680 } | |
681 | |
/* Backend entry-point table handed to the ALC core by alc_ca_init(). Member
 * order must match the BackendFuncs declaration (playback open/close/reset/
 * stop, then capture open/close/start/stop/samples/available). */
static const BackendFuncs ca_funcs = {
    ca_open_playback,
    ca_close_playback,
    ca_reset_playback,
    ca_stop_playback,
    ca_open_capture,
    ca_close_capture,
    ca_start_capture,
    ca_stop_capture,
    ca_capture_samples,
    ca_available_samples
};
694 | |
695 ALCboolean alc_ca_init(BackendFuncs *func_list) | |
696 { | |
697 *func_list = ca_funcs; | |
698 return ALC_TRUE; | |
699 } | |
700 | |
/* Backend teardown hook; this backend holds no global state, so nothing to
 * release. */
void alc_ca_deinit(void)
{
}
704 | |
705 void alc_ca_probe(enum DevProbe type) | |
706 { | |
707 switch(type) | |
708 { | |
709 case DEVICE_PROBE: | |
710 AppendDeviceList(ca_device); | |
711 break; | |
712 case ALL_DEVICE_PROBE: | |
713 AppendAllDeviceList(ca_device); | |
714 break; | |
715 case CAPTURE_DEVICE_PROBE: | |
716 AppendCaptureDeviceList(ca_device); | |
717 break; | |
718 } | |
719 } |