rlm@0
|
1 /**
|
rlm@0
|
2 * OpenAL cross platform audio library
|
rlm@0
|
3 * Copyright (C) 1999-2007 by authors.
|
rlm@0
|
4 * This library is free software; you can redistribute it and/or
|
rlm@0
|
5 * modify it under the terms of the GNU Library General Public
|
rlm@0
|
6 * License as published by the Free Software Foundation; either
|
rlm@0
|
7 * version 2 of the License, or (at your option) any later version.
|
rlm@0
|
8 *
|
rlm@0
|
9 * This library is distributed in the hope that it will be useful,
|
rlm@0
|
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
rlm@0
|
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
rlm@0
|
12 * Library General Public License for more details.
|
rlm@0
|
13 *
|
rlm@0
|
14 * You should have received a copy of the GNU Library General Public
|
rlm@0
|
15 * License along with this library; if not, write to the
|
rlm@0
|
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
rlm@0
|
17 * Boston, MA 02111-1307, USA.
|
rlm@0
|
18 * Or go to http://www.gnu.org/copyleft/lgpl.html
|
rlm@0
|
19 */
|
rlm@0
|
20
|
rlm@0
|
21 #include "config.h"
|
rlm@0
|
22
|
rlm@0
|
23 #include <stdio.h>
|
rlm@0
|
24 #include <stdlib.h>
|
rlm@0
|
25 #include <string.h>
|
rlm@0
|
26
|
rlm@0
|
27 #include "alMain.h"
|
rlm@0
|
28 #include "AL/al.h"
|
rlm@0
|
29 #include "AL/alc.h"
|
rlm@0
|
30
|
rlm@0
|
31 #include <CoreServices/CoreServices.h>
|
rlm@0
|
32 #include <unistd.h>
|
rlm@0
|
33 #include <AudioUnit/AudioUnit.h>
|
rlm@0
|
34 #include <AudioToolbox/AudioToolbox.h>
|
rlm@0
|
35
|
rlm@0
|
36
|
rlm@0
|
/* Per-device backend state for the CoreAudio backend. Playback uses only
 * audioUnit and frameSize; the remaining members support capture, where the
 * hardware rate may differ from the requested rate. */
typedef struct {
    AudioUnit audioUnit;

    ALuint frameSize;                   // Bytes per frame of the OpenAL device format
    ALdouble sampleRateRatio;           // Ratio of hardware sample rate / requested sample rate
    AudioStreamBasicDescription format; // This is the OpenAL format as a CoreAudio ASBD

    AudioConverterRef audioConverter;   // Sample rate converter if needed
    AudioBufferList *bufferList;        // Buffer for data coming from the input device
    ALCvoid *resampleBuffer;            // Buffer for returned RingBuffer data when resampling

    RingBuffer *ring;                   // Holds captured frames at the hardware rate
} ca_data;
|
rlm@0
|
50
|
rlm@0
|
/* The single device name this backend exposes for both playback and capture */
static const ALCchar ca_device[] = "CoreAudio Default";
|
rlm@0
|
52
|
rlm@0
|
53
|
rlm@0
|
54 static void destroy_buffer_list(AudioBufferList* list)
|
rlm@0
|
55 {
|
rlm@0
|
56 if(list)
|
rlm@0
|
57 {
|
rlm@0
|
58 UInt32 i;
|
rlm@0
|
59 for(i = 0;i < list->mNumberBuffers;i++)
|
rlm@0
|
60 free(list->mBuffers[i].mData);
|
rlm@0
|
61 free(list);
|
rlm@0
|
62 }
|
rlm@0
|
63 }
|
rlm@0
|
64
|
rlm@0
|
65 static AudioBufferList* allocate_buffer_list(UInt32 channelCount, UInt32 byteSize)
|
rlm@0
|
66 {
|
rlm@0
|
67 AudioBufferList *list;
|
rlm@0
|
68
|
rlm@0
|
69 list = calloc(1, sizeof(AudioBufferList) + sizeof(AudioBuffer));
|
rlm@0
|
70 if(list)
|
rlm@0
|
71 {
|
rlm@0
|
72 list->mNumberBuffers = 1;
|
rlm@0
|
73
|
rlm@0
|
74 list->mBuffers[0].mNumberChannels = channelCount;
|
rlm@0
|
75 list->mBuffers[0].mDataByteSize = byteSize;
|
rlm@0
|
76 list->mBuffers[0].mData = malloc(byteSize);
|
rlm@0
|
77 if(list->mBuffers[0].mData == NULL)
|
rlm@0
|
78 {
|
rlm@0
|
79 free(list);
|
rlm@0
|
80 list = NULL;
|
rlm@0
|
81 }
|
rlm@0
|
82 }
|
rlm@0
|
83 return list;
|
rlm@0
|
84 }
|
rlm@0
|
85
|
rlm@0
|
/* Playback render callback, invoked by the output AudioUnit whenever it
 * needs more audio. Mixes OpenAL output straight into the unit's buffer;
 * the frame count is derived from the byte size CoreAudio requests. */
static OSStatus ca_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp,
                            UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
    ALCdevice *device = (ALCdevice*)inRefCon;
    ca_data *data = (ca_data*)device->ExtraData;

    aluMixData(device, ioData->mBuffers[0].mData,
               ioData->mBuffers[0].mDataByteSize / data->frameSize);

    return noErr;
}
|
rlm@0
|
97
|
rlm@0
|
/* AudioConverter input callback used while reading captured samples: pulls
 * hardware-rate frames out of the ring buffer into the scratch resample
 * buffer, then points ioData at that buffer for the converter to consume.
 * NOTE(review): packets are treated as frames here — consistent with the
 * format set up in ca_open_capture, which uses mFramesPerPacket = 1. */
static OSStatus ca_capture_conversion_callback(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets,
        AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void* inUserData)
{
    ALCdevice *device = (ALCdevice*)inUserData;
    ca_data *data = (ca_data*)device->ExtraData;

    // Read from the ring buffer and store temporarily in a large buffer
    ReadRingBuffer(data->ring, data->resampleBuffer, (ALsizei)(*ioNumberDataPackets));

    // Set the input data
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame;
    ioData->mBuffers[0].mData = data->resampleBuffer;
    ioData->mBuffers[0].mDataByteSize = (*ioNumberDataPackets) * data->format.mBytesPerFrame;

    return noErr;
}
|
rlm@0
|
115
|
rlm@0
|
/* Input AudioUnit callback: renders the newly captured audio from bus 1
 * into data->bufferList, then appends the frames (still at the hardware
 * sample rate) to the ring buffer for later conversion. */
static OSStatus ca_capture_callback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                                    UInt32 inNumberFrames, AudioBufferList *ioData)
{
    ALCdevice *device = (ALCdevice*)inRefCon;
    ca_data *data = (ca_data*)device->ExtraData;
    AudioUnitRenderActionFlags flags = 0;
    OSStatus err;

    // fill the bufferList with data from the input device
    err = AudioUnitRender(data->audioUnit, &flags, inTimeStamp, 1, inNumberFrames, data->bufferList);
    if(err != noErr)
    {
        ERR("AudioUnitRender error: %d\n", err);
        return err;
    }

    WriteRingBuffer(data->ring, data->bufferList->mBuffers[0].mData, inNumberFrames);

    return noErr;
}
|
rlm@0
|
137
|
rlm@0
|
138 static ALCboolean ca_open_playback(ALCdevice *device, const ALCchar *deviceName)
|
rlm@0
|
139 {
|
rlm@0
|
140 ComponentDescription desc;
|
rlm@0
|
141 Component comp;
|
rlm@0
|
142 ca_data *data;
|
rlm@0
|
143 OSStatus err;
|
rlm@0
|
144
|
rlm@0
|
145 if(!deviceName)
|
rlm@0
|
146 deviceName = ca_device;
|
rlm@0
|
147 else if(strcmp(deviceName, ca_device) != 0)
|
rlm@0
|
148 return ALC_FALSE;
|
rlm@0
|
149
|
rlm@0
|
150 /* open the default output unit */
|
rlm@0
|
151 desc.componentType = kAudioUnitType_Output;
|
rlm@0
|
152 desc.componentSubType = kAudioUnitSubType_DefaultOutput;
|
rlm@0
|
153 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
|
rlm@0
|
154 desc.componentFlags = 0;
|
rlm@0
|
155 desc.componentFlagsMask = 0;
|
rlm@0
|
156
|
rlm@0
|
157 comp = FindNextComponent(NULL, &desc);
|
rlm@0
|
158 if(comp == NULL)
|
rlm@0
|
159 {
|
rlm@0
|
160 ERR("FindNextComponent failed\n");
|
rlm@0
|
161 return ALC_FALSE;
|
rlm@0
|
162 }
|
rlm@0
|
163
|
rlm@0
|
164 data = calloc(1, sizeof(*data));
|
rlm@0
|
165 device->ExtraData = data;
|
rlm@0
|
166
|
rlm@0
|
167 err = OpenAComponent(comp, &data->audioUnit);
|
rlm@0
|
168 if(err != noErr)
|
rlm@0
|
169 {
|
rlm@0
|
170 ERR("OpenAComponent failed\n");
|
rlm@0
|
171 free(data);
|
rlm@0
|
172 device->ExtraData = NULL;
|
rlm@0
|
173 return ALC_FALSE;
|
rlm@0
|
174 }
|
rlm@0
|
175
|
rlm@0
|
176 return ALC_TRUE;
|
rlm@0
|
177 }
|
rlm@0
|
178
|
rlm@0
|
179 static void ca_close_playback(ALCdevice *device)
|
rlm@0
|
180 {
|
rlm@0
|
181 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
182
|
rlm@0
|
183 CloseComponent(data->audioUnit);
|
rlm@0
|
184
|
rlm@0
|
185 free(data);
|
rlm@0
|
186 device->ExtraData = NULL;
|
rlm@0
|
187 }
|
rlm@0
|
188
|
rlm@0
|
189 static ALCboolean ca_reset_playback(ALCdevice *device)
|
rlm@0
|
190 {
|
rlm@0
|
191 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
192 AudioStreamBasicDescription streamFormat;
|
rlm@0
|
193 AURenderCallbackStruct input;
|
rlm@0
|
194 OSStatus err;
|
rlm@0
|
195 UInt32 size;
|
rlm@0
|
196
|
rlm@0
|
197 /* init and start the default audio unit... */
|
rlm@0
|
198 err = AudioUnitInitialize(data->audioUnit);
|
rlm@0
|
199 if(err != noErr)
|
rlm@0
|
200 {
|
rlm@0
|
201 ERR("AudioUnitInitialize failed\n");
|
rlm@0
|
202 return ALC_FALSE;
|
rlm@0
|
203 }
|
rlm@0
|
204
|
rlm@0
|
205 err = AudioOutputUnitStart(data->audioUnit);
|
rlm@0
|
206 if(err != noErr)
|
rlm@0
|
207 {
|
rlm@0
|
208 ERR("AudioOutputUnitStart failed\n");
|
rlm@0
|
209 return ALC_FALSE;
|
rlm@0
|
210 }
|
rlm@0
|
211
|
rlm@0
|
212 /* retrieve default output unit's properties (output side) */
|
rlm@0
|
213 size = sizeof(AudioStreamBasicDescription);
|
rlm@0
|
214 err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size);
|
rlm@0
|
215 if(err != noErr || size != sizeof(AudioStreamBasicDescription))
|
rlm@0
|
216 {
|
rlm@0
|
217 ERR("AudioUnitGetProperty failed\n");
|
rlm@0
|
218 return ALC_FALSE;
|
rlm@0
|
219 }
|
rlm@0
|
220
|
rlm@0
|
221 #if 0
|
rlm@0
|
222 TRACE("Output streamFormat of default output unit -\n");
|
rlm@0
|
223 TRACE(" streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
|
rlm@0
|
224 TRACE(" streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
|
rlm@0
|
225 TRACE(" streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
|
rlm@0
|
226 TRACE(" streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
|
rlm@0
|
227 TRACE(" streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
|
rlm@0
|
228 TRACE(" streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
|
rlm@0
|
229 #endif
|
rlm@0
|
230
|
rlm@0
|
231 /* set default output unit's input side to match output side */
|
rlm@0
|
232 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, size);
|
rlm@0
|
233 if(err != noErr)
|
rlm@0
|
234 {
|
rlm@0
|
235 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
236 return ALC_FALSE;
|
rlm@0
|
237 }
|
rlm@0
|
238
|
rlm@0
|
239 if(device->Frequency != streamFormat.mSampleRate)
|
rlm@0
|
240 {
|
rlm@0
|
241 if((device->Flags&DEVICE_FREQUENCY_REQUEST))
|
rlm@0
|
242 ERR("CoreAudio does not support changing sample rates (wanted %dhz, got %dhz)\n", device->Frequency, streamFormat.mSampleRate);
|
rlm@0
|
243 device->Flags &= ~DEVICE_FREQUENCY_REQUEST;
|
rlm@0
|
244
|
rlm@0
|
245 device->UpdateSize = (ALuint)((ALuint64)device->UpdateSize *
|
rlm@0
|
246 streamFormat.mSampleRate /
|
rlm@0
|
247 device->Frequency);
|
rlm@0
|
248 device->Frequency = streamFormat.mSampleRate;
|
rlm@0
|
249 }
|
rlm@0
|
250
|
rlm@0
|
251 /* FIXME: How to tell what channels are what in the output device, and how
|
rlm@0
|
252 * to specify what we're giving? eg, 6.0 vs 5.1 */
|
rlm@0
|
253 switch(streamFormat.mChannelsPerFrame)
|
rlm@0
|
254 {
|
rlm@0
|
255 case 1:
|
rlm@0
|
256 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
257 device->FmtChans != DevFmtMono)
|
rlm@0
|
258 {
|
rlm@0
|
259 ERR("Failed to set %s, got Mono instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
260 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
261 }
|
rlm@0
|
262 device->FmtChans = DevFmtMono;
|
rlm@0
|
263 break;
|
rlm@0
|
264 case 2:
|
rlm@0
|
265 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
266 device->FmtChans != DevFmtStereo)
|
rlm@0
|
267 {
|
rlm@0
|
268 ERR("Failed to set %s, got Stereo instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
269 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
270 }
|
rlm@0
|
271 device->FmtChans = DevFmtStereo;
|
rlm@0
|
272 break;
|
rlm@0
|
273 case 4:
|
rlm@0
|
274 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
275 device->FmtChans != DevFmtQuad)
|
rlm@0
|
276 {
|
rlm@0
|
277 ERR("Failed to set %s, got Quad instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
278 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
279 }
|
rlm@0
|
280 device->FmtChans = DevFmtQuad;
|
rlm@0
|
281 break;
|
rlm@0
|
282 case 6:
|
rlm@0
|
283 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
284 device->FmtChans != DevFmtX51)
|
rlm@0
|
285 {
|
rlm@0
|
286 ERR("Failed to set %s, got 5.1 Surround instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
287 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
288 }
|
rlm@0
|
289 device->FmtChans = DevFmtX51;
|
rlm@0
|
290 break;
|
rlm@0
|
291 case 7:
|
rlm@0
|
292 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
293 device->FmtChans != DevFmtX61)
|
rlm@0
|
294 {
|
rlm@0
|
295 ERR("Failed to set %s, got 6.1 Surround instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
296 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
297 }
|
rlm@0
|
298 device->FmtChans = DevFmtX61;
|
rlm@0
|
299 break;
|
rlm@0
|
300 case 8:
|
rlm@0
|
301 if((device->Flags&DEVICE_CHANNELS_REQUEST) &&
|
rlm@0
|
302 device->FmtChans != DevFmtX71)
|
rlm@0
|
303 {
|
rlm@0
|
304 ERR("Failed to set %s, got 7.1 Surround instead\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
305 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
306 }
|
rlm@0
|
307 device->FmtChans = DevFmtX71;
|
rlm@0
|
308 break;
|
rlm@0
|
309 default:
|
rlm@0
|
310 ERR("Unhandled channel count (%d), using Stereo\n", streamFormat.mChannelsPerFrame);
|
rlm@0
|
311 device->Flags &= ~DEVICE_CHANNELS_REQUEST;
|
rlm@0
|
312 device->FmtChans = DevFmtStereo;
|
rlm@0
|
313 streamFormat.mChannelsPerFrame = 2;
|
rlm@0
|
314 break;
|
rlm@0
|
315 }
|
rlm@0
|
316 SetDefaultWFXChannelOrder(device);
|
rlm@0
|
317
|
rlm@0
|
318 /* use channel count and sample rate from the default output unit's current
|
rlm@0
|
319 * parameters, but reset everything else */
|
rlm@0
|
320 streamFormat.mFramesPerPacket = 1;
|
rlm@0
|
321 switch(device->FmtType)
|
rlm@0
|
322 {
|
rlm@0
|
323 case DevFmtUByte:
|
rlm@0
|
324 device->FmtType = DevFmtByte;
|
rlm@0
|
325 /* fall-through */
|
rlm@0
|
326 case DevFmtByte:
|
rlm@0
|
327 streamFormat.mBitsPerChannel = 8;
|
rlm@0
|
328 streamFormat.mBytesPerPacket = streamFormat.mChannelsPerFrame;
|
rlm@0
|
329 streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame;
|
rlm@0
|
330 break;
|
rlm@0
|
331 case DevFmtUShort:
|
rlm@0
|
332 case DevFmtFloat:
|
rlm@0
|
333 device->FmtType = DevFmtShort;
|
rlm@0
|
334 /* fall-through */
|
rlm@0
|
335 case DevFmtShort:
|
rlm@0
|
336 streamFormat.mBitsPerChannel = 16;
|
rlm@0
|
337 streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame;
|
rlm@0
|
338 streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame;
|
rlm@0
|
339 break;
|
rlm@0
|
340 }
|
rlm@0
|
341 streamFormat.mFormatID = kAudioFormatLinearPCM;
|
rlm@0
|
342 streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
|
rlm@0
|
343 kAudioFormatFlagsNativeEndian |
|
rlm@0
|
344 kLinearPCMFormatFlagIsPacked;
|
rlm@0
|
345
|
rlm@0
|
346 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription));
|
rlm@0
|
347 if(err != noErr)
|
rlm@0
|
348 {
|
rlm@0
|
349 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
350 return ALC_FALSE;
|
rlm@0
|
351 }
|
rlm@0
|
352
|
rlm@0
|
353 /* setup callback */
|
rlm@0
|
354 data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
|
rlm@0
|
355 input.inputProc = ca_callback;
|
rlm@0
|
356 input.inputProcRefCon = device;
|
rlm@0
|
357
|
rlm@0
|
358 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct));
|
rlm@0
|
359 if(err != noErr)
|
rlm@0
|
360 {
|
rlm@0
|
361 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
362 return ALC_FALSE;
|
rlm@0
|
363 }
|
rlm@0
|
364
|
rlm@0
|
365 return ALC_TRUE;
|
rlm@0
|
366 }
|
rlm@0
|
367
|
rlm@0
|
368 static void ca_stop_playback(ALCdevice *device)
|
rlm@0
|
369 {
|
rlm@0
|
370 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
371 OSStatus err;
|
rlm@0
|
372
|
rlm@0
|
373 AudioOutputUnitStop(data->audioUnit);
|
rlm@0
|
374 err = AudioUnitUninitialize(data->audioUnit);
|
rlm@0
|
375 if(err != noErr)
|
rlm@0
|
376 ERR("-- AudioUnitUninitialize failed.\n");
|
rlm@0
|
377 }
|
rlm@0
|
378
|
rlm@0
|
379 static ALCboolean ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
|
rlm@0
|
380 {
|
rlm@0
|
381 AudioStreamBasicDescription requestedFormat; // The application requested format
|
rlm@0
|
382 AudioStreamBasicDescription hardwareFormat; // The hardware format
|
rlm@0
|
383 AudioStreamBasicDescription outputFormat; // The AudioUnit output format
|
rlm@0
|
384 AURenderCallbackStruct input;
|
rlm@0
|
385 ComponentDescription desc;
|
rlm@0
|
386 AudioDeviceID inputDevice;
|
rlm@0
|
387 UInt32 outputFrameCount;
|
rlm@0
|
388 UInt32 propertySize;
|
rlm@0
|
389 UInt32 enableIO;
|
rlm@0
|
390 Component comp;
|
rlm@0
|
391 ca_data *data;
|
rlm@0
|
392 OSStatus err;
|
rlm@0
|
393
|
rlm@0
|
394 desc.componentType = kAudioUnitType_Output;
|
rlm@0
|
395 desc.componentSubType = kAudioUnitSubType_HALOutput;
|
rlm@0
|
396 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
|
rlm@0
|
397 desc.componentFlags = 0;
|
rlm@0
|
398 desc.componentFlagsMask = 0;
|
rlm@0
|
399
|
rlm@0
|
400 // Search for component with given description
|
rlm@0
|
401 comp = FindNextComponent(NULL, &desc);
|
rlm@0
|
402 if(comp == NULL)
|
rlm@0
|
403 {
|
rlm@0
|
404 ERR("FindNextComponent failed\n");
|
rlm@0
|
405 return ALC_FALSE;
|
rlm@0
|
406 }
|
rlm@0
|
407
|
rlm@0
|
408 data = calloc(1, sizeof(*data));
|
rlm@0
|
409 device->ExtraData = data;
|
rlm@0
|
410
|
rlm@0
|
411 // Open the component
|
rlm@0
|
412 err = OpenAComponent(comp, &data->audioUnit);
|
rlm@0
|
413 if(err != noErr)
|
rlm@0
|
414 {
|
rlm@0
|
415 ERR("OpenAComponent failed\n");
|
rlm@0
|
416 goto error;
|
rlm@0
|
417 }
|
rlm@0
|
418
|
rlm@0
|
419 // Turn off AudioUnit output
|
rlm@0
|
420 enableIO = 0;
|
rlm@0
|
421 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
|
rlm@0
|
422 if(err != noErr)
|
rlm@0
|
423 {
|
rlm@0
|
424 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
425 goto error;
|
rlm@0
|
426 }
|
rlm@0
|
427
|
rlm@0
|
428 // Turn on AudioUnit input
|
rlm@0
|
429 enableIO = 1;
|
rlm@0
|
430 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
|
rlm@0
|
431 if(err != noErr)
|
rlm@0
|
432 {
|
rlm@0
|
433 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
434 goto error;
|
rlm@0
|
435 }
|
rlm@0
|
436
|
rlm@0
|
437 // Get the default input device
|
rlm@0
|
438 propertySize = sizeof(AudioDeviceID);
|
rlm@0
|
439 err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
|
rlm@0
|
440 if(err != noErr)
|
rlm@0
|
441 {
|
rlm@0
|
442 ERR("AudioHardwareGetProperty failed\n");
|
rlm@0
|
443 goto error;
|
rlm@0
|
444 }
|
rlm@0
|
445
|
rlm@0
|
446 if(inputDevice == kAudioDeviceUnknown)
|
rlm@0
|
447 {
|
rlm@0
|
448 ERR("No input device found\n");
|
rlm@0
|
449 goto error;
|
rlm@0
|
450 }
|
rlm@0
|
451
|
rlm@0
|
452 // Track the input device
|
rlm@0
|
453 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
|
rlm@0
|
454 if(err != noErr)
|
rlm@0
|
455 {
|
rlm@0
|
456 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
457 goto error;
|
rlm@0
|
458 }
|
rlm@0
|
459
|
rlm@0
|
460 // set capture callback
|
rlm@0
|
461 input.inputProc = ca_capture_callback;
|
rlm@0
|
462 input.inputProcRefCon = device;
|
rlm@0
|
463
|
rlm@0
|
464 err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
|
rlm@0
|
465 if(err != noErr)
|
rlm@0
|
466 {
|
rlm@0
|
467 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
468 goto error;
|
rlm@0
|
469 }
|
rlm@0
|
470
|
rlm@0
|
471 // Initialize the device
|
rlm@0
|
472 err = AudioUnitInitialize(data->audioUnit);
|
rlm@0
|
473 if(err != noErr)
|
rlm@0
|
474 {
|
rlm@0
|
475 ERR("AudioUnitInitialize failed\n");
|
rlm@0
|
476 goto error;
|
rlm@0
|
477 }
|
rlm@0
|
478
|
rlm@0
|
479 // Get the hardware format
|
rlm@0
|
480 propertySize = sizeof(AudioStreamBasicDescription);
|
rlm@0
|
481 err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
|
rlm@0
|
482 if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
|
rlm@0
|
483 {
|
rlm@0
|
484 ERR("AudioUnitGetProperty failed\n");
|
rlm@0
|
485 goto error;
|
rlm@0
|
486 }
|
rlm@0
|
487
|
rlm@0
|
488 // Set up the requested format description
|
rlm@0
|
489 switch(device->FmtType)
|
rlm@0
|
490 {
|
rlm@0
|
491 case DevFmtUByte:
|
rlm@0
|
492 requestedFormat.mBitsPerChannel = 8;
|
rlm@0
|
493 requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
|
rlm@0
|
494 break;
|
rlm@0
|
495 case DevFmtShort:
|
rlm@0
|
496 requestedFormat.mBitsPerChannel = 16;
|
rlm@0
|
497 requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
|
rlm@0
|
498 break;
|
rlm@0
|
499 case DevFmtFloat:
|
rlm@0
|
500 requestedFormat.mBitsPerChannel = 32;
|
rlm@0
|
501 requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
|
rlm@0
|
502 break;
|
rlm@0
|
503 case DevFmtByte:
|
rlm@0
|
504 case DevFmtUShort:
|
rlm@0
|
505 ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
|
rlm@0
|
506 goto error;
|
rlm@0
|
507 }
|
rlm@0
|
508
|
rlm@0
|
509 switch(device->FmtChans)
|
rlm@0
|
510 {
|
rlm@0
|
511 case DevFmtMono:
|
rlm@0
|
512 requestedFormat.mChannelsPerFrame = 1;
|
rlm@0
|
513 break;
|
rlm@0
|
514 case DevFmtStereo:
|
rlm@0
|
515 requestedFormat.mChannelsPerFrame = 2;
|
rlm@0
|
516 break;
|
rlm@0
|
517
|
rlm@0
|
518 case DevFmtQuad:
|
rlm@0
|
519 case DevFmtX51:
|
rlm@0
|
520 case DevFmtX51Side:
|
rlm@0
|
521 case DevFmtX61:
|
rlm@0
|
522 case DevFmtX71:
|
rlm@0
|
523 ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
|
rlm@0
|
524 goto error;
|
rlm@0
|
525 }
|
rlm@0
|
526
|
rlm@0
|
527 requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
|
rlm@0
|
528 requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
|
rlm@0
|
529 requestedFormat.mSampleRate = device->Frequency;
|
rlm@0
|
530 requestedFormat.mFormatID = kAudioFormatLinearPCM;
|
rlm@0
|
531 requestedFormat.mReserved = 0;
|
rlm@0
|
532 requestedFormat.mFramesPerPacket = 1;
|
rlm@0
|
533
|
rlm@0
|
534 // save requested format description for later use
|
rlm@0
|
535 data->format = requestedFormat;
|
rlm@0
|
536 data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
|
rlm@0
|
537
|
rlm@0
|
538 // Use intermediate format for sample rate conversion (outputFormat)
|
rlm@0
|
539 // Set sample rate to the same as hardware for resampling later
|
rlm@0
|
540 outputFormat = requestedFormat;
|
rlm@0
|
541 outputFormat.mSampleRate = hardwareFormat.mSampleRate;
|
rlm@0
|
542
|
rlm@0
|
543 // Determine sample rate ratio for resampling
|
rlm@0
|
544 data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;
|
rlm@0
|
545
|
rlm@0
|
546 // The output format should be the requested format, but using the hardware sample rate
|
rlm@0
|
547 // This is because the AudioUnit will automatically scale other properties, except for sample rate
|
rlm@0
|
548 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
|
rlm@0
|
549 if(err != noErr)
|
rlm@0
|
550 {
|
rlm@0
|
551 ERR("AudioUnitSetProperty failed\n");
|
rlm@0
|
552 goto error;
|
rlm@0
|
553 }
|
rlm@0
|
554
|
rlm@0
|
555 // Set the AudioUnit output format frame count
|
rlm@0
|
556 outputFrameCount = device->UpdateSize * data->sampleRateRatio;
|
rlm@0
|
557 err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
|
rlm@0
|
558 if(err != noErr)
|
rlm@0
|
559 {
|
rlm@0
|
560 ERR("AudioUnitSetProperty failed: %d\n", err);
|
rlm@0
|
561 goto error;
|
rlm@0
|
562 }
|
rlm@0
|
563
|
rlm@0
|
564 // Set up sample converter
|
rlm@0
|
565 err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
|
rlm@0
|
566 if(err != noErr)
|
rlm@0
|
567 {
|
rlm@0
|
568 ERR("AudioConverterNew failed: %d\n", err);
|
rlm@0
|
569 goto error;
|
rlm@0
|
570 }
|
rlm@0
|
571
|
rlm@0
|
572 // Create a buffer for use in the resample callback
|
rlm@0
|
573 data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);
|
rlm@0
|
574
|
rlm@0
|
575 // Allocate buffer for the AudioUnit output
|
rlm@0
|
576 data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
|
rlm@0
|
577 if(data->bufferList == NULL)
|
rlm@0
|
578 {
|
rlm@0
|
579 alcSetError(device, ALC_OUT_OF_MEMORY);
|
rlm@0
|
580 goto error;
|
rlm@0
|
581 }
|
rlm@0
|
582
|
rlm@0
|
583 data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
|
rlm@0
|
584 if(data->ring == NULL)
|
rlm@0
|
585 {
|
rlm@0
|
586 alcSetError(device, ALC_OUT_OF_MEMORY);
|
rlm@0
|
587 goto error;
|
rlm@0
|
588 }
|
rlm@0
|
589
|
rlm@0
|
590 return ALC_TRUE;
|
rlm@0
|
591
|
rlm@0
|
592 error:
|
rlm@0
|
593 DestroyRingBuffer(data->ring);
|
rlm@0
|
594 free(data->resampleBuffer);
|
rlm@0
|
595 destroy_buffer_list(data->bufferList);
|
rlm@0
|
596
|
rlm@0
|
597 if(data->audioConverter)
|
rlm@0
|
598 AudioConverterDispose(data->audioConverter);
|
rlm@0
|
599 if(data->audioUnit)
|
rlm@0
|
600 CloseComponent(data->audioUnit);
|
rlm@0
|
601
|
rlm@0
|
602 free(data);
|
rlm@0
|
603 device->ExtraData = NULL;
|
rlm@0
|
604
|
rlm@0
|
605 return ALC_FALSE;
|
rlm@0
|
606 }
|
rlm@0
|
607
|
rlm@0
|
608 static void ca_close_capture(ALCdevice *device)
|
rlm@0
|
609 {
|
rlm@0
|
610 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
611
|
rlm@0
|
612 DestroyRingBuffer(data->ring);
|
rlm@0
|
613 free(data->resampleBuffer);
|
rlm@0
|
614 destroy_buffer_list(data->bufferList);
|
rlm@0
|
615
|
rlm@0
|
616 AudioConverterDispose(data->audioConverter);
|
rlm@0
|
617 CloseComponent(data->audioUnit);
|
rlm@0
|
618
|
rlm@0
|
619 free(data);
|
rlm@0
|
620 device->ExtraData = NULL;
|
rlm@0
|
621 }
|
rlm@0
|
622
|
rlm@0
|
623 static void ca_start_capture(ALCdevice *device)
|
rlm@0
|
624 {
|
rlm@0
|
625 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
626 OSStatus err = AudioOutputUnitStart(data->audioUnit);
|
rlm@0
|
627 if(err != noErr)
|
rlm@0
|
628 ERR("AudioOutputUnitStart failed\n");
|
rlm@0
|
629 }
|
rlm@0
|
630
|
rlm@0
|
631 static void ca_stop_capture(ALCdevice *device)
|
rlm@0
|
632 {
|
rlm@0
|
633 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
634 OSStatus err = AudioOutputUnitStop(data->audioUnit);
|
rlm@0
|
635 if(err != noErr)
|
rlm@0
|
636 ERR("AudioOutputUnitStop failed\n");
|
rlm@0
|
637 }
|
rlm@0
|
638
|
rlm@0
|
639 static ALCuint ca_available_samples(ALCdevice *device)
|
rlm@0
|
640 {
|
rlm@0
|
641 ca_data *data = device->ExtraData;
|
rlm@0
|
642 return RingBufferSize(data->ring) / data->sampleRateRatio;
|
rlm@0
|
643 }
|
rlm@0
|
644
|
rlm@0
|
645 static void ca_capture_samples(ALCdevice *device, ALCvoid *buffer, ALCuint samples)
|
rlm@0
|
646 {
|
rlm@0
|
647 ca_data *data = (ca_data*)device->ExtraData;
|
rlm@0
|
648
|
rlm@0
|
649 if(samples <= ca_available_samples(device))
|
rlm@0
|
650 {
|
rlm@0
|
651 AudioBufferList *list;
|
rlm@0
|
652 UInt32 frameCount;
|
rlm@0
|
653 OSStatus err;
|
rlm@0
|
654
|
rlm@0
|
655 // If no samples are requested, just return
|
rlm@0
|
656 if(samples == 0)
|
rlm@0
|
657 return;
|
rlm@0
|
658
|
rlm@0
|
659 // Allocate a temporary AudioBufferList to use as the return resamples data
|
rlm@0
|
660 list = alloca(sizeof(AudioBufferList) + sizeof(AudioBuffer));
|
rlm@0
|
661
|
rlm@0
|
662 // Point the resampling buffer to the capture buffer
|
rlm@0
|
663 list->mNumberBuffers = 1;
|
rlm@0
|
664 list->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame;
|
rlm@0
|
665 list->mBuffers[0].mDataByteSize = samples * data->frameSize;
|
rlm@0
|
666 list->mBuffers[0].mData = buffer;
|
rlm@0
|
667
|
rlm@0
|
668 // Resample into another AudioBufferList
|
rlm@0
|
669 frameCount = samples;
|
rlm@0
|
670 err = AudioConverterFillComplexBuffer(data->audioConverter, ca_capture_conversion_callback, device,
|
rlm@0
|
671 &frameCount, list, NULL);
|
rlm@0
|
672 if(err != noErr)
|
rlm@0
|
673 {
|
rlm@0
|
674 ERR("AudioConverterFillComplexBuffer error: %d\n", err);
|
rlm@0
|
675 alcSetError(device, ALC_INVALID_VALUE);
|
rlm@0
|
676 }
|
rlm@0
|
677 }
|
rlm@0
|
678 else
|
rlm@0
|
679 alcSetError(device, ALC_INVALID_VALUE);
|
rlm@0
|
680 }
|
rlm@0
|
681
|
rlm@0
|
/* Backend function table installed by alc_ca_init(): playback
 * open/close/reset/stop followed by the capture entry points. */
static const BackendFuncs ca_funcs = {
    ca_open_playback,
    ca_close_playback,
    ca_reset_playback,
    ca_stop_playback,
    ca_open_capture,
    ca_close_capture,
    ca_start_capture,
    ca_stop_capture,
    ca_capture_samples,
    ca_available_samples
};
|
rlm@0
|
694
|
rlm@0
|
/* Install the CoreAudio backend's function table. Always succeeds. */
ALCboolean alc_ca_init(BackendFuncs *func_list)
{
    *func_list = ca_funcs;
    return ALC_TRUE;
}
|
rlm@0
|
700
|
rlm@0
|
/* Nothing to release: this backend keeps no global state. */
void alc_ca_deinit(void)
{
}
|
rlm@0
|
704
|
rlm@0
|
/* Report the single CoreAudio device name for the requested probe type
 * (playback, all-playback, or capture enumeration). */
void alc_ca_probe(enum DevProbe type)
{
    switch(type)
    {
        case DEVICE_PROBE:
            AppendDeviceList(ca_device);
            break;
        case ALL_DEVICE_PROBE:
            AppendAllDeviceList(ca_device);
            break;
        case CAPTURE_DEVICE_PROBE:
            AppendCaptureDeviceList(ca_device);
            break;
    }
}
|