Mercurial > audio-send
comparison org/ear.org @ 15:19ff95c69cf5
moved send.c to org file
author | Robert McIntyre <rlm@mit.edu> |
---|---|
date | Thu, 03 Nov 2011 12:08:39 -0700 |
parents | c41d773a85fb |
children | 1e201037f666 |
comparison
equal
deleted
inserted
replaced
14:63312ec4a2bf | 15:19ff95c69cf5 |
---|---|
1 #+title: The EARS! | 1 #+title: Simulated Sense of Hearing |
2 #+author: Robert McIntyre | 2 #+author: Robert McIntyre |
3 #+email: rlm@mit.edu | 3 #+email: rlm@mit.edu |
4 #+MATHJAX: align:"left" mathml:t path:"../aurellem/src/MathJax/MathJax.js" | 4 #+description: Simulating multiple listeners and the sense of hearing in jMonkeyEngine3 |
5 #+STYLE: <link rel="stylesheet" type="text/css" href="../aurellem/src/css/aurellem.css"/> | 5 #+keywords: simulated hearing, openal, clojure, jMonkeyEngine3, LWJGL, AI |
6 #+SETUPFILE: ../../aurellem/org/setup.org | |
7 #+INCLUDE: ../../aurellem/org/level-0.org | |
6 #+BABEL: :exports both :noweb yes :cache no :mkdirp yes | 8 #+BABEL: :exports both :noweb yes :cache no :mkdirp yes |
7 #+INCLUDE: ../aurellem/src/templates/level-0.org | 9 |
8 #+description: Simulating multiple listeners and the sense of hearing in jMonkeyEngine3 | 10 |
9 | 11 |
10 | 12 |
11 | 13 * Hearing |
12 | |
13 * Ears! | |
14 | 14 |
15 I want to be able to place ears in a similar manner to how I place | 15 I want to be able to place ears in a similar manner to how I place |
16 the eyes. I want to be able to place ears in a unique spatial | 16 the eyes. I want to be able to place ears in a unique spatial |
17 position, and receive as output at every tick the FFT of whatever | 17 position, and receive as output at every tick the FFT of whatever |
18 signals are happening at that point. | 18 signals are happening at that point. |
19 | 19 |
20 #+srcname: ear-header | 20 Hearing is one of the more difficult senses to simulate, because there |
21 is less support for obtaining the actual sound data that is processed | |
22 by jMonkeyEngine3. | |
23 | |
24 jMonkeyEngine's sound system works as follows: | |
25 | |
26 - jMonkeyEngine uses the =AppSettings= for the particular application | |
27 to determine what sort of =AudioRenderer= should be used. | |
28 - although some support is provided for multiple AudioRendering | |
29 backends, jMonkeyEngine at the time of this writing will either | |
30 pick no AudioRender at all, or the =LwjglAudioRenderer= | |
31 - jMonkeyEngine tries to figure out what sort of system you're | |
32 running and extracts the appropriate native libraries. | |
33 - the =LwjglAudioRenderer= uses the [[http://lwjgl.org/][=LWJGL=]] (lightweight java game | |
34 library) bindings to interface with a C library called [[http://kcat.strangesoft.net/openal.html][=OpenAL=]] | |
35 - =OpenAL= calculates the 3D sound localization and feeds a stream of | |
36 sound to any of various sound output devices with which it knows | |
37 how to communicate. | |
38 | |
39 A consequence of this is that there's no way to access the actual | |
40 sound data produced by =OpenAL=. Even worse, =OpenAL= only supports | |
41 one /listener/, which normally isn't a problem for games, but becomes | |
42 a problem when trying to make multiple AI creatures that can each hear | |
43 the world from a different perspective. | |
44 | |
45 To make many AI creatures in jMonkeyEngine that can each hear the | |
46 world from their own perspective, it is necessary to go all the way | |
47 back to =OpenAL= and implement support for simulated hearing there. | |
48 | |
49 ** =OpenAL= Devices | |
50 | |
51 =OpenAL= goes to great lengths to support many different systems, all | |
52 with different sound capabilities and interfaces. It accomplishes this | |
53 difficult task by providing code for many different sound backends in | |
54 pseudo-objects called /Devices/. There's a device for the Linux Open | |
55 Sound System and the Advanced Linux Sound Architecture, there's one | |
56 for Direct Sound on Windows, there's even one for Solaris. =OpenAL= | |
57 solves the problem of platform independence by providing all these | |
58 Devices. | |
59 | |
60 Wrapper libraries such as LWJGL are free to examine the system on | |
61 which they are running and then select an appropriate device for that | |
62 system. | |
63 | |
64 There are also a few "special" devices that don't interface with any | |
65 particular system. These include the Null Device, which doesn't do | |
66 anything, and the Wave Device, which writes whatever sound it receives | |
67 to a file, if everything has been set up correctly when configuring | |
68 =OpenAL=. | |
69 | |
70 Actual mixing of the sound data happens in the Devices, and they are | |
71 the only point in the sound rendering process where this data is | |
72 available. | |
73 | |
74 Therefore, in order to support multiple listeners, and get the sound | |
75 data in a form that the AIs can use, it is necessary to create a new | |
76 Device, which supports these features. | |
77 | |
78 | |
79 ** The Send Device | |
80 Adding a device to OpenAL is rather tricky -- there are five separate | |
81 files in the =OpenAL= source tree that must be modified to do so. I've | |
82 documented this process [[./add-new-device.org][here]] for anyone who is interested. | |
83 | |
84 #+srcname: send | |
85 #+begin_src C | |
86 #include "config.h" | |
87 #include <stdlib.h> | |
88 #include "alMain.h" | |
89 #include "AL/al.h" | |
90 #include "AL/alc.h" | |
91 #include "alSource.h" | |
92 #include <jni.h> | |
93 | |
94 //////////////////// Summary | |
95 | |
96 struct send_data; | |
97 struct context_data; | |
98 | |
99 static void addContext(ALCdevice *, ALCcontext *); | |
100 static void syncContexts(ALCcontext *master, ALCcontext *slave); | |
101 static void syncSources(ALsource *master, ALsource *slave, | |
102 ALCcontext *masterCtx, ALCcontext *slaveCtx); | |
103 | |
104 static void syncSourcei(ALuint master, ALuint slave, | |
105 ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param); | |
106 static void syncSourcef(ALuint master, ALuint slave, | |
107 ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param); | |
108 static void syncSource3f(ALuint master, ALuint slave, | |
109 ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param); | |
110 | |
111 static void swapInContext(ALCdevice *, struct context_data *); | |
112 static void saveContext(ALCdevice *, struct context_data *); | |
113 static void limitContext(ALCdevice *, ALCcontext *); | |
114 static void unLimitContext(ALCdevice *); | |
115 | |
116 static void init(ALCdevice *); | |
117 static void renderData(ALCdevice *, int samples); | |
118 | |
119 #define UNUSED(x) (void)(x) | |
120 | |
121 //////////////////// State | |
122 | |
123 typedef struct context_data { | |
124 ALfloat ClickRemoval[MAXCHANNELS]; | |
125 ALfloat PendingClicks[MAXCHANNELS]; | |
126 ALvoid *renderBuffer; | |
127 ALCcontext *ctx; | |
128 } context_data; | |
129 | |
130 typedef struct send_data { | |
131 ALuint size; | |
132 context_data **contexts; | |
133 ALuint numContexts; | |
134 ALuint maxContexts; | |
135 } send_data; | |
136 | |
137 | |
138 | |
139 //////////////////// Context Creation / Synchronization | |
140 | |
141 #define _MAKE_SYNC(NAME, INIT_EXPR, GET_EXPR, SET_EXPR) \ | |
142 void NAME (ALuint sourceID1, ALuint sourceID2, \ | |
143 ALCcontext *ctx1, ALCcontext *ctx2, \ | |
144 ALenum param){ \ | |
145 INIT_EXPR; \ | |
146 ALCcontext *current = alcGetCurrentContext(); \ | |
147 alcMakeContextCurrent(ctx1); \ | |
148 GET_EXPR; \ | |
149 alcMakeContextCurrent(ctx2); \ | |
150 SET_EXPR; \ | |
151 alcMakeContextCurrent(current); \ | |
152 } | |
153 | |
154 #define MAKE_SYNC(NAME, TYPE, GET, SET) \ | |
155 _MAKE_SYNC(NAME, \ | |
156 TYPE value, \ | |
157 GET(sourceID1, param, &value), \ | |
158 SET(sourceID2, param, value)) | |
159 | |
160 #define MAKE_SYNC3(NAME, TYPE, GET, SET) \ | |
161 _MAKE_SYNC(NAME, \ | |
162 TYPE value1; TYPE value2; TYPE value3;, \ | |
163 GET(sourceID1, param, &value1, &value2, &value3), \ | |
164 SET(sourceID2, param, value1, value2, value3)) | |
165 | |
166 MAKE_SYNC( syncSourcei, ALint, alGetSourcei, alSourcei); | |
167 MAKE_SYNC( syncSourcef, ALfloat, alGetSourcef, alSourcef); | |
168 MAKE_SYNC3(syncSource3i, ALint, alGetSource3i, alSource3i); | |
169 MAKE_SYNC3(syncSource3f, ALfloat, alGetSource3f, alSource3f); | |
170 | |
171 void syncSources(ALsource *masterSource, ALsource *slaveSource, | |
172 ALCcontext *masterCtx, ALCcontext *slaveCtx){ | |
173 ALuint master = masterSource->source; | |
174 ALuint slave = slaveSource->source; | |
175 ALCcontext *current = alcGetCurrentContext(); | |
176 | |
177 syncSourcef(master,slave,masterCtx,slaveCtx,AL_PITCH); | |
178 syncSourcef(master,slave,masterCtx,slaveCtx,AL_GAIN); | |
179 syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_DISTANCE); | |
180 syncSourcef(master,slave,masterCtx,slaveCtx,AL_ROLLOFF_FACTOR); | |
181 syncSourcef(master,slave,masterCtx,slaveCtx,AL_REFERENCE_DISTANCE); | |
182 syncSourcef(master,slave,masterCtx,slaveCtx,AL_MIN_GAIN); | |
183 syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_GAIN); | |
184 syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_GAIN); | |
185 syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_INNER_ANGLE); | |
186 syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_ANGLE); | |
187 syncSourcef(master,slave,masterCtx,slaveCtx,AL_SEC_OFFSET); | |
188 syncSourcef(master,slave,masterCtx,slaveCtx,AL_SAMPLE_OFFSET); | |
189 syncSourcef(master,slave,masterCtx,slaveCtx,AL_BYTE_OFFSET); | |
190 | |
191 syncSource3f(master,slave,masterCtx,slaveCtx,AL_POSITION); | |
192 syncSource3f(master,slave,masterCtx,slaveCtx,AL_VELOCITY); | |
193 syncSource3f(master,slave,masterCtx,slaveCtx,AL_DIRECTION); | |
194 | |
195 syncSourcei(master,slave,masterCtx,slaveCtx,AL_SOURCE_RELATIVE); | |
196 syncSourcei(master,slave,masterCtx,slaveCtx,AL_LOOPING); | |
197 | |
198 alcMakeContextCurrent(masterCtx); | |
199 ALint source_type; | |
200 alGetSourcei(master, AL_SOURCE_TYPE, &source_type); | |
201 | |
202 // Only static sources are currently synchronized! | |
203 if (AL_STATIC == source_type){ | |
204 ALint master_buffer; | |
205 ALint slave_buffer; | |
206 alGetSourcei(master, AL_BUFFER, &master_buffer); | |
207 alcMakeContextCurrent(slaveCtx); | |
208 alGetSourcei(slave, AL_BUFFER, &slave_buffer); | |
209 if (master_buffer != slave_buffer){ | |
210 alSourcei(slave, AL_BUFFER, master_buffer); | |
211 } | |
212 } | |
213 | |
214 // Synchronize the state of the two sources. | |
215 alcMakeContextCurrent(masterCtx); | |
216 ALint masterState; | |
217 ALint slaveState; | |
218 | |
219 alGetSourcei(master, AL_SOURCE_STATE, &masterState); | |
220 alcMakeContextCurrent(slaveCtx); | |
221 alGetSourcei(slave, AL_SOURCE_STATE, &slaveState); | |
222 | |
223 if (masterState != slaveState){ | |
224 switch (masterState){ | |
225 case AL_INITIAL : alSourceRewind(slave); break; | |
226 case AL_PLAYING : alSourcePlay(slave); break; | |
227 case AL_PAUSED : alSourcePause(slave); break; | |
228 case AL_STOPPED : alSourceStop(slave); break; | |
229 } | |
230 } | |
231 // Restore whatever context was previously active. | |
232 alcMakeContextCurrent(current); | |
233 } | |
234 | |
235 | |
236 void syncContexts(ALCcontext *master, ALCcontext *slave){ | |
237 /* If there aren't sufficient sources in slave to mirror | |
238 the sources in master, create them. */ | |
239 ALCcontext *current = alcGetCurrentContext(); | |
240 | |
241 UIntMap *masterSourceMap = &(master->SourceMap); | |
242 UIntMap *slaveSourceMap = &(slave->SourceMap); | |
243 ALuint numMasterSources = masterSourceMap->size; | |
244 ALuint numSlaveSources = slaveSourceMap->size; | |
245 | |
246 alcMakeContextCurrent(slave); | |
247 if (numSlaveSources < numMasterSources){ | |
248 ALuint numMissingSources = numMasterSources - numSlaveSources; | |
249 ALuint newSources[numMissingSources]; | |
250 alGenSources(numMissingSources, newSources); | |
251 } | |
252 | |
253 /* Now, slave is guaranteed to have at least as many sources | |
254 as master. Sync each source from master to the corresponding | |
255 source in slave. */ | |
256 int i; | |
257 for(i = 0; i < masterSourceMap->size; i++){ | |
258 syncSources((ALsource*)masterSourceMap->array[i].value, | |
259 (ALsource*)slaveSourceMap->array[i].value, | |
260 master, slave); | |
261 } | |
262 alcMakeContextCurrent(current); | |
263 } | |
264 | |
265 static void addContext(ALCdevice *Device, ALCcontext *context){ | |
266 send_data *data = (send_data*)Device->ExtraData; | |
267 // expand array if necessary | |
268 if (data->numContexts >= data->maxContexts){ | |
269 ALuint newMaxContexts = data->maxContexts*2 + 1; | |
270 data->contexts = realloc(data->contexts, newMaxContexts*sizeof(context_data)); | |
271 data->maxContexts = newMaxContexts; | |
272 } | |
273 // create context_data and add it to the main array | |
274 context_data *ctxData; | |
275 ctxData = (context_data*)calloc(1, sizeof(*ctxData)); | |
276 ctxData->renderBuffer = | |
277 malloc(BytesFromDevFmt(Device->FmtType) * | |
278 Device->NumChan * Device->UpdateSize); | |
279 ctxData->ctx = context; | |
280 | |
281 data->contexts[data->numContexts] = ctxData; | |
282 data->numContexts++; | |
283 } | |
284 | |
285 | |
286 //////////////////// Context Switching | |
287 | |
288 /* A device brings along with it two pieces of state | |
289 * which have to be swapped in and out with each context. | |
290 */ | |
291 static void swapInContext(ALCdevice *Device, context_data *ctxData){ | |
292 memcpy(Device->ClickRemoval, ctxData->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS); | |
293 memcpy(Device->PendingClicks, ctxData->PendingClicks, sizeof(ALfloat)*MAXCHANNELS); | |
294 } | |
295 | |
296 static void saveContext(ALCdevice *Device, context_data *ctxData){ | |
297 memcpy(ctxData->ClickRemoval, Device->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS); | |
298 memcpy(ctxData->PendingClicks, Device->PendingClicks, sizeof(ALfloat)*MAXCHANNELS); | |
299 } | |
300 | |
301 static ALCcontext **currentContext; | |
302 static ALuint currentNumContext; | |
303 | |
304 /* By default, all contexts are rendered at once for each call to aluMixData. | |
305 * This function uses the internals of the ALCdevice struct to temporarily | |
306 * cause aluMixData to only render the chosen context. | |
307 */ | |
308 static void limitContext(ALCdevice *Device, ALCcontext *ctx){ | |
309 currentContext = Device->Contexts; | |
310 currentNumContext = Device->NumContexts; | |
311 Device->Contexts = &ctx; | |
312 Device->NumContexts = 1; | |
313 } | |
314 | |
315 static void unLimitContext(ALCdevice *Device){ | |
316 Device->Contexts = currentContext; | |
317 Device->NumContexts = currentNumContext; | |
318 } | |
319 | |
320 | |
321 //////////////////// Main Device Loop | |
322 | |
323 /* Establish the LWJGL context as the main context, which will | |
324 * be synchronized to all the slave contexts | |
325 */ | |
326 static void init(ALCdevice *Device){ | |
327 ALCcontext *masterContext = alcGetCurrentContext(); | |
328 addContext(Device, masterContext); | |
329 } | |
330 | |
331 | |
332 static void renderData(ALCdevice *Device, int samples){ | |
333 if(!Device->Connected){return;} | |
334 send_data *data = (send_data*)Device->ExtraData; | |
335 ALCcontext *current = alcGetCurrentContext(); | |
336 | |
337 ALuint i; | |
338 for (i = 1; i < data->numContexts; i++){ | |
339 syncContexts(data->contexts[0]->ctx , data->contexts[i]->ctx); | |
340 } | |
341 | |
342 if ((uint) samples > Device->UpdateSize){ | |
343 printf("exceeding internal buffer size; dropping samples\n"); | |
344 printf("requested %d; available %d\n", samples, Device->UpdateSize); | |
345 samples = (int) Device->UpdateSize; | |
346 } | |
347 | |
348 for (i = 0; i < data->numContexts; i++){ | |
349 context_data *ctxData = data->contexts[i]; | |
350 ALCcontext *ctx = ctxData->ctx; | |
351 alcMakeContextCurrent(ctx); | |
352 limitContext(Device, ctx); | |
353 swapInContext(Device, ctxData); | |
354 aluMixData(Device, ctxData->renderBuffer, samples); | |
355 saveContext(Device, ctxData); | |
356 unLimitContext(Device); | |
357 } | |
358 alcMakeContextCurrent(current); | |
359 } | |
360 | |
361 | |
362 //////////////////// JNI Methods | |
363 | |
364 #include "com_aurellem_send_AudioSend.h" | |
365 | |
366 /* | |
367 * Class: com_aurellem_send_AudioSend | |
368 * Method: nstep | |
369 * Signature: (JI)V | |
370 */ | |
371 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nstep | |
372 (JNIEnv *env, jclass clazz, jlong device, jint samples){ | |
373 UNUSED(env);UNUSED(clazz);UNUSED(device); | |
374 renderData((ALCdevice*)((intptr_t)device), samples); | |
375 } | |
376 | |
377 /* | |
378 * Class: com_aurellem_send_AudioSend | |
379 * Method: ngetSamples | |
380 * Signature: (JLjava/nio/ByteBuffer;III)V | |
381 */ | |
382 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ngetSamples | |
383 (JNIEnv *env, jclass clazz, jlong device, jobject buffer, jint position, | |
384 jint samples, jint n){ | |
385 UNUSED(clazz); | |
386 | |
387 ALvoid *buffer_address = | |
388 ((ALbyte *)(((char*)(*env)->GetDirectBufferAddress(env, buffer)) + position)); | |
389 ALCdevice *recorder = (ALCdevice*) ((intptr_t)device); | |
390 send_data *data = (send_data*)recorder->ExtraData; | |
391 if ((ALuint)n > data->numContexts){return;} | |
392 | |
393 //printf("Want %d samples for listener %d\n", samples, n); | |
394 //printf("Device's format type is %d bytes per sample,\n", | |
395 // BytesFromDevFmt(recorder->FmtType)); | |
396 //printf("and it has %d channels, making for %d requested bytes\n", | |
397 // recorder->NumChan, | |
398 // BytesFromDevFmt(recorder->FmtType) * recorder->NumChan * samples); | |
399 | |
400 memcpy(buffer_address, data->contexts[n]->renderBuffer, | |
401 BytesFromDevFmt(recorder->FmtType) * recorder->NumChan * samples); | |
402 //samples*sizeof(ALfloat)); | |
403 } | |
404 | |
405 /* | |
406 * Class: com_aurellem_send_AudioSend | |
407 * Method: naddListener | |
408 * Signature: (J)V | |
409 */ | |
410 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_naddListener | |
411 (JNIEnv *env, jclass clazz, jlong device){ | |
412 UNUSED(env); UNUSED(clazz); | |
413 //printf("creating new context via naddListener\n"); | |
414 ALCdevice *Device = (ALCdevice*) ((intptr_t)device); | |
415 ALCcontext *new = alcCreateContext(Device, NULL); | |
416 addContext(Device, new); | |
417 } | |
418 | |
419 /* | |
420 * Class: com_aurellem_send_AudioSend | |
421 * Method: nsetNthListener3f | |
422 * Signature: (IFFFJI)V | |
423 */ | |
424 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListener3f | |
425 (JNIEnv *env, jclass clazz, jint param, | |
426 jfloat v1, jfloat v2, jfloat v3, jlong device, jint contextNum){ | |
427 UNUSED(env);UNUSED(clazz); | |
428 | |
429 ALCdevice *Device = (ALCdevice*) ((intptr_t)device); | |
430 send_data *data = (send_data*)Device->ExtraData; | |
431 | |
432 ALCcontext *current = alcGetCurrentContext(); | |
433 if ((ALuint)contextNum > data->numContexts){return;} | |
434 alcMakeContextCurrent(data->contexts[contextNum]->ctx); | |
435 alListener3f(param, v1, v2, v3); | |
436 alcMakeContextCurrent(current); | |
437 } | |
438 | |
439 /* | |
440 * Class: com_aurellem_send_AudioSend | |
441 * Method: nsetNthListenerf | |
442 * Signature: (IFJI)V | |
443 */ | |
444 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListenerf | |
445 (JNIEnv *env, jclass clazz, jint param, jfloat v1, jlong device, | |
446 jint contextNum){ | |
447 | |
448 UNUSED(env);UNUSED(clazz); | |
449 | |
450 ALCdevice *Device = (ALCdevice*) ((intptr_t)device); | |
451 send_data *data = (send_data*)Device->ExtraData; | |
452 | |
453 ALCcontext *current = alcGetCurrentContext(); | |
454 if ((ALuint)contextNum > data->numContexts){return;} | |
455 alcMakeContextCurrent(data->contexts[contextNum]->ctx); | |
456 alListenerf(param, v1); | |
457 alcMakeContextCurrent(current); | |
458 } | |
459 | |
460 /* | |
461 * Class: com_aurellem_send_AudioSend | |
462 * Method: ninitDevice | |
463 * Signature: (J)V | |
464 */ | |
465 JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ninitDevice | |
466 (JNIEnv *env, jclass clazz, jlong device){ | |
467 UNUSED(env);UNUSED(clazz); | |
468 | |
469 ALCdevice *Device = (ALCdevice*) ((intptr_t)device); | |
470 init(Device); | |
471 | |
472 } | |
473 | |
474 | |
475 /* | |
476 * Class: com_aurellem_send_AudioSend | |
477 * Method: ngetAudioFormat | |
478 * Signature: (J)Ljavax/sound/sampled/AudioFormat; | |
479 */ | |
480 JNIEXPORT jobject JNICALL Java_com_aurellem_send_AudioSend_ngetAudioFormat | |
481 (JNIEnv *env, jclass clazz, jlong device){ | |
482 UNUSED(clazz); | |
483 jclass AudioFormatClass = | |
484 (*env)->FindClass(env, "javax/sound/sampled/AudioFormat"); | |
485 jmethodID AudioFormatConstructor = | |
486 (*env)->GetMethodID(env, AudioFormatClass, "<init>", "(FIIZZ)V"); | |
487 | |
488 ALCdevice *Device = (ALCdevice*) ((intptr_t)device); | |
489 | |
490 //float frequency | |
491 | |
492 int isSigned; | |
493 switch (Device->FmtType) | |
494 { | |
495 case DevFmtUByte: | |
496 case DevFmtUShort: isSigned = 0; break; | |
497 default : isSigned = 1; | |
498 } | |
499 float frequency = Device->Frequency; | |
500 int bitsPerFrame = (8 * BytesFromDevFmt(Device->FmtType)); | |
501 int channels = Device->NumChan; | |
502 | |
503 | |
504 //printf("freq = %f, bpf = %d, channels = %d, signed? = %d\n", | |
505 // frequency, bitsPerFrame, channels, isSigned); | |
506 | |
507 jobject format = (*env)-> | |
508 NewObject( | |
509 env,AudioFormatClass,AudioFormatConstructor, | |
510 frequency, | |
511 bitsPerFrame, | |
512 channels, | |
513 isSigned, | |
514 0); | |
515 return format; | |
516 } | |
517 | |
518 | |
519 | |
520 //////////////////// Device Initilization / Management | |
521 | |
522 static const ALCchar sendDevice[] = "Multiple Audio Send"; | |
523 | |
524 static ALCboolean send_open_playback(ALCdevice *device, | |
525 const ALCchar *deviceName) | |
526 { | |
527 send_data *data; | |
528 // stop any buffering for stdout, so that I can | |
529 // see the printf statements in my terminal immediately | |
530 setbuf(stdout, NULL); | |
531 | |
532 if(!deviceName) | |
533 deviceName = sendDevice; | |
534 else if(strcmp(deviceName, sendDevice) != 0) | |
535 return ALC_FALSE; | |
536 data = (send_data*)calloc(1, sizeof(*data)); | |
537 device->szDeviceName = strdup(deviceName); | |
538 device->ExtraData = data; | |
539 return ALC_TRUE; | |
540 } | |
541 | |
542 static void send_close_playback(ALCdevice *device) | |
543 { | |
544 send_data *data = (send_data*)device->ExtraData; | |
545 alcMakeContextCurrent(NULL); | |
546 ALuint i; | |
547 // Destroy all slave contexts. LWJGL will take care of | |
548 // its own context. | |
549 for (i = 1; i < data->numContexts; i++){ | |
550 context_data *ctxData = data->contexts[i]; | |
551 alcDestroyContext(ctxData->ctx); | |
552 free(ctxData->renderBuffer); | |
553 free(ctxData); | |
554 } | |
555 free(data); | |
556 device->ExtraData = NULL; | |
557 } | |
558 | |
559 static ALCboolean send_reset_playback(ALCdevice *device) | |
560 { | |
561 SetDefaultWFXChannelOrder(device); | |
562 return ALC_TRUE; | |
563 } | |
564 | |
565 static void send_stop_playback(ALCdevice *Device){ | |
566 UNUSED(Device); | |
567 } | |
568 | |
569 static const BackendFuncs send_funcs = { | |
570 send_open_playback, | |
571 send_close_playback, | |
572 send_reset_playback, | |
573 send_stop_playback, | |
574 NULL, | |
575 NULL, /* These would be filled with functions to */ | |
576 NULL, /* handle capturing audio if we were into that */ | |
577 NULL, /* sort of thing... */ | |
578 NULL, | |
579 NULL | |
580 }; | |
581 | |
582 ALCboolean alc_send_init(BackendFuncs *func_list){ | |
583 *func_list = send_funcs; | |
584 return ALC_TRUE; | |
585 } | |
586 | |
587 void alc_send_deinit(void){} | |
588 | |
589 void alc_send_probe(enum DevProbe type) | |
590 { | |
591 switch(type) | |
592 { | |
593 case DEVICE_PROBE: | |
594 AppendDeviceList(sendDevice); | |
595 break; | |
596 case ALL_DEVICE_PROBE: | |
597 AppendAllDeviceList(sendDevice); | |
598 break; | |
599 case CAPTURE_DEVICE_PROBE: | |
600 break; | |
601 } | |
602 } | |
603 #+end_src | |
604 | |
605 | |
606 | |
607 | |
608 | |
609 | |
610 | |
611 | |
612 #+srcname: ears | |
21 #+begin_src clojure | 613 #+begin_src clojure |
22 (ns body.ear) | 614 (ns cortex.hearing) |
23 (use 'cortex.world) | 615 (use 'cortex.world) |
24 (use 'cortex.import) | 616 (use 'cortex.import) |
25 (use 'clojure.contrib.def) | 617 (use 'clojure.contrib.def) |
26 (cortex.import/mega-import-jme3) | 618 (cortex.import/mega-import-jme3) |
27 (rlm.rlm-commands/help) | 619 (rlm.rlm-commands/help) |
39 (import javax.swing.JPanel) | 631 (import javax.swing.JPanel) |
40 (import javax.swing.SwingUtilities) | 632 (import javax.swing.SwingUtilities) |
41 (import javax.swing.ImageIcon) | 633 (import javax.swing.ImageIcon) |
42 (import javax.swing.JOptionPane) | 634 (import javax.swing.JOptionPane) |
43 (import java.awt.image.ImageObserver) | 635 (import java.awt.image.ImageObserver) |
44 #+end_src | 636 |
45 | |
46 JMonkeyEngine3's audio system works as follows: | |
47 first, an appropriate audio renderer is created during initialization | |
48 and depending on the context. On my computer, this is the | |
49 LwjglAudioRenderer. | |
50 | |
51 The LwjglAudioRenderer sets a few internal state variables depending | |
52 on what capabilities the audio system has. | |
53 | |
54 may very well need to make my own AudioRenderer | |
55 | |
56 #+srcname: ear-body-1 | |
57 #+begin_src clojure :results silent | |
58 (in-ns 'body.ear) | |
59 (import 'com.jme3.capture.SoundProcessor) | 637 (import 'com.jme3.capture.SoundProcessor) |
60 | 638 |
61 | 639 |
62 (defn sound-processor | 640 (defn sound-processor |
63 "deals with converting ByteBuffers into Arrays of bytes so that the | 641 "deals with converting ByteBuffers into Arrays of bytes so that the |
70 (no-exceptions | 648 (no-exceptions |
71 (let [byte-array (byte-array numSamples)] | 649 (let [byte-array (byte-array numSamples)] |
72 (.get audioSamples byte-array 0 numSamples) | 650 (.get audioSamples byte-array 0 numSamples) |
73 (continuation | 651 (continuation |
74 (vec byte-array))))))) | 652 (vec byte-array))))))) |
75 | |
76 | 653 |
77 (defn add-ear | 654 (defn add-ear |
78 "add an ear to the world. The continuation function will be called | 655 "add an ear to the world. The continuation function will be called |
79 on the FFT or the sounds which the ear hears in the given | 656 on the FFT or the sounds which the ear hears in the given |
80 timeframe. Sound is 3D." | 657 timeframe. Sound is 3D." |
120 | 697 |
121 #+end_src | 698 #+end_src |
122 | 699 |
123 | 700 |
124 | 701 |
702 * Example | |
703 | |
125 * COMMENT Code Generation | 704 * COMMENT Code Generation |
126 | 705 |
127 #+begin_src clojure :tangle /home/r/cortex/src/body/ear.clj | 706 #+begin_src clojure :tangle ../../cortex/src/cortex/hearing.clj |
128 <<ear-header>> | 707 <<ears>> |
129 <<ear-body-1>> | |
130 #+end_src | 708 #+end_src |
131 | 709 |
132 | 710 #+begin_src clojure :tangle ../../cortex/src/test/hearing.clj |
133 #+begin_src clojure :tangle /home/r/cortex/src/test/hearing.clj | |
134 <<test-hearing>> | 711 <<test-hearing>> |
135 #+end_src | 712 #+end_src |
136 | 713 |
137 | 714 |
715 #+begin_src C :tangle ../Alc/backends/send.c | |
716 <<send>> | |
717 #+end_src |