#+title: Simulated Sense of Hearing
#+author: Robert McIntyre
#+email: rlm@mit.edu
#+description: Simulating multiple listeners and the sense of hearing in jMonkeyEngine3
#+keywords: simulated hearing, openal, clojure, jMonkeyEngine3, LWJGL, AI
#+SETUPFILE: ../../aurellem/org/setup.org
#+INCLUDE: ../../aurellem/org/level-0.org
#+BABEL: :exports both :noweb yes :cache no :mkdirp yes

* Hearing

I want to be able to place ears in a similar manner to how I place
the eyes: each ear should occupy a unique spatial position, and
receive as output at every tick the F.F.T. of whatever signals are
happening at that point.

Hearing is one of the more difficult senses to simulate, because there
is less support for obtaining the actual sound data that is processed
by jMonkeyEngine3.

jMonkeyEngine's sound system works as follows:

- jMonkeyEngine uses the =AppSettings= for the particular application
  to determine what sort of =AudioRenderer= should be used.
- although some support is provided for multiple =AudioRenderer=
  backends, jMonkeyEngine at the time of this writing will either
  pick no =AudioRenderer= at all, or the =LwjglAudioRenderer=.
- jMonkeyEngine tries to figure out what sort of system you're
  running and extracts the appropriate native libraries.
- the =LwjglAudioRenderer= uses the [[http://lwjgl.org/][=LWJGL=]] (LightWeight Java Game
  Library) bindings to interface with a C library called [[http://kcat.strangesoft.net/openal.html][=OpenAL=]].
- =OpenAL= calculates the 3D sound localization and feeds a stream of
  sound to any of various sound output devices with which it knows
  how to communicate.

A consequence of this is that there's no way to access the actual
sound data produced by =OpenAL=. Even worse, =OpenAL= only supports
one /listener/, which normally isn't a problem for games, but becomes
a problem when trying to make multiple AI creatures that can each hear
the world from a different perspective.

To make many AI creatures in jMonkeyEngine that can each hear the
world from their own perspective, it is necessary to go all the way
back to =OpenAL= and implement support for simulated hearing there.

* Extending =OpenAL=
** =OpenAL= Devices

=OpenAL= goes to great lengths to support many different systems, all
with different sound capabilities and interfaces. It accomplishes this
difficult task by providing code for many different sound backends in
pseudo-objects called /Devices/. There's a device for the Linux Open
Sound System and the Advanced Linux Sound Architecture, there's one
for Direct Sound on Windows, there's even one for Solaris. =OpenAL=
solves the problem of platform independence by providing all these
Devices.

Wrapper libraries such as LWJGL are free to examine the system on
which they are running and then select an appropriate device for that
system.

There are also a few "special" devices that don't interface with any
particular system. These include the Null Device, which doesn't do
anything, and the Wave Device, which writes whatever sound it receives
to a file, if everything has been set up correctly when configuring
=OpenAL=.

Actual mixing of the sound data happens in the Devices, and they are
the only point in the sound rendering process where this data is
available.

Therefore, in order to support multiple listeners, and get the sound
data in a form that the AIs can use, it is necessary to create a new
Device which supports these features.

** The Send Device
Adding a device to OpenAL is rather tricky -- there are five separate
files in the =OpenAL= source tree that must be modified to do so. I've
documented this process [[./add-new-device.org][here]] for anyone who is interested.


Onward to the actual Device!

Again, my objectives are:

- Support multiple listeners from jMonkeyEngine3
- Get access to the rendered sound data for further processing from
  clojure.

** =send.c=

** Header
#+name: send-header
#+begin_src C
#include "config.h"
#include <stdlib.h>
#include "alMain.h"
#include "AL/al.h"
#include "AL/alc.h"
#include "alSource.h"
#include <jni.h>

//////////////////// Summary

struct send_data;
struct context_data;

static void addContext(ALCdevice *, ALCcontext *);
static void syncContexts(ALCcontext *master, ALCcontext *slave);
static void syncSources(ALsource *master, ALsource *slave,
                        ALCcontext *masterCtx, ALCcontext *slaveCtx);

static void syncSourcei(ALuint master, ALuint slave,
                        ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);
static void syncSourcef(ALuint master, ALuint slave,
                        ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);
static void syncSource3f(ALuint master, ALuint slave,
                         ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);

static void swapInContext(ALCdevice *, struct context_data *);
static void saveContext(ALCdevice *, struct context_data *);
static void limitContext(ALCdevice *, ALCcontext *);
static void unLimitContext(ALCdevice *);

static void init(ALCdevice *);
static void renderData(ALCdevice *, int samples);

#define UNUSED(x) (void)(x)
#+end_src

The main idea behind the Send device is to take advantage of the fact
that LWJGL only manages one /context/ when using OpenAL. A /context/
is like a container that holds samples and keeps track of where the
listener is. In order to support multiple listeners, the Send device
identifies the LWJGL context as the master context, and creates any
number of slave contexts to represent additional listeners. Every
time the device renders sound, it synchronizes every source from the
master LWJGL context to the slave contexts. Then, it renders each
context separately, using a different listener for each one. The
rendered sound is made available via JNI to jMonkeyEngine.

To recap, the process is:
- Set the LWJGL context as "master" in the =init()= method.
- Create any number of additional contexts via =addContext()=.
- At every call to =renderData()=, sync the master context with the
  slave contexts via =syncContexts()=.
- =syncContexts()= calls =syncSources()= to sync all the sources
  which are in the master context.
- =limitContext()= and =unLimitContext()= make it possible to render
  only one context at a time; see the sketch below.

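In outline, the device's life cycle looks something like the
following sketch. It is illustrative only -- in the real device,
=init()= and =renderData()= are driven from Java via JNI, slave
contexts are created through =alcCreateContext()=, and
=simulation_running()= is a hypothetical placeholder.

#+begin_src C
/* Hypothetical driver; not part of send.c. */
void example_lifecycle(ALCdevice *dev, ALCcontext *earCtx){
  init(dev);               // the current (LWJGL) context becomes master
  addContext(dev, earCtx); // one slave context per extra listener
  while (simulation_running()){
    // sync master -> slaves, then render each context in isolation
    renderData(dev, 512);  // render 512 sample frames per listener
  }
}
#+end_src
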
** Necessary State
#+name: send-state
#+begin_src C
//////////////////// State

typedef struct context_data {
  ALfloat ClickRemoval[MAXCHANNELS];
  ALfloat PendingClicks[MAXCHANNELS];
  ALvoid *renderBuffer;
  ALCcontext *ctx;
} context_data;

typedef struct send_data {
  ALuint size;
  context_data **contexts;
  ALuint numContexts;
  ALuint maxContexts;
} send_data;
#+end_src

Switching between contexts is not the normal operation of a Device,
and one of the problems with doing so is that a Device normally keeps
around a few pieces of state, such as the =ClickRemoval= array above,
which will become corrupted if the contexts are not rendered in
parallel. The solution is to create a copy of this normally global
device state for each context, and copy it into and out of the actual
device state whenever a context is rendered (see =swapInContext()=
and =saveContext()= below).

** Synchronization Macros
#+name: sync-macros
#+begin_src C
//////////////////// Context Creation / Synchronization

#define _MAKE_SYNC(NAME, INIT_EXPR, GET_EXPR, SET_EXPR)    \
  void NAME (ALuint sourceID1, ALuint sourceID2,           \
             ALCcontext *ctx1, ALCcontext *ctx2,           \
             ALenum param){                                \
    INIT_EXPR;                                             \
    ALCcontext *current = alcGetCurrentContext();          \
    alcMakeContextCurrent(ctx1);                           \
    GET_EXPR;                                              \
    alcMakeContextCurrent(ctx2);                           \
    SET_EXPR;                                              \
    alcMakeContextCurrent(current);                        \
  }

#define MAKE_SYNC(NAME, TYPE, GET, SET)                    \
  _MAKE_SYNC(NAME,                                         \
             TYPE value,                                   \
             GET(sourceID1, param, &value),                \
             SET(sourceID2, param, value))

#define MAKE_SYNC3(NAME, TYPE, GET, SET)                   \
  _MAKE_SYNC(NAME,                                         \
             TYPE value1; TYPE value2; TYPE value3;,       \
             GET(sourceID1, param, &value1, &value2, &value3), \
             SET(sourceID2, param, value1, value2, value3))

MAKE_SYNC( syncSourcei,  ALint,   alGetSourcei,  alSourcei);
MAKE_SYNC( syncSourcef,  ALfloat, alGetSourcef,  alSourcef);
MAKE_SYNC3(syncSource3i, ALint,   alGetSource3i, alSource3i);
MAKE_SYNC3(syncSource3f, ALfloat, alGetSource3f, alSource3f);

#+end_src

Setting the state of an =OpenAL= source is done with the =alSourcei=,
=alSourcef=, =alSource3i=, and =alSource3f= functions. In order to
completely synchronize two sources, it is necessary to use all of
them. These macros help to condense the otherwise repetitive
synchronization code involving these similar low-level =OpenAL= functions.

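For concreteness, here is (modulo whitespace) what the
=MAKE_SYNC(syncSourcef, ...)= invocation above expands to:

#+begin_src C
// Expansion of MAKE_SYNC(syncSourcef, ALfloat, alGetSourcef, alSourcef):
// read `param` from the source in ctx1, write it to the source in
// ctx2, then restore whatever context was current.
void syncSourcef (ALuint sourceID1, ALuint sourceID2,
                  ALCcontext *ctx1, ALCcontext *ctx2,
                  ALenum param){
  ALfloat value;
  ALCcontext *current = alcGetCurrentContext();
  alcMakeContextCurrent(ctx1);
  alGetSourcef(sourceID1, param, &value);
  alcMakeContextCurrent(ctx2);
  alSourcef(sourceID2, param, value);
  alcMakeContextCurrent(current);
}
#+end_src
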
** Source Synchronization
#+name: sync-sources
#+begin_src C
void syncSources(ALsource *masterSource, ALsource *slaveSource,
                 ALCcontext *masterCtx, ALCcontext *slaveCtx){
  ALuint master = masterSource->source;
  ALuint slave = slaveSource->source;
  ALCcontext *current = alcGetCurrentContext();

  syncSourcef(master,slave,masterCtx,slaveCtx,AL_PITCH);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_DISTANCE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_ROLLOFF_FACTOR);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_REFERENCE_DISTANCE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MIN_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_INNER_ANGLE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_ANGLE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_SEC_OFFSET);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_SAMPLE_OFFSET);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_BYTE_OFFSET);

  syncSource3f(master,slave,masterCtx,slaveCtx,AL_POSITION);
  syncSource3f(master,slave,masterCtx,slaveCtx,AL_VELOCITY);
  syncSource3f(master,slave,masterCtx,slaveCtx,AL_DIRECTION);

  syncSourcei(master,slave,masterCtx,slaveCtx,AL_SOURCE_RELATIVE);
  syncSourcei(master,slave,masterCtx,slaveCtx,AL_LOOPING);

  alcMakeContextCurrent(masterCtx);
  ALint source_type;
  alGetSourcei(master, AL_SOURCE_TYPE, &source_type);

  // Only static sources are currently synchronized!
  if (AL_STATIC == source_type){
    ALint master_buffer;
    ALint slave_buffer;
    alGetSourcei(master, AL_BUFFER, &master_buffer);
    alcMakeContextCurrent(slaveCtx);
    alGetSourcei(slave, AL_BUFFER, &slave_buffer);
    if (master_buffer != slave_buffer){
      alSourcei(slave, AL_BUFFER, master_buffer);
    }
  }

  // Synchronize the state of the two sources.
  alcMakeContextCurrent(masterCtx);
  ALint masterState;
  ALint slaveState;

  alGetSourcei(master, AL_SOURCE_STATE, &masterState);
  alcMakeContextCurrent(slaveCtx);
  alGetSourcei(slave, AL_SOURCE_STATE, &slaveState);

  if (masterState != slaveState){
    switch (masterState){
    case AL_INITIAL : alSourceRewind(slave); break;
    case AL_PLAYING : alSourcePlay(slave);   break;
    case AL_PAUSED  : alSourcePause(slave);  break;
    case AL_STOPPED : alSourceStop(slave);   break;
    }
  }
  // Restore whatever context was previously active.
  alcMakeContextCurrent(current);
}
#+end_src
This function is long because it has to exhaustively go through all the
possible state that a source can have and make sure that it is the
same between the master and slave sources. I'd like to take this
moment to salute the [[http://connect.creativelabs.com/openal/Documentation/Forms/AllItems.aspx][=OpenAL= Reference Manual]], which provides a very
good description of =OpenAL='s internals.

** Context Synchronization
#+name: sync-contexts
#+begin_src C
void syncContexts(ALCcontext *master, ALCcontext *slave){
  /* If there aren't sufficient sources in slave to mirror
     the sources in master, create them. */
  ALCcontext *current = alcGetCurrentContext();

  UIntMap *masterSourceMap = &(master->SourceMap);
  UIntMap *slaveSourceMap = &(slave->SourceMap);
  ALuint numMasterSources = masterSourceMap->size;
  ALuint numSlaveSources = slaveSourceMap->size;

  alcMakeContextCurrent(slave);
  if (numSlaveSources < numMasterSources){
    ALuint numMissingSources = numMasterSources - numSlaveSources;
    ALuint newSources[numMissingSources];
    alGenSources(numMissingSources, newSources);
  }

  /* Now, slave is guaranteed to have at least as many sources
     as master. Sync each source from master to the corresponding
     source in slave. */
  int i;
  for(i = 0; i < masterSourceMap->size; i++){
    syncSources((ALsource*)masterSourceMap->array[i].value,
                (ALsource*)slaveSourceMap->array[i].value,
                master, slave);
  }
  alcMakeContextCurrent(current);
}
#+end_src

Most of the hard work in Context Synchronization is done in
=syncSources()=. The only thing that =syncContexts()= has to worry
about is automatically creating new sources whenever a slave context
does not have the same number of sources as the master context. Note
that this relies on corresponding sources occupying the same index in
each context's =SourceMap=.

** Context Creation
#+name: context-creation
#+begin_src C
static void addContext(ALCdevice *Device, ALCcontext *context){
  send_data *data = (send_data*)Device->ExtraData;
  // expand array if necessary
  if (data->numContexts >= data->maxContexts){
    ALuint newMaxContexts = data->maxContexts*2 + 1;
    // the array holds pointers, so each element is sizeof(context_data*)
    data->contexts = realloc(data->contexts, newMaxContexts*sizeof(context_data*));
    data->maxContexts = newMaxContexts;
  }
  // create context_data and add it to the main array
  context_data *ctxData;
  ctxData = (context_data*)calloc(1, sizeof(*ctxData));
  ctxData->renderBuffer =
    malloc(BytesFromDevFmt(Device->FmtType) *
           Device->NumChan * Device->UpdateSize);
  ctxData->ctx = context;

  data->contexts[data->numContexts] = ctxData;
  data->numContexts++;
}
#+end_src

Here, the slave context is created, and its data is stored in the
device-wide =ExtraData= structure. The =renderBuffer= that is created
here is where the rendered sound samples for this slave context will
eventually go.

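For example, assuming 16-bit samples (=DevFmtShort=, 2 bytes each),
stereo output (=NumChan= of 2), and an =UpdateSize= of 1024 sample
frames, each =renderBuffer= comes to 2 * 2 * 1024 = 4096 bytes.
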
** Context Switching
#+name: context-switching
#+begin_src C
//////////////////// Context Switching

/* A device brings along with it two pieces of state
 * which have to be swapped in and out with each context.
 */
static void swapInContext(ALCdevice *Device, context_data *ctxData){
  memcpy(Device->ClickRemoval, ctxData->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS);
  memcpy(Device->PendingClicks, ctxData->PendingClicks, sizeof(ALfloat)*MAXCHANNELS);
}

static void saveContext(ALCdevice *Device, context_data *ctxData){
  memcpy(ctxData->ClickRemoval, Device->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS);
  memcpy(ctxData->PendingClicks, Device->PendingClicks, sizeof(ALfloat)*MAXCHANNELS);
}

static ALCcontext **currentContext;
static ALuint currentNumContext;

/* By default, all contexts are rendered at once for each call to aluMixData.
 * This function uses the internals of the ALCdevice struct to temporarily
 * cause aluMixData to render only the chosen context.
 */
static void limitContext(ALCdevice *Device, ALCcontext *ctx){
  currentContext = Device->Contexts;
  currentNumContext = Device->NumContexts;
  Device->Contexts = &ctx;
  Device->NumContexts = 1;
}

static void unLimitContext(ALCdevice *Device){
  Device->Contexts = currentContext;
  Device->NumContexts = currentNumContext;
}
#+end_src

=OpenAL= normally renders all Contexts in parallel, outputting the
whole result to the buffer. It does this by iterating over the
=Device->Contexts= array and rendering each context to the buffer in
turn. By temporarily setting =Device->NumContexts= to 1 and adjusting
the Device's context list to put the desired context-to-be-rendered
into position 0, we can trick =OpenAL= into rendering each slave
context separately from all the others.

** Main Device Loop
#+name: main-loop
#+begin_src C
//////////////////// Main Device Loop

/* Establish the LWJGL context as the master context, which will
 * be synchronized to all the slave contexts
 */
static void init(ALCdevice *Device){
  ALCcontext *masterContext = alcGetCurrentContext();
  addContext(Device, masterContext);
}


static void renderData(ALCdevice *Device, int samples){
  if(!Device->Connected){return;}
  send_data *data = (send_data*)Device->ExtraData;
  ALCcontext *current = alcGetCurrentContext();

  ALuint i;
  for (i = 1; i < data->numContexts; i++){
    syncContexts(data->contexts[0]->ctx, data->contexts[i]->ctx);
  }

  if ((ALuint) samples > Device->UpdateSize){
    printf("exceeding internal buffer size; dropping samples\n");
    printf("requested %d; available %u\n", samples, Device->UpdateSize);
    samples = (int) Device->UpdateSize;
  }

  for (i = 0; i < data->numContexts; i++){
    context_data *ctxData = data->contexts[i];
    ALCcontext *ctx = ctxData->ctx;
    alcMakeContextCurrent(ctx);
    limitContext(Device, ctx);
    swapInContext(Device, ctxData);
    aluMixData(Device, ctxData->renderBuffer, samples);
    saveContext(Device, ctxData);
    unLimitContext(Device);
  }
  alcMakeContextCurrent(current);
}
#+end_src

The main loop synchronizes the master LWJGL context with all the slave
contexts, then walks each context, rendering just that context to its
audio-sample storage buffer.

** JNI Methods

At this point, we have the ability to create multiple listeners by
using the master/slave context trick, and the rendered audio data is
waiting patiently in internal buffers, one for each listener. We need
a way to transport this information to Java, and also a way to drive
this device from Java. The following JNI interface code is inspired
by the way LWJGL interfaces with =OpenAL=.

*** step
#+name: jni-step
#+begin_src C
//////////////////// JNI Methods

#include "com_aurellem_send_AudioSend.h"

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nstep
 * Signature: (JI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nstep
(JNIEnv *env, jclass clazz, jlong device, jint samples){
  UNUSED(env);UNUSED(clazz);UNUSED(device);
  renderData((ALCdevice*)((intptr_t)device), samples);
}
#+end_src
This device, unlike most of the other devices in =OpenAL=, does not
render sound unless asked. This enables the system to slow down or
speed up depending on the needs of the AIs who are using it to
listen. If the device tried to render samples in real-time, a
complicated AI whose mind takes 100 seconds of computer time to
simulate 1 second of AI-time would miss almost all of the sound in
its environment.

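For instance, at 44100 Hz, a 1/60-second simulation tick corresponds
to 44100 / 60 = 735 samples; the AI simply requests those 735 samples
whenever it is ready for them, however much real time that takes.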

*** getSamples
#+name: jni-get-samples
#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ngetSamples
 * Signature: (JLjava/nio/ByteBuffer;III)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ngetSamples
(JNIEnv *env, jclass clazz, jlong device, jobject buffer, jint position,
 jint samples, jint n){
  UNUSED(clazz);

  ALvoid *buffer_address =
    ((ALbyte *)(((char*)(*env)->GetDirectBufferAddress(env, buffer)) + position));
  ALCdevice *recorder = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)recorder->ExtraData;
  // valid context indices run from 0 to numContexts-1
  if ((ALuint)n >= data->numContexts){return;}
  memcpy(buffer_address, data->contexts[n]->renderBuffer,
         BytesFromDevFmt(recorder->FmtType) * recorder->NumChan * samples);
}
#+end_src

This is the transport layer between C and Java that will eventually
allow us to access rendered sound data from clojure.

*** Listener Management

=addListener=, =setNthListenerf=, and =setNthListener3f= are
necessary to change the properties of any listener other than the
master one, since only the listener of the current active context is
affected by the normal =OpenAL= listener calls.
#+name: listener-manage
#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    naddListener
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_naddListener
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(env); UNUSED(clazz);
  //printf("creating new context via naddListener\n");
  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  ALCcontext *new = alcCreateContext(Device, NULL);
  addContext(Device, new);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nsetNthListener3f
 * Signature: (IFFFJI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListener3f
(JNIEnv *env, jclass clazz, jint param,
 jfloat v1, jfloat v2, jfloat v3, jlong device, jint contextNum){
  UNUSED(env);UNUSED(clazz);

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)Device->ExtraData;

  ALCcontext *current = alcGetCurrentContext();
  // valid context indices run from 0 to numContexts-1
  if ((ALuint)contextNum >= data->numContexts){return;}
  alcMakeContextCurrent(data->contexts[contextNum]->ctx);
  alListener3f(param, v1, v2, v3);
  alcMakeContextCurrent(current);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nsetNthListenerf
 * Signature: (IFJI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListenerf
(JNIEnv *env, jclass clazz, jint param, jfloat v1, jlong device,
 jint contextNum){

  UNUSED(env);UNUSED(clazz);

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)Device->ExtraData;

  ALCcontext *current = alcGetCurrentContext();
  // valid context indices run from 0 to numContexts-1
  if ((ALuint)contextNum >= data->numContexts){return;}
  alcMakeContextCurrent(data->contexts[contextNum]->ctx);
  alListenerf(param, v1);
  alcMakeContextCurrent(current);
}
#+end_src

*** Initialization
=initDevice= is called from the Java side after LWJGL has created its
context, and before any calls to =addListener=. It establishes the
LWJGL context as the master context.

=getAudioFormat= is a convenience function that uses JNI to build up a
=javax.sound.sampled.AudioFormat= object from data in the Device. This
way, there is no ambiguity about what the bits created by =step= and
returned by =getSamples= mean.
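
For example, a device configured for 16-bit (=DevFmtShort=) stereo
output at 44100 Hz corresponds to =new AudioFormat(44100, 16, 2,
true, false)= -- signed samples in little-endian byte order.
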
#+name: jni-init
#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ninitDevice
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ninitDevice
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(env);UNUSED(clazz);
  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  init(Device);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ngetAudioFormat
 * Signature: (J)Ljavax/sound/sampled/AudioFormat;
 */
JNIEXPORT jobject JNICALL Java_com_aurellem_send_AudioSend_ngetAudioFormat
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(clazz);
  jclass AudioFormatClass =
    (*env)->FindClass(env, "javax/sound/sampled/AudioFormat");
  jmethodID AudioFormatConstructor =
    (*env)->GetMethodID(env, AudioFormatClass, "<init>", "(FIIZZ)V");

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  int isSigned;
  switch (Device->FmtType)
    {
    case DevFmtUByte:
    case DevFmtUShort: isSigned = 0; break;
    default          : isSigned = 1;
    }
  float frequency = Device->Frequency;
  int bitsPerFrame = (8 * BytesFromDevFmt(Device->FmtType));
  int channels = Device->NumChan;
  jobject format = (*env)->
    NewObject(
              env,AudioFormatClass,AudioFormatConstructor,
              frequency,
              bitsPerFrame,
              channels,
              isSigned,
              0);
  return format;
}
#+end_src

** Boring Device management stuff
This code is more-or-less copied verbatim from the other =OpenAL=
backends. It's the basis for =OpenAL='s primitive object system.
#+name: device-init
#+begin_src C
//////////////////// Device Initialization / Management

static const ALCchar sendDevice[] = "Multiple Audio Send";

static ALCboolean send_open_playback(ALCdevice *device,
                                     const ALCchar *deviceName)
{
  send_data *data;
  // stop any buffering for stdout, so that I can
  // see the printf statements in my terminal immediately
  setbuf(stdout, NULL);

  if(!deviceName)
    deviceName = sendDevice;
  else if(strcmp(deviceName, sendDevice) != 0)
    return ALC_FALSE;
  data = (send_data*)calloc(1, sizeof(*data));
  device->szDeviceName = strdup(deviceName);
  device->ExtraData = data;
  return ALC_TRUE;
}

static void send_close_playback(ALCdevice *device)
{
  send_data *data = (send_data*)device->ExtraData;
  alcMakeContextCurrent(NULL);
  ALuint i;
  // Destroy all slave contexts. LWJGL will take care of
  // its own context.
  for (i = 1; i < data->numContexts; i++){
    context_data *ctxData = data->contexts[i];
    alcDestroyContext(ctxData->ctx);
    free(ctxData->renderBuffer);
    free(ctxData);
  }
  free(data->contexts); // release the context array itself
  free(data);
  device->ExtraData = NULL;
}

static ALCboolean send_reset_playback(ALCdevice *device)
{
  SetDefaultWFXChannelOrder(device);
  return ALC_TRUE;
}

static void send_stop_playback(ALCdevice *Device){
  UNUSED(Device);
}

static const BackendFuncs send_funcs = {
  send_open_playback,
  send_close_playback,
  send_reset_playback,
  send_stop_playback,
  NULL,
  NULL, /* These would be filled with functions to */
  NULL, /* handle capturing audio if we were into  */
  NULL, /* that sort of thing...                   */
  NULL,
  NULL
};

ALCboolean alc_send_init(BackendFuncs *func_list){
  *func_list = send_funcs;
  return ALC_TRUE;
}

void alc_send_deinit(void){}

void alc_send_probe(enum DevProbe type)
{
  switch(type)
    {
    case DEVICE_PROBE:
      AppendDeviceList(sendDevice);
      break;
    case ALL_DEVICE_PROBE:
      AppendAllDeviceList(sendDevice);
      break;
    case CAPTURE_DEVICE_PROBE:
      break;
    }
}
#+end_src

* The Java interface, =AudioSend=

The Java interface to the Send Device follows naturally from the JNI
definitions. It is included here for completeness. The only thing of
note here is the =deviceID=. This is available from LWJGL, but the
only way to get it is through reflection. Unfortunately, there is no
other way to control the Send device than to obtain a pointer to it.

#+include: "../java/src/com/aurellem/send/AudioSend.java" src java :exports code

* Finally, Ears in clojure!

Now that the infrastructure is complete (modulo a few patches to
jMonkeyEngine3 to support accessing this modified version of =OpenAL=
that are not worth discussing), the clojure ear abstraction is rather
simple. Just as there were =SceneProcessors= for vision, there are
now =SoundProcessors= for hearing.

#+include: "../../jmeCapture/src/com/aurellem/capture/audio/SoundProcessor.java" src java


Ears work the same way as vision: =(hearing! creature)= returns a
sequence of hearing functions, one for each ear. The first time a
hearing function is called inside a running simulation, it registers
a =SoundProcessor= that converts the incoming sound data into a
vector of floats and stores it; every subsequent call returns the
latest auditory data from that ear.

#+name: ears
#+begin_src clojure
(ns cortex.hearing
  "Simulate the sense of hearing in jMonkeyEngine3. Enables multiple
  listeners at different positions in the same world. Automatically
  reads ear-nodes from specially prepared blender files and
  instantiates them in the world as actual ears."
  {:author "Robert McIntyre"}
  (:use (cortex world util sense))
  (:use clojure.contrib.def)
  (:import java.nio.ByteBuffer)
  (:import org.tritonus.share.sampled.FloatSampleTools)
  (:import (com.aurellem.capture.audio
            SoundProcessor AudioSendRenderer))
  (:import javax.sound.sampled.AudioFormat)
  (:import (com.jme3.scene Spatial Node))
  (:import com.jme3.audio.Listener)
  (:import com.jme3.app.Application)
  (:import com.jme3.scene.control.AbstractControl))

(defn sound-processor
  "Deals with converting ByteBuffers into Vectors of floats so that
  the continuation functions can be defined in terms of immutable
  stuff."
  [continuation]
  (proxy [SoundProcessor] []
    (cleanup [])
    (process
      [#^ByteBuffer audioSamples numSamples #^AudioFormat audioFormat]
      (let [bytes (byte-array numSamples)
            num-floats (/ numSamples (.getFrameSize audioFormat))
            floats (float-array num-floats)]
        (.get audioSamples bytes 0 numSamples)
        (FloatSampleTools/byte2floatInterleaved
         bytes 0 floats 0 num-floats audioFormat)
        (continuation
         (vec floats))))))

(defvar
  ^{:arglists '([creature])}
  ears
  (sense-nodes "ears")
  "Return the children of the creature's \"ears\" node.")

(defn update-listener-velocity!
  "Update the listener's velocity every update loop."
  [#^Spatial obj #^Listener lis]
  (let [old-position (atom (.getLocation lis))]
    (.addControl
     obj
     (proxy [AbstractControl] []
       (controlUpdate [tpf]
         (let [new-position (.getLocation lis)]
           (.setVelocity
            lis
            (.mult (.subtract new-position @old-position)
                   (float (/ tpf))))
           (reset! old-position new-position)))
       (controlRender [_ _])))))

(defn create-listener!
  "Create a Listener centered on the current position of 'ear
  which follows the closest physical node in 'creature and
  sends sound data to 'continuation."
  [#^Application world #^Node creature #^Spatial ear continuation]
  (let [target (closest-node creature ear)
        lis (Listener.)
        audio-renderer (.getAudioRenderer world)
        sp (sound-processor continuation)]
    (.setLocation lis (.getWorldTranslation ear))
    (.setRotation lis (.getWorldRotation ear))
    (bind-sense target lis)
    (update-listener-velocity! target lis)
    (.addListener audio-renderer lis)
    (.registerSoundProcessor audio-renderer lis sp)))

(defn hearing-fn
  "Returns a function which returns auditory sensory data when called
  inside a running simulation."
  [#^Node creature #^Spatial ear]
  (let [hearing-data (atom [])
        register-listener!
        (runonce
         (fn [#^Application world]
           (create-listener!
            world creature ear
            (fn [data]
              (reset! hearing-data (vec data))))))]
    (fn [#^Application world]
      (register-listener! world)
      (let [data @hearing-data
            topology
            (vec (map #(vector % 0) (range 0 (count data))))
            scaled-data
            (vec
             (map
              #(rem (int (* 255 (/ (+ 1 %) 2))) 256)
              data))]
        [topology scaled-data]))))

(defn hearing!
  "Endow the creature in a particular world with the sense of
  hearing. Will return a sequence of functions, one for each ear,
  which when called will return the auditory data from that ear."
  [#^Node creature]
  (for [ear (ears creature)]
    (hearing-fn creature ear)))


#+end_src

* Example

#+name: test-hearing
#+begin_src clojure :results silent
(ns cortex.test.hearing
  (:use (cortex world util hearing))
  (:import (com.jme3.audio AudioNode Listener))
  (:import com.jme3.scene.Node
           com.jme3.system.AppSettings))

(defn setup-fn [world]
  (let [listener (Listener.)]
    (add-ear world listener #(println-repl (nth % 0)))))

(defn play-sound [node world value]
  (if (not value)
    (do
      (.playSource (.getAudioRenderer world) node))))

(defn test-basic-hearing []
  (let [node1 (AudioNode. (asset-manager) "Sounds/pure.wav" false false)]
    (world
     (Node.)
     {"key-space" (partial play-sound node1)}
     setup-fn
     no-op)))

(defn test-advanced-hearing
  "Testing hearing:
  You should see a blue sphere flying around several
  cubes. As the sphere approaches each cube, it turns
  green."
  []
  (doto (com.aurellem.capture.examples.Advanced.)
    (.setSettings
     (doto (AppSettings. true)
       (.setAudioRenderer "Send")))
    (.setShowSettings false)
    (.setPauseOnLostFocus false)))

#+end_src

This extremely basic program prints out the first sample it encounters
at every time stamp. You can see the rendered sound being printed at
the REPL.

- As a bonus, this method of capturing audio for AI can also be used
  to capture perfect audio from a jMonkeyEngine application, for use
  in demos and the like.

* COMMENT Code Generation

#+begin_src clojure :tangle ../src/cortex/hearing.clj
<<ears>>
#+end_src

#+begin_src clojure :tangle ../src/cortex/test/hearing.clj
<<test-hearing>>
#+end_src

#+begin_src C :tangle ../../audio-send/Alc/backends/send.c
<<send-header>>
<<send-state>>
<<sync-macros>>
<<sync-sources>>
<<sync-contexts>>
<<context-creation>>
<<context-switching>>
<<main-loop>>
<<jni-step>>
<<jni-get-samples>>
<<listener-manage>>
<<jni-init>>
<<device-init>>
#+end_src
