#+title: Simulated Sense of Hearing
#+author: Robert McIntyre
#+email: rlm@mit.edu
#+description: Simulating multiple listeners and the sense of hearing in jMonkeyEngine3
#+keywords: simulated hearing, openal, clojure, jMonkeyEngine3, LWJGL, AI
#+SETUPFILE: ../../aurellem/org/setup.org
#+INCLUDE: ../../aurellem/org/level-0.org
#+BABEL: :exports both :noweb yes :cache no :mkdirp yes

* Hearing

I want to be able to place ears in a similar manner to how I place
the eyes. I want to be able to place ears in a unique spatial
position, and receive as output at every tick the FFT of whatever
signals are happening at that point.

Hearing is one of the more difficult senses to simulate, because there
is less support for obtaining the actual sound data that is processed
by jMonkeyEngine3.

jMonkeyEngine's sound system works as follows (a minimal sketch of
the bottom =OpenAL= layer follows this list):

 - jMonkeyEngine uses the =AppSettings= for the particular
   application to determine what sort of =AudioRenderer= should be
   used.
 - although some support is provided for multiple audio rendering
   backends, jMonkeyEngine at the time of this writing will either
   pick no =AudioRenderer= at all, or the =LwjglAudioRenderer=.
 - jMonkeyEngine tries to figure out what sort of system you're
   running and extracts the appropriate native libraries.
 - the =LwjglAudioRenderer= uses the [[http://lwjgl.org/][=LWJGL=]] (LightWeight Java Game
   Library) bindings to interface with a C library called [[http://kcat.strangesoft.net/openal.html][=OpenAL=]].
 - =OpenAL= calculates the 3D sound localization and feeds a stream of
   sound to any of various sound output devices with which it knows
   how to communicate.

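Since everything below the =LwjglAudioRenderer= is plain C, it helps
to see what that bottom layer looks like. The following is a minimal,
illustrative sketch of the =OpenAL= C API, not code from this project;
note that the context owns the one-and-only listener, which is exactly
the limitation discussed below.

#+begin_src C
/* Illustrative sketch of bare OpenAL usage: a context owns the
   single listener, and sources live inside the context. */
#include <AL/al.h>
#include <AL/alc.h>

int main(void){
  ALCdevice  *device  = alcOpenDevice(NULL);  /* default output device */
  ALCcontext *context = alcCreateContext(device, NULL);
  alcMakeContextCurrent(context);

  ALuint source;
  alGenSources(1, &source);
  alSource3f(source, AL_POSITION, 1.0f, 0.0f, 0.0f);
  alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);  /* the single listener */
  /* ... normally one would attach a buffer with alSourcei(source,
     AL_BUFFER, ...) and then call alSourcePlay(source) ... */

  alcMakeContextCurrent(NULL);
  alcDestroyContext(context);
  alcCloseDevice(device);
  return 0;
}
#+end_src
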
A consequence of this is that there's no way to access the actual
sound data produced by =OpenAL=. Even worse, =OpenAL= only supports
one /listener/, which normally isn't a problem for games, but becomes
a problem when trying to make multiple AI creatures that can each hear
the world from a different perspective.

To make many AI creatures in jMonkeyEngine that can each hear the
world from their own perspective, it is necessary to go all the way
back to =OpenAL= and implement support for simulated hearing there.

* Extending =OpenAL=
** =OpenAL= Devices

=OpenAL= goes to great lengths to support many different systems, all
with different sound capabilities and interfaces. It accomplishes this
difficult task by providing code for many different sound backends in
pseudo-objects called /Devices/. There's a device for the Linux Open
Sound System and the Advanced Linux Sound Architecture, there's one
for Direct Sound on Windows, there's even one for Solaris. =OpenAL=
solves the problem of platform independence by providing all these
Devices.

Wrapper libraries such as LWJGL are free to examine the system on
which they are running and then select an appropriate device for that
system.

There are also a few "special" devices that don't interface with any
particular system. These include the Null Device, which doesn't do
anything, and the Wave Device, which writes whatever sound it receives
to a file, if everything has been set up correctly when configuring
=OpenAL= (an example configuration follows).

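For example, =OpenAL Soft= (the implementation being extended here)
can be told to route all sound through its Wave writer backend from
the user's configuration file, =~/.alsoftrc= on Linux. This sketch
assumes =OpenAL Soft='s configuration format; its =alsoftrc.sample=
is the authoritative reference for the key names, and the output file
name is of course arbitrary:

#+begin_example
# ~/.alsoftrc -- send all rendered audio to a .wav file
drivers = wave

[wave]
file = /tmp/openal-output.wav
#+end_example
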
Actual mixing of the sound data happens in the Devices, and they are
the only point in the sound rendering process where this data is
available.

Therefore, in order to support multiple listeners, and get the sound
data in a form that the AIs can use, it is necessary to create a new
Device that supports these features.

** The Send Device
Adding a device to OpenAL is rather tricky -- there are five separate
files in the =OpenAL= source tree that must be modified to do so. I've
documented this process [[./add-new-device.org][here]] for anyone who is interested.

Onward to the actual Device!

Again, my objectives are:

 - Support Multiple Listeners from jMonkeyEngine3
 - Get access to the rendered sound data for further processing from
   clojure.

** =send.c=

** Header
#+srcname: send-header
#+begin_src C
#include "config.h"
#include <stdlib.h>
#include "alMain.h"
#include "AL/al.h"
#include "AL/alc.h"
#include "alSource.h"
#include <jni.h>

//////////////////// Summary

struct send_data;
struct context_data;

static void addContext(ALCdevice *, ALCcontext *);
static void syncContexts(ALCcontext *master, ALCcontext *slave);
static void syncSources(ALsource *master, ALsource *slave,
                        ALCcontext *masterCtx, ALCcontext *slaveCtx);

static void syncSourcei(ALuint master, ALuint slave,
                        ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);
static void syncSourcef(ALuint master, ALuint slave,
                        ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);
static void syncSource3f(ALuint master, ALuint slave,
                         ALCcontext *masterCtx, ALCcontext *ctx2, ALenum param);

static void swapInContext(ALCdevice *, struct context_data *);
static void saveContext(ALCdevice *, struct context_data *);
static void limitContext(ALCdevice *, ALCcontext *);
static void unLimitContext(ALCdevice *);

static void init(ALCdevice *);
static void renderData(ALCdevice *, int samples);

#define UNUSED(x) (void)(x)
#+end_src

The main idea behind the Send device is to take advantage of the fact
that LWJGL only manages one /context/ when using OpenAL. A /context/
is like a container that holds samples and keeps track of where the
listener is. In order to support multiple listeners, the Send device
identifies the LWJGL context as the master context, and creates any
number of slave contexts to represent additional listeners. Every
time the device renders sound, it synchronizes every source from the
master LWJGL context to the slave contexts. Then, it renders each
context separately, using a different listener for each one. The
rendered sound is made available via JNI to jMonkeyEngine.

To recap, the process is as follows (a condensed sketch of one render
cycle follows this list):
 - Set the LWJGL context as "master" in the =init()= method.
 - Create any number of additional contexts via =addContext()=
 - At every call to =renderData()= sync the master context with the
   slave contexts via =syncContexts()=
 - =syncContexts()= calls =syncSources()= to sync all the sources
   which are in the master context.
 - =limitContext()= and =unLimitContext()= make it possible to render
   only one context at a time.

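Condensed into a sketch, one render cycle of the finished device looks
roughly like this (all of these functions are defined, with full state
bookkeeping, in the sections below; this preview is not part of the
device itself):

#+begin_src C
/* A condensed preview of one render cycle -- see renderData() below
   for the real version. */
void render_cycle_sketch(ALCdevice *device, send_data *data, int samples){
  ALuint i;
  /* contexts[0] is the master (LWJGL) context; propagate its
     source state to every slave context. */
  for (i = 1; i < data->numContexts; i++){
    syncContexts(data->contexts[0]->ctx, data->contexts[i]->ctx);
  }
  /* Render each context in isolation, each with its own listener. */
  for (i = 0; i < data->numContexts; i++){
    limitContext(device, data->contexts[i]->ctx);
    swapInContext(device, data->contexts[i]);
    aluMixData(device, data->contexts[i]->renderBuffer, samples);
    saveContext(device, data->contexts[i]);
    unLimitContext(device);
  }
}
#+end_src
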
** Necessary State
#+begin_src C
//////////////////// State

typedef struct context_data {
  ALfloat ClickRemoval[MAXCHANNELS];
  ALfloat PendingClicks[MAXCHANNELS];
  ALvoid *renderBuffer;
  ALCcontext *ctx;
} context_data;

typedef struct send_data {
  ALuint size;
  context_data **contexts;
  ALuint numContexts;
  ALuint maxContexts;
} send_data;
#+end_src

Switching between contexts is not the normal operation of a Device,
and one of the problems with doing so is that a Device normally keeps
around a few pieces of state, such as the =ClickRemoval= array above,
which will become corrupted if the contexts are not rendered in
parallel. The solution is to create a copy of this normally global
device state for each context, and copy it back and forth into and out
of the actual device state whenever a context is rendered.

** Synchronization Macros

#+begin_src C
//////////////////// Context Creation / Synchronization

#define _MAKE_SYNC(NAME, INIT_EXPR, GET_EXPR, SET_EXPR)  \
  void NAME (ALuint sourceID1, ALuint sourceID2,         \
             ALCcontext *ctx1, ALCcontext *ctx2,         \
             ALenum param){                              \
    INIT_EXPR;                                           \
    ALCcontext *current = alcGetCurrentContext();        \
    alcMakeContextCurrent(ctx1);                         \
    GET_EXPR;                                            \
    alcMakeContextCurrent(ctx2);                         \
    SET_EXPR;                                            \
    alcMakeContextCurrent(current);                      \
  }

#define MAKE_SYNC(NAME, TYPE, GET, SET)       \
  _MAKE_SYNC(NAME,                            \
             TYPE value,                      \
             GET(sourceID1, param, &value),   \
             SET(sourceID2, param, value))

#define MAKE_SYNC3(NAME, TYPE, GET, SET)                        \
  _MAKE_SYNC(NAME,                                              \
             TYPE value1; TYPE value2; TYPE value3;,            \
             GET(sourceID1, param, &value1, &value2, &value3),  \
             SET(sourceID2, param, value1, value2, value3))

MAKE_SYNC( syncSourcei,  ALint,   alGetSourcei,  alSourcei);
MAKE_SYNC( syncSourcef,  ALfloat, alGetSourcef,  alSourcef);
MAKE_SYNC3(syncSource3i, ALint,   alGetSource3i, alSource3i);
MAKE_SYNC3(syncSource3f, ALfloat, alGetSource3f, alSource3f);
#+end_src

Setting the state of an =OpenAL= source is done with the =alSourcei=,
=alSourcef=, =alSource3i=, and =alSource3f= functions. In order to
completely synchronize two sources, it is necessary to use all of
them. These macros help to condense the otherwise repetitive
synchronization code involving these similar low-level =OpenAL=
functions.

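For concreteness, expanding =MAKE_SYNC(syncSourcef, ALfloat,
alGetSourcef, alSourcef)= by hand yields the following function:

#+begin_src C
/* Hand-expansion of MAKE_SYNC(syncSourcef, ALfloat,
   alGetSourcef, alSourcef): */
void syncSourcef(ALuint sourceID1, ALuint sourceID2,
                 ALCcontext *ctx1, ALCcontext *ctx2,
                 ALenum param){
  ALfloat value;
  ALCcontext *current = alcGetCurrentContext();
  alcMakeContextCurrent(ctx1);
  alGetSourcef(sourceID1, param, &value);  /* read from the master source */
  alcMakeContextCurrent(ctx2);
  alSourcef(sourceID2, param, value);      /* write to the slave source   */
  alcMakeContextCurrent(current);
}
#+end_src
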
** Source Synchronization
#+begin_src C
void syncSources(ALsource *masterSource, ALsource *slaveSource,
                 ALCcontext *masterCtx, ALCcontext *slaveCtx){
  ALuint master = masterSource->source;
  ALuint slave = slaveSource->source;
  ALCcontext *current = alcGetCurrentContext();

  syncSourcef(master,slave,masterCtx,slaveCtx,AL_PITCH);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_DISTANCE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_ROLLOFF_FACTOR);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_REFERENCE_DISTANCE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MIN_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_MAX_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_GAIN);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_INNER_ANGLE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_CONE_OUTER_ANGLE);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_SEC_OFFSET);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_SAMPLE_OFFSET);
  syncSourcef(master,slave,masterCtx,slaveCtx,AL_BYTE_OFFSET);

  syncSource3f(master,slave,masterCtx,slaveCtx,AL_POSITION);
  syncSource3f(master,slave,masterCtx,slaveCtx,AL_VELOCITY);
  syncSource3f(master,slave,masterCtx,slaveCtx,AL_DIRECTION);

  syncSourcei(master,slave,masterCtx,slaveCtx,AL_SOURCE_RELATIVE);
  syncSourcei(master,slave,masterCtx,slaveCtx,AL_LOOPING);

  alcMakeContextCurrent(masterCtx);
  ALint source_type;
  alGetSourcei(master, AL_SOURCE_TYPE, &source_type);

  // Only static sources are currently synchronized!
  if (AL_STATIC == source_type){
    ALint master_buffer;
    ALint slave_buffer;
    alGetSourcei(master, AL_BUFFER, &master_buffer);
    alcMakeContextCurrent(slaveCtx);
    alGetSourcei(slave, AL_BUFFER, &slave_buffer);
    if (master_buffer != slave_buffer){
      alSourcei(slave, AL_BUFFER, master_buffer);
    }
  }

  // Synchronize the state of the two sources.
  alcMakeContextCurrent(masterCtx);
  ALint masterState;
  ALint slaveState;

  alGetSourcei(master, AL_SOURCE_STATE, &masterState);
  alcMakeContextCurrent(slaveCtx);
  alGetSourcei(slave, AL_SOURCE_STATE, &slaveState);

  if (masterState != slaveState){
    switch (masterState){
    case AL_INITIAL : alSourceRewind(slave); break;
    case AL_PLAYING : alSourcePlay(slave);   break;
    case AL_PAUSED  : alSourcePause(slave);  break;
    case AL_STOPPED : alSourceStop(slave);   break;
    }
  }
  // Restore whatever context was previously active.
  alcMakeContextCurrent(current);
}
#+end_src
This function is long because it has to exhaustively go through all the
possible state that a source can have and make sure that it is the
same between the master and slave sources. I'd like to take this
moment to salute the [[http://connect.creativelabs.com/openal/Documentation/Forms/AllItems.aspx][=OpenAL= Reference Manual]], which provides a very
good description of =OpenAL='s internals.

** Context Synchronization
#+begin_src C
void syncContexts(ALCcontext *master, ALCcontext *slave){
  /* If there aren't sufficient sources in slave to mirror
     the sources in master, create them. */
  ALCcontext *current = alcGetCurrentContext();

  UIntMap *masterSourceMap = &(master->SourceMap);
  UIntMap *slaveSourceMap = &(slave->SourceMap);
  ALuint numMasterSources = masterSourceMap->size;
  ALuint numSlaveSources = slaveSourceMap->size;

  alcMakeContextCurrent(slave);
  if (numSlaveSources < numMasterSources){
    ALuint numMissingSources = numMasterSources - numSlaveSources;
    ALuint newSources[numMissingSources];
    alGenSources(numMissingSources, newSources);
  }

  /* Now, slave is guaranteed to have at least as many sources
     as master. Sync each source from master to the corresponding
     source in slave. */
  int i;
  for(i = 0; i < masterSourceMap->size; i++){
    syncSources((ALsource*)masterSourceMap->array[i].value,
                (ALsource*)slaveSourceMap->array[i].value,
                master, slave);
  }
  alcMakeContextCurrent(current);
}
#+end_src

Most of the hard work in Context Synchronization is done in
=syncSources()=. The only thing that =syncContexts()= has to worry
about is automatically creating new sources whenever a slave context
does not have the same number of sources as the master context. For
example, if the master context has five sources and a slave context
has three, two new sources are generated in the slave before syncing.

** Context Creation
#+begin_src C
static void addContext(ALCdevice *Device, ALCcontext *context){
  send_data *data = (send_data*)Device->ExtraData;
  // expand array if necessary
  if (data->numContexts >= data->maxContexts){
    ALuint newMaxContexts = data->maxContexts*2 + 1;
    // the array holds pointers to context_data
    data->contexts = realloc(data->contexts, newMaxContexts*sizeof(context_data*));
    data->maxContexts = newMaxContexts;
  }
  // create context_data and add it to the main array
  context_data *ctxData;
  ctxData = (context_data*)calloc(1, sizeof(*ctxData));
  ctxData->renderBuffer =
    malloc(BytesFromDevFmt(Device->FmtType) *
           Device->NumChan * Device->UpdateSize);
  ctxData->ctx = context;

  data->contexts[data->numContexts] = ctxData;
  data->numContexts++;
}
#+end_src

Here, the slave context is created, and its data is stored in the
device-wide =ExtraData= structure. The =renderBuffer= that is created
here is where the rendered sound samples for this slave context will
eventually go.

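As a worked sizing example (the values here are hypothetical, but
typical), a device producing 16-bit samples over two channels with an
=UpdateSize= of 1024 sample frames needs a 4096-byte render buffer
per context:

#+begin_src C
/* Hypothetical sizing example -- the values are illustrative only: */
ALuint bytesPerSample = 2;    /* BytesFromDevFmt(DevFmtShort) */
ALuint channels       = 2;    /* Device->NumChan              */
ALuint frames         = 1024; /* Device->UpdateSize           */
ALuint bufferBytes    = bytesPerSample * channels * frames;  /* 4096 */
#+end_src
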
** Context Switching
#+begin_src C
//////////////////// Context Switching

/* A device brings along with it two pieces of state
 * which have to be swapped in and out with each context.
 */
static void swapInContext(ALCdevice *Device, context_data *ctxData){
  memcpy(Device->ClickRemoval, ctxData->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS);
  memcpy(Device->PendingClicks, ctxData->PendingClicks, sizeof(ALfloat)*MAXCHANNELS);
}

static void saveContext(ALCdevice *Device, context_data *ctxData){
  memcpy(ctxData->ClickRemoval, Device->ClickRemoval, sizeof(ALfloat)*MAXCHANNELS);
  memcpy(ctxData->PendingClicks, Device->PendingClicks, sizeof(ALfloat)*MAXCHANNELS);
}

static ALCcontext **currentContext;
static ALuint currentNumContext;

/* By default, all contexts are rendered at once for each call to aluMixData.
 * This function uses the internals of the ALCdevice struct to temporarily
 * cause aluMixData to only render the chosen context.
 */
static void limitContext(ALCdevice *Device, ALCcontext *ctx){
  currentContext = Device->Contexts;
  currentNumContext = Device->NumContexts;
  Device->Contexts = &ctx;
  Device->NumContexts = 1;
}

static void unLimitContext(ALCdevice *Device){
  Device->Contexts = currentContext;
  Device->NumContexts = currentNumContext;
}
#+end_src

=OpenAL= normally renders all contexts in parallel, outputting the
whole result to the buffer. It does this by iterating over the
=Device->Contexts= array and rendering each context to the buffer in
turn. By temporarily setting =Device->NumContexts= to 1 and adjusting
the Device's context list to put the desired context-to-be-rendered
into position 0, we can trick =OpenAL= into rendering each slave
context separately from all the others.

** Main Device Loop
#+begin_src C
//////////////////// Main Device Loop

/* Establish the LWJGL context as the master context, which will
 * be synchronized to all the slave contexts
 */
static void init(ALCdevice *Device){
  ALCcontext *masterContext = alcGetCurrentContext();
  addContext(Device, masterContext);
}


static void renderData(ALCdevice *Device, int samples){
  if(!Device->Connected){return;}
  send_data *data = (send_data*)Device->ExtraData;
  ALCcontext *current = alcGetCurrentContext();

  ALuint i;
  for (i = 1; i < data->numContexts; i++){
    syncContexts(data->contexts[0]->ctx, data->contexts[i]->ctx);
  }

  if ((uint) samples > Device->UpdateSize){
    printf("exceeding internal buffer size; dropping samples\n");
    printf("requested %d; available %d\n", samples, Device->UpdateSize);
    samples = (int) Device->UpdateSize;
  }

  for (i = 0; i < data->numContexts; i++){
    context_data *ctxData = data->contexts[i];
    ALCcontext *ctx = ctxData->ctx;
    alcMakeContextCurrent(ctx);
    limitContext(Device, ctx);
    swapInContext(Device, ctxData);
    aluMixData(Device, ctxData->renderBuffer, samples);
    saveContext(Device, ctxData);
    unLimitContext(Device);
  }
  alcMakeContextCurrent(current);
}
#+end_src

The main loop synchronizes the master LWJGL context with all the slave
contexts, then walks each context, rendering just that context to its
audio-sample storage buffer.

** JNI Methods

At this point, we have the ability to create multiple listeners by
using the master/slave context trick, and the rendered audio data is
waiting patiently in internal buffers, one for each listener. We need
a way to transport this information to Java, and also a way to drive
this device from Java. The following JNI interface code is inspired
by the way LWJGL interfaces with =OpenAL=.

*** step

#+begin_src C
//////////////////// JNI Methods

#include "com_aurellem_send_AudioSend.h"

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nstep
 * Signature: (JI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nstep
(JNIEnv *env, jclass clazz, jlong device, jint samples){
  UNUSED(env);UNUSED(clazz);UNUSED(device);
  renderData((ALCdevice*)((intptr_t)device), samples);
}
#+end_src
This device, unlike most of the other devices in =OpenAL=, does not
render sound unless asked. This enables the system to slow down or
speed up depending on the needs of the AIs who are using it to
listen. If the device tried to render samples in real-time, a
complicated AI whose mind takes 100 seconds of computer time to
simulate 1 second of AI-time would miss almost all of the sound in
its environment. (For example, at 44100 Hz, each call to =step= with
1024 samples advances the simulation by about 23 milliseconds of
sound-time, no matter how long the AI takes to process the result.)

*** getSamples
#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ngetSamples
 * Signature: (JLjava/nio/ByteBuffer;III)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ngetSamples
(JNIEnv *env, jclass clazz, jlong device, jobject buffer, jint position,
 jint samples, jint n){
  UNUSED(clazz);

  ALvoid *buffer_address =
    ((ALbyte *)(((char*)(*env)->GetDirectBufferAddress(env, buffer)) + position));
  ALCdevice *recorder = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)recorder->ExtraData;
  // reject out-of-range context indices (contexts are 0-indexed)
  if ((ALuint)n >= data->numContexts){return;}
  memcpy(buffer_address, data->contexts[n]->renderBuffer,
         BytesFromDevFmt(recorder->FmtType) * recorder->NumChan * samples);
}
#+end_src

This is the transport layer between C and Java that will eventually
allow us to access rendered sound data from clojure.

*** Listener Management

=addListener=, =setNthListenerf=, and =setNthListener3f= are
necessary to change the properties of any listener other than the
master one, since only the listener of the currently active context
is affected by the normal =OpenAL= listener calls.

#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    naddListener
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_naddListener
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(env); UNUSED(clazz);
  //printf("creating new context via naddListener\n");
  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  ALCcontext *new = alcCreateContext(Device, NULL);
  addContext(Device, new);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nsetNthListener3f
 * Signature: (IFFFJI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListener3f
(JNIEnv *env, jclass clazz, jint param,
 jfloat v1, jfloat v2, jfloat v3, jlong device, jint contextNum){
  UNUSED(env);UNUSED(clazz);

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)Device->ExtraData;

  ALCcontext *current = alcGetCurrentContext();
  // reject out-of-range context indices (contexts are 0-indexed)
  if ((ALuint)contextNum >= data->numContexts){return;}
  alcMakeContextCurrent(data->contexts[contextNum]->ctx);
  alListener3f(param, v1, v2, v3);
  alcMakeContextCurrent(current);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    nsetNthListenerf
 * Signature: (IFJI)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_nsetNthListenerf
(JNIEnv *env, jclass clazz, jint param, jfloat v1, jlong device,
 jint contextNum){

  UNUSED(env);UNUSED(clazz);

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  send_data *data = (send_data*)Device->ExtraData;

  ALCcontext *current = alcGetCurrentContext();
  // reject out-of-range context indices (contexts are 0-indexed)
  if ((ALuint)contextNum >= data->numContexts){return;}
  alcMakeContextCurrent(data->contexts[contextNum]->ctx);
  alListenerf(param, v1);
  alcMakeContextCurrent(current);
}
#+end_src

*** Initialization
=initDevice= is called from the Java side after LWJGL has created its
context, and before any calls to =addListener=. It establishes the
LWJGL context as the master context.

=getAudioFormat= is a convenience function that uses JNI to build up a
=javax.sound.sampled.AudioFormat= object from data in the Device. This
way, there is no ambiguity about what the bits created by =step= and
returned by =getSamples= mean.

#+begin_src C
/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ninitDevice
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_com_aurellem_send_AudioSend_ninitDevice
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(env);UNUSED(clazz);
  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  init(Device);
}

/*
 * Class:     com_aurellem_send_AudioSend
 * Method:    ngetAudioFormat
 * Signature: (J)Ljavax/sound/sampled/AudioFormat;
 */
JNIEXPORT jobject JNICALL Java_com_aurellem_send_AudioSend_ngetAudioFormat
(JNIEnv *env, jclass clazz, jlong device){
  UNUSED(clazz);
  jclass AudioFormatClass =
    (*env)->FindClass(env, "javax/sound/sampled/AudioFormat");
  jmethodID AudioFormatConstructor =
    (*env)->GetMethodID(env, AudioFormatClass, "<init>", "(FIIZZ)V");

  ALCdevice *Device = (ALCdevice*) ((intptr_t)device);
  int isSigned;
  switch (Device->FmtType)
    {
    case DevFmtUByte:
    case DevFmtUShort: isSigned = 0; break;
    default :          isSigned = 1;
    }
  float frequency = Device->Frequency;
  int bitsPerFrame = (8 * BytesFromDevFmt(Device->FmtType));
  int channels = Device->NumChan;
  jobject format = (*env)->
    NewObject(
              env,AudioFormatClass,AudioFormatConstructor,
              frequency,
              bitsPerFrame,
              channels,
              isSigned,
              0);
  return format;
}
#+end_src

*** Boring Device management stuff
This code is more-or-less copied verbatim from the other =OpenAL=
backends. It's the basis for =OpenAL='s primitive object system.

#+begin_src C
//////////////////// Device Initialization / Management

static const ALCchar sendDevice[] = "Multiple Audio Send";

static ALCboolean send_open_playback(ALCdevice *device,
                                     const ALCchar *deviceName)
{
  send_data *data;
  // stop any buffering for stdout, so that I can
  // see the printf statements in my terminal immediately
  setbuf(stdout, NULL);

  if(!deviceName)
    deviceName = sendDevice;
  else if(strcmp(deviceName, sendDevice) != 0)
    return ALC_FALSE;
  data = (send_data*)calloc(1, sizeof(*data));
  device->szDeviceName = strdup(deviceName);
  device->ExtraData = data;
  return ALC_TRUE;
}

static void send_close_playback(ALCdevice *device)
{
  send_data *data = (send_data*)device->ExtraData;
  alcMakeContextCurrent(NULL);
  ALuint i;
  // Destroy all slave contexts. LWJGL will take care of
  // its own context.
  for (i = 1; i < data->numContexts; i++){
    context_data *ctxData = data->contexts[i];
    alcDestroyContext(ctxData->ctx);
    free(ctxData->renderBuffer);
    free(ctxData);
  }
  free(data);
  device->ExtraData = NULL;
}

static ALCboolean send_reset_playback(ALCdevice *device)
{
  SetDefaultWFXChannelOrder(device);
  return ALC_TRUE;
}

static void send_stop_playback(ALCdevice *Device){
  UNUSED(Device);
}

static const BackendFuncs send_funcs = {
  send_open_playback,
  send_close_playback,
  send_reset_playback,
  send_stop_playback,
  NULL,
  NULL,  /* These would be filled with functions to */
  NULL,  /* handle capturing audio if we were into  */
  NULL,  /* that sort of thing...                   */
  NULL,
  NULL
};

ALCboolean alc_send_init(BackendFuncs *func_list){
  *func_list = send_funcs;
  return ALC_TRUE;
}

void alc_send_deinit(void){}

void alc_send_probe(enum DevProbe type)
{
  switch(type)
    {
    case DEVICE_PROBE:
      AppendDeviceList(sendDevice);
      break;
    case ALL_DEVICE_PROBE:
      AppendAllDeviceList(sendDevice);
      break;
    case CAPTURE_DEVICE_PROBE:
      break;
    }
}
#+end_src

* The Java interface, =AudioSend=

The Java interface to the Send Device follows naturally from the JNI
definitions. It is included here for completeness. The only thing here
of note is the =deviceID=. This is available from LWJGL, but the only
way to get it is reflection. Unfortunately, there is no way to
control the Send device other than to obtain a pointer to it.

#+include: "../java/src/com/aurellem/send/AudioSend.java" src java :exports code

* Finally, Ears in clojure!

Now that the infrastructure is complete (modulo a few patches to
jMonkeyEngine3 to support accessing this modified version of =OpenAL=
that are not worth discussing), the clojure ear abstraction is rather
simple. Just as there were =SceneProcessors= for vision, there are
now =SoundProcessors= for hearing.

#+include: "../../jmeCapture/src/com/aurellem/capture/audio/SoundProcessor.java" src java

#+srcname: ears
#+begin_src clojure
(ns cortex.hearing
  "Simulate the sense of hearing in jMonkeyEngine3. Enables multiple
   listeners at different positions in the same world. Passes vectors
   of floats in the range [-1.0 -- 1.0] in PCM format to any arbitrary
   function."
  {:author "Robert McIntyre"}
  (:use (cortex world util))
  (:import java.nio.ByteBuffer)
  (:import org.tritonus.share.sampled.FloatSampleTools)
  (:import com.aurellem.capture.audio.SoundProcessor)
  (:import javax.sound.sampled.AudioFormat))

(defn sound-processor
  "Deals with converting ByteBuffers into Vectors of floats so that
   the continuation functions can be defined in terms of immutable
   stuff."
  [continuation]
  (proxy [SoundProcessor] []
    (cleanup [])
    (process
      [#^ByteBuffer audioSamples numSamples #^AudioFormat audioFormat]
      (let [bytes (byte-array numSamples)
            floats (float-array numSamples)]
        (.get audioSamples bytes 0 numSamples)
        (FloatSampleTools/byte2floatInterleaved
         bytes 0 floats 0
         (/ numSamples (.getFrameSize audioFormat)) audioFormat)
        (continuation
         (vec floats))))))

(defn add-ear
  "Add an ear to the world. The continuation function will be called
   on the FFT of the sounds which the ear hears in the given
   timeframe. Sound is 3D."
  [world listener continuation]
  (let [renderer (.getAudioRenderer world)]
    (.addListener renderer listener)
    (.registerSoundProcessor renderer listener
                             (sound-processor continuation))
    listener))
#+end_src

* Example

#+srcname: test-hearing
#+begin_src clojure :results silent
(ns test.hearing
  (:use (cortex world util hearing))
  (:import (com.jme3.audio AudioNode Listener))
  (:import com.jme3.scene.Node))

(defn setup-fn [world]
  (let [listener (Listener.)]
    (add-ear world listener #(println-repl (nth % 0)))))

(defn play-sound [node world value]
  (if (not value)
    (do
      (.playSource (.getAudioRenderer world) node))))

(defn test-basic-hearing []
  (.start
   (let [node1 (AudioNode. (asset-manager) "Sounds/pure.wav" false false)]
     (world
      (Node.)
      {"key-space" (partial play-sound node1)}
      setup-fn
      no-op))))
#+end_src

This extremely basic program prints out the first sample it encounters
at every time stamp. You can see the rendered sound being printed at
the REPL.

* COMMENT Code Generation

#+begin_src clojure :tangle ../../cortex/src/cortex/hearing.clj
<<ears>>
#+end_src

#+begin_src clojure :tangle ../../cortex/src/test/hearing.clj
<<test-hearing>>
#+end_src

#+begin_src C :tangle ../Alc/backends/send.c
<<send>>
#+end_src