package com.aurellem.capture.audio;

import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Vector;
import java.util.concurrent.CountDownLatch;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.lwjgl.LWJGLException;
import org.lwjgl.openal.AL;
import org.lwjgl.openal.AL10;
import org.lwjgl.openal.ALCdevice;
import org.lwjgl.openal.OpenALException;

import com.jme3.audio.Listener;
import com.jme3.audio.lwjgl.LwjglAudioRenderer;
import com.jme3.math.Vector3f;
import com.jme3.util.BufferUtils;

/**
 * An audio renderer that opens the native "Multiple Audio Send" OpenAL device
 * and mirrors the main LWJGL context into per-listener slave contexts. Rendered
 * samples for each listener are pulled back over JNI and handed to registered
 * {@link SoundProcessor}s.
 */
public class AudioSend
    extends LwjglAudioRenderer implements MultiListener {

    private static final Logger logger =
        Logger.getLogger(AudioSend.class.getName());

    /**
     * Keeps track of all the listeners which have been registered so far.
     * The first element is null, which represents the zeroth LWJGL listener
     * which is created automatically.
     */
    public Vector<Listener> listeners = new Vector<Listener>();

    /**
     * The OpenAL device ID required by the native methods below; it is
     * obtained through reflection in {@link #initInThread()}.
     */
    private long deviceID;

    /**
     * Released once {@link #deviceID} and {@link #listeners} are properly
     * initialized, so no additional listeners are added before the device
     * is ready.
     */
    private final CountDownLatch latch = new CountDownLatch(1);

    /**
     * Each listener (including the main LWJGL listener, keyed by null) can be
     * registered with a SoundProcessor, which this renderer will call whenever
     * there is new audio data to be processed.
     */
    public HashMap<Listener, SoundProcessor> soundProcessorMap =
        new HashMap<Listener, SoundProcessor>();

    /** Bytes consumed per sample by the native renderer. */
    public final static int BYTES_PER_SAMPLE = 4;

    /** Scratch buffer handed to the native getSamples() call. */
    private ByteBuffer buffer = BufferUtils.createByteBuffer(4096);

    @Override
    public void initialize() {
        super.initialize();
        // Slot 0 stands in for the automatically created LWJGL listener.
        listeners.add(null);
    }

    /**
     * Blocks until {@link #initInThread()} has finished initializing the
     * device. Restores the interrupt flag if the wait is interrupted.
     */
    private void waitForInit() {
        try {
            latch.await();
        } catch (InterruptedException e) {
            // Preserve interrupt status for callers higher up the stack.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * Create a new slave context on the recorder device which will render all
     * the sounds in the main LWJGL context with respect to this listener.
     *
     * @param l the listener whose perspective the new context renders
     */
    public void addListener(Listener l) {
        waitForInit();
        this.addListener();
        this.listeners.add(l);
    }

    /**
     * Whenever new data is rendered in the perspective of this listener,
     * this renderer will send that data to the SoundProcessor of your
     * choosing.
     */
    public void registerSoundProcessor(Listener l, SoundProcessor sp) {
        this.soundProcessorMap.put(l, sp);
    }

    /**
     * Registers a SoundProcessor for the main LWJGL context. If all you want
     * to do is record the sound you would normally hear in your application,
     * then this is the only method you have to worry about.
     */
    public void registerSoundProcessor(SoundProcessor sp) {
        // null is the key reserved for the default (main LWJGL) listener.
        this.soundProcessorMap.put(null, sp);
    }

    //////////// Native Methods

    /**
     * Establishes the LWJGL context as the context which will be copied to
     * all other contexts. It must be called before any calls to
     * {@link #addListener()}.
     */
    public void initDevice() {
        ninitDevice(this.deviceID);
    }

    public static native void ninitDevice(long device);

    /**
     * The send device does not automatically process sound. This step
     * function causes the desired number of samples to be processed for each
     * listener; the results are then available via {@link #getSamples}.
     *
     * @param samples number of samples to render for every context
     */
    public void step(int samples) {
        nstep(this.deviceID, samples);
    }

    public static native void nstep(long device, int samples);

    /**
     * Retrieve the final rendered sound for a particular listener;
     * contextNum == 0 is the main LWJGL context.
     *
     * @param buffer     destination, filled starting at its current position
     * @param samples    number of samples to copy
     * @param contextNum which slave context to read from
     */
    public void getSamples(ByteBuffer buffer, int samples, int contextNum) {
        ngetSamples(this.deviceID, buffer, buffer.position(), samples, contextNum);
    }

    public static native void ngetSamples(
        long device, ByteBuffer buffer, int position, int samples, int contextNum);

    /**
     * Create an additional listener on the recorder device. The device itself
     * manages this listener and synchronizes it with the main LWJGL context.
     * Processed samples for it are available via {@link #getSamples} with
     * contextNum equal to the number of times this method has been called.
     */
    public void addListener() {
        naddListener(this.deviceID);
    }

    public static native void naddListener(long device);

    /**
     * Internally calls alListener3f in the appropriate slave context and
     * updates that context's listener parameters. Calling this for a number
     * greater than the current number of slave contexts has no effect.
     *
     * @param pname      OpenAL listener parameter (e.g. AL10.AL_POSITION)
     * @param v1         first component
     * @param v2         second component
     * @param v3         third component
     * @param contextNum which slave context to update
     */
    public void setNthListener3f(int pname, float v1, float v2, float v3, int contextNum) {
        nsetNthListener3f(pname, v1, v2, v3, this.deviceID, contextNum);
    }

    public static native void nsetNthListener3f(
        int pname, float v1, float v2, float v3, long device, int contextNum);

    /**
     * Internally calls alListenerf in the appropriate slave context and
     * updates that context's listener parameters. Calling this for a number
     * greater than the current number of slave contexts has no effect.
     *
     * @param pname      OpenAL listener parameter (e.g. AL10.AL_GAIN)
     * @param v1         parameter value
     * @param contextNum which slave context to update
     */
    public void setNthListenerf(int pname, float v1, int contextNum) {
        nsetNthListenerf(pname, v1, this.deviceID, contextNum);
    }

    public static native void nsetNthListenerf(
        int pname, float v1, long device, int contextNum);

    /**
     * Instead of taking whatever device is available on the system, this call
     * creates the "Multiple Audio Send" device, which supports multiple
     * listeners in a limited capacity. For each listener, the device renders
     * not to the sound card but to buffers made available via JNI.
     */
    @Override
    public void initInThread() {
        try {
            if (!AL.isCreated()) {
                AL.create("Multiple Audio Send", 44100, 60, false);
            }
        } catch (OpenALException ex) {
            logger.log(Level.SEVERE, "Failed to load audio library", ex);
            System.exit(1);
            return;
        } catch (LWJGLException ex) {
            logger.log(Level.SEVERE, "Failed to load audio library", ex);
            System.exit(1);
            return;
        }
        super.initInThread();

        ALCdevice device = AL.getDevice();

        // Use reflection to grab the native pointer of our device for use by
        // the JNI calls; LWJGL does not expose it publicly.
        try {
            Field deviceIDField = ALCdevice.class.getDeclaredField("device");
            deviceIDField.setAccessible(true);
            try {
                deviceID = (Long) deviceIDField.get(device);
            } catch (IllegalArgumentException e) {
                e.printStackTrace();
            } catch (IllegalAccessException e) {
                e.printStackTrace();
            }
            deviceIDField.setAccessible(false);
        } catch (SecurityException e) {
            e.printStackTrace();
        } catch (NoSuchFieldException e) {
            e.printStackTrace();
        }

        // The LWJGL context must be established as the master context before
        // any other listeners can be created on this device.
        initDevice();
        // Everything is initialized; it is now safe to add more listeners.
        latch.countDown();
    }

    @Override
    public void cleanup() {
        for (SoundProcessor sp : this.soundProcessorMap.values()) {
            sp.cleanup();
        }
        super.cleanup();
    }

    /** Pushes each registered listener's transform and gain to its context. */
    public void updateAllListeners() {
        for (int i = 0; i < this.listeners.size(); i++) {
            Listener lis = this.listeners.get(i);
            if (null != lis) {
                Vector3f location = lis.getLocation();
                Vector3f velocity = lis.getVelocity();
                Vector3f orientation = lis.getUp();
                float gain = lis.getVolume();
                setNthListener3f(AL10.AL_POSITION,
                    location.x, location.y, location.z, i);
                setNthListener3f(AL10.AL_VELOCITY,
                    velocity.x, velocity.y, velocity.z, i);
                // NOTE(review): standard OpenAL AL_ORIENTATION takes six
                // floats ("at" + "up" vectors via alListenerfv); only the up
                // vector is sent here. Confirm the native send device accepts
                // a 3-float orientation before relying on this.
                setNthListener3f(AL10.AL_ORIENTATION,
                    orientation.x, orientation.y, orientation.z, i);
                setNthListenerf(AL10.AL_GAIN, gain, i);
            }
        }
    }

    /**
     * Renders tpf seconds' worth of audio for every listener and forwards the
     * resulting samples to each listener's registered SoundProcessor.
     *
     * @param tpf time per frame, in seconds
     */
    public void dispatchAudio(float tpf) {
        int samplesToGet = (int) (tpf * 44100);
        waitForInit();

        step(samplesToGet);
        updateAllListeners();

        for (int i = 0; i < this.listeners.size(); i++) {
            buffer.clear();
            // NOTE(review): buffer capacity is 4096 bytes; at low frame rates
            // samplesToGet * BYTES_PER_SAMPLE could exceed it -- verify the
            // native side bounds the copy, or size the buffer per frame.
            this.getSamples(buffer, samplesToGet, i);
            SoundProcessor sp =
                this.soundProcessorMap.get(this.listeners.get(i));
            if (null != sp) {
                sp.process(buffer, samplesToGet * BYTES_PER_SAMPLE);
            }
        }
    }

    @Override
    public void update(float tpf) {
        super.update(tpf);
        dispatchAudio(tpf);
    }
}