# HG changeset patch
# User Robert McIntyre
# Date 1328988326 25200
# Node ID 23e3df41db3c559cc97794218cf6795ce86b4848
# Parent 155c70b7e6ded7c4d0ecc81186fc626a724f329a
reformatting for web

diff -r 155c70b7e6de -r 23e3df41db3c src/com/aurellem/capture/audio/AudioSendRenderer.java
--- a/src/com/aurellem/capture/audio/AudioSendRenderer.java	Fri Feb 10 04:07:05 2012 -0700
+++ b/src/com/aurellem/capture/audio/AudioSendRenderer.java	Sat Feb 11 12:25:26 2012 -0700
@@ -27,106 +27,113 @@
 public class AudioSendRenderer
-    extends LwjglAudioRenderer implements MultiListener {
+    extends LwjglAudioRenderer implements MultiListener {
 
-    private AudioSend audioSend;
-    private AudioFormat outFormat;// = new AudioFormat(44100.0f, 32, 1, true, false);
+    private AudioSend audioSend;
+    private AudioFormat outFormat;
 
-    /**
-     * Keeps track of all the listeners which have been registered so far.
-     * The first element is null, which represents the zeroth
-     * LWJGL listener which is created automatically.
-     */
-    public Vector listeners = new Vector();
+    /**
+     * Keeps track of all the listeners which have been registered
+     * so far. The first element is null, which
+     * represents the zeroth LWJGL listener which is created
+     * automatically.
+     */
+    public Vector listeners = new Vector();
 
-    public void initialize(){
-        super.initialize();
-        listeners.add(null);
+    public void initialize(){
+        super.initialize();
+        listeners.add(null);
+    }
+
+    /**
+     * This is to call the native methods which require the OpenAL
+     * device ID. Currently it is obtained through reflection.
+     */
+    private long deviceID;
+
+    /**
+     * To ensure that deviceID and
+     * listeners are properly initialized before any
+     * additional listeners are added.
+     */
+    private CountDownLatch latch = new CountDownLatch(1);
+
+    /**
+     * Each listener (including the main LWJGL listener) can be
+     * registered with a SoundProcessor, which this
+     * Renderer will call whenever there is new audio data to be
+     * processed.
+     */
+    public HashMap soundProcessorMap =
+        new HashMap();
+
+    /**
+     * Create a new slave context on the recorder device which
+     * will render all the sounds in the main LWJGL context with
+     * respect to this listener.
+     */
+    public void addListener(Listener l) {
+        try {this.latch.await();}
+        catch (InterruptedException e) {e.printStackTrace();}
+        audioSend.addListener();
+        this.listeners.add(l);
+        l.setRenderer(this);
+    }
+
+    /**
+     * Whenever new data is rendered in the perspective of this
+     * listener, this Renderer will send that data to the
+     * SoundProcessor of your choosing.
+     */
+    public void registerSoundProcessor(Listener l, SoundProcessor sp) {
+        this.soundProcessorMap.put(l, sp);
+    }
+
+    /**
+     * Registers a SoundProcessor for the main LWJGL context. If all
+     * you want to do is record the sound you would normally hear in
+     * your application, then this is the only method you have to
+     * worry about.
+     */
+    public void registerSoundProcessor(SoundProcessor sp){
+        // register a sound processor for the default listener.
+        this.soundProcessorMap.put(null, sp);
+    }
+
+    private static final Logger logger =
+        Logger.getLogger(AudioSendRenderer.class.getName());
+
+    /**
+     * Instead of taking whatever device is available on the system,
+     * this call creates the "Multiple Audio Send" device, which
+     * supports multiple listeners in a limited capacity. For each
+     * listener, the device renders it not to the sound device, but
+     * instead to buffers which it makes available via JNI.
+     */
+    public void initInThread(){
+        try{
+            switch (JmeSystem.getPlatform()){
+            case Windows64:
+                Natives.extractNativeLib("windows/audioSend",
+                                         "OpenAL64", true, true);
+                break;
+            case Windows32:
+                Natives.extractNativeLib("windows/audioSend",
+                                         "OpenAL32", true, true);
+                break;
+            case Linux64:
+                Natives.extractNativeLib("linux/audioSend",
+                                         "openal64", true, true);
+                break;
+            case Linux32:
+                Natives.extractNativeLib("linux/audioSend",
+                                         "openal", true, true);
+                break;
+            }
         }
-
-    /**
-     * This is to call the native methods which require the OpenAL device ID.
-     * currently it is obtained through reflection.
-     */
-    private long deviceID;
-
-    /**
-     * To ensure that deviceID and listeners are
-     * properly initialized before any additional listeners are added.
-     */
-    private CountDownLatch latch = new CountDownLatch(1);
-
-    /**
-     * Each listener (including the main LWJGL listener) can be registered
-     * with a SoundProcessor, which this Renderer will call whenever
-     * there is new audio data to be processed.
-     */
-    public HashMap soundProcessorMap =
-        new HashMap();
-
-
-    /**
-     * Create a new slave context on the recorder device which will render all the
-     * sounds in the main LWJGL context with respect to this listener.
-     */
-    public void addListener(Listener l) {
-        try {this.latch.await();}
-        catch (InterruptedException e) {e.printStackTrace();}
-        audioSend.addListener();
-        this.listeners.add(l);
-        l.setRenderer(this);
-    }
-
-    /**
-     * Whenever new data is rendered in the perspective of this listener,
-     * this Renderer will send that data to the SoundProcessor of your choosing.
-     */
-    public void registerSoundProcessor(Listener l, SoundProcessor sp) {
-        this.soundProcessorMap.put(l, sp);
-    }
-
-    /**
-     * Registers a SoundProcessor for the main LWJGL context. IF all you want to
-     * do is record the sound you would normally hear in your application, then
-     * this is the only method you have to worry about.
-     */
-    public void registerSoundProcessor(SoundProcessor sp){
-        // register a sound processor for the default listener.
-        this.soundProcessorMap.put(null, sp);
-    }
-
-    private static final Logger logger =
-        Logger.getLogger(AudioSendRenderer.class.getName());
+        catch (IOException ex) {ex.printStackTrace();}
 
-
-
-    /**
-     * Instead of taking whatever device is available on the system, this call
-     * creates the "Multiple Audio Send" device, which supports multiple listeners in a limited
-     * capacity. For each listener, the device renders it not to the sound device, but
-     * instead to buffers which it makes available via JNI.
-     */
-    public void initInThread(){
-
-        try{
-            switch (JmeSystem.getPlatform()){
-            case Windows64:
-                Natives.extractNativeLib("windows/audioSend", "OpenAL64", true, true);
-                break;
-            case Windows32:
-                Natives.extractNativeLib("windows/audioSend", "OpenAL32", true, true);
-                break;
-            case Linux64:
-                Natives.extractNativeLib("linux/audioSend", "openal64", true, true);
-                break;
-            case Linux32:
-                Natives.extractNativeLib("linux/audioSend", "openal", true, true);
-                break;
-            }
-        }
-        catch (IOException ex) {ex.printStackTrace();}
-
-        try{
+        try{
             if (!AL.isCreated()){
                 AL.create("Multiple Audio Send", 44100, 60, false);
             }
@@ -139,95 +146,100 @@
             System.exit(1);
             return;
         }
-        super.initInThread();
+        super.initInThread();
 
-        ALCdevice device = AL.getDevice();
+        ALCdevice device = AL.getDevice();
 
-        // RLM: use reflection to grab the ID of our device for use later.
-        try {
-            Field deviceIDField;
-            deviceIDField = ALCdevice.class.getDeclaredField("device");
-            deviceIDField.setAccessible(true);
-            try {deviceID = (Long)deviceIDField.get(device);}
-            catch (IllegalArgumentException e) {e.printStackTrace();}
-            catch (IllegalAccessException e) {e.printStackTrace();}
-            deviceIDField.setAccessible(false);}
-        catch (SecurityException e) {e.printStackTrace();}
-        catch (NoSuchFieldException e) {e.printStackTrace();}
+        // RLM: use reflection to grab the ID of our device for use
+        // later.
+        try {
+            Field deviceIDField;
+            deviceIDField = ALCdevice.class.getDeclaredField("device");
+            deviceIDField.setAccessible(true);
+            try {deviceID = (Long)deviceIDField.get(device);}
+            catch (IllegalArgumentException e) {e.printStackTrace();}
+            catch (IllegalAccessException e) {e.printStackTrace();}
+            deviceIDField.setAccessible(false);}
+        catch (SecurityException e) {e.printStackTrace();}
+        catch (NoSuchFieldException e) {e.printStackTrace();}
 
-        this.audioSend = new AudioSend(this.deviceID);
-        this.outFormat = audioSend.getAudioFormat();
-        initBuffer();
+        this.audioSend = new AudioSend(this.deviceID);
+        this.outFormat = audioSend.getAudioFormat();
+        initBuffer();
 
-        // The LWJGL context must be established as the master context before
-        // any other listeners can be created on this device.
-        audioSend.initDevice();
-        // Now, everything is initialized, and it is safe to add more listeners.
-        latch.countDown();
+        // The LWJGL context must be established as the master context
+        // before any other listeners can be created on this device.
+        audioSend.initDevice();
+        // Now, everything is initialized, and it is safe to add more
+        // listeners.
+        latch.countDown();
+    }
+
+    public void cleanup(){
+        for(SoundProcessor sp : this.soundProcessorMap.values()){
+            sp.cleanup();
         }
+        super.cleanup();
+    }
+
+    public void updateAllListeners(){
+        for (int i = 0; i < this.listeners.size(); i++){
+            Listener lis = this.listeners.get(i);
+            if (null != lis){
+                Vector3f location = lis.getLocation();
+                Vector3f velocity = lis.getVelocity();
+                Vector3f orientation = lis.getUp();
+                float gain = lis.getVolume();
+                audioSend.setNthListener3f
+                    (AL10.AL_POSITION,
+                     location.x, location.y, location.z, i);
+                audioSend.setNthListener3f
+                    (AL10.AL_VELOCITY,
+                     velocity.x, velocity.y, velocity.z, i);
+                audioSend.setNthListener3f
+                    (AL10.AL_ORIENTATION,
+                     orientation.x, orientation.y, orientation.z, i);
+                audioSend.setNthListenerf(AL10.AL_GAIN, gain, i);
+            }
+        }
+    }
+    private ByteBuffer buffer;
+
+    public static final int MIN_FRAMERATE = 10;
 
-    public void cleanup(){
-        for(SoundProcessor sp : this.soundProcessorMap.values()){
-            sp.cleanup();
-        }
-        super.cleanup();
+    private void initBuffer(){
+        int bufferSize =
+            (int)(this.outFormat.getSampleRate() /
+                  ((float)MIN_FRAMERATE)) *
+            this.outFormat.getFrameSize();
+
+        this.buffer = BufferUtils.createByteBuffer(bufferSize);
+    }
+
+    public void dispatchAudio(float tpf){
+
+        int samplesToGet = (int) (tpf * outFormat.getSampleRate());
+        try {latch.await();}
+        catch (InterruptedException e) {e.printStackTrace();}
+        audioSend.step(samplesToGet);
+        updateAllListeners();
+
+        for (int i = 0; i < this.listeners.size(); i++){
+            buffer.clear();
+            audioSend.getSamples(buffer, samplesToGet, i);
+            SoundProcessor sp =
+                this.soundProcessorMap.get(this.listeners.get(i));
+            if (null != sp){
+                sp.process
+                    (buffer,
+                     samplesToGet*outFormat.getFrameSize(), outFormat);}
         }
-
-    public void updateAllListeners(){
-        for (int i = 0; i < this.listeners.size(); i++){
-            Listener lis = this.listeners.get(i);
-            if (null != lis){
-                Vector3f location = lis.getLocation();
-                Vector3f velocity = lis.getVelocity();
-                Vector3f orientation = lis.getUp();
-                float gain = lis.getVolume();
-                audioSend.setNthListener3f(AL10.AL_POSITION,
-                    location.x, location.y, location.z, i);
-                audioSend.setNthListener3f(AL10.AL_VELOCITY,
-                    velocity.x, velocity.y, velocity.z, i);
-                audioSend.setNthListener3f(AL10.AL_ORIENTATION,
-                    orientation.x, orientation.y, orientation.z, i);
-                audioSend.setNthListenerf(AL10.AL_GAIN, gain, i);
-            }
-        }
-    }
-
-
-    private ByteBuffer buffer;;
-
-    public static final int MIN_FRAMERATE = 10;
-
-    private void initBuffer(){
-        int bufferSize = (int)(this.outFormat.getSampleRate() / ((float)MIN_FRAMERATE)) *
-            this.outFormat.getFrameSize();
-        this.buffer = BufferUtils.createByteBuffer(bufferSize);
-    }
-    /*
-
-    */
-    public void dispatchAudio(float tpf){
+    }
 
-        int samplesToGet = (int) (tpf * outFormat.getSampleRate());
-        try {latch.await();}
-        catch (InterruptedException e) {e.printStackTrace();}
-        audioSend.step(samplesToGet);
-        updateAllListeners();
-
-        for (int i = 0; i < this.listeners.size(); i++){
-            buffer.clear();
-            audioSend.getSamples(buffer, samplesToGet, i);
-            SoundProcessor sp =
-                this.soundProcessorMap.get(this.listeners.get(i));
-            if (null != sp){sp.process(buffer, samplesToGet*outFormat.getFrameSize(), outFormat);}
-        }
-
-    }
-
-    public void update(float tpf){
-        super.update(tpf);
+    public void update(float tpf){
+        super.update(tpf);
         dispatchAudio(tpf);
-    }
-
+    }
 }
diff -r 155c70b7e6de -r 23e3df41db3c src/com/aurellem/capture/audio/SoundProcessor.java
--- a/src/com/aurellem/capture/audio/SoundProcessor.java	Fri Feb 10 04:07:05 2012 -0700
+++ b/src/com/aurellem/capture/audio/SoundProcessor.java	Sat Feb 11 12:25:26 2012 -0700
@@ -7,10 +7,9 @@
 public interface SoundProcessor {
 
     /**
-     * Called when the SoundProcessor is being destroyed, and
-     * there are no more samples to process. This happens at the
-     * latest when the Application is shutting down.
-     *
+     * Called when the SoundProcessor is being destroyed, and there
+     * are no more samples to process. This happens at the latest
+     * when the Application is shutting down.
     */
    void cleanup();
 
@@ -22,7 +21,8 @@
     *
     * @param audioSamples a ByteBuffer containing processed audio
     * samples
-    * @param numSamples the number of samples, in bytes, that are valid
+    * @param numSamples the number of samples, in bytes, that are
+    * valid
     * @param format the format of the audio samples in audioSamples
     */
    void process(ByteBuffer audioSamples, int numSamples, AudioFormat format);
diff -r 155c70b7e6de -r 23e3df41db3c src/com/aurellem/capture/examples/Advanced.java
--- a/src/com/aurellem/capture/examples/Advanced.java	Fri Feb 10 04:07:05 2012 -0700
+++ b/src/com/aurellem/capture/examples/Advanced.java	Sat Feb 11 12:25:26 2012 -0700
@@ -54,253 +54,265 @@
 public class Advanced extends SimpleApplication {
 
-    /**
-     * You will see three grey cubes, a blue sphere, and a path which
-     * circles each cube. The blue sphere is generating a constant
-     * monotone sound as it moves along the track. Each cube is
-     * listening for sound; when a cube hears sound whose intensity is
-     * greater than a certain threshold, it changes its color from
-     * grey to green.
-     *
-     * Each cube is also saving whatever it hears to a file. The
-     * scene from the perspective of the viewer is also saved to a
-     * video file. When you listen to each of the sound files
-     * alongside the video, the sound will get louder when the sphere
-     * approaches the cube that generated that sound file. This
-     * shows that each listener is hearing the world from its own
-     * perspective.
-     *
-     */
-    public static void main(String[] args) {
-        Advanced app = new Advanced();
-        AppSettings settings = new AppSettings(true);
-        settings.setAudioRenderer(AurellemSystemDelegate.SEND);
-        JmeSystem.setSystemDelegate(new AurellemSystemDelegate());
-        app.setSettings(settings);
-        app.setShowSettings(false);
-        app.setPauseOnLostFocus(false);
+    /**
+     * You will see three grey cubes, a blue sphere, and a path which
+     * circles each cube. The blue sphere is generating a constant
+     * monotone sound as it moves along the track. Each cube is
+     * listening for sound; when a cube hears sound whose intensity is
+     * greater than a certain threshold, it changes its color from
+     * grey to green.
+     *
+     * Each cube is also saving whatever it hears to a file. The
+     * scene from the perspective of the viewer is also saved to a
+     * video file. When you listen to each of the sound files
+     * alongside the video, the sound will get louder when the sphere
+     * approaches the cube that generated that sound file. This
+     * shows that each listener is hearing the world from its own
+     * perspective.
+     *
+     */
+    public static void main(String[] args) {
+        Advanced app = new Advanced();
+        AppSettings settings = new AppSettings(true);
+        settings.setAudioRenderer(AurellemSystemDelegate.SEND);
+        JmeSystem.setSystemDelegate(new AurellemSystemDelegate());
+        app.setSettings(settings);
+        app.setShowSettings(false);
+        app.setPauseOnLostFocus(false);
 
-        try {
-            //Capture.captureVideo(app, File.createTempFile("advanced",".avi"));
-            Capture.captureAudio(app, File.createTempFile("advanced", ".wav"));
-        }
-        catch (IOException e) {e.printStackTrace();}
+        try {
+            //Capture.captureVideo(app, File.createTempFile("advanced",".avi"));
+            Capture.captureAudio(app, File.createTempFile("advanced",".wav"));
+        }
+        catch (IOException e) {e.printStackTrace();}
 
-        app.start();
+        app.start();
+    }
+
+    private Geometry bell;
+    private Geometry ear1;
+    private Geometry ear2;
+    private Geometry ear3;
+    private AudioNode music;
+    private MotionTrack motionControl;
+    private IsoTimer motionTimer = new IsoTimer(60);
+
+    private Geometry makeEar(Node root, Vector3f position){
+        Material mat = new Material(assetManager,
+                                    "Common/MatDefs/Misc/Unshaded.j3md");
+        Geometry ear = new Geometry("ear", new Box(1.0f, 1.0f, 1.0f));
+        ear.setLocalTranslation(position);
+        mat.setColor("Color", ColorRGBA.Green);
+        ear.setMaterial(mat);
+        root.attachChild(ear);
+        return ear;
+    }
+
+    private Vector3f[] path = new Vector3f[]{
+        // loop 1
+        new Vector3f(0, 0, 0),
+        new Vector3f(0, 0, -10),
+        new Vector3f(-2, 0, -14),
+        new Vector3f(-6, 0, -20),
+        new Vector3f(0, 0, -26),
+        new Vector3f(6, 0, -20),
+        new Vector3f(0, 0, -14),
+        new Vector3f(-6, 0, -20),
+        new Vector3f(0, 0, -26),
+        new Vector3f(6, 0, -20),
+        // loop 2
+        new Vector3f(5, 0, -5),
+        new Vector3f(7, 0, 1.5f),
+        new Vector3f(14, 0, 2),
+        new Vector3f(20, 0, 6),
+        new Vector3f(26, 0, 0),
+        new Vector3f(20, 0, -6),
+        new Vector3f(14, 0, 0),
+        new Vector3f(20, 0, 6),
+        new Vector3f(26, 0, 0),
+        new Vector3f(20, 0, -6),
+        new Vector3f(14, 0, 0),
+        // loop 3
+        new Vector3f(8, 0, 7.5f),
+        new Vector3f(7, 0, 10.5f),
+        new Vector3f(6, 0, 20),
+        new Vector3f(0, 0, 26),
+        new Vector3f(-6, 0, 20),
+        new Vector3f(0, 0, 14),
+        new Vector3f(6, 0, 20),
+        new Vector3f(0, 0, 26),
+        new Vector3f(-6, 0, 20),
+        new Vector3f(0, 0, 14),
+        // begin ellipse
+        new Vector3f(16, 5, 20),
+        new Vector3f(0, 0, 26),
+        new Vector3f(-16, -10, 20),
+        new Vector3f(0, 0, 14),
+        new Vector3f(16, 20, 20),
+        new Vector3f(0, 0, 26),
+        new Vector3f(-10, -25, 10),
+        new Vector3f(-10, 0, 0),
+        // come at me!
+        new Vector3f(-28.00242f, 48.005623f, -34.648228f),
+        new Vector3f(0, 0 , -20),
+    };
+
+    private void createScene() {
+        Material mat = new Material(assetManager,
+                                    "Common/MatDefs/Misc/Unshaded.j3md");
+        bell = new Geometry( "sound-emitter" , new Sphere(15,15,1));
+        mat.setColor("Color", ColorRGBA.Blue);
+        bell.setMaterial(mat);
+        rootNode.attachChild(bell);
+
+        ear1 = makeEar(rootNode, new Vector3f(0, 0 ,-20));
+        ear2 = makeEar(rootNode, new Vector3f(0, 0 ,20));
+        ear3 = makeEar(rootNode, new Vector3f(20, 0 ,0));
+
+        MotionPath track = new MotionPath();
+
+        for (Vector3f v : path){
+            track.addWayPoint(v);
+        }
+        track.setCurveTension(0.80f);
+
+        motionControl = new MotionTrack(bell,track);
+        // for now, use reflection to change the timer...
+        // motionControl.setTimer(new IsoTimer(60));
+
+        try {
+            Field timerField;
+            timerField =
+                AbstractCinematicEvent.class.getDeclaredField("timer");
+            timerField.setAccessible(true);
+            try {timerField.set(motionControl, motionTimer);}
+            catch (IllegalArgumentException e) {e.printStackTrace();}
+            catch (IllegalAccessException e) {e.printStackTrace();}
+        }
+        catch (SecurityException e) {e.printStackTrace();}
+        catch (NoSuchFieldException e) {e.printStackTrace();}
+
+
+        motionControl.setDirectionType
+            (MotionTrack.Direction.PathAndRotation);
+        motionControl.setRotation
+            (new Quaternion().fromAngleNormalAxis
+             (-FastMath.HALF_PI, Vector3f.UNIT_Y));
+        motionControl.setInitialDuration(20f);
+        motionControl.setSpeed(1f);
+
+        track.enableDebugShape(assetManager, rootNode);
+        positionCamera();
+    }
+
+    private void positionCamera(){
+        this.cam.setLocation
+            (new Vector3f(-28.00242f, 48.005623f, -34.648228f));
+        this.cam.setRotation
+            (new Quaternion
+             (0.3359635f, 0.34280345f, -0.13281013f, 0.8671653f));
+    }
+
+    private void initAudio() {
+        org.lwjgl.input.Mouse.setGrabbed(false);
+        music = new AudioNode(assetManager,
+                              "Sound/Effects/Beep.ogg", false);
+        rootNode.attachChild(music);
+        audioRenderer.playSource(music);
+        music.setPositional(true);
+        music.setVolume(1f);
+        music.setReverbEnabled(false);
+        music.setDirectional(false);
+        music.setMaxDistance(200.0f);
+        music.setRefDistance(1f);
+        //music.setRolloffFactor(1f);
+        music.setLooping(false);
+        audioRenderer.pauseSource(music);
+    }
+
+    public class Dancer implements SoundProcessor {
+        Geometry entity;
+        float scale = 2;
+        public Dancer(Geometry entity){
+            this.entity = entity;
         }
+        /**
+         * this method is irrelevant since there is no state to cleanup.
+         */
+        public void cleanup() {}
 
-    private Geometry bell;
-    private Geometry ear1;
-    private Geometry ear2;
-    private Geometry ear3;
-    private AudioNode music;
-    private MotionTrack motionControl;
-    private IsoTimer motionTimer = new IsoTimer(60);
-    private Geometry makeEar(Node root, Vector3f position){
-        Material mat = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
-        Geometry ear = new Geometry("ear", new Box(1.0f, 1.0f, 1.0f));
-        ear.setLocalTranslation(position);
-        mat.setColor("Color", ColorRGBA.Green);
-        ear.setMaterial(mat);
-        root.attachChild(ear);
-        return ear;
-    }
+        /**
+         * Respond to sound! This is the brain of an AI entity that
+         * hears its surroundings and reacts to them.
+         */
+        public void process(ByteBuffer audioSamples,
+                            int numSamples, AudioFormat format) {
+            audioSamples.clear();
+            byte[] data = new byte[numSamples];
+            float[] out = new float[numSamples];
+            audioSamples.get(data);
+            FloatSampleTools.
+                byte2floatInterleaved
+                (data, 0, out, 0, numSamples/format.getFrameSize(), format);
 
-    private Vector3f[] path = new Vector3f[]{
-        // loop 1
-        new Vector3f(0, 0, 0),
-        new Vector3f(0, 0, -10),
-        new Vector3f(-2, 0, -14),
-        new Vector3f(-6, 0, -20),
-        new Vector3f(0, 0, -26),
-        new Vector3f(6, 0, -20),
-        new Vector3f(0, 0, -14),
-        new Vector3f(-6, 0, -20),
-        new Vector3f(0, 0, -26),
-        new Vector3f(6, 0, -20),
-        // loop 2
-        new Vector3f(5, 0, -5),
-        new Vector3f(7, 0, 1.5f),
-        new Vector3f(14, 0, 2),
-        new Vector3f(20, 0, 6),
-        new Vector3f(26, 0, 0),
-        new Vector3f(20, 0, -6),
-        new Vector3f(14, 0, 0),
-        new Vector3f(20, 0, 6),
-        new Vector3f(26, 0, 0),
-        new Vector3f(20, 0, -6),
-        new Vector3f(14, 0, 0),
-        // loop 3
-        new Vector3f(8, 0, 7.5f),
-        new Vector3f(7, 0, 10.5f),
-        new Vector3f(6, 0, 20),
-        new Vector3f(0, 0, 26),
-        new Vector3f(-6, 0, 20),
-        new Vector3f(0, 0, 14),
-        new Vector3f(6, 0, 20),
-        new Vector3f(0, 0, 26),
-        new Vector3f(-6, 0, 20),
-        new Vector3f(0, 0, 14),
-        // begin ellipse
-        new Vector3f(16, 5, 20),
-        new Vector3f(0, 0, 26),
-        new Vector3f(-16, -10, 20),
-        new Vector3f(0, 0, 14),
-        new Vector3f(16, 20, 20),
-        new Vector3f(0, 0, 26),
-        new Vector3f(-10, -25, 10),
-        new Vector3f(-10, 0, 0),
-        // come at me!
-        new Vector3f(-28.00242f, 48.005623f, -34.648228f),
-        new Vector3f(0, 0 , -20),
-    };
+            float max = Float.NEGATIVE_INFINITY;
+            for (float f : out){if (f > max) max = f;}
+            audioSamples.clear();
 
-    private void createScene() {
-        Material mat = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md");
-        bell = new Geometry( "sound-emitter" , new Sphere(15,15,1));
-        mat.setColor("Color", ColorRGBA.Blue);
-        bell.setMaterial(mat);
-        rootNode.attachChild(bell);
+            if (max > 0.1){
+                entity.getMaterial().setColor("Color", ColorRGBA.Green);
+            }
+            else {
+                entity.getMaterial().setColor("Color", ColorRGBA.Gray);
+            }
+        }
+    }
 
-        ear1 = makeEar(rootNode, new Vector3f(0, 0 ,-20));
-        ear2 = makeEar(rootNode, new Vector3f(0, 0 ,20));
-        ear3 = makeEar(rootNode, new Vector3f(20, 0 ,0));
+    private void prepareEar(Geometry ear, int n){
+        if (this.audioRenderer instanceof MultiListener){
+            MultiListener rf = (MultiListener)this.audioRenderer;
 
-        MotionPath track = new MotionPath();
+            Listener auxListener = new Listener();
+            auxListener.setLocation(ear.getLocalTranslation());
 
-        for (Vector3f v : path){
-            track.addWayPoint(v);
-        }
-        track.setCurveTension(0.80f);
+            rf.addListener(auxListener);
+            WaveFileWriter aux = null;
 
-        motionControl = new MotionTrack(bell,track);
-        // for now, use reflection to change the timer...
-        // motionControl.setTimer(new IsoTimer(60));
-
-        try {
-            Field timerField;
-            timerField = AbstractCinematicEvent.class.getDeclaredField("timer");
-            timerField.setAccessible(true);
-            try {timerField.set(motionControl, motionTimer);}
-            catch (IllegalArgumentException e) {e.printStackTrace();}
-            catch (IllegalAccessException e) {e.printStackTrace();}
-        }
-        catch (SecurityException e) {e.printStackTrace();}
-        catch (NoSuchFieldException e) {e.printStackTrace();}
+            try {
+                aux = new WaveFileWriter
+                    (File.createTempFile("advanced-audio-" + n, ".wav"));}
+            catch (IOException e) {e.printStackTrace();}
+
+            rf.registerSoundProcessor
+                (auxListener,
+                 new CompositeSoundProcessor(new Dancer(ear), aux));
+        }
+    }
 
-        motionControl.setDirectionType(MotionTrack.Direction.PathAndRotation);
-        motionControl.setRotation(new Quaternion().fromAngleNormalAxis(-FastMath.HALF_PI, Vector3f.UNIT_Y));
-        motionControl.setInitialDuration(20f);
-        motionControl.setSpeed(1f);
+    public void simpleInitApp() {
+        this.setTimer(new IsoTimer(60));
+        initAudio();
 
-        track.enableDebugShape(assetManager, rootNode);
-        positionCamera();
+        createScene();
+
+        prepareEar(ear1, 1);
+        prepareEar(ear2, 1);
+        prepareEar(ear3, 1);
+
+        motionControl.play();
+    }
+
+    public void simpleUpdate(float tpf) {
+        motionTimer.update();
+        if (music.getStatus() != AudioNode.Status.Playing){
+            music.play();
         }
-    }
-
-
-    private void positionCamera(){
-        this.cam.setLocation(new Vector3f(-28.00242f, 48.005623f, -34.648228f));
-        this.cam.setRotation(new Quaternion(0.3359635f, 0.34280345f, -0.13281013f, 0.8671653f));
-    }
-
-    private void initAudio() {
-        org.lwjgl.input.Mouse.setGrabbed(false);
-        music = new AudioNode(assetManager, "Sound/Effects/Beep.ogg", false);
-
-        rootNode.attachChild(music);
-        audioRenderer.playSource(music);
-        music.setPositional(true);
-        music.setVolume(1f);
-        music.setReverbEnabled(false);
-        music.setDirectional(false);
-        music.setMaxDistance(200.0f);
-        music.setRefDistance(1f);
-        //music.setRolloffFactor(1f);
-        music.setLooping(false);
-        audioRenderer.pauseSource(music);
-    }
-
-    public class Dancer implements SoundProcessor {
-        Geometry entity;
-        float scale = 2;
-        public Dancer(Geometry entity){
-            this.entity = entity;
-        }
-
-        /**
-         * this method is irrelevant since there is no state to cleanup.
-         */
-        public void cleanup() {}
-
-
-        /**
-         * Respond to sound! This is the brain of an AI entity that
-         * hears its surroundings and reacts to them.
-         */
-        public void process(ByteBuffer audioSamples, int numSamples, AudioFormat format) {
-            audioSamples.clear();
-            byte[] data = new byte[numSamples];
-            float[] out = new float[numSamples];
-            audioSamples.get(data);
-            FloatSampleTools.byte2floatInterleaved(data, 0, out, 0,
-                numSamples/format.getFrameSize(), format);
-
-            float max = Float.NEGATIVE_INFINITY;
-            for (float f : out){if (f > max) max = f;}
-            audioSamples.clear();
-
-            if (max > 0.1){entity.getMaterial().setColor("Color", ColorRGBA.Green);}
-            else {entity.getMaterial().setColor("Color", ColorRGBA.Gray);}
-        }
-    }
-
-    private void prepareEar(Geometry ear, int n){
-        if (this.audioRenderer instanceof MultiListener){
-            MultiListener rf = (MultiListener)this.audioRenderer;
-
-            Listener auxListener = new Listener();
-            auxListener.setLocation(ear.getLocalTranslation());
-
-            rf.addListener(auxListener);
-            WaveFileWriter aux = null;
-
-            try {aux = new WaveFileWriter(File.createTempFile("advanced-audio-" + n, ".wav"));}
-            catch (IOException e) {e.printStackTrace();}
-
-            rf.registerSoundProcessor(auxListener,
-                new CompositeSoundProcessor(new Dancer(ear), aux));
-
-        }
-    }
-
-
-    public void simpleInitApp() {
-        this.setTimer(new IsoTimer(60));
-        initAudio();
-
-        createScene();
-
-        prepareEar(ear1, 1);
-        prepareEar(ear2, 1);
-        prepareEar(ear3, 1);
-
-        motionControl.play();
-
-    }
-
-    public void simpleUpdate(float tpf) {
-        motionTimer.update();
-        if (music.getStatus() != AudioNode.Status.Playing){
-            music.play();
-        }
-        Vector3f loc = cam.getLocation();
-        Quaternion rot = cam.getRotation();
-        listener.setLocation(loc);
-        listener.setRotation(rot);
-        music.setLocalTranslation(bell.getLocalTranslation());
-    }
-
+        Vector3f loc = cam.getLocation();
+        Quaternion rot = cam.getRotation();
+        listener.setLocation(loc);
+        listener.setRotation(rot);
+        music.setLocalTranslation(bell.getLocalTranslation());
+    }
 }
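Usage sketch (editor's note; not part of the changeset above). The patch defines the whole capture API: the SEND renderer implements MultiListener, and anything implementing SoundProcessor receives the samples rendered for a given listener. Advanced.java exercises every feature at once; the minimal client below shows only the essential steps. Every call is taken from the patch itself, but the class name and the import paths are assumptions inferred from the repository layout (com.aurellem.capture.*), so treat this as a sketch rather than shipped code.

    import java.io.File;
    import java.io.IOException;

    import com.aurellem.capture.AurellemSystemDelegate;
    import com.aurellem.capture.audio.MultiListener;
    import com.aurellem.capture.audio.WaveFileWriter;
    import com.jme3.app.SimpleApplication;
    import com.jme3.audio.Listener;
    import com.jme3.system.AppSettings;
    import com.jme3.system.JmeSystem;

    public class MinimalCapture extends SimpleApplication {

        public static void main(String[] args) {
            MinimalCapture app = new MinimalCapture();
            // Select the "Multiple Audio Send" renderer, exactly as
            // Advanced.main does in the patch.
            AppSettings settings = new AppSettings(true);
            settings.setAudioRenderer(AurellemSystemDelegate.SEND);
            JmeSystem.setSystemDelegate(new AurellemSystemDelegate());
            app.setSettings(settings);
            app.start();
        }

        public void simpleInitApp() {
            if (audioRenderer instanceof MultiListener) {
                MultiListener multi = (MultiListener) audioRenderer;

                // Add an auxiliary listener. addListener() blocks on the
                // renderer's latch until the OpenAL device is ready.
                Listener aux = new Listener();
                multi.addListener(aux);

                try {
                    // Whatever this listener hears is written to a WAV
                    // file, one buffer per dispatchAudio() step.
                    File out = File.createTempFile("minimal-capture", ".wav");
                    multi.registerSoundProcessor(aux, new WaveFileWriter(out));
                } catch (IOException e) { e.printStackTrace(); }
            }
        }
    }

One sizing note that follows from initBuffer(): with the 44100 Hz device created in initInThread() and MIN_FRAMERATE = 10, the shared buffer holds 44100 / 10 = 4410 frames, so each dispatchAudio(tpf) step can cover at most a tenth of a second of simulation time.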