# HG changeset patch # User Robert McIntyre # Date 1362509721 0 # Node ID 7239aee7267f9beefafc826f83945a7387a5380a # Parent d9128eb5f42ea20633beef524cc29c7999abd3c2# Parent eb7c94a0318819c5f521edf8dc2b3ef8dacf22d3 merge. diff -r eb7c94a03188 -r 7239aee7267f .hgignore --- a/.hgignore Tue Feb 26 16:31:29 2013 +0000 +++ b/.hgignore Tue Mar 05 18:55:21 2013 +0000 @@ -10,6 +10,9 @@ libbulletjme64.so liblwjgl64.so libopenal64.so +liblwjgl.so +libopenal.so +libbulletjme.so syntax: regexp diff -r eb7c94a03188 -r 7239aee7267f MIT-media-projects.org --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/MIT-media-projects.org Tue Mar 05 18:55:21 2013 +0000 @@ -0,0 +1,24 @@ +*Machine Learning and Pattern Recognition with Multiple +Modalities Hyungil Ahn and Rosalind W. Picard + +This project develops new theory and algorithms to enable +computers to make rapid and accurate inferences from +multiple modes of data, such as determining a person's +affective state from multiple sensors--video, mouse behavior, +chair pressure patterns, typed selections, or +physiology. Recent efforts focus on understanding the level +of a person's attention, useful for things such as +determining when to interrupt. Our approach is Bayesian: +formulating probabilistic models on the basis of domain +knowledge and training data, and then performing inference +according to the rules of probability theory. This type of +sensor fusion work is especially challenging due to problems +of sensor channel drop-out, different kinds of noise in +different channels, dependence between channels, scarce and +sometimes inaccurate labels, and patterns to detect that are +inherently time-varying. We have constructed a variety of +new algorithms for solving these problems and demonstrated +their performance gains over other state-of-the-art methods. 
+ +http://affect.media.mit.edu/projectpages/multimodal/ + diff -r eb7c94a03188 -r 7239aee7267f assets/Models/joint/basic-muscle.png Binary file assets/Models/joint/basic-muscle.png has changed diff -r eb7c94a03188 -r 7239aee7267f assets/Models/joint/joint.blend Binary file assets/Models/joint/joint.blend has changed diff -r eb7c94a03188 -r 7239aee7267f assets/Models/joint/retina.png Binary file assets/Models/joint/retina.png has changed diff -r eb7c94a03188 -r 7239aee7267f assets/Models/joint/segment-layout.png Binary file assets/Models/joint/segment-layout.png has changed diff -r eb7c94a03188 -r 7239aee7267f assets/Models/joint/segment-layout.xcf Binary file assets/Models/joint/segment-layout.xcf has changed diff -r eb7c94a03188 -r 7239aee7267f org/body.org --- a/org/body.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/body.org Tue Mar 05 18:55:21 2013 +0000 @@ -579,11 +579,14 @@ (ns cortex.test.body (:use (cortex world util body)) (:import - (com.aurellem.capture Capture RatchetTimer) + (com.aurellem.capture Capture RatchetTimer IsoTimer) (com.jme3.math Quaternion Vector3f ColorRGBA) java.io.File)) #+end_src +#+results: test-header +: java.io.File + * Source - [[../src/cortex/body.clj][cortex.body]] - [[../src/cortex/test/body.clj][cortex.test.body]] diff -r eb7c94a03188 -r 7239aee7267f org/hearing.org --- a/org/hearing.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/hearing.org Tue Mar 05 18:55:21 2013 +0000 @@ -5,7 +5,7 @@ #+keywords: simulated hearing, openal, clojure, jMonkeyEngine3, LWJGL, AI #+SETUPFILE: ../../aurellem/org/setup.org #+INCLUDE: ../../aurellem/org/level-0.org -#+BABEL: :exports both :noweb yes :cache no :mkdirp yes + * Hearing @@ -976,7 +976,7 @@ #+end_html -#+include "../../jmeCapture/src/com/aurellem/capture/examples/Advanced.java" src java +#+include: "../../jmeCapture/src/com/aurellem/capture/examples/Advanced.java" src java Here is a small clojure program to drive the java program and make it available as part of my test suite. @@ -1027,8 +1027,8 @@ hearing. When you play a sound, the bar should display a waveform. Keys: - : play sound" - + : play sound + l : play hymn" ([] (test-worm-hearing false)) ([record?] (let [the-worm (doto (worm) (body!)) @@ -1051,14 +1051,17 @@ (if value (.play hymn)))}) (fn [world] (light-up-everything world) + (let [timer (IsoTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) (if record? (do (com.aurellem.capture.Capture/captureVideo world - (File."/home/r/proj/cortex/render/worm-audio/frames")) + (File. "/home/r/proj/cortex/render/worm-audio/frames")) (com.aurellem.capture.Capture/captureAudio world - (File."/home/r/proj/cortex/render/worm-audio/audio.wav"))))) + (File. "/home/r/proj/cortex/render/worm-audio/audio.wav"))))) (fn [world tpf] (hearing-display @@ -1067,6 +1070,9 @@ (File. "/home/r/proj/cortex/render/worm-audio/hearing-data")))))))) #+end_src +#+results: test-hearing-2 +: #'cortex.test.hearing/test-worm-hearing + In this test, I load the worm with its newly formed ear and let it hear sounds. 
The sound the worm is hearing is localized to the origin of the world, and you can see that as the worm moves farther away from
@@ -1170,9 +1176,13 @@
 (:import java.io.File)
 (:import com.jme3.scene.Node com.jme3.system.AppSettings
- com.jme3.math.Vector3f))
+ com.jme3.math.Vector3f)
+ (:import (com.aurellem.capture Capture IsoTimer RatchetTimer)))
 #+end_src
+
+#+results: test-header
+: com.aurellem.capture.RatchetTimer
+
 * Source Listing
 - [[../src/cortex/hearing.clj][cortex.hearing]]
 - [[../src/cortex/test/hearing.clj][cortex.test.hearing]]
diff -r eb7c94a03188 -r 7239aee7267f org/ideas.org
--- a/org/ideas.org Tue Feb 26 16:31:29 2013 +0000
+++ b/org/ideas.org Tue Mar 05 18:55:21 2013 +0000
@@ -1,3 +1,5 @@
+
+
 * Brainstorming different sensors and effectors.
 Every sense that we have should have an effector that changes what
diff -r eb7c94a03188 -r 7239aee7267f org/intro.org
--- a/org/intro.org Tue Feb 26 16:31:29 2013 +0000
+++ b/org/intro.org Tue Mar 05 18:55:21 2013 +0000
@@ -8,170 +8,193 @@
 #+babel: :mkdirp yes :noweb yes
 * Background
-Artificial Intelligence has tried and failed for more than half a
-century to produce programs as flexible, creative, and "intelligent"
-as the human mind itself. Clearly, we are still missing some important
-ideas concerning intelligent programs or we would have strong AI
-already. What idea could be missing?
+
+Artificial Intelligence has tried and failed for more than
+half a century to produce programs as flexible, creative,
+and "intelligent" as the human mind itself. Clearly, we are
+still missing some important ideas concerning intelligent
+programs or we would have strong AI already. What idea could
+be missing?
 When Turing first proposed his famous "Turing Test" in the
-groundbreaking paper [[../sources/turing.pdf][/Computing Machines and Intelligence/]], he gave
-little importance to how a computer program might interact with the
-world:
+groundbreaking paper [[../sources/turing.pdf][/Computing Machinery and Intelligence/]],
+he gave little importance to how a computer program might
+interact with the world:
 #+BEGIN_QUOTE
-\ldquo{}We need not be too concerned about the legs, eyes, etc. The example of
-Miss Helen Keller shows that education can take place provided that
-communication in both directions between teacher and pupil can take
-place by some means or other.\rdquo{}
+\ldquo{}We need not be too concerned about the legs, eyes,
+etc. The example of Miss Helen Keller shows that education
+can take place provided that communication in both
+directions between teacher and pupil can take place by some
+means or other.\rdquo{}
 #+END_QUOTE
-And from the example of Hellen Keller he went on to assume that the
-only thing a fledgling AI program could need by way of communication
-is a teletypewriter. But Hellen Keller did possess vision and hearing
-for the first few months of her life, and her tactile sense was far
-more rich than any text-stream could hope to achieve. She possessed a
-body she could move freely, and had continual access to the real world
-to learn from her actions.
+And from the example of Helen Keller he went on to assume
+that the only thing a fledgling AI program could need by way
+of communication is a teletypewriter. But Helen Keller did
+possess vision and hearing for the first few months of her
+life, and her tactile sense was far richer than any
+text-stream could hope to achieve. She possessed a body she
+could move freely, and had continual access to the real
+world to learn from her actions.
-I believe that our programs are suffering from too little sensory
-input to become really intelligent. Imagine for a moment that you
-lived in a world completely cut off form all sensory stimulation. You
-have no eyes to see, no ears to hear, no mouth to speak. No body, no
-taste, no feeling whatsoever. The only sense you get at all is a
-single point of light, flickering on and off in the void. If this was
-your life from birth, you would never learn anything, and could never
-become intelligent. Actual humans placed in sensory deprivation
-chambers experience hallucinations and can begin to loose their sense
-of reality. Most of the time, the programs we write are in exactly
-this situation. They do not interface with cameras and microphones,
-and they do not control a real or simulated body or interact with any
-sort of world.
+I believe that our programs are suffering from too little
+sensory input to become really intelligent. Imagine for a
+moment that you lived in a world completely cut off from all
+sensory stimulation. You have no eyes to see, no ears to
+hear, no mouth to speak. No body, no taste, no feeling
+whatsoever. The only sense you get at all is a single point
+of light, flickering on and off in the void. If this was
+your life from birth, you would never learn anything, and
+could never become intelligent. Actual humans placed in
+sensory deprivation chambers experience hallucinations and
+can begin to lose their sense of reality. Most of the time,
+the programs we write are in exactly this situation. They do
+not interface with cameras and microphones, and they do not
+control a real or simulated body or interact with any sort
+of world.
 * Simulation vs. Reality
+
 I want to demonstrate that multiple senses are what enable
-intelligence. There are two ways of playing around with senses and
-computer programs:
-
+intelligence. There are two ways of playing around with
+senses and computer programs:
 ** Simulation
-The first is to go entirely with simulation: virtual world, virtual
-character, virtual senses. The advantages are that when everything is
-a simulation, experiments in that simulation are absolutely
-reproducible. It's also easier to change the character and world to
-explore new situations and different sensory combinations.
-If the world is to be simulated on a computer, then not only do you
-have to worry about whether the character's senses are rich enough to
-learn from the world, but whether the world itself is rendered with
-enough detail and realism to give enough working material to the
-character's senses. To name just a few difficulties facing modern
-physics simulators: destructibility of the environment, simulation of
-water/other fluids, large areas, nonrigid bodies, lots of objects,
-smoke. I don't know of any computer simulation that would allow a
-character to take a rock and grind it into fine dust, then use that
-dust to make a clay sculpture, at least not without spending years
-calculating the interactions of every single small grain of
-dust. Maybe a simulated world with today's limitations doesn't provide
+The first is to go entirely with simulation: virtual world,
+virtual character, virtual senses. The advantages are that
+when everything is a simulation, experiments in that
+simulation are absolutely reproducible. It's also easier to
+change the character and world to explore new situations and
+different sensory combinations.
+ +If the world is to be simulated on a computer, then not only +do you have to worry about whether the character's senses +are rich enough to learn from the world, but whether the +world itself is rendered with enough detail and realism to +give enough working material to the character's senses. To +name just a few difficulties facing modern physics +simulators: destructibility of the environment, simulation +of water/other fluids, large areas, nonrigid bodies, lots of +objects, smoke. I don't know of any computer simulation that +would allow a character to take a rock and grind it into +fine dust, then use that dust to make a clay sculpture, at +least not without spending years calculating the +interactions of every single small grain of dust. Maybe a +simulated world with today's limitations doesn't provide enough richness for real intelligence to evolve. ** Reality -The other approach for playing with senses is to hook your software up -to real cameras, microphones, robots, etc., and let it loose in the -real world. This has the advantage of eliminating concerns about -simulating the world at the expense of increasing the complexity of -implementing the senses. Instead of just grabbing the current rendered -frame for processing, you have to use an actual camera with real -lenses and interact with photons to get an image. It is much harder to -change the character, which is now partly a physical robot of some -sort, since doing so involves changing things around in the real world -instead of modifying lines of code. While the real world is very rich -and definitely provides enough stimulation for intelligence to develop -as evidenced by our own existence, it is also uncontrollable in the -sense that a particular situation cannot be recreated perfectly or -saved for later use. It is harder to conduct science because it is -harder to repeat an experiment. The worst thing about using the real -world instead of a simulation is the matter of time. Instead of -simulated time you get the constant and unstoppable flow of real -time. This severely limits the sorts of software you can use to -program the AI because all sense inputs must be handled in real -time. Complicated ideas may have to be implemented in hardware or may -simply be impossible given the current speed of our -processors. Contrast this with a simulation, in which the flow of time -in the simulated world can be slowed down to accommodate the -limitations of the character's programming. In terms of cost, doing -everything in software is far cheaper than building custom real-time +The other approach for playing with senses is to hook your +software up to real cameras, microphones, robots, etc., and +let it loose in the real world. This has the advantage of +eliminating concerns about simulating the world at the +expense of increasing the complexity of implementing the +senses. Instead of just grabbing the current rendered frame +for processing, you have to use an actual camera with real +lenses and interact with photons to get an image. It is much +harder to change the character, which is now partly a +physical robot of some sort, since doing so involves +changing things around in the real world instead of +modifying lines of code. While the real world is very rich +and definitely provides enough stimulation for intelligence +to develop as evidenced by our own existence, it is also +uncontrollable in the sense that a particular situation +cannot be recreated perfectly or saved for later use. 
It is +harder to conduct science because it is harder to repeat an +experiment. The worst thing about using the real world +instead of a simulation is the matter of time. Instead of +simulated time you get the constant and unstoppable flow of +real time. This severely limits the sorts of software you +can use to program the AI because all sense inputs must be +handled in real time. Complicated ideas may have to be +implemented in hardware or may simply be impossible given +the current speed of our processors. Contrast this with a +simulation, in which the flow of time in the simulated world +can be slowed down to accommodate the limitations of the +character's programming. In terms of cost, doing everything +in software is far cheaper than building custom real-time hardware. All you need is a laptop and some patience. * Choose a Simulation Engine -Mainly because of issues with controlling the flow of time, I chose to -simulate both the world and the character. I set out to make a world -in which I could embed a character with multiple senses. My main goal -is to make an environment where I can perform further experiments in -simulated senses. +Mainly because of issues with controlling the flow of time, +I chose to simulate both the world and the character. I set +out to make a world in which I could embed a character with +multiple senses. My main goal is to make an environment +where I can perform further experiments in simulated senses. -I examined many different 3D environments to try and find something I -would use as the base for my simulation; eventually the choice came -down to three engines: the Quake II engine, the Source Engine, and -jMonkeyEngine. +I examined many different 3D environments to try and find +something I would use as the base for my simulation; +eventually the choice came down to three engines: the Quake +II engine, the Source Engine, and jMonkeyEngine. ** [[http://www.idsoftware.com][Quake II]]/[[http://www.bytonic.de/html/jake2.html][Jake2]] -I spent a bit more than a month working with the Quake II Engine from -ID software to see if I could use it for my purposes. All the source -code was released by ID software into the Public Domain several years -ago, and as a result it has been ported and modified for many -different reasons. This engine was famous for its advanced use of +I spent a bit more than a month working with the Quake II +Engine from ID software to see if I could use it for my +purposes. All the source code was released by ID software +into the Public Domain several years ago, and as a result it +has been ported and modified for many different +reasons. This engine was famous for its advanced use of realistic shading and had decent and fast physics -simulation. Researchers at Princeton [[http://papers.cnl.salk.edu/PDFs/Intracelllular%20Dynamics%20of%20Virtual%20Place%20Cells%202011-4178.pdf][used this code]] ([[http://brainwindows.wordpress.com/2009/10/14/playing-quake-with-a-real-mouse/][video]]) to study -spatial information encoding in the hippocampal cells of rats. Those -researchers created a special Quake II level that simulated a maze, -and added an interface where a mouse could run on top of a ball in -various directions to move the character in the simulated maze. They -measured hippocampal activity during this exercise to try and tease -out the method in which spatial data was stored in that area of the -brain. 
I find this promising because if a real living rat can interact -with a computer simulation of a maze in the same way as it interacts -with a real-world maze, then maybe that simulation is close enough to -reality that a simulated sense of vision and motor control interacting -with that simulation could reveal useful information about the real -thing. There is a Java port of the original C source code called -Jake2. The port demonstrates Java's OpenGL bindings and runs anywhere -from 90% to 105% as fast as the C version. After reviewing much of the -source of Jake2, I eventually rejected it because the engine is too -tied to the concept of a first-person shooter game. One of the -problems I had was that there do not seem to be any easy way to attach -multiple cameras to a single character. There are also several physics -clipping issues that are corrected in a way that only applies to the -main character and does not apply to arbitrary objects. While there is -a large community of level modders, I couldn't find a community to -support using the engine to make new things. +simulation. Researchers at Princeton [[http://papers.cnl.salk.edu/PDFs/Intracelllular%20Dynamics%20of%20Virtual%20Place%20Cells%202011-4178.pdf][used this code]] ([[http://brainwindows.wordpress.com/2009/10/14/playing-quake-with-a-real-mouse/][video]]) +to study spatial information encoding in the hippocampal +cells of rats. Those researchers created a special Quake II +level that simulated a maze, and added an interface where a +mouse could run on top of a ball in various directions to +move the character in the simulated maze. They measured +hippocampal activity during this exercise to try and tease +out the method in which spatial data was stored in that area +of the brain. I find this promising because if a real living +rat can interact with a computer simulation of a maze in the +same way as it interacts with a real-world maze, then maybe +that simulation is close enough to reality that a simulated +sense of vision and motor control interacting with that +simulation could reveal useful information about the real +thing. There is a Java port of the original C source code +called Jake2. The port demonstrates Java's OpenGL bindings +and runs anywhere from 90% to 105% as fast as the C +version. After reviewing much of the source of Jake2, I +rejected it because the engine is too tied to the concept of +a first-person shooter game. One of the problems I had was +that there does not seem to be any easy way to attach +multiple cameras to a single character. There are also +several physics clipping issues that are corrected in a way +that only applies to the main character and do not apply to +arbitrary objects. While there is a large community of level +modders, I couldn't find a community to support using the +engine to make new things. ** [[http://source.valvesoftware.com/][Source Engine]] -The Source Engine evolved from the Quake II and Quake I engines and is -used by Valve in the Half-Life series of games. The physics simulation -in the Source Engine is quite accurate and probably the best out of -all the engines I investigated. There is also an extensive community -actively working with the engine. However, applications that use the -Source Engine must be written in C++, the code is not open, it only -runs on Windows, and the tools that come with the SDK to handle models -and textures are complicated and awkward to use. 
+The Source Engine evolved from the Quake II and Quake I
+engines and is used by Valve in the Half-Life series of
+games. The physics simulation in the Source Engine is quite
+accurate and probably the best out of all the engines I
+investigated. There is also an extensive community actively
+working with the engine. However, applications that use the
+Source Engine must be written in C++, the code is not open,
+it only runs on Windows, and the tools that come with the
+SDK to handle models and textures are complicated and
+awkward to use.
 ** [[http://jmonkeyengine.com/][jMonkeyEngine3]]
-jMonkeyEngine is a new library for creating games in Java. It uses
-OpenGL to render to the screen and uses screengraphs to avoid drawing
-things that do not appear on the screen. It has an active community
-and several games in the pipeline. The engine was not built to serve
-any particular game but is instead meant to be used for any 3D
-game. After experimenting with each of these three engines and a few
-others for about 2 months I settled on jMonkeyEngine. I chose it
-because it had the most features out of all the open projects I looked
-at, and because I could then write my code in Clojure, an
-implementation of LISP that runs on the JVM.
+jMonkeyEngine is a new library for creating games in
+Java. It uses OpenGL to render to the screen and uses a
+scene graph to avoid drawing things that do not appear on
+the screen. It has an active community and several games in
+the pipeline. The engine was not built to serve any
+particular game but is instead meant to be used for any 3D
+game. After experimenting with each of these three engines
+and a few others for about 2 months I settled on
+jMonkeyEngine. I chose it because it had the most features
+out of all the open projects I looked at, and because I
+could then write my code in Clojure, an implementation of
+LISP that runs on the JVM.
diff -r eb7c94a03188 -r 7239aee7267f org/joint.org
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/org/joint.org Tue Mar 05 18:55:21 2013 +0000
@@ -0,0 +1,124 @@
+* Summary of Senses
+
+vision -- list of functions which must each be called with
+the world as their argument, each of which returns [topology data]. Each
+element of data is a number between 0 and 255 representing the
+intensity of the light received at that sensor. Each element of
+topology is a pair of numbers [x, y] such that numbers whose pairs
+have a short Euclidean distance are generally physically close on the
+actual sensor.
+
+proprioception -- list of nullary functions, one for each joint, which
+return [heading pitch roll].
+
+movement -- list of functions, one for each muscle, which must be
+called with an integer between 0 and the total number of muscle fibers
+in the muscle. Each function returns a float which is (current-force/
+total-possible-force).
+
+touch -- list of functions which must each be called with a Node
+(normally the root node of the simulation) as the argument, each of which
+returns [topology data]. Each element of data is [length limit] where
+limit is the length of that particular "hair" and length is the amount
+of the hair that has been activated so far. (= limit length) means that
+nothing is touching the hair.
+
+
+* A Flower
+
+A flower is a basic creature that tries to maximize the amount of
+light that it sees. It can have one or more eyes, with one eye being
+"special" in that it is this eye which must receive maximum light. It
+can have multiple articulated joints and muscles.
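Before getting to the control algorithm, here is a rough sketch of how the sense interfaces summarized above might be polled from a running simulation. It is only an illustration: the helper names =sense-snapshot= and =relax-muscles!= are hypothetical, and the only assumption is that the sense functions behave exactly as described in the summary.

#+begin_src clojure
(in-ns 'cortex.joint) ;; namespace defined in the Headers section below

(defn sense-snapshot
  "Poll every sense function once and return the raw readings.
   Assumes vision functions take the world, proprioception
   functions take no arguments, and touch functions take the
   root node of the simulation."
  [world root-node vision-fns prop-fns touch-fns]
  {:vision         (doall (map (fn [v] (v world)) vision-fns))
   :proprioception (doall (map (fn [p] (p)) prop-fns))
   :touch          (doall (map (fn [t] (t root-node)) touch-fns))})

(defn relax-muscles!
  "Ask every muscle for zero fibers of contraction; each movement
   function returns the fraction of total possible force produced."
  [movement-fns]
  (doall (map (fn [m] (m 0)) movement-fns)))
#+end_src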
+ +Want an algorithm that uses the sense data of =vision= +=proprioception=, and =movement= to maximum benefit in order to look +at the light source. + +The light source will move from place to place and the flower will +have to follow it. + +The algorithm should be generalize to any number of eyes and muscles, +and should become /more/ preformant the more sensory data is +available. + +I will punt on working out an elegant model of motivation for the +flower which makes it want to go to the light. + +Maybe I need a motivationless entity first, which just learns how its +own body works? But then, wouldn't that just be a motivation itself? + + + + + +#+name: load-creature +#+begin_src clojure +(in-ns 'cortex.joint) + +(def joint "Models/joint/joint.blend") + +(defn joint-creature [] + (load-blender-model joint)) + +(defn test-joint-creature [] + (let [me (sphere 0.5 :color ColorRGBA/Blue :physical? false) + creature (doto (joint-creature) (body!)) + + ;;;;;;;;;;;; Sensors/Effectors ;;;;;;;;;;;;;;;;;;;;;;;;;;;; + touch (touch! creature) + touch-display (view-touch) + + vision (vision! creature) + vision-display (view-vision) + + ;;hearing (hearing! creature) + ;;hearing-display (view-hearing) + + prop (proprioception! creature) + prop-display (view-proprioception) + + muscles (movement! creature) + muscle-display (view-movement) + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + fix-display (gen-fix-display) + + floor (box 10 2 10 :position (Vector3f. 0 -9 0) + :color ColorRGBA/Gray :mass 0)] + (world + (nodify [floor me creature]) + standard-debug-controls + (fn [world] + ;;(speed-up world) + (light-up-everything world) + (let [timer (RatchetTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer))) + (fn [world tpf] + (.setLocalTranslation me (.getLocation (.getCamera world))) + (fix-display world))))) +#+end_src + +* Headers +#+name: joint-header +#+begin_src clojure +(ns cortex.joint + (:require cortex.import) + (:use (cortex world util import body sense + hearing touch vision proprioception movement)) + (:import java.io.File) + (:import (com.aurellem.capture RatchetTimer IsoTimer))) + +(cortex.import/mega-import-jme3) +(rlm.rlm-commands/help) +#+end_src + + +* COMMENT Generate Source + +#+begin_src clojure :tangle ../src/cortex/joint.clj +<> +<> +#+end_src + diff -r eb7c94a03188 -r 7239aee7267f org/movement.org --- a/org/movement.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/movement.org Tue Mar 05 18:55:21 2013 +0000 @@ -185,6 +185,8 @@ #+name: test-movement #+begin_src clojure +(in-ns 'cortex.test.movement) + (defn test-worm-movement "Testing movement: You should see the worm suspended in mid air and a display on the @@ -217,13 +219,16 @@ (if value (swap! muscle-exertion (fn [v] (- v 20)))))}) (fn [world] + + (let [timer (RatchetTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) (if record? (Capture/captureVideo world (File. "/home/r/proj/cortex/render/worm-muscles/main-view"))) (light-up-everything world) (enable-debug world) - (.setTimer world (RatchetTimer. 60)) (set-gravity world (Vector3f. 0 0 0)) (.setLocation (.getCamera world) (Vector3f. -4.912815, 2.004171, 0.15710819)) @@ -237,6 +242,9 @@ (File. 
"/home/r/proj/cortex/render/worm-muscles/muscles")))))))) #+end_src +#+results: test-movement +: #'cortex.test.movement/test-worm-movement + * Video Demonstration #+begin_html @@ -317,10 +325,13 @@ (:import java.awt.image.BufferedImage) (:import com.jme3.scene.Node) (:import (com.jme3.math Quaternion Vector3f)) - (:import (com.aurellem.capture Capture RatchetTimer)) + (:import (com.aurellem.capture Capture RatchetTimer IsoTimer)) (:import com.jme3.bullet.control.RigidBodyControl)) #+end_src +#+results: test-header +: com.jme3.bullet.control.RigidBodyControl + * Source Listing - [[../src/cortex/movement.clj][cortex.movement]] - [[../src/cortex/test/movement.clj][cortex.test.movement]] diff -r eb7c94a03188 -r 7239aee7267f org/proposal.org --- a/org/proposal.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/proposal.org Tue Mar 05 18:55:21 2013 +0000 @@ -1,1 +0,0 @@ - diff -r eb7c94a03188 -r 7239aee7267f org/proprioception.org --- a/org/proprioception.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/proprioception.org Tue Mar 05 18:55:21 2013 +0000 @@ -272,11 +272,13 @@ [root standard-debug-controls (fn [world] + (let [timer (RatchetTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) (if record? (Capture/captureVideo world (File. "/home/r/proj/cortex/render/proprio/main-view"))) - (.setTimer world (com.aurellem.capture.RatchetTimer. 60)) (set-gravity world (Vector3f. 0 0 0)) (enable-debug world) (light-up-everything world)) @@ -363,7 +365,7 @@ #+name: test-proprioception-header #+begin_src clojure (ns cortex.test.proprioception - (:import (com.aurellem.capture Capture RatchetTimer)) + (:import (com.aurellem.capture Capture RatchetTimer IsoTimer)) (:use (cortex util world proprioception body)) (:import java.io.File) (:import com.jme3.bullet.control.RigidBodyControl) @@ -371,6 +373,9 @@ (:import (com.jme3.math Vector3f Quaternion ColorRGBA))) #+end_src +#+results: test-proprioception-header +: com.jme3.math.ColorRGBA + * Source Listing - [[../src/cortex/proprioception.clj][cortex.proprioception]] - [[../src/cortex/test/touch.clj][cortex.test.proprioception]] diff -r eb7c94a03188 -r 7239aee7267f org/sense.org --- a/org/sense.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/sense.org Tue Mar 05 18:55:21 2013 +0000 @@ -14,13 +14,21 @@ #+name: blender-1 #+begin_src clojure +(in-ns 'cortex.sense) (defn meta-data "Get the meta-data for a node created with blender." [blender-node key] (if-let [data (.getUserData blender-node "properties")] - (.findValue data key) nil)) + ;; this part is to accomodate weird blender properties + ;; as well as sensible clojure maps. + (.findValue data key) + (.getUserData blender-node key))) + #+end_src +#+results: blender-1 +: #'cortex.sense/meta-data + Blender uses a different coordinate system than jMonkeyEngine so it is useful to be able to convert between the two. These only come into play when the meta-data of a node refers to a vector in the blender @@ -446,6 +454,8 @@ #+name: test #+begin_src clojure +(in-ns 'cortex.test.sense) + (defn test-bind-sense "Show a camera that stays in the same relative position to a blue cube." @@ -469,12 +479,14 @@ (.setTimer world (RatchetTimer. 60)) (if record? (Capture/captureVideo - world (File. "/home/r/proj/cortex/render/bind-sense0"))) + world + (File. "/home/r/proj/cortex/render/bind-sense0"))) (add-camera! world cam - (comp (view-image - (if record? - (File. "/home/r/proj/cortex/render/bind-sense1"))) + (comp + (view-image + (if record? + (File. 
"/home/r/proj/cortex/render/bind-sense1"))) BufferedImage!)) (add-camera! world (.getCamera world) no-op))) no-op)))) diff -r eb7c94a03188 -r 7239aee7267f org/thesis.org --- a/org/thesis.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/thesis.org Tue Mar 05 18:55:21 2013 +0000 @@ -1,1 +1,57 @@ +#+title: Thesis +#+author: Robert McIntyre +#+email: rlm@mit.edu +#+description: MEng thesis for Robert McIntyre +#+keywords: AI, simulation, jMonkeyEngine3, clojure, virtual reality +#+SETUPFILE: ../../aurellem/org/setup.org +* COMMENT Multiple senses are compelling for AI. +#+include: "./intro.org" + +* Virtual reality is vastly easier than actual reality. + +* There is no framework for AI experimenmts with multiple senses. + +* Cortex is my attempt at such a framework. + +** COMMENT Cortex uses jMonkeyEngine3 to create virtual worlds... +#+include: "./world.org" +#+include: "./util.org" + +** COMMENT ...and Blender to describe virtual creatures. + +** COMMENT Bodies are articulated rigid constructs +#+include: "./body.org" + +** COMMENT Senses are all 2d surfaces with embedded sensor elements. +#+include: "./sense.org" + +** COMMENT Thousands of hair-like elements simulate touch. +#+include: "./touch.org" + +** COMMENT Vision is modeled after the human retina. +#+include: "./vision.org" + +** COMMENT Cortex provides general simulated hearing. +#+include: "./hearing.org" + +** COMMENT Proprioception and Movement provide a sense of embodiment. +#+include: "./proprioception.org" +#+include: "./movement.org" + +* COMMENT The Hand +#+include: "./integration.org" + +* The Reusable Self Learning Joint + +* Robotic Calisthenics + +* The Lense that Sees its Flaws + +* Rat in a Maze + +* Swarm Creatures + +* Simulated Imagination + + diff -r eb7c94a03188 -r 7239aee7267f org/touch.org --- a/org/touch.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/touch.org Tue Mar 05 18:55:21 2013 +0000 @@ -552,6 +552,9 @@ standard-debug-controls (fn [world] + (let [timer (IsoTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) (if record? (Capture/captureVideo world @@ -566,6 +569,9 @@ (File. "/home/r/proj/cortex/render/touch-cube/touch/")))))))) #+end_src +#+results: test-touch-1 +: #'cortex.test.touch/test-basic-touch + ** Basic Touch Demonstration #+begin_html @@ -656,6 +662,9 @@ standard-debug-controls (fn [world] + (let [timer (IsoTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) (if record? (Capture/captureVideo world @@ -670,6 +679,9 @@ (File. "/home/r/proj/cortex/render/worm-touch/touch/")))))))) #+end_src +#+results: test-touch-2 +: #'cortex.test.touch/test-worm-touch + ** Worm Touch Demonstration #+begin_html
@@ -747,11 +759,14 @@ (ns cortex.test.touch (:use (cortex world util sense body touch)) (:use cortex.test.body) - (:import com.aurellem.capture.Capture) + (:import (com.aurellem.capture Capture IsoTimer)) (:import java.io.File) (:import (com.jme3.math Vector3f ColorRGBA))) #+end_src +#+results: test-touch-header +: com.jme3.math.ColorRGBA + * Source Listing - [[../src/cortex/touch.clj][cortex.touch]] - [[../src/cortex/test/touch.clj][cortex.test.touch]] @@ -762,23 +777,26 @@ - [[http://hg.bortreb.com ][source-repository]] * Next -So far I've implemented simulated Vision, Hearing, and Touch, the most -obvious and prominent senses that humans have. Smell and Taste shall -remain unimplemented for now. This accounts for the "five senses" that -feature so prominently in our lives. But humans have far more than the -five main senses. There are internal chemical senses, pain (which is -*not* the same as touch), heat sensitivity, and our sense of balance, -among others. One extra sense is so important that I must implement it -to have a hope of making creatures that can gracefully control their -own bodies. It is Proprioception, which is the sense of the location -of each body part in relation to the other body parts. +So far I've implemented simulated Vision, Hearing, and +Touch, the most obvious and prominent senses that humans +have. Smell and Taste shall remain unimplemented for +now. This accounts for the "five senses" that feature so +prominently in our lives. But humans have far more than the +five main senses. There are internal chemical senses, pain +(which is *not* the same as touch), heat sensitivity, and +our sense of balance, among others. One extra sense is so +important that I must implement it to have a hope of making +creatures that can gracefully control their own bodies. It +is Proprioception, which is the sense of the location of +each body part in relation to the other body parts. -Close your eyes, and touch your nose with your right index finger. How -did you do it? You could not see your hand, and neither your hand nor -your nose could use the sense of touch to guide the path of your hand. -There are no sound cues, and Taste and Smell certainly don't provide -any help. You know where your hand is without your other senses -because of Proprioception. +Close your eyes, and touch your nose with your right index +finger. How did you do it? You could not see your hand, and +neither your hand nor your nose could use the sense of touch +to guide the path of your hand. There are no sound cues, +and Taste and Smell certainly don't provide any help. You +know where your hand is without your other senses because of +Proprioception. Onward to [[./proprioception.org][proprioception]]! diff -r eb7c94a03188 -r 7239aee7267f org/util.org --- a/org/util.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/util.org Tue Mar 05 18:55:21 2013 +0000 @@ -100,6 +100,7 @@ (:import java.awt.image.BufferedImage) (:import javax.swing.JPanel) (:import javax.swing.JFrame) + (:import ij.ImagePlus) (:import javax.swing.SwingUtilities) (:import com.jme3.scene.plugins.blender.BlenderModelLoader) (:import (java.util.logging Level Logger))) @@ -491,7 +492,7 @@ (controlUpdate [tpf] (.setText text (format "%.2f" - (float (/ (.getTime timer) 1000))))) + (float (.getTimeInSeconds timer))))) (controlRender [_ _]))) (.attachChild (.getGuiNode world) text))) #+end_src @@ -532,6 +533,18 @@ (view (doto (Node.) 
(.attachChild (box 1 1 1 :color color)))))) +(extend-type ij.ImagePlus + Viewable + (view [image] + (.show image))) + +(extend-type java.awt.image.BufferedImage + Viewable + (view + [image] + (view (ImagePlus. "view-buffered-image" image)))) + + (defprotocol Textual (text [something] "Display a detailed textual analysis of the given object.")) diff -r eb7c94a03188 -r 7239aee7267f org/vision.org --- a/org/vision.org Tue Feb 26 16:31:29 2013 +0000 +++ b/org/vision.org Tue Mar 05 18:55:21 2013 +0000 @@ -149,26 +149,34 @@ (defn add-eye! "Create a Camera centered on the current position of 'eye which - follows the closest physical node in 'creature and sends visual - data to 'continuation. The camera will point in the X direction and - use the Z vector as up as determined by the rotation of these - vectors in blender coordinate space. Use XZY rotation for the node - in blender." + follows the closest physical node in 'creature. The camera will + point in the X direction and use the Z vector as up as determined + by the rotation of these vectors in blender coordinate space. Use + XZY rotation for the node in blender." [#^Node creature #^Spatial eye] (let [target (closest-node creature eye) - [cam-width cam-height] (eye-dimensions eye) + [cam-width cam-height] + ;;[640 480] ;; graphics card on laptop doesn't support + ;; arbitray dimensions. + (eye-dimensions eye) cam (Camera. cam-width cam-height) rot (.getWorldRotation eye)] (.setLocation cam (.getWorldTranslation eye)) (.lookAtDirection - cam ; this part is not a mistake and - (.mult rot Vector3f/UNIT_X) ; is consistent with using Z in - (.mult rot Vector3f/UNIT_Y)) ; blender as the UP vector. + cam ; this part is not a mistake and + (.mult rot Vector3f/UNIT_X) ; is consistent with using Z in + (.mult rot Vector3f/UNIT_Y)) ; blender as the UP vector. (.setFrustumPerspective - cam 45 (/ (.getWidth cam) (.getHeight cam)) 1 1000) + cam (float 45) + (float (/ (.getWidth cam) (.getHeight cam))) + (float 1) + (float 1000)) (bind-sense target cam) cam)) #+end_src +#+results: add-eye +: #'cortex.vision/add-eye! + Here, the camera is created based on metadata on the eye-node and attached to the nearest physical object with =bind-sense= ** The Retina @@ -280,6 +288,7 @@ #+name: add-camera #+begin_src clojure +(in-ns 'cortex.vision) (defn add-camera! "Add a camera to the world, calling continuation on every frame produced." @@ -295,6 +304,9 @@ (.attachScene (.getRootNode world))))) #+end_src +#+results: add-camera +: #'cortex.vision/add-camera! + The eye's continuation function should register the viewport with the simulation the first time it is called, use the CPU to extract the @@ -399,8 +411,8 @@ #+name: main #+begin_src clojure (defn vision! - "Returns a function which returns visual sensory data when called - inside a running simulation." + "Returns a list of functions, each of which returns visual sensory + data when called inside a running simulation." [#^Node creature & {skip :skip :or {skip 0}}] (reduce concat @@ -480,13 +492,19 @@ (if record? (File. "/home/r/proj/cortex/render/vision/2"))) BufferedImage!)) + (let [timer (IsoTimer. 60)] + (.setTimer world timer) + (display-dilated-time world timer)) ;; This is here to restore the main view - ;; after the other views have completed processing + ;; after the other views have completed processing (add-camera! world (.getCamera world) no-op))) (fn [world tpf] (.rotate candy (* tpf 0.2) 0 0)))))) #+end_src +#+results: test-1 +: #'cortex.test.vision/test-pipeline + #+begin_html