1 <?xml version="1.0" encoding="utf-8"?>
|
rlm@109
|
2 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
rlm@109
|
3 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
rlm@109
|
4 <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
|
rlm@109
|
5 <head>
|
rlm@109
|
6 <title>Transcript of Aaron Sloman - Artificial Intelligence - Psychology - Oxford Interview</title>
|
rlm@109
|
7 <meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
|
rlm@109
|
8 <meta name="title" content="Transcript of Aaron Sloman - Artificial Intelligence - Psychology - Oxford Interview"/>
|
rlm@109
|
9 <meta name="generator" content="Org-mode"/>
|
rlm@109
|
10 <meta name="generated" content="2013-10-04 18:49:53 UTC"/>
|
rlm@109
|
11 <meta name="author" content="Dylan Holmes"/>
|
rlm@109
|
12 <meta name="description" content=""/>
|
rlm@109
|
13 <meta name="keywords" content=""/>
|
rlm@109
|
14 <style type="text/css">
|
rlm@109
|
15 <!--/*--><![CDATA[/*><!--*/
|
rlm@109
|
16 html { font-family: Times, serif; font-size: 12pt; }
|
rlm@109
|
17 .title { text-align: center; }
|
rlm@109
|
18 .todo { color: red; }
|
rlm@109
|
19 .done { color: green; }
|
rlm@109
|
20 .tag { background-color: #add8e6; font-weight:normal }
|
rlm@109
|
21 .target { }
|
rlm@109
|
22 .timestamp { color: #bebebe; }
|
rlm@109
|
23 .timestamp-kwd { color: #5f9ea0; }
|
rlm@109
|
24 .right {margin-left:auto; margin-right:0px; text-align:right;}
|
rlm@109
|
25 .left {margin-left:0px; margin-right:auto; text-align:left;}
|
rlm@109
|
26 .center {margin-left:auto; margin-right:auto; text-align:center;}
|
rlm@109
|
27 p.verse { margin-left: 3% }
|
rlm@109
|
28 pre {
|
rlm@109
|
29 border: 1pt solid #AEBDCC;
|
rlm@109
|
30 background-color: #F3F5F7;
|
rlm@109
|
31 padding: 5pt;
|
rlm@109
|
32 font-family: courier, monospace;
|
rlm@109
|
33 font-size: 90%;
|
rlm@109
|
34 overflow:auto;
|
rlm@109
|
35 }
|
rlm@109
|
36 table { border-collapse: collapse; }
|
rlm@109
|
37 td, th { vertical-align: top; }
|
rlm@109
|
38 th.right { text-align:center; }
|
rlm@109
|
39 th.left { text-align:center; }
|
rlm@109
|
40 th.center { text-align:center; }
|
rlm@109
|
41 td.right { text-align:right; }
|
rlm@109
|
42 td.left { text-align:left; }
|
rlm@109
|
43 td.center { text-align:center; }
|
rlm@109
|
44 dt { font-weight: bold; }
|
rlm@109
|
45 div.figure { padding: 0.5em; }
|
rlm@109
|
46 div.figure p { text-align: center; }
|
rlm@109
|
47 div.inlinetask {
|
rlm@109
|
48 padding:10px;
|
rlm@109
|
49 border:2px solid gray;
|
rlm@109
|
50 margin:10px;
|
rlm@109
|
51 background: #ffffcc;
|
rlm@109
|
52 }
|
rlm@109
|
53 textarea { overflow-x: auto; }
|
rlm@109
|
54 .linenr { font-size:smaller }
|
rlm@109
|
55 .code-highlighted {background-color:#ffff00;}
|
rlm@109
|
56 .org-info-js_info-navigation { border-style:none; }
|
rlm@109
|
57 #org-info-js_console-label { font-size:10px; font-weight:bold;
|
rlm@109
|
58 white-space:nowrap; }
|
rlm@109
|
59 .org-info-js_search-highlight {background-color:#ffff00; color:#000000;
|
rlm@109
|
60 font-weight:bold; }
|
rlm@109
|
61 /*]]>*/-->
|
rlm@109
|
62 </style>
|
rlm@109
|
63 <link rel="stylesheet" type="text/css" href="../css/sloman.css" />
|
rlm@109
|
64 <script type="text/javascript">
|
rlm@109
|
65 <!--/*--><![CDATA[/*><!--*/
|
rlm@109
|
// Highlight an Org-mode code reference link and the line it points to.
function CodeHighlightOn(elem, id)
{
  var target = document.getElementById(id);
  if(null != target) {
    elem.cacheClassElem = elem.className;
    elem.cacheClassTarget = target.className;
    target.className = "code-highlighted";
    elem.className = "code-highlighted";
  }
}
// Restore the classes cached by CodeHighlightOn.
function CodeHighlightOff(elem, id)
{
  var target = document.getElementById(id);
  if(elem.cacheClassElem)
    elem.className = elem.cacheClassElem;
  // Guard against the target element having disappeared before restoring its class.
  if(null != target && elem.cacheClassTarget)
    target.className = elem.cacheClassTarget;
}
|
rlm@109
|
84 /*]]>*///-->
|
rlm@109
|
85 </script>
|
rlm@109
|
86
|
rlm@109
|
87 </head>
|
rlm@109
|
88 <body>
|
rlm@109
|
89
|
rlm@109
|
90
|
rlm@109
|
91 <div id="content">
|
rlm@109
|
92 <h1 class="title">Transcript of Aaron Sloman - Artificial Intelligence - Psychology - Oxford Interview</h1>
|
rlm@109
|
93
|
rlm@109
|
94
|
rlm@109
|
95 <blockquote>
<p>
<b>Editor's note:</b> This is a working draft transcript which I made of
<a href="http://www.youtube.com/watch?feature=player_detailpage&v=iuH8dC7Snno">this nice interview</a> of Aaron Sloman. Having just finished one
iteration of transcription, I still need to go in and clean up the
formatting and fix the parts that I misheard, so you can expect the
text to improve significantly in the near future.
</p>
<p>
To the extent that this is my work, you have my permission to make
copies of this transcript for your own purposes. Also, feel free to
e-mail me with comments or corrections.
</p>
<p>
You can send mail to <code>transcript@aurellem.org</code>.
</p>
<p>
Cheers,
</p>
<p>
—Dylan
</p>
</blockquote>
|
rlm@109
|
134
|
rlm@109
|
135
|
rlm@109
|
136
|
rlm@109
|
137
|
rlm@109
|
138
|
rlm@109
|
139 <div id="table-of-contents">
|
rlm@109
|
140 <h2>Table of Contents</h2>
|
rlm@109
|
141 <div id="text-table-of-contents">
|
rlm@109
|
142 <ul>
|
rlm@109
|
143 <li><a href="#sec-1">1 Introduction</a>
|
rlm@109
|
144 <ul>
|
rlm@109
|
145 <li><a href="#sec-1-1">1.1 Aaron Sloman evolves into a philosopher of AI</a></li>
|
rlm@109
|
146 <li><a href="#sec-1-2">1.2 AI is hard, in part because there are tempting non-problems.</a></li>
|
rlm@109
|
147 </ul>
|
rlm@109
|
148 </li>
|
rlm@109
|
149 <li><a href="#sec-2">2 What problems of intelligence did evolution solve?</a>
|
rlm@109
|
150 <ul>
|
rlm@109
|
151 <li><a href="#sec-2-1">2.1 Intelligence consists of solutions to many evolutionary problems; no single development (e.g. communication) was key to human-level intelligence.</a></li>
|
rlm@109
|
152 <li><a href="#sec-2-2">2.2 Speculation about how communication might have evolved from internal lanagues.</a></li>
|
rlm@109
|
153 </ul>
|
rlm@109
|
154 </li>
|
rlm@109
|
155 <li><a href="#sec-3">3 How do language and internal states relate to AI?</a>
|
rlm@109
|
156 <ul>
|
rlm@109
|
157 <li><a href="#sec-3-1">3.1 In AI, false assumptions can lead investigators astray.</a></li>
|
rlm@109
|
158 <li><a href="#sec-3-2">3.2 Example: Vision is not just about finding surfaces, but about finding affordances.</a></li>
|
rlm@109
|
159 <li><a href="#sec-3-3">3.3 Online and offline intelligence</a></li>
|
rlm@109
|
160 <li><a href="#sec-3-4">3.4 Example: Even toddlers use sophisticated geometric knowledge</a></li>
|
rlm@109
|
161 </ul>
|
rlm@109
|
162 </li>
|
rlm@109
|
163 <li><a href="#sec-4">4 Animal intelligence</a>
|
rlm@109
|
164 <ul>
|
rlm@109
|
165 <li><a href="#sec-4-1">4.1 The priority is <i>cataloguing</i> what competences have evolved, not ranking them.</a></li>
|
rlm@109
|
166 <li><a href="#sec-4-2">4.2 AI can be used to test philosophical theories</a></li>
|
rlm@109
|
167 </ul>
|
rlm@109
|
168 </li>
|
rlm@109
|
169 <li><a href="#sec-5">5 Is abstract general intelligence feasible?</a>
|
rlm@109
|
170 <ul>
|
rlm@109
|
171 <li><a href="#sec-5-1">5.1 It's misleading to compare the brain and its neurons to a computer made of transistors</a></li>
|
rlm@109
|
172 <li><a href="#sec-5-2">5.2 For example, brains may rely heavily on chemical information processing</a></li>
|
rlm@109
|
173 <li><a href="#sec-5-3">5.3 Brain algorithms may simply be optimized for certain kinds of information processing other than bit manipulations</a></li>
|
rlm@109
|
174 <li><a href="#sec-5-4">5.4 Example: find the shortest path by dangling strings</a></li>
|
rlm@109
|
175 <li><a href="#sec-5-5">5.5 In sum, we know surprisingly little about the kinds of problems that evolution solved, and the manner in which they were solved.</a></li>
|
rlm@109
|
176 </ul>
|
rlm@109
|
177 </li>
|
rlm@109
|
178 <li><a href="#sec-6">6 A singularity of cognitive catch-up</a>
|
rlm@109
|
179 <ul>
|
rlm@109
|
180 <li><a href="#sec-6-1">6.1 What if it will take a lifetime to learn enough to make something new?</a></li>
|
rlm@109
|
181 </ul>
|
rlm@109
|
182 </li>
|
rlm@109
|
183 <li><a href="#sec-7">7 Spatial reasoning: a difficult problem</a>
|
rlm@109
|
184 <ul>
|
rlm@109
|
185 <li><a href="#sec-7-1">7.1 Example: Spatial proof that the angles of any triangle add up to a half-circle</a></li>
|
rlm@109
|
186 <li><a href="#sec-7-2">7.2 Geometric results are fundamentally different than experimental results in chemistry or physics.</a></li>
|
rlm@109
|
187 </ul>
|
rlm@109
|
188 </li>
|
rlm@109
|
189 <li><a href="#sec-8">8 Is near-term artificial general intelligence likely?</a>
|
rlm@109
|
190 <ul>
|
rlm@109
|
191 <li><a href="#sec-8-1">8.1 Two interpretations: a single mechanism for all problems, or many mechanisms unified in one program.</a></li>
|
rlm@109
|
192 </ul>
|
rlm@109
|
193 </li>
|
rlm@109
|
194 <li><a href="#sec-9">9 Abstract General Intelligence impacts</a></li>
|
rlm@109
|
195 </ul>
|
rlm@109
|
196 </div>
|
rlm@109
|
197 </div>
|
rlm@109
|
198
|
rlm@109
|
199 <div id="outline-container-1" class="outline-2">
|
rlm@109
|
200 <h2 id="sec-1"><span class="section-number-2">1</span> Introduction</h2>
|
rlm@109
|
201 <div class="outline-text-2" id="text-1">
|
rlm@109
|
202
|
rlm@109
|
203
|
rlm@109
|
204
|
rlm@109
|
205 </div>
|
rlm@109
|
206
|
rlm@109
|
207 <div id="outline-container-1-1" class="outline-3">
|
rlm@109
|
208 <h3 id="sec-1-1"><span class="section-number-3">1.1</span> Aaron Sloman evolves into a philosopher of AI</h3>
|
rlm@109
|
209 <div class="outline-text-3" id="text-1-1">
|
rlm@109
|
210
|
rlm@109
|
<p>[0:09] My name is Aaron Sloman. My first degree many years ago at
Cape Town University was in Physics and Mathematics, and I intended to
go and be a mathematician. I came to Oxford and encountered
philosophers — I had started reading philosophy and discussing
philosophy before then, and then I found that there were philosophers
who said things about mathematics that I thought were wrong, so I
gradually got more and more involved in [philosophy] discussions and
switched to doing a philosophy DPhil. Then I became a philosophy
lecturer and about six years later, I was introduced to artificial
intelligence when I was a lecturer at Sussex University in philosophy
and I very soon became convinced that the best way to make progress in
both areas of philosophy (including philosophy of mathematics which I
felt I hadn't dealt with adequately in my DPhil) about the philosophy
of mathematics, philosophy of mind, philosophy of language and all
those things—the best way was to try to design and test working
fragments of mind and maybe eventually put them all together but
initially just working fragments that would do various things.
</p>
|
rlm@109
|
229 <p>
|
rlm@109
|
230 [1:12] And I learned to program and ~ with various other people
|
rlm@109
|
231 including ~Margaret Boden whom you've interviewed, developed—helped
|
rlm@109
|
232 develop an undergraduate degree in AI and other things and also began
|
rlm@109
|
233 to do research in AI and so on which I thought of as doing philosophy,
|
rlm@109
|
234 primarily.
|
rlm@109
|
235 </p>
|
rlm@109
|
236 <p>
|
rlm@109
|
237 [1:29] And then I later moved to the University of Birmingham and I
|
rlm@109
|
238 was there — I came in 1991 — and I've been retired for a while but
|
rlm@109
|
239 I'm not interested in golf or gardening so I just go on doing full
|
rlm@109
|
240 time research and my department is happy to keep me on without paying
|
rlm@109
|
241 me and provide space and resources and I come, meeting bright people
|
rlm@109
|
242 at conferences and try to learn and make progress if I can.
|
rlm@109
|
243 </p>
|
rlm@109
|
244 </div>
|
rlm@109
|
245
|
rlm@109
|
246 </div>
|
rlm@109
|
247
|
rlm@109
|
248 <div id="outline-container-1-2" class="outline-3">
|
rlm@109
|
249 <h3 id="sec-1-2"><span class="section-number-3">1.2</span> AI is hard, in part because there are tempting non-problems.</h3>
|
rlm@109
|
250 <div class="outline-text-3" id="text-1-2">
|
rlm@109
|
251
|
rlm@109
|
252
|
rlm@109
|
253 <p>
|
rlm@109
|
254 One of the things I learnt and understood more and more over the many
|
rlm@109
|
255 years — forty years or so since I first encountered AI — is how
|
rlm@109
|
256 hard the problems are, and in part that's because it's very often
|
rlm@109
|
257 tempting to <i>think</i> the problem is something different from what it
|
rlm@109
|
258 actually is, and then people design solutions to the non-problems, and
|
rlm@109
|
259 I think of most of my work now as just helping to clarify what the
|
rlm@109
|
260 problems are: what is it that we're trying to explain — and maybe
|
rlm@109
|
261 this is leading into what you wanted to talk about:
|
rlm@109
|
262 </p>
|
rlm@109
|
263 <p>
|
rlm@109
|
264 I now think that one of the ways of getting a deep understanding of
|
rlm@109
|
265 that is to find out what were the problems that biological evolution
|
rlm@109
|
266 solved, because we are a product of <i>many</i> solutions to <i>many</i>
|
rlm@109
|
267 problems, and if we just try to go in and work out what the whole
|
rlm@109
|
268 system is doing, we may get it all wrong, or badly wrong.
|
rlm@109
|
269 </p>
|
rlm@109
|
270
|
rlm@109
|
271 </div>
|
rlm@109
|
272 </div>
|
rlm@109
|
273
|
rlm@109
|
274 </div>
|
rlm@109
|
275
|
rlm@109
|
276 <div id="outline-container-2" class="outline-2">
|
rlm@109
|
277 <h2 id="sec-2"><span class="section-number-2">2</span> What problems of intelligence did evolution solve?</h2>
|
rlm@109
|
278 <div class="outline-text-2" id="text-2">
|
rlm@109
|
279
|
rlm@109
|
280
|
rlm@109
|
281
|
rlm@109
|
282 </div>
|
rlm@109
|
283
|
rlm@109
|
284 <div id="outline-container-2-1" class="outline-3">
|
rlm@109
|
285 <h3 id="sec-2-1"><span class="section-number-3">2.1</span> Intelligence consists of solutions to many evolutionary problems; no single development (e.g. communication) was key to human-level intelligence.</h3>
|
rlm@109
|
286 <div class="outline-text-3" id="text-2-1">
|
rlm@109
|
287
|
rlm@109
|
288
|
rlm@109
|
289 <p>
|
rlm@109
|
290 [2:57] Well, first I would challenge that we are the dominant
|
rlm@109
|
291 species. I know it looks like that but actually if you count biomass,
|
rlm@109
|
292 if you count number of species, if you count number of individuals,
|
rlm@109
|
293 the dominant species are microbes — maybe not one of them but anyway
|
rlm@109
|
294 they're the ones who dominate in that sense, and furthermore we are
|
rlm@109
|
295 mostly — we are largely composed of microbes, without which we
|
rlm@109
|
296 wouldn't survive.
|
rlm@109
|
297 </p>
|
rlm@109
|
298
|
rlm@109
|
299 <p>
|
rlm@109
|
300 [3:27] But there are things that make humans (you could say) best at
|
rlm@109
|
301 those things, or worst at those things, but it's a combination. And I
|
rlm@109
|
302 think it was a collection of developments of which there isn't any
|
rlm@109
|
303 single one. [] there might be, some people say, human language which
|
rlm@109
|
304 changed everything. By our human language, they mean human
|
rlm@109
|
305 communication in words, but I think that was a later development from
|
rlm@109
|
306 what must have started as the use of <i>internal</i> forms of
|
rlm@109
|
307 representation — which are there in nest-building birds, in
|
rlm@109
|
308 pre-verbal children, in hunting mammals — because you can't take in
|
rlm@109
|
309 information about a complex structured environment in which things can
|
rlm@109
|
310 change and you may have to be able to work out what's possible and
|
rlm@109
|
311 what isn't possible, without having some way of representing the
|
rlm@109
|
312 components of the environment, their relationships, the kinds of
|
rlm@109
|
313 things they can and can't do, the kinds of things you might or might
|
rlm@109
|
314 not be able to do — and <i>that</i> kind of capability needs internal
|
rlm@109
|
315 languages, and I and colleagues [at Birmingham] have been referring to
|
rlm@109
|
316 them as generalized languages because some people object to
|
rlm@109
|
317 referring…to using language to refer to something that isn't used
|
rlm@109
|
318 for communication. But from that viewpoint, not only humans but many
|
rlm@109
|
319 other animals developed abilities to do things to their environment to
|
rlm@109
|
320 make them more friendly to themselves, which depended on being able to
|
rlm@109
|
321 represent possible futures, possible actions, and work out what's the
|
rlm@109
|
322 best thing to do.
|
rlm@109
|
323 </p>
|
rlm@109
|
324 <p>
|
rlm@109
|
325 [5:13] And nest-building in corvids for instance—crows, magpies,
|
rlm@109
|
[hawks], and so on — is way beyond what current robots can do, and
|
rlm@109
|
327 in fact I think most humans would be challenged if they had to go and
|
rlm@109
|
328 find a collection of twigs, one at a time, maybe bring them with just
|
rlm@109
|
329 one hand — or with your mouth — and assemble them into a
|
rlm@109
|
330 structure that, you know, is shaped like a nest, and is fairly rigid,
|
rlm@109
|
and you could trust your eggs in it when the wind blows. But they're
|
rlm@109
|
332 doing it, and so … they're not our evolutionary ancestors, but
|
rlm@109
|
333 they're an indication — and that example is an indication — of
|
rlm@109
|
334 what must have evolved in order to provide control over the
|
rlm@109
|
335 environment in <i>that</i> species.
|
rlm@109
|
336 </p>
|
rlm@109
|
337 </div>
|
rlm@109
|
338
|
rlm@109
|
339 </div>
|
rlm@109
|
340
|
rlm@109
|
341 <div id="outline-container-2-2" class="outline-3">
|
rlm@109
|
342 <h3 id="sec-2-2"><span class="section-number-3">2.2</span> Speculation about how communication might have evolved from internal lanagues.</h3>
|
rlm@109
|
343 <div class="outline-text-3" id="text-2-2">
|
rlm@109
|
344
|
rlm@109
|
345 <p>[5:56] And I think hunting mammals, fruit-picking mammals, mammals
|
rlm@109
|
346 that can rearrange parts of the environment, provide shelters, needed
|
rlm@109
|
347 to have …. also needed to have ways of representing possible
|
rlm@109
|
348 futures, not just what's there in the environment. I think at a later
|
rlm@109
|
349 stage, that developed into a form of communication, or rather the
|
rlm@109
|
350 <i>internal</i> forms of representation became usable as a basis for
|
rlm@109
|
351 providing [context] to be communicated. And that happened, I think,
|
rlm@109
|
352 initially through performing actions that expressed intentions, and
|
rlm@109
|
probably led to situations where an action (for instance, moving some
|
rlm@109
|
354 large object) was performed more easily, or more successfully, or more
|
rlm@109
|
355 accurately if it was done collaboratively. So someone who had worked
|
rlm@109
|
356 out what to do might start doing it, and then a conspecific might be
|
rlm@109
|
357 able to work out what the intention is, because that person has the
|
rlm@109
|
358 <i>same</i> forms of representation and can build theories about what's
|
rlm@109
|
359 going on, and might then be able to help.
|
rlm@109
|
360 </p>
|
rlm@109
|
361 <p>
|
rlm@109
|
362 [7:11] You can imagine that if that started happening more (a lot of
|
rlm@109
|
363 collaboration based on inferred intentions and plans) then sometimes
|
rlm@109
|
364 the inferences might be obscure and difficult, so the <i>actions</i> might
|
rlm@109
|
365 be enhanced to provide signals as to what the intention is, and what
|
rlm@109
|
366 the best way is to help, and so on.
|
rlm@109
|
367 </p>
|
rlm@109
|
368 <p>
|
rlm@109
|
369 [7:35] So, this is all handwaving and wild speculation, but I think
|
rlm@109
|
370 it's consistent with a large collection of facts which one can look at
|
rlm@109
|
371 — and find if one looks for them, but one won't know if [some]one
|
rlm@109
|
372 doesn't look for them — about the way children, for instance, who
|
rlm@109
|
373 can't yet talk, communicate, and the things they'll do, like going to
|
rlm@109
|
374 the mother and turning the face to point in the direction where the
|
rlm@109
|
375 child wants it to look and so on; that's an extreme version of action
|
rlm@109
|
376 indicating intention.
|
rlm@109
|
377 </p>
|
rlm@109
|
378 <p>
|
rlm@109
|
379 [8:03] Anyway. That's a very long roundabout answer to one conjecture
|
rlm@109
|
380 that the use of communicative language is what gave humans their
|
rlm@109
|
381 unique power to create and destroy and whatever, and I'm saying that
|
rlm@109
|
382 if by that you mean <i>communicative</i> language, then I'm saying there
|
rlm@109
|
383 was something before that which was <i>non</i>-communicative language, and I
|
rlm@109
|
384 suspect that noncommunicative language continues to play a deep role
|
rlm@109
|
385 in <i>all</i> human perception —in mathematical and scientific reasoning, in
|
rlm@109
|
386 problem solving — and we don't understand very much about it.
|
rlm@109
|
387 </p>
|
rlm@109
|
388 <p>
|
rlm@109
|
389 [8:48]
|
rlm@109
|
390 I'm sure there's a lot more to be said about the development of
|
rlm@109
|
391 different kinds of senses, the development of brain structures and
|
rlm@109
|
392 mechanisms is above all that, but perhaps I've droned on long enough
|
rlm@109
|
393 on that question.
|
rlm@109
|
394 </p>
|
rlm@109
|
395
|
rlm@109
|
396 </div>
|
rlm@109
|
397 </div>
|
rlm@109
|
398
|
rlm@109
|
399 </div>
|
rlm@109
|
400
|
rlm@109
|
401 <div id="outline-container-3" class="outline-2">
|
rlm@109
|
402 <h2 id="sec-3"><span class="section-number-2">3</span> How do language and internal states relate to AI?</h2>
|
rlm@109
|
403 <div class="outline-text-2" id="text-3">
|
rlm@109
|
404
|
rlm@109
|
405
|
rlm@109
|
406 <p>
|
rlm@109
|
407 [9:09] Well, I think most of the human and animal capabilities that
|
rlm@109
|
408 I've been referring to are not yet to be found in current robots or
|
rlm@109
|
409 [computing] systems, and I think there are two reasons for that: one
|
rlm@109
|
410 is that it's intrinsically very difficult; I think that in particular
|
rlm@109
|
411 it may turn out that the forms of information processing that one can
|
rlm@109
|
412 implement on digital computers as we currently know them may not be as
|
rlm@109
|
413 well suited to performing some of these tasks as other kinds of
|
rlm@109
|
414 computing about which we don't know so much — for example, I think
|
rlm@109
|
415 there may be important special features about <i>chemical</i> computers
|
rlm@109
|
416 which we might [talk about in a little bit? find out about].
|
rlm@109
|
417 </p>
|
rlm@109
|
418
|
rlm@109
|
419 </div>
|
rlm@109
|
420
|
rlm@109
|
421 <div id="outline-container-3-1" class="outline-3">
|
rlm@109
|
422 <h3 id="sec-3-1"><span class="section-number-3">3.1</span> In AI, false assumptions can lead investigators astray.</h3>
|
rlm@109
|
423 <div class="outline-text-3" id="text-3-1">
|
rlm@109
|
424
|
rlm@109
|
425 <p>[9:57] So, one of the problems then is that the tasks are hard … but
|
rlm@109
|
426 there's a deeper problem as to why AI hasn't made a great deal of
|
rlm@109
|
427 progress on these problems that I'm talking about, and that is that
|
rlm@109
|
428 most AI researchers assume things—and this is not just AI
|
rlm@109
|
researchers, but [also] philosophers, and psychologists, and people
|
rlm@109
|
430 studying animal behavior—make assumptions about what it is that
|
rlm@109
|
431 animals or humans do, for instance make assumptions about what vision
|
rlm@109
|
432 is for, or assumptions about what motivation is and how motivation
|
rlm@109
|
works, or assumptions about how learning works, and then they try —
|
rlm@109
|
434 the AI people try — to model [or] build systems that perform those
|
rlm@109
|
435 assumed functions. So if you get the <i>functions</i> wrong, then even if
|
rlm@109
|
436 you implement some of the functions that you're trying to implement,
|
rlm@109
|
437 they won't necessarily perform the tasks that the initial objective
|
rlm@109
|
438 was to imitate, for instance the tasks that humans, and nest-building
|
rlm@109
|
439 birds, and monkeys and so on can perform.
|
rlm@109
|
440 </p>
|
rlm@109
|
441 </div>
|
rlm@109
|
442
|
rlm@109
|
443 </div>
|
rlm@109
|
444
|
rlm@109
|
445 <div id="outline-container-3-2" class="outline-3">
|
rlm@109
|
446 <h3 id="sec-3-2"><span class="section-number-3">3.2</span> Example: Vision is not just about finding surfaces, but about finding affordances.</h3>
|
rlm@109
|
447 <div class="outline-text-3" id="text-3-2">
|
rlm@109
|
448
|
rlm@109
|
449 <p>[11:09] I'll give you a simple example — well, maybe not so simple,
|
rlm@109
|
450 but — It's often assumed that the function of vision in humans (and
|
rlm@109
|
451 in other animals with good eyesight and so on) is to take in optical
|
rlm@109
|
452 information that hits the retina, and form into the (maybe changing
|
rlm@109
|
453 — or, really, in our case definitely changing) patterns of
|
rlm@109
|
454 illumination where there are sensory receptors that detect those
|
rlm@109
|
455 patterns, and then somehow from that information (plus maybe other
|
rlm@109
|
456 information gained from head movement or from comparisons between two
|
rlm@109
|
457 eyes) to work out what there was in the environment that produced
|
rlm@109
|
458 those patterns, and that is often taken to mean “where were the
|
rlm@109
|
459 surfaces off which the light bounced before it came to me”. So
|
rlm@109
|
460 you essentially think of the task of the visual system as being to
|
rlm@109
|
461 reverse the image formation process: so the 3D structure's there, the
|
rlm@109
|
462 lens causes the image to form in the retina, and then the brain goes
|
rlm@109
|
463 back to a model of that 3D structure there. That's a very plausible
|
rlm@109
|
464 theory about vision, and it may be that that's a <i>subset</i> of what
|
rlm@109
|
465 human vision does, but I think James Gibson pointed out that that kind
|
rlm@109
|
466 of thing is not necessarily going to be very useful for an organism,
|
rlm@109
|
467 and it's very unlikely that that's the main function of perception in
|
rlm@109
|
468 general, namely to produce some physical description of what's out
|
rlm@109
|
469 there.
|
rlm@109
|
470 </p>
|
rlm@109
|
471 <p>
|
rlm@109
|
472 [12:37] What does an animal <i>need</i>? It needs to know what it can do,
|
rlm@109
|
473 what it can't do, what the consequences of its actions will be
|
rlm@109
|
474 …. so, he introduced the word <i>affordance</i>, so from his point of
|
rlm@109
|
475 view, the function of vision, perception, are to inform the organism
|
rlm@109
|
476 of what the <i>affordances</i> are for action, where that would mean what
|
rlm@109
|
477 the animal, <i>given</i> its morphology (what it can do with its mouth, its
|
rlm@109
|
478 limbs, and so on, and the ways it can move) what it can do, what its
|
rlm@109
|
479 needs are, what the obstacles are, and how the environment supports or
|
rlm@109
|
480 obstructs those possible actions.
|
rlm@109
|
481 </p>
|
rlm@109
|
482 <p>
|
rlm@109
|
483 [13:15] And that's a very different collection of information
|
rlm@109
|
484 structures that you need from, say, “where are all the
|
rlm@109
|
485 surfaces?”: if you've got all the surfaces, <i>deriving</i> the
|
rlm@109
|
486 affordances would still be a major task. So, if you think of the
|
rlm@109
|
487 perceptual system as primarily (for biological organisms) being
|
rlm@109
|
488 devices that provide information about affordances and so on, then the
|
rlm@109
|
489 tasks look very different. And most of the people working, doing
|
rlm@109
|
490 research on computer vision in robots, I think haven't taken all that
|
rlm@109
|
491 on board, so they're trying to get machines to do things which, even
|
rlm@109
|
492 if they were successful, would not make the robots very intelligent
|
rlm@109
|
493 (and in fact, even the ones they're trying to do are not really easy
|
rlm@109
|
494 to do, and they don't succeed very well— although, there's progress;
|
rlm@109
|
495 I shouldn't disparage it too much.)
|
rlm@109
|
496 </p>
|
rlm@109
|
497 </div>
|
rlm@109
|
498
|
rlm@109
|
499 </div>
|
rlm@109
|
500
|
rlm@109
|
501 <div id="outline-container-3-3" class="outline-3">
|
rlm@109
|
502 <h3 id="sec-3-3"><span class="section-number-3">3.3</span> Online and offline intelligence</h3>
|
rlm@109
|
503 <div class="outline-text-3" id="text-3-3">
|
rlm@109
|
504
|
rlm@109
|
505
|
rlm@109
|
506 <p>
|
rlm@109
|
507 [14:10] It gets more complex as animals get more sophisticated. So, I
|
rlm@109
|
508 like to make a distinction between online intelligence and offline
|
rlm@109
|
509 intelligence. So, for example, if I want to pick something up — like
|
rlm@109
|
510 this leaf <he plucks a leaf from the table> — I was able to select
|
rlm@109
|
511 it from all the others in there, and while moving my hand towards it,
|
rlm@109
|
512 I was able to guide its trajectory, making sure it was going roughly
|
rlm@109
|
513 in the right direction — as opposed to going out there, which
|
rlm@109
|
514 wouldn't have been able to pick it up — and these two fingers ended
|
rlm@109
|
515 up with a portion of the leaf between them, so that I was able to tell
|
rlm@109
|
516 when I'm ready to do that <he clamps the leaf between two fingers>
|
rlm@109
|
517 and at that point, I clamped my fingers and then I could pick up the
|
rlm@109
|
518 leaf.
|
rlm@109
|
519 </p>
|
rlm@109
|
520 <p>
|
rlm@109
|
521 [14:54] Whereas, — and that's an example of online intelligence:
|
rlm@109
|
522 during the performance of an action (both from the stage where it's
|
rlm@109
|
523 initiated, and during the intermediate stages, and where it's
|
rlm@109
|
524 completed) I'm taking in information relevant to controlling all those
|
rlm@109
|
525 stages, and that relevant information keeps changing. That means I
|
rlm@109
|
526 need stores of transient information which gets discarded almost
|
rlm@109
|
527 immediately and replaced or something. That's online intelligence. And
|
rlm@109
|
528 there are many forms; that's just one example, and Gibson discussed
|
rlm@109
|
529 quite a lot of examples which I won't try to replicate now.
|
rlm@109
|
530 </p>
|
rlm@109
|
531 <p>
|
rlm@109
|
532 [15:30] But in offline intelligence, you're not necessarily actually
|
rlm@109
|
533 <i>performing</i> the actions when you're using your intelligence; you're
|
rlm@109
|
534 thinking about <i>possible</i> actions. So, for instance, I could think
|
rlm@109
|
535 about how fast or by what route I would get back to the lecture room
|
rlm@109
|
536 if I wanted to [get to the next talk] or something. And I know where
|
rlm@109
|
537 the door is, roughly speaking, and I know roughly which route I would
|
rlm@109
|
538 take, when I go out, I should go to the left or to the right, because
|
rlm@109
|
539 I've stored information about where the spaces are, where the
|
rlm@109
|
540 buildings are, where the door was that we came out — but in using
|
rlm@109
|
541 that information to think about that route, I'm not actually
|
rlm@109
|
542 performing the action. I'm not even <i>simulating</i> it in detail: the
|
rlm@109
|
543 precise details of direction and speed and when to clamp my fingers,
|
rlm@109
|
544 or when to contract my leg muscles when walking, are all irrelevant to
|
rlm@109
|
545 thinking about a good route, or thinking about the potential things
|
rlm@109
|
546 that might happen on the way. Or what would be a good place to meet
|
rlm@109
|
547 someone who I think [for an acquaintance in particular] — [barber]
|
rlm@109
|
548 or something — I don't necessarily have to work out exactly <i>where</i>
|
rlm@109
|
549 the person's going to stand, or from what angle I would recognize
|
rlm@109
|
550 them, and so on.
|
rlm@109
|
551 </p>
|
rlm@109
|
552 <p>
|
rlm@109
|
553 [16:46] So, offline intelligence — which I think became not just a
|
rlm@109
|
554 human competence; I think there are other animals that have aspects of
|
rlm@109
|
555 it: Squirrels are very impressive as you watch them. Gray squirrels at
|
rlm@109
|
556 any rate, as you watch them defeating squirrel-proof birdfeeders, seem
|
rlm@109
|
557 to have a lot of that [offline intelligence], as well as the online
|
rlm@109
|
558 intelligence when they eventually perform the action they've worked
|
rlm@109
|
559 out [] that will get them to the nuts.
|
rlm@109
|
560 </p>
|
rlm@109
|
561 <p>
|
rlm@109
|
562 [17:16] And I think that what happened during our evolution is that
|
rlm@109
|
563 mechanisms for acquiring and processing and storing and manipulating
|
rlm@109
|
564 information that is more and more remote from the performance of
|
rlm@109
|
565 actions developed. An example is taking in information about where
|
rlm@109
|
566 locations are that you might need to go to infrequently: There's a
|
rlm@109
|
567 store of a particular type of material that's good for building on
|
rlm@109
|
568 roofs of houses or something out around there in some
|
rlm@109
|
569 direction. There's a good place to get water somewhere in another
|
rlm@109
|
570 direction. There are people that you'd like to go and visit in
|
rlm@109
|
571 another place, and so on.
|
rlm@109
|
572 </p>
|
rlm@109
|
573 <p>
|
rlm@109
|
574 [17:59] So taking in information about an extended environment and
|
rlm@109
|
575 building it into a structure that you can make use of for different
|
rlm@109
|
576 purposes is another example of offline intelligence. And when we do
|
rlm@109
|
577 that, we sometimes use only our brains, but in modern times, we also
|
rlm@109
|
578 learned how to make maps on paper and walls and so on. And it's not
|
rlm@109
|
579 clear whether the stuff inside our heads has the same structures as
|
rlm@109
|
580 the maps we make on paper: the maps on paper have a different
|
rlm@109
|
581 function; they may be used to communicate with others, or meant for
|
rlm@109
|
582 <i>looking</i> at, whereas the stuff in your head you don't <i>look</i> at; you
|
rlm@109
|
583 use it in some other way.
|
rlm@109
|
584 </p>
|
rlm@109
|
585 <p>
|
rlm@109
|
586 [18:46] So, what I'm getting at is that there's a great deal of human
|
rlm@109
|
587 intelligence (and animal intelligence) which is involved in what's
|
rlm@109
|
588 possible in the future, what exists in distant places, what might have
|
rlm@109
|
589 happened in the past (sometimes you need to know why something is as
|
rlm@109
|
590 it is, because that might be relevant to what you should or shouldn't
|
rlm@109
|
591 do in the future, and so on), and I think there was something about
|
rlm@109
|
592 human evolution that extended that offline intelligence way beyond
|
rlm@109
|
593 that of animals. And I don't think it was <i>just</i> human language, (but
|
rlm@109
|
594 human language had something to do with it) but I think there was
|
rlm@109
|
595 something else that came earlier than language which involves the
|
rlm@109
|
596 ability to use your offline intelligence to discover something that
|
rlm@109
|
597 has a rich mathematical structure.
|
rlm@109
|
598 </p>
|
rlm@109
|
599 </div>
|
rlm@109
|
600
|
rlm@109
|
601 </div>
|
rlm@109
|
602
|
rlm@109
|
603 <div id="outline-container-3-4" class="outline-3">
|
rlm@109
|
604 <h3 id="sec-3-4"><a name="example-gap" id="example-gap"></a><span class="section-number-3">3.4</span> Example: Even toddlers use sophisticated geometric knowledge</h3>
|
rlm@109
|
605 <div class="outline-text-3" id="text-3-4">
|
rlm@109
|
606
|
rlm@109
|
607 <p>[19:44] I'll give you a simple example: if you look through a gap, you
|
rlm@109
|
608 can see something that's on the other side of the gap. Now, you
|
rlm@109
|
609 <i>might</i> see what you want to see, or you might see only part of it. If
|
rlm@109
|
610 you want to see more of it, which way would you move? Well, you could
|
rlm@109
|
611 either move <i>sideways</i>, and see through the gap—and see it roughly
|
rlm@109
|
612 the same amount but a different part of it [if it's a ????], or you
|
rlm@109
|
613 could move <i>towards</i> the gap and then your view will widen as you
|
rlm@109
|
614 approach the gap. Now, there's a bit of mathematics in there, insofar
|
rlm@109
|
615 as you are implicitly assuming that information travels in straight
|
rlm@109
|
616 lines, and as you go closer to a gap, the straight lines that you can
|
rlm@109
|
617 draw from where you are through the gap, widen as you approach that
|
rlm@109
|
618 gap. Now, there's a kind of theorem of Euclidean geometry in there
|
rlm@109
|
619 which I'm not going to try to state very precisely (and as far as I
|
rlm@109
|
620 know, wasn't stated explicitly in Euclidean geometry) but it's
|
rlm@109
|
621 something every toddler— human toddler—learns. (Maybe other
|
rlm@109
|
622 animals also know it, I don't know.) But there are many more things,
|
rlm@109
|
623 actions to perform, to get you more information about things, actions
|
rlm@109
|
624 to perform to conceal information from other people, actions that will
|
rlm@109
|
625 enable you to operate, to act on a rigid object in one place in order
|
rlm@109
|
626 to produce an effect on another place. So, there's a lot of stuff that
|
rlm@109
|
627 involves lines and rotations and angles and speeds and so on that I
|
rlm@109
|
628 think humans (maybe, to a lesser extent, other animals) develop the
|
rlm@109
|
629 ability to think about in a generic way. That means that you could
|
rlm@109
|
630 take out the generalizations from the particular contexts and then
|
rlm@109
|
re-use them in new contexts in ways that I think are not yet
|
rlm@109
|
632 represented at all in AI and in theories of human learning in any []
|
rlm@109
|
633 way — although some people are trying to study learning of mathematics.
|
rlm@109
|
634 </p>
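<p>
<b>Aside</b> (not part of the interview): a minimal sketch of the geometry
being gestured at here. Assuming only that light travels in straight lines,
the angular width of what you can see through a gap of half-width
<code>w</code> from a distance <code>d</code> is <code>2·atan(w/d)</code>,
which grows as <code>d</code> shrinks; that is, the view widens as you
approach the gap. The helper function and the numbers below are purely
illustrative.
</p>
<pre>
// Visible angular width (in radians) through a gap of the given half-width,
// seen from the given distance, assuming straight-line propagation of light.
function visibleAngle(halfWidth, distance) {
  return 2 * Math.atan(halfWidth / distance);
}

// Walking toward a half-metre-wide gap: the view widens at every step.
for (var d = 8; d >= 1; d = d / 2) {
  console.log("distance " + d + " m: " +
              (visibleAngle(0.25, d) * 180 / Math.PI).toFixed(1) + " degrees");
}
</pre>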
|
rlm@109
|
635 </div>
|
rlm@109
|
636 </div>
|
rlm@109
|
637
|
rlm@109
|
638 </div>
|
rlm@109
|
639
|
rlm@109
|
640 <div id="outline-container-4" class="outline-2">
|
rlm@109
|
641 <h2 id="sec-4"><span class="section-number-2">4</span> Animal intelligence</h2>
|
rlm@109
|
642 <div class="outline-text-2" id="text-4">
|
rlm@109
|
643
|
rlm@109
|
644
|
rlm@109
|
645
|
rlm@109
|
646 </div>
|
rlm@109
|
647
|
rlm@109
|
648 <div id="outline-container-4-1" class="outline-3">
|
rlm@109
|
649 <h3 id="sec-4-1"><span class="section-number-3">4.1</span> The priority is <i>cataloguing</i> what competences have evolved, not ranking them.</h3>
|
rlm@109
|
650 <div class="outline-text-3" id="text-4-1">
|
rlm@109
|
651
|
rlm@109
|
652 <p>[22:03] I wasn't going to challenge the claim that humans can do more
|
rlm@109
|
653 sophisticated forms of [tracking], just to mention that there are some
|
rlm@109
|
654 things that other animals can do which are in some ways comparable,
|
rlm@109
|
655 and some ways superior to [things] that humans can do. In particular,
|
rlm@109
|
there are species of birds and also, I think, some rodents —
squirrels, or something — I don't know enough about the variety —
|
rlm@109
|
658 that can hide nuts and remember where they've hidden them, and go back
|
rlm@109
|
659 to them. And there have been tests which show that some birds are able
|
rlm@109
|
660 to hide tens — you know, [eighteen] or something nuts — and to
|
rlm@109
|
661 remember which ones have been taken, which ones haven't, and so
|
rlm@109
|
662 on. And I suspect most humans can't do that. I wouldn't want to say
|
rlm@109
|
663 categorically that maybe we couldn't, because humans are very
|
rlm@109
|
664 [varied], and also [a few] people can develop particular competences
|
rlm@109
|
665 through training. But it's certainly not something I can do.
|
rlm@109
|
666 </p>
|
rlm@109
|
667
|
rlm@109
|
668 </div>
|
rlm@109
|
669
|
rlm@109
|
670 </div>
|
rlm@109
|
671
|
rlm@109
|
672 <div id="outline-container-4-2" class="outline-3">
|
rlm@109
|
673 <h3 id="sec-4-2"><span class="section-number-3">4.2</span> AI can be used to test philosophical theories</h3>
|
rlm@109
|
674 <div class="outline-text-3" id="text-4-2">
|
rlm@109
|
675
|
rlm@109
|
676 <p>[23:01] But I also would like to say that I am not myself particularly
|
rlm@109
|
677 interested in trying to align animal intelligences according to any
|
rlm@109
|
678 kind of scale of superiority; I'm just trying to understand what it
|
rlm@109
|
679 was that biological evolution produced, and how it works, and I'm
|
rlm@109
|
680 interested in AI <i>mainly</i> because I think that when one comes up with
|
rlm@109
|
681 theories about how these things work, one needs to have some way of
|
rlm@109
|
682 testing the theory. And AI provides ways of implementing and testing
|
rlm@109
|
683 theories that were not previously available: Immanuel Kant was trying
|
rlm@109
|
684 to come up with theories about how minds work, but he didn't have any
|
rlm@109
|
685 kind of a mechanism that he could build to test his theory about the
|
rlm@109
|
686 nature of mathematical knowledge, for instance, or how concepts were
|
rlm@109
|
687 developed from babyhood onward. Whereas now, if we do develop a
|
rlm@109
|
688 theory, we have a criterion of adequacy, namely it should be precise
|
rlm@109
|
enough and rich enough and detailed enough to enable a model to be
|
rlm@109
|
690 built. And then we can see if it works.
|
rlm@109
|
691 </p>
|
rlm@109
|
692 <p>
|
rlm@109
|
693 [24:07] If it works, it doesn't mean we've proved that the theory is
|
rlm@109
|
694 correct; it just shows it's a candidate. And if it doesn't work, then
|
rlm@109
|
695 it's not a candidate as it stands; it would need to be modified in
|
rlm@109
|
696 some way.
|
rlm@109
|
697 </p>
|
rlm@109
|
698 </div>
|
rlm@109
|
699 </div>
|
rlm@109
|
700
|
rlm@109
|
701 </div>
|
rlm@109
|
702
|
rlm@109
|
703 <div id="outline-container-5" class="outline-2">
|
rlm@109
|
704 <h2 id="sec-5"><span class="section-number-2">5</span> Is abstract general intelligence feasible?</h2>
|
rlm@109
|
705 <div class="outline-text-2" id="text-5">
|
rlm@109
|
706
|
rlm@109
|
707
|
rlm@109
|
708
|
rlm@109
|
709 </div>
|
rlm@109
|
710
|
rlm@109
|
711 <div id="outline-container-5-1" class="outline-3">
|
rlm@109
|
712 <h3 id="sec-5-1"><span class="section-number-3">5.1</span> It's misleading to compare the brain and its neurons to a computer made of transistors</h3>
|
rlm@109
|
713 <div class="outline-text-3" id="text-5-1">
|
rlm@109
|
714
|
rlm@109
|
715 <p>[24:27] I think there's a lot of optimism based on false clues:
|
rlm@109
|
716 the…for example, one of the false clues is to count the number of
|
rlm@109
|
717 neurons in the brain, and then talk about the number of transistors
|
rlm@109
|
718 you can fit into a computer or something, and then compare them. It
|
rlm@109
|
719 might turn out that the study of the way synapses work (which leads
|
rlm@109
|
720 some people to say that a typical synapse [] in the human brain has
|
rlm@109
|
721 computational power comparable to the Internet a few years ago,
|
rlm@109
|
722 because of the number of different molecules that are doing things,
|
rlm@109
|
723 the variety of types of things that are being done in those molecular
|
rlm@109
|
724 interactions, and the speed at which they happen, if you somehow count
|
rlm@109
|
725 up the number of operations per second or something, then you get
|
rlm@109
|
726 these comparable figures).
|
rlm@109
|
727 </p>
|
rlm@109
|
728 </div>
|
rlm@109
|
729
|
rlm@109
|
730 </div>
|
rlm@109
|
731
|
rlm@109
|
732 <div id="outline-container-5-2" class="outline-3">
|
rlm@109
|
733 <h3 id="sec-5-2"><span class="section-number-3">5.2</span> For example, brains may rely heavily on chemical information processing</h3>
|
rlm@109
|
734 <div class="outline-text-3" id="text-5-2">
|
rlm@109
|
735
|
rlm@109
|
736 <p>Now even if the details aren't right, there may just be a lot of
|
rlm@109
|
737 information processing that…going on in brains at the <i>molecular</i>
|
rlm@109
|
738 level, not the neural level. Then, if that's the case, the processing
|
rlm@109
|
739 units will be orders of magnitude larger in number than the number of
|
rlm@109
|
740 neurons. And it's certainly the case that all the original biological
|
rlm@109
|
741 forms of information processing were chemical; there weren't brains
|
rlm@109
|
742 around, and still aren't in most microbes. And even when humans grow
|
rlm@109
|
743 their brains, the process of starting from a fertilized egg and
|
rlm@109
|
744 producing this rich and complex structure is, for much of the time,
|
rlm@109
|
745 under the control of chemical computations, chemical information
|
rlm@109
|
746 processing—of course combined with physical sorts of materials and
|
rlm@109
|
747 energy and so on as well.
|
rlm@109
|
748 </p>
|
rlm@109
|
749 <p>
|
rlm@109
|
750 [26:25] So it would seem very strange if all that capability was
|
rlm@109
|
751 something thrown away when you've got a brain and all the information
|
rlm@109
|
752 processing, the [challenges that were handled in making a brain],
|
rlm@109
|
753 … This is handwaving on my part; I'm just saying that we <i>might</i>
|
rlm@109
|
754 learn that what brains do is not what we think they do, and that
|
rlm@109
|
755 problems of replicating them are not what we think they are, solely in
|
rlm@109
|
756 terms of numerical estimate of time scales, the number of components,
|
rlm@109
|
757 and so on.
|
rlm@109
|
758 </p>
|
rlm@109
|
759 </div>
|
rlm@109
|
760
|
rlm@109
|
761 </div>
|
rlm@109
|
762
|
rlm@109
|
763 <div id="outline-container-5-3" class="outline-3">
|
rlm@109
|
764 <h3 id="sec-5-3"><span class="section-number-3">5.3</span> Brain algorithms may simply be optimized for certain kinds of information processing other than bit manipulations</h3>
|
rlm@109
|
765 <div class="outline-text-3" id="text-5-3">
|
rlm@109
|
766
|
rlm@109
|
767 <p>[26:56] But apart from that, the other basis of skepticism concerns
|
rlm@109
|
768 how well we understand what the problems are. I think there are many
|
rlm@109
|
769 people who try to formalize the problems of designing an intelligent
|
rlm@109
|
770 system in terms of streams of information thought of as bit streams or
|
rlm@109
|
771 collections of bit streams, and they think of as the problems of
|
rlm@109
|
772 intelligence as being the construction or detection of patterns in
|
rlm@109
|
773 those, and perhaps not just detection of patterns, but detection of
|
rlm@109
|
774 patterns that are useable for sending <i>out</i> streams to control motors
|
rlm@109
|
775 and so on in order to []. And that way of conceptualizing the problem
|
rlm@109
|
776 may lead on the one hand to oversimplification, so that the things
|
rlm@109
|
that <i>would</i> be achieved, if those goals were achieved, may be much
|
rlm@109
|
simpler, in some ways inadequate, for the replication of human
|
rlm@109
|
779 intelligence, or the matching of human intelligence—or for that
|
rlm@109
|
780 matter, squirrel intelligence—but in another way, it may also make
|
rlm@109
|
781 the problem harder: it may be that some of the kinds of things that
|
rlm@109
|
782 biological evolution has achieved can't be done that way. And one of
|
rlm@109
|
783 the ways that might turn out to be the case is not because it's not
|
rlm@109
|
784 impossible in principle to do some of the information processing on
|
rlm@109
|
785 artificial computers-based-on-transistors and other bit-manipulating
|
rlm@109
|
786 []—but it may just be that the computational complexity of solving
|
rlm@109
|
problems, processes, or finding solutions to complex problems, is
|
rlm@109
|
788 much greater and therefore you might need a much larger universe than
|
rlm@109
|
789 we have available in order to do things.
|
rlm@109
|
790 </p>
|
rlm@109
|
791 </div>
|
rlm@109
|
792
|
rlm@109
|
793 </div>
|
rlm@109
|
794
|
rlm@109
|
795 <div id="outline-container-5-4" class="outline-3">
|
rlm@109
|
796 <h3 id="sec-5-4"><span class="section-number-3">5.4</span> Example: find the shortest path by dangling strings</h3>
|
rlm@109
|
797 <div class="outline-text-3" id="text-5-4">
|
rlm@109
|
798
|
rlm@109
|
799 <p>[28:55] Then if the underlying mechanisms were different, the
|
rlm@109
|
800 information processing mechanisms, they might be better tailored to
|
rlm@109
|
801 particular sorts of computation. There's a [] example, which is
|
rlm@109
|
802 finding the shortest route if you've got a collection of roads, and
|
rlm@109
|
803 they may be curved roads, and lots of tangled routes from A to B to C,
|
rlm@109
|
804 and so on. And if you start at A and you want to get to Z — a place
|
rlm@109
|
805 somewhere on that map — the process of finding the shortest route
|
rlm@109
|
806 will involve searching through all these different possibilities and
|
rlm@109
|
807 rejecting some that are longer than others and so on. But if you make
|
rlm@109
|
808 a model of that map out of string, where these strings are all laid
|
rlm@109
|
out on the map and so have the lengths of the routes, then if you
|
rlm@109
|
810 hold the two knots in the string – it's a network of string — which
|
rlm@109
|
811 correspond to the start point and end point, then <i>pull</i>, then the
|
rlm@109
|
812 bits of string that you're left with in a straight line will give you
|
rlm@109
|
813 the shortest route, and that process of pulling just gets you the
|
rlm@109
|
814 solution very rapidly in a parallel computation, where all the others
|
rlm@109
|
815 just hang by the wayside, so to speak.
|
rlm@109
|
816 </p>
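<p>
<b>Aside</b> (not part of the interview): the point of the string model is
that pulling the two knots taut finds the answer in a single parallel,
physical step, whereas a conventional program has to search the network
piece by piece. Below is a minimal sketch of that sequential approach
(Dijkstra's algorithm) on a small made-up road network; the node names and
road lengths are invented for illustration.
</p>
<pre>
// Sequential shortest-path search (Dijkstra's algorithm).
// graph maps each node to a list of [neighbour, roadLength] pairs.
function shortestDistances(graph, start) {
  var dist = {};
  var unvisited = [];
  for (var node in graph) { dist[node] = Infinity; unvisited.push(node); }
  dist[start] = 0;
  while (unvisited.length > 0) {
    // Repeatedly pick the nearest unexplored node: this scanning and
    // re-scanning is the step-by-step work the taut strings do "for free".
    var u = unvisited[0];
    unvisited.forEach(function (n) { if (dist[u] > dist[n]) { u = n; } });
    unvisited.splice(unvisited.indexOf(u), 1);
    // Relax the roads leaving u.
    graph[u].forEach(function (edge) {
      var v = edge[0], len = edge[1];
      if (dist[v] > dist[u] + len) { dist[v] = dist[u] + len; }
    });
  }
  return dist;
}

// Hypothetical network: four junctions, five roads.
var roads = {
  A: [["B", 2], ["C", 5]],
  B: [["A", 2], ["C", 1], ["Z", 7]],
  C: [["A", 5], ["B", 1], ["Z", 3]],
  Z: [["B", 7], ["C", 3]]
};
console.log(shortestDistances(roads, "A").Z);  // prints 6 (A -> B -> C -> Z)
</pre>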
|
rlm@109
|
817 </div>
|
rlm@109
|
818
|
rlm@109
|
819 </div>
|
rlm@109
|
820
|
rlm@109
|
821 <div id="outline-container-5-5" class="outline-3">
|
rlm@109
|
822 <h3 id="sec-5-5"><span class="section-number-3">5.5</span> In sum, we know surprisingly little about the kinds of problems that evolution solved, and the manner in which they were solved.</h3>
|
rlm@109
|
823 <div class="outline-text-3" id="text-5-5">
|
rlm@109
|
824
|
rlm@109
|
825 <p>[30:15] Now, I'm not saying brains can build networks of string and
|
rlm@109
|
826 pull them or anything like that; that's just an illustration of how if
|
rlm@109
|
827 you have the right representation, correctly implemented—or suitably
|
rlm@109
|
828 implemented—for a problem, then you can avoid very combinatorially
|
rlm@109
|
829 complex searches, which will maybe grow exponentially with the number
|
rlm@109
|
830 of components in your map, whereas with this thing, the time it takes
|
rlm@109
|
831 won't depend on how many strings you've [got on the map]; you just
|
rlm@109
|
832 pull, and it will depend only on the shortest route that exists in
|
rlm@109
|
833 there. Even if that shortest route wasn't obvious on the original map.
|
rlm@109
|
834 </p>
|
rlm@109
|
835
|
rlm@109
|
836 <p>
|
rlm@109
|
837 [30:59] So that's a rather long-winded way of formulating the
|
rlm@109
|
838 conjecture which—of supporting, a roundabout way of supporting the
|
rlm@109
|
839 conjecture that there may be something about the way molecules perform
|
rlm@109
|
840 computations where they have the combination of continuous change as
|
rlm@109
|
841 things move through space and come together and move apart, and
|
rlm@109
|
842 whatever — and also snap into states that then persist, so [as you
|
rlm@109
|
843 learn from] quantum mechanics, you can have stable molecular
|
rlm@109
|
844 structures which are quite hard to separate, and then in catalytic
|
rlm@109
|
845 processes you can separate them, or extreme temperatures, or strong
|
rlm@109
|
846 forces, but they may nevertheless be able to move very rapidly in some
|
rlm@109
|
847 conditions in order to perform computations.
|
rlm@109
|
848 </p>
|
rlm@109
|
849 <p>
|
rlm@109
|
850 [31:49] Now there may be things about that kind of structure that
|
rlm@109
|
851 enable searching for solutions to <i>certain</i> classes of problems to be
|
rlm@109
|
852 done much more efficiently (by brain) than anything we could do with
|
rlm@109
|
853 computers. It's just an open question.
|
rlm@109
|
854 </p>
|
rlm@109
|
855 <p>
|
rlm@109
|
856 [32:04] So it <i>might</i> turn out that we need new kinds of technology
|
rlm@109
|
857 that aren't on the horizon in order to replicate the functions that
|
rlm@109
|
858 animal brains perform —or, it might not. I just don't know. I'm not
|
rlm@109
|
859 claiming that there's strong evidence for that; I'm just saying that
|
rlm@109
|
860 it might turn out that way, partly because I think we know less than
|
rlm@109
|
861 many people think we know about what biological evolution achieved.
|
rlm@109
|
862 </p>
|
rlm@109
|
863 <p>
|
rlm@109
|
864 [32:28] There are some other possibilities: we may just find out that
|
rlm@109
|
865 there are shortcuts no one ever thought of, and it will all happen
|
rlm@109
|
866 much more quickly—I have an open mind; I'd be surprised, but it
|
rlm@109
|
867 could turn up. There <i>is</i> something that worries me much more than the
|
rlm@109
|
868 singularity that most people talk about, which is machines achieving
|
rlm@109
|
869 human-level intelligence and perhaps taking over [the] planet or
|
rlm@109
|
870 something. There's what I call the <i>singularity of cognitive catch-up</i> …
|
rlm@109
|
871 </p>
|
rlm@109
|
872 </div>
|
rlm@109
|
873 </div>
|
rlm@109
|
874
|
rlm@109
|
875 </div>
|
rlm@109
|
876
|
rlm@109
|
877 <div id="outline-container-6" class="outline-2">
|
rlm@109
|
878 <h2 id="sec-6"><span class="section-number-2">6</span> A singularity of cognitive catch-up</h2>
|
rlm@109
|
879 <div class="outline-text-2" id="text-6">
|
rlm@109
|
880
|
rlm@109
|
881
|
rlm@109
|
882
|
rlm@109
|
883 </div>
|
rlm@109
|
884
|
rlm@109
|
885 <div id="outline-container-6-1" class="outline-3">
|
rlm@109
|
886 <h3 id="sec-6-1"><span class="section-number-3">6.1</span> What if it will take a lifetime to learn enough to make something new?</h3>
|
rlm@109
|
887 <div class="outline-text-3" id="text-6-1">
|
rlm@109
|
888
|
rlm@109
|
889 <p>… SCC, singularity of cognitive catch-up, which I think we're close
|
rlm@109
|
890 to, or maybe have already reached—I'll explain what I mean by
|
rlm@109
|
891 that. One of the products of biological evolution—and this is one of
|
rlm@109
|
892 the answers to your earlier questions which I didn't get on to—is
|
rlm@109
|
893 that humans have not only the ability to make discoveries that none of
|
rlm@109
|
894 their ancestors have ever made, but to shorten the time required for
|
rlm@109
|
895 similar achievements to be reached by their offspring and their
|
rlm@109
|
896 descendants. So once we, for instance, worked out ways of complex
|
rlm@109
|
897 computations, or ways of building houses, or ways of finding our way
|
rlm@109
|
898 around, we don't need…our children don't need to work it out for
|
rlm@109
|
899 themselves by the same lengthy trial and error procedure; we can help
|
rlm@109
|
900 them get there much faster.
|
rlm@109
|
901 </p>
|
rlm@109
|
902 <p>
|
rlm@109
|
903 Okay, well, what I've been referring to as the singularity of
|
rlm@109
|
904 cognitive catch-up depends on the fact that—fairly obvious, and it's
|
rlm@109
|
905 often been commented on—that in case of humans, it's not necessary
|
rlm@109
|
for each generation to learn what previous generations learned <i>in the same way</i>. And we can speed up learning: once something has been
|
rlm@109
|
907 learned, [it is able to] be learned by new people. And that has meant
|
rlm@109
|
908 that the social processes that support that kind of education of the
|
rlm@109
|
909 young can enormously accelerate what would have taken…perhaps
|
rlm@109
|
910 thousands [or] millions of years for evolution to produce, can happen in
|
rlm@109
|
911 a much shorter time.
|
rlm@109
|
912 </p>
|
rlm@109
|
913
|
rlm@109
|
914 <p>
|
rlm@109
|
[34:54] But here's the catch: in order for a new advance to happen —
|
rlm@109
|
916 so for something new to be discovered that wasn't there before, like
|
rlm@109
|
917 Newtonian mechanics, or the theory of relativity, or Beethoven's music
|
rlm@109
|
918 or [style] or whatever — the individuals have to have traversed a
|
rlm@109
|
919 significant amount of what their ancestors have learned, even if they
|
rlm@109
|
920 do it much faster than their ancestors, to get to the point where they
|
rlm@109
|
921 can see the gaps, the possibilities for going further than their
|
rlm@109
|
922 ancestors, or their parents or whatever, have done.
|
rlm@109
|
923 </p>
|
rlm@109
|
924 <p>
|
rlm@109
|
925 [35:27] Now in the case of knowledge of science, mathematics,
|
rlm@109
|
926 philosophy, engineering and so on, there's been a lot of accumulated
|
rlm@109
|
927 knowledge. And humans are living a <i>bit</i> longer than they used to, but
|
rlm@109
|
928 they're still living for [whatever it is], a hundred years, or for
|
rlm@109
|
929 most people, less than that. So you can imagine that there might come
|
rlm@109
|
930 a time when in a normal human lifespan, it's not possible for anyone
|
rlm@109
|
931 to learn enough to understand the scope and limits of what's already
|
rlm@109
|
932 been achieved in order to see the potential for going beyond it and to
|
rlm@109
|
933 build on what's already been done to make that…those future steps.
|
rlm@109
|
934 </p>
|
rlm@109
|
935 <p>
|
rlm@109
|
936 [36:10] So if we reach that stage, we will have reached the
|
rlm@109
|
937 singularity of cognitive catch-up because the process of education
|
rlm@109
|
938 that enables individuals to learn faster than their ancestors did is
|
rlm@109
|
939 the catching-up process, and it may just be that we at some point
|
rlm@109
|
940 reach a point where catching up can only happen within a lifetime of
|
rlm@109
|
941 an individual, and after that they're dead and they can't go
|
rlm@109
|
942 beyond. And I have some evidence that there's a lot of that around
|
rlm@109
|
943 because I see a lot of people coming up with what <i>they</i> think of as
|
rlm@109
|
944 new ideas which they've struggled to come up with, but actually they
|
rlm@109
|
945 just haven't taken in some of what was…some of what was done [] by
|
rlm@109
|
946 other people, in other places before them. And I think that despite
|
rlm@109
|
947 the availability of search engines which make it <i>easier</i> for people
|
rlm@109
|
948 to get the information—for instance, when I was a student, if I
|
rlm@109
|
949 wanted to find out what other people had done in the field, it was a
|
rlm@109
|
950 laborious process—going to the library, getting books, and
|
rlm@109
|
951 —whereas now, I can often do things in seconds that would have taken
|
rlm@109
|
952 hours. So that means that if seconds [are needed] for that kind of
|
rlm@109
|
953 work, my lifespan has been extended by a factor of ten or
|
rlm@109
|
954 something. So maybe that <i>delays</i> the singularity, but it may not
|
rlm@109
|
955 delay it enough. But that's an open question; I don't know. And it may
|
rlm@109
|
956 just be that in some areas, this is more of a problem than others. For
|
rlm@109
|
957 instance, it may be that in some kinds of engineering, we're handing
|
rlm@109
|
958 over more and more of the work to machines anyways and they can go on
|
rlm@109
|
959 doing it. So for instance, most of the production of computers now is
|
rlm@109
|
960 done by a computer-controlled machine—although some of the design
|
rlm@109
|
961 work is done by humans— a lot of <i>detail</i> of the design is done by
|
rlm@109
|
962 computers, and they produce the next generation, which then produces
|
rlm@109
|
963 the next generation, and so on.
|
rlm@109
|
964 </p>
|
rlm@109
|
965 <p>
|
rlm@109
|
966 [37:57] I don't know if humans can go on having major advances, so
|
rlm@109
|
967 it'll be kind of sad if we can't.
|
rlm@109
|
968 </p>
|
rlm@109
|
969 </div>
|
rlm@109
|
970 </div>
|
rlm@109
|
971
|
rlm@109
|
972 </div>
|
rlm@109
|
973
<div id="outline-container-7" class="outline-2">
<h2 id="sec-7"><span class="section-number-2">7</span> Spatial reasoning: a difficult problem</h2>
<div class="outline-text-2" id="text-7">


<p>
[38:15] Okay, well, there are different problems [ ] mathematics, and
they have to do with properties. So for instance a lot of mathematics
can be expressed in terms of logical structures or algebraic
structures, and those are pretty well suited for manipulation and…on
computers, and if a problem can be specified using the
logical/algebraic notation, and the solution method requires creating
something in that sort of notation, then computers are pretty good,
and there are lots of mathematical tools around—there are theorem
provers and theorem checkers, and all kinds of things, which couldn't
have existed fifty, sixty years ago, and they will continue getting
better.
</p>
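<p>
(A minimal sketch, not from the interview and not any particular theorem
prover: it illustrates the kind of purely formal, notation-bound checking
that computers handle well, by testing a propositional formula over every
truth assignment. The helper names and the example formula are illustrative
assumptions only.)
</p>
<pre>
# Minimal sketch: a brute-force propositional tautology checker.
# It only shows the kind of problem that suits logical notation well;
# real theorem provers are far more sophisticated.
from itertools import product

def is_tautology(formula, variables):
    """Return True if `formula` is true under every assignment of the variables."""
    return all(formula(**dict(zip(variables, values)))
               for values in product([False, True], repeat=len(variables)))

# Example: Peirce's law, ((p -> q) -> p) -> p, written with `not`/`or`.
implies = lambda a, b: (not a) or b
peirce = lambda p, q: implies(implies(implies(p, q), p), p)

print(is_tautology(peirce, ["p", "q"]))  # True
</pre>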

<p>
But there was something that I was <a href="#sec-3-4">alluding to earlier</a> when I gave the
example of how you can reason about what you will see by changing your
position in relation to a door, where what you are doing is using your
grasp of spatial structures and how, as one spatial relationship
changes, namely you come closer to the door or move sideways and
parallel to the wall or whatever, other spatial relationships change
in parallel, so the lines from your eyes through to other parts of
the…parts of the room on the other side of the doorway change,
spread out more as you go towards the doorway, and as you move
sideways, they don't spread out differently, but focus on different
parts of the internal … that they access different parts of the
… of the room.
</p>
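<p>
(A minimal numerical sketch, not from the interview, of the geometric
relationship described: as you approach a doorway, the lines of sight
through it spread over a wider angle. The doorway width and the distances
are made-up illustrative values.)
</p>
<pre>
# Angular spread of the lines of sight through a doorway of width w,
# seen from a point directly in front of it at distance d: 2*atan(w/(2d)).
# Walking towards the door (smaller d) makes the lines spread out more.
import math

def doorway_angle_degrees(width, distance):
    """Angular width of the doorway as seen from `distance` metres in front of it."""
    return math.degrees(2 * math.atan(width / (2 * distance)))

for d in [4.0, 2.0, 1.0, 0.5]:
    print(f"distance {d} m: doorway subtends {doorway_angle_degrees(0.9, d):5.1f} degrees")
</pre>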
<p>
Now, those are examples of ways of thinking about relationships and
changing relationships which are not the same as thinking about what
happens if I replace this symbol with that symbol, or if I substitute
this expression in that expression in a logical formula. And at the
moment, I do not believe that there is anything in AI amongst the
mathematical reasoning community, the theorem-proving community, that
can model the processes that go on when a young child starts learning
to do Euclidean geometry and is taught things about—for instance, I
can give you a proof that the angles of any triangle add up to a
straight line, 180 degrees.
</p>

</div>

<div id="outline-container-7-1" class="outline-3">
<h3 id="sec-7-1"><span class="section-number-3">7.1</span> Example: Spatial proof that the angles of any triangle add up to a half-circle</h3>
<div class="outline-text-3" id="text-7-1">

<p>There are standard proofs which involve starting with one triangle,
then adding a line parallel to the base. One of my former students,
Mary Pardoe, came up with [a different one], which I will demonstrate with this &lt;he holds
up a pen&gt; — can you see it? If I have a triangle here that's got
three sides, if I put this thing on it, on one side — let's say the
bottom—I can rotate it until it lies along the second…another
side, and then maybe move it up to the other end ~. Then I can rotate
it again, until it lies on the third side, and move it back to the
other end. And then I'll rotate it again and it'll eventually end up
on the original side, but it will have changed the direction it's
pointing in — and it won't have crossed over itself, so it will have
gone through a half-circle, and that says that the three angles of a
triangle add up to the rotations of half a circle, which is a
beautiful kind of proof and almost anyone can understand it. Some
mathematicians don't like it, because they say it hides some of the
assumptions, but nevertheless, as far as I'm concerned, it's an
example of a human ability to do reasoning which, once you've
understood it, you can see will apply to any triangle — it's got to
be a planar triangle — not a triangle on a globe, because then the
angles can add up to more than … you can have three <i>right</i> angles
if you have an equator…a line on the equator, and a line going up
to the north pole of the earth, and then you have a right angle and
then another line going down to the equator, and you have a right
angle, right angle, right angle, and they add up to more than a
straight line. But that's because the triangle isn't in the plane,
it's on a curved surface. In fact, that's one of the
differences…definitional differences you can take between planar and
curved surfaces: how much the angles of a triangle add up to. But our
ability to <i>visualize</i> and notice the generality in that process, and
see that you're going to be able to do the same thing using triangles
that stretch in all sorts of ways, or if it's a million times as
large, or if it's made…you know, written on, on…if it's drawn in
different colors or whatever — none of that's going to make any
difference to the essence of that process. And that ability to see
the commonality in a spatial structure which enables you to draw some
conclusions with complete certainty—subject to the possibility that
sometimes you make mistakes, but when you make mistakes, you can
discover them, as has happened in the history of geometrical theorem
proving. Imre Lakatos had a wonderful book called <a href="http://en.wikipedia.org/wiki/Proofs_and_Refutations"><i>Proofs and Refutations</i></a> — which I won't try to summarize — but he has
examples: mistakes were made; that was because people didn't always
realize there were subtle subcases which had slightly different
properties, and they didn't take account of that. But once they're
noticed, you rectify that.
</p>
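<p>
(A minimal numerical sketch, not from the interview: it spot-checks, for a
few arbitrary planar triangles, that the interior angles sum to a half-circle,
echoing the point that stretching or rescaling the triangle makes no
difference. It is a finite check, not the kind of general spatial proof
being described.)
</p>
<pre>
# Spot check: interior angles of arbitrary planar triangles sum to 180 degrees.
import math
import random

def interior_angles(a, b, c):
    """Interior angles (in degrees) of the triangle with vertices a, b, c."""
    def angle_at(p, q, r):
        # Angle at vertex p between the directions towards q and r.
        v1 = (q[0] - p[0], q[1] - p[1])
        v2 = (r[0] - p[0], r[1] - p[1])
        dot = v1[0] * v2[0] + v1[1] * v2[1]
        norm = math.hypot(*v1) * math.hypot(*v2)
        cos_angle = max(-1.0, min(1.0, dot / norm))  # clamp against rounding error
        return math.degrees(math.acos(cos_angle))
    return [angle_at(a, b, c), angle_at(b, a, c), angle_at(c, a, b)]

random.seed(0)
for _ in range(3):
    a, b, c = [(random.uniform(-1e6, 1e6), random.uniform(-1e6, 1e6)) for _ in range(3)]
    print(round(sum(interior_angles(a, b, c)), 6))  # approximately 180.0 each time
</pre>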
</div>

</div>

<div id="outline-container-7-2" class="outline-3">
<h3 id="sec-7-2"><span class="section-number-3">7.2</span> Geometric results are fundamentally different than experimental results in chemistry or physics.</h3>
<div class="outline-text-3" id="text-7-2">

<p>[43:28] But it's not the same as doing experiments in chemistry and
physics, where you can't be sure it'll be the same on [] or at a high
temperature, or in a very strong magnetic field — with geometric
reasoning, in some sense you've got the full information in front of
you; even if you don't always notice an important part of it. So, that
kind of reasoning (as far as I know) is not implemented anywhere in a
computer. And most people who do research on trying to model
mathematical reasoning don't pay any attention to that, because of
… they just don't think about it. They start from somewhere else,
maybe because of how they were educated. I was taught Euclidean
geometry at school. Were you?
</p>
<p>
(Adam Ford: Yeah)
</p>
<p>
Many people are not now. Instead they're taught set theory, and
logic, and arithmetic, and [algebra], and so on. And so they don't use
that bit of their brains, without which we wouldn't have built any of
the cathedrals, and all sorts of things we now depend on.
</p>
</div>
</div>

</div>

<div id="outline-container-8" class="outline-2">
<h2 id="sec-8"><span class="section-number-2">8</span> Is near-term artificial general intelligence likely?</h2>
<div class="outline-text-2" id="text-8">



</div>

<div id="outline-container-8-1" class="outline-3">
<h3 id="sec-8-1"><span class="section-number-3">8.1</span> Two interpretations: a single mechanism for all problems, or many mechanisms unified in one program.</h3>
<div class="outline-text-3" id="text-8-1">


<p>
[44:35] Well, this relates to what's meant by general. And when I
first encountered the AGI community, I thought that what they all
meant by general intelligence was <i>uniform</i> intelligence ---
intelligence based on some common simple (maybe not so simple, but)
single powerful mechanism or principle of inference. And there are
some people in the community who are trying to produce things like
that, often in connection with algorithmic information theory and
computability of information, and so on. But there's another sense of
general which means that the system of general intelligence can do
lots of different things, like perceive things, understand language,
move around, make things, and so on — perhaps even enjoy a joke;
that's something that's not nearly on the horizon, as far as I
know. Enjoying a joke isn't the same as being able to make laughing
noises.
</p>
<p>
Given, then, that there are these two notions of general
intelligence—there's one that looks for one uniform, possibly
simple, mechanism or collection of ideas and notations and algorithms,
that will deal with any problem that's solvable — and the other
that's general in the sense that it can do lots of different things
that are combined into an integrated architecture (which raises lots
of questions about how you combine these things and make them work
together) and we humans, certainly, are of the second kind: we do all
sorts of different things, and other animals also seem to be of the
second kind, perhaps not as general as humans. Now, it may turn out
that in some near future time, who knows—decades, a few
decades—you'll be able to get machines that are capable of solving,
in a time that will depend on the nature of the problem, but any
problem that is solvable, and they will be able to do it in some sort
of tractable time — of course, there are some problems that are
solvable that would require a larger universe and a longer history
than the history of the universe, but apart from that constraint,
these machines will be able to do anything []. But to be able to do
some of the kinds of things that humans can do, like the kinds of
geometrical reasoning where you look at the shape and you abstract
away from the precise angles and sizes and shapes and so on, and
realize there's something general here, as must have happened when our
ancestors first made the discoveries that [were] eventually put together in
Euclidean geometry.
</p>
<p>
It may be that that requires mechanisms of a kind that we don't know
anything about at the moment. Maybe brains are using molecules and
rearranging molecules in some way that supports that kind of
reasoning. I'm not saying they are — I don't know, I just don't see
any simple…any obvious way to map that kind of reasoning capability
onto what we currently do on computers. There is—and I just
mentioned this briefly beforehand—there is a kind of thing that's
sometimes thought of as a major step in that direction, namely you can
build a machine (or a software system) that can represent some
geometrical structure, and then be told about some change that's going
to happen to it, and it can predict in great detail what'll
happen. And this happens for instance in game engines, where you say
we have all these blocks on the table and I'll drop one other block,
and then [the thing] uses Newton's laws and properties of rigidity of
the parts and the elasticity and also stuff about geometries and space
and so on, to give you a very accurate representation of what'll
happen when this brick lands on this pile of things, [it'll bounce and
go off, and so on]. And you just, with more memory and more CPU power,
you can increase the accuracy— but that's totally different than
looking at <i>one</i> example, and working out what will happen in a whole
<i>range</i> of cases at a higher level of abstraction, whereas the game
engine does it in great detail for <i>just</i> this case, with <i>just</i> those
precise things, and it won't even know what the generalizations are
that it's using that would apply to others []. So, in that sense, [we]
may get AGI — artificial general intelligence — pretty soon, but
it'll be limited in what it can do. And the other kind of general
intelligence which combines all sorts of different things, including
human spatial geometrical reasoning, and maybe other things, like the
ability to find things funny, and to appreciate artistic features and
other things, may need forms of pattern-mechanism, and I have an open
mind about that.
</p>
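<p>
(A minimal sketch, not from the interview, of the contrast just described:
a step-by-step numerical prediction of one concrete case, roughly the
game-engine style, next to a closed-form generalization that covers a whole
range of cases at once. The falling-block setup and all numbers are
illustrative assumptions, not anything discussed above.)
</p>
<pre>
# One concrete case, stepped through time, versus a result that holds for
# every drop height at once: t = sqrt(2*h/g).
import math

G = 9.81  # m/s^2

def simulated_fall_time(height, dt=1e-4):
    """Step a falling block forward in tiny increments until it hits the ground."""
    t, v, y = 0.0, 0.0, height
    while y > 0.0:
        v += G * dt
        y -= v * dt
        t += dt
    return t

def general_fall_time(height):
    """Closed-form generalization that applies to every drop height."""
    return math.sqrt(2 * height / G)

for h in [0.5, 2.0, 20.0]:
    print(f"h={h:5.1f} m  simulated={simulated_fall_time(h):.3f} s  general={general_fall_time(h):.3f} s")
</pre>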
</div>
</div>

</div>

<div id="outline-container-9" class="outline-2">
<h2 id="sec-9"><span class="section-number-2">9</span> Abstract General Intelligence impacts</h2>
<div class="outline-text-2" id="text-9">


<p>
[49:53] Well, as far as the first type's concerned, it could be useful
for all kinds of applications — there are people who worry about
whether a system that has that type of intelligence might in
some sense take over control of the planet. Well, humans often do
stupid things, and they might do something stupid that would lead to
disaster, but I think it's more likely that there would be other
things [] lead to disaster— population problems, using up all the
resources, destroying ecosystems, and whatever. But certainly it would
go on being useful to have these calculating devices. Now, as for the
second kind of them, I don't know—if we succeeded at putting
together all the parts that we find in humans, we might just make an
artificial human, and then we might have some of them as your friends,
and some of them we might not like, and some of them might become
teachers or whatever, composers — but that raises a question: could
they, in some sense, be superior to us, in their learning
capabilities, their understanding of human nature, or maybe their
wickedness or whatever — these are all issues in which I expect the
best science fiction writers would give better answers than anything I
could do, but I did once fantasize when I [back] in 1978, that perhaps
if we achieved that kind of thing, that they would be wise, and gentle
and kind, and realize that humans are an inferior species that, you
know, have some good features, so they'd keep us in some kind of
secluded…restrictive kind of environment, keep us away from
dangerous weapons, and so on. And find ways of cohabitating with
us. But that's just fantasy.
</p>
<p>
Adam Ford: Awesome. Yeah, there's an interesting story <i>With Folded Hands</i> where [the computers] want to take care of us and want to
reduce suffering and end up lobotomizing everybody [but] keeping them
alive so as to reduce the suffering.
</p>
<p>
Aaron Sloman: Not all that different from <i>Brave New World</i>, where it
was done with drugs and so on, but different humans are given
different roles in that system, yeah.
</p>
<p>
There's also <i>The Time Machine</i>, H.G. Wells, where the … in the
distant future, humans have split in two: the Eloi, I think they were
called, they lived underground, they were the [] ones, and then—no,
the Morlocks lived underground; Eloi lived on the planet; they were
pleasant and pretty but not very bright, and so on, and they were fed
on by …
</p>
<p>
Adam Ford: [] in the future.
</p>
<p>
Aaron Sloman: As I was saying, if you ask science fiction writers,
you'll probably come up with a wide variety of interesting answers.
</p>
<p>
Adam Ford: I certainly have; I've spoken to [] of Birmingham, and
Sean Williams, … who else?
</p>
<p>
Aaron Sloman: Did you ever read a story by E. M. Forster called <i>The Machine Stops</i> — very short story, it's <a href="http://archive.ncsa.illinois.edu/prajlich/forster.html">on the Internet somewhere</a>
— it's about a time when people sitting … and this was written in
about [1914 ] so it's about…over a hundred years ago … people are
in their rooms, they sit in front of screens, and they type things,
and they communicate with one another that way, and they don't meet;
they have debates, and they give lectures to their audiences that way,
and then there's a woman whose son says “I'd like to see
you” and she says “What's the point? You've got me at
this point” but he wants to come and talk to her — I won't
tell you how it ends, but.
</p>
<p>
Adam Ford: Reminds me of the Internet.
</p>
<p>
Aaron Sloman: Well, yes; he invented … it was just extraordinary
that he was able to do that, before most of the components that we
need for it existed.
</p>
<p>
Adam Ford: [Another person who did that] was Vernor Vinge [] <i>True Names</i>.
</p>
<p>
Aaron Sloman: When was that written?
</p>
<p>
Adam Ford: The seventies.
</p>
<p>
Aaron Sloman: Okay, well a lot of the technology was already around
then. The original bits of the Internet were working, in about 1973, I was
sitting … 1974, I was sitting at Sussex University trying to
use…learn LOGO, the programming language, to decide whether it was
going to be useful for teaching AI, and I was sitting [] paper
teletype, there was paper coming out, transmitting ten characters a
second from Sussex to UCL computer lab by telegraph cable, from there
to somewhere in Norway via another cable, from there by satellite to
California to a computer Xerox [] research center where they had
implemented a computer with a LOGO system on it, with someone I had
met previously in Edinburgh, Danny Bobrow, and he allowed me to have
access to this system. So there I was typing. And furthermore, it was
duplex typing, so every character I typed didn't show up on my
terminal until it had gone all the way there and echoed back, so I
would type, and the characters would come back four seconds later.
</p>
<p>
[55:26] But that was the Internet, and I think Vernor Vinge was
writing after that kind of thing had already started, but I don't
know. Anyway.
</p>
<p>
[55:41] Another…I mentioned H.G. Wells, <i>The Time Machine</i>. I
recently discovered, because <a href="http://en.wikipedia.org/wiki/David_Lodge_(author)">David Lodge</a> had written a sort of
semi-novel about him, that he had invented Wikipedia, in advance—he
had this notion of an encyclopedia that was free to everybody, and
everybody could contribute and [collaborate on it]. So, go to the
science fiction writers to find out the future — well, a range of
possible futures.
</p>
<p>
Adam Ford: Well the thing is with science fiction writers, they have
to maintain some sort of interest for their readers; after all, the
science fiction which reaches us is the stuff that publishers want to
sell, and so there's a little bit of a … a bias towards making a
plot device there, and so the dramatic sort of appeals to our
amygdala, our lizard brain; we'll sort of stay there obviously to some
extent. But I think that they do come up with sort of amazing ideas; I
think it's worth trying to make these predictions; I think that we
should [spend] more time on strategic forecasting, I mean take that seriously.
</p>
<p>
Aaron Sloman: Well, I'm happy to leave that to others; I just want to
try to understand these problems that bother me about how things
work. And it may be that some would say that's irresponsible if I
don't think about what the implications will be. Well, understanding
how humans work <i>might</i> enable us to make [] humans — I suspect it
won't happen in this century; I think it's going to be too difficult.
</p></div>
</div>
</div>

<div id="postamble">
<p class="date">Date: 2013-10-04 18:49:53 UTC</p>
<p class="author">Author: Dylan Holmes</p>
<p class="creator">Org version 7.7 with Emacs version 23</p>
<a href="http://validator.w3.org/check?uri=referer">Validate XHTML 1.0</a>

</div>
</body>
</html>