Tuesday 10 September 2013

Neural network

What is a neural network? You could read an answer at http://en.wikipedia.org/wiki/Neural_network, but in simple words it is software that produces an answer through a calculation too complex to follow by hand. Or read and try the following script:



Here is the source code:

REBOL [
Title: "Simple Backprop Neural Net"
Date: 18-Sep-2003
File: %bp.r
Author: "Karl Lewin and Francois Jouen for graphical implementation"
Purpose: {
This is pretty much a straight translation from python
to REBOL of Neil Schemenauer's bpnn.py
There are probably some places where it could be changed
to be more REBOL friendly but I'm not fluent enough to
know for sure.
}
History: "18-Sep-2003 - Initial Translation from bpnn.py"
"18-Jun-2005 - Graphical Version"
]
; calculate a random number where: a <= rand < b
; REBOL's `random 10000` yields an integer in 1..10000, so the raw
; quotient lies in (0, 1]; subtracting 1 first gives [0, 1), which
; makes the result match the documented half-open range [a, b).
rand: func [
    "Uniform random decimal in the half-open interval [a, b)"
    a "lower bound (inclusive)"
    b "upper bound (exclusive)"
][
    (b - a) * (((random 10000) - 1) / 10000) + a
]
; Build an I-by-J matrix as a block of row blocks, every cell preset
; to the same value (0.0 unless /fill supplies another one).
makeMatrix: func [
    "Create an I x J matrix (block of row blocks)"
    I "number of rows"
    J "columns per row"
    /fill f "cell value to use instead of the 0.0 default"
][
    f: either fill [f] [0.0]
    array/initial reduce [I J] f
]
; NN: build a 3-layer (input/hidden/output) feed-forward network object.
; p_ni, p_nh, p_no are the requested layer sizes; one extra input node
; is added internally to act as the bias unit.
NN: func [p_ni [integer!] p_nh [integer!] p_no [integer!]] [
make object! [
; number of input, hidden, and output nodes
ni: p_ni + 1 ; +1 = bias node
nh: p_nh
no: p_no
; activations for nodes
ai: array/initial ni 1.0
ah: array/initial nh 1.0
ao: array/initial no 1.0
; create weights
wi: makeMatrix ni nh ; input -> hidden
wo: makeMatrix nh no ; hidden -> output
; set them to random values
; each cell is overwritten with a uniform draw from `rand -2 2`;
; change/part writes one cell and returns the series advanced past it
foreach elo wi [loop length? elo [elo: change/part elo rand -2 2 1]]
foreach elo wo [loop length? elo [elo: change/part elo rand -2 2 1]]
; last change in weights for momentum
ci: makeMatrix ni nh
co: makeMatrix nh no
; Forward pass: copy `inputs` into the input activations, propagate
; through hidden and output layers with a sigmoid (1 / (1 + exp -sum)),
; and return the output activation block.
update: func [inputs /local i j sum] [
; input activations
; (only ni - 1 slots are written; the last input stays 1.0 as bias)
repeat i (self/ni - 1) [poke self/ai i inputs/:i]
; hidden activations
repeat j self/nh [
sum: 0
repeat i self/ni [sum: sum + (self/ai/:i * self/wi/:i/:j)]
poke self/ah j 1 / (1 + (EXP - sum))
]
; output activations
repeat j self/no [
sum: 0
repeat i self/nh [sum: sum + (self/ah/:i * self/wo/:i/:j)]
poke self/ao j 1 / (1 + (EXP - sum))
]
self/ao
]
; One backpropagation step for the pattern last run through `update`.
; `targets` is the desired output block, N the learning rate, M the
; momentum factor; ci/co hold the previous weight changes used as the
; momentum term. Returns the pattern error.
backprop: func [targets N M /local i j k sum chnge o_deltas h_deltas ao error] [
; calculate error terms for output
; delta_k = ao * (1 - ao) * (target - ao)   (sigmoid derivative)
o_deltas: array/initial self/no 0
repeat k self/no [
ao: self/ao/:k
poke o_deltas k ao * (1 - ao) * (targets/:k - ao)
]
; calculate error terms for hidden
h_deltas: array/initial self/nh 0
repeat j self/nh [
sum: 0.0
repeat k self/no [sum: sum + (o_deltas/:k * self/wo/:j/:k)]
poke h_deltas j (self/ah/:j * (1 - self/ah/:j) * sum)
]
; update output weights
repeat j self/nh [
repeat k self/no [
chnge: o_deltas/:k * self/ah/:j
poke self/wo/:j k (self/wo/:j/:k + (N * chnge) + (M * self/co/:j/:k))
poke self/co/:j k chnge
]
]
; update hidden weights
repeat i self/ni [
repeat j self/nh [
chnge: h_deltas/:j * self/ai/:i
poke self/wi/:i j (self/wi/:i/:j + (N * chnge) + (M * self/ci/:i/:j))
poke self/ci/:i j chnge
]
]
; calculate error
; NOTE(review): the global `lr` is used here as the 1/2 factor of the
; squared-error sum (bpnn.py uses a fixed 0.5). With lr <> 0.5 the
; reported error is rescaled — confirm this is intended.
error: 0
repeat k self/no [error: error + (lr * ((targets/:k - self/ao/:k) ** 2))]
error
]
; Run the forward pass on every pattern and write pattern/result pairs
; (rounded first output) into the GUI text area `tt`, headed by the
; current rule name.
test: func [patterns /local p] [
clear tt/text
append tt/text join rule/text newline
foreach p patterns [rs: self/update(p/1) append tt/text mold p
append tt/text newline
append tt/text rejoin [p/1 "-->" round first rs]
append tt/text newline
show tt
]
]
; Train for `iterations` epochs over `patterns`, accumulating the epoch
; error; every 100 epochs the error is displayed in the `result` info
; face and plotted via show_training.
train: func [patterns iterations /local i error] [
repeat i iterations [
error: 0
foreach p patterns [
self/update p/1
error: error + self/backprop p/2 lr mf ; 0.5 = learning rate, 0.1 = momentum factor
]
if (i // 100) = 0 [result/text: join i [ ": error: " error] show result
show_training error]
]
result/text: "Training done" show [result visu]
]
]
]
; Rewrite the target column of the global training set `pat` (in place)
; to match the logic rule currently selected in the GUI (rule/text).
; Unknown rule names leave `pat` untouched, like the original switch.
Set_Pattern: func [/local targets idx] [
    targets: select [
        "XOR" [0 1 1 0]
        "AND" [0 0 0 1]
        "OR"  [0 1 1 1]
    ] rule/text
    if targets [
        repeat idx 4 [change second pick pat idx pick targets idx]
    ]
]
; (Re)bind `pat` to the training set and rebuild the network `a`.
Fix_pattern: does [
; NOTE(review): this literal block is part of the function body, so it is
; the SAME series on every call; Set_Pattern mutates its target cells in
; place and those edits persist across calls (no copy/deep here). The
; values below are therefore the last rule set, not a guaranteed XOR
; reset — the rule-selection flow relies on this sharing.
pat: [
[[0 0] [0]]
[[0 1] [1]]
[[1 0] [1]]
[[1 1] [0]]
]
a: NN 2 3 1 ; 2 input, 3 hidden, 1 output
]
Fix_Pattern ; build the initial network and pattern (REBOL words are case-insensitive)
lr: 0.5 ; learning rate (editable in the config dialog)
mf: 0.1 ; momentum factor (editable in the config dialog)
;0.5 = learning rate, 0.1 = momentum factor

x: 0 ; current x position of the error plot
col: red ; plot pen color
n: 2000 ; number of training epochs
; Reset the plot state: wipe the text area, rewind the x cursor,
; start a fresh draw block, and reinstall the grid + draw effect
; on the visualisation box.
Clear_Screen: does [
    x: 0
    clear tt/text
    plot: copy [pen col spline]
    clear visu/effect
    append visu/effect reduce ['grid 100x20 blue 'draw plot]
    show [visu tt]
]
; Plot one training-error sample. The x step is sized so the n / 100
; samples emitted by train span the 600-pixel plot width; y maps the
; error value onto the 200-pixel-tall box (inverted, baseline at 170).
show_training: func [val] [
    tmp: n / 100
    pas: either x = 0 [1] [round (600 / tmp)]
    x: x + pas
    y: 170 - (val * 150)
    append plot to-pair reduce [x y]
    show visu
]
; Configuration dialog: sample count, learning rate, momentum factor and
; the logic rule to learn. Invalid field input falls back to the defaults.
ParaWin: Layout [
origin 0x0
space 5x5
across
at 5x5 Text 150 left "Sample" nt: field 50 to-string n [if error? try [n: to-integer nt/text] [n: 2000]]
at 5x30 text 150 "Learning rate" lrf: field 50 to-string lr [if error? try [lr: to-decimal lrf/text] [lr: 0.5]]
at 5x55 text 150 "Momentum Factor" mff: field 50 to-string mf [if error? try [mf: to-decimal mff/text] [mf: 0.1]]
at 5x80 text 150 "Learning rule" rule: choice silver 50 "XOR" "AND" "OR" [Set_Pattern Fix_pattern] ; rewrite pat, rebuild net
at 90x110 btn "Close" [unview/only Parawin]
]
; Main window: train/test controls, a status line, the 600x200 error
; plot (visu) and a text area (tt) for the per-pattern test results.
win: layout [
origin 0x0
space 2x5
across
at 0x5
btn "Train Network" [clear result/text clear_screen
a/train pat n
a/test pat
]
result: info 280 ; training-progress / status text
btn "Reset Learning" [Fix_pattern clear_screen] ; new random weights
btn "Neural Net Configuration" [view/new center-face ParaWin]
at 602x5 btn 100 "Quit" [Quit]
at 0x30 visu: box 600x200 green frame blue ; error plot area
tt: info 100x200 ; test-output area
]
view center-face win

No comments:

Post a Comment