{- |
N E U R O T I C U S
A small and straightforward neural network coded in Haskell.
It uses the beautiful backpropagation algorithm for learning.
Michal Idziorek <m.i@gmx.at>
March 2019
-}
module Neuronet
( neuronet -- initialize a fresh network
,train -- train with one sample
,trainBatch -- train with batch
,asknet -- query the neural net
)where
import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,Container)
import Data.List
-- | Our neural network is simply a list of layers, each consisting of
-- a matrix holding the incoming weights and a vector holding the bias
-- of each neuron.
type Layer = (Matrix Double,Vector Double)
type Neuronet = [Layer]
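-- A layer with n neurons fed by m inputs is thus an n x m weight matrix
-- paired with a length-n bias vector; the network is the list of such
-- layers from the first hidden layer to the output layer.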
-- | Initialize a fresh neural network given the number of neurons in
-- each layer, as a list. Weights and biases are initialized randomly
-- using a Gaussian distribution with mean 0 and standard deviation 1.
neuronet :: [Int] -> IO Neuronet
neuronet l = mapM nl $ zip l (tail l)
  where nl (i,o) = (,) <$> randn o i <*> (fromList . head . toLists <$> randn 1 o)
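-- For example, @neuronet [2,3,1]@ produces two layers: a 3x2 weight
-- matrix with a 3-element bias vector, followed by a 1x3 weight matrix
-- with a 1-element bias vector.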
-- | Given the input vector, calculate the `weighted inputs` and
-- `activations` for all layers of our network.
weightin :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
weightin [] _ = []
weightin ((w,b):lx) x = (z,a):weightin lx a
where z = w #> x + b
a = cmap sigmoid z
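-- In symbols, the two bindings above compute for every layer l
--   z_l = W_l a_(l-1) + b_l     (weighted input)
--   a_l = sigmoid z_l           (activation)
-- where a_0 is the network input x.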
-- | Given the input vector, calculate the network's final output (the activation of the last layer).
asknet :: Neuronet -> Vector Double -> Vector Double
asknet net x = snd . last $ weightin net x
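-- A possible usage sketch (layer sizes and input values are purely
-- illustrative):
--
-- > main :: IO ()
-- > main = do
-- >   net <- neuronet [2,3,1]
-- >   print $ asknet net (fromList [0.5,0.8])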
-- | Given the input and output vectors, calculate the gradient of our
-- cost function using backpropagation. The result is one pair per layer,
-- holding the partial derivatives with respect to the weights and the
-- biases respectively. Keep the required assumptions about the cost
-- function in mind!
backprop :: Neuronet -> Vector Double -> Vector Double -> [(Matrix Double,Vector Double)]
backprop net x y = zipWith (\a e -> (wm a e, e)) (x : map snd wa) (go $ zip ws wa)
  where ws = (++ [fromLists []]) . tail . map fst $ net
        wa = weightin net x
        wm a e = fromLists $ map (\d -> map (* d) (toList a)) (toList e)
        go [(_,(z,a))]    = [cost_derivative a y * cmap sigmoid' z]
        go ((w,(z,_)):lx) = let r@(e:_) = go lx in tr w #> e * cmap sigmoid' z : r
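-- The recursion in `go` realizes the standard backpropagation equations:
--   delta_L = (a_L - y) * sigmoid'(z_L)                   (output layer)
--   delta_l = (W_(l+1)^T delta_(l+1)) * sigmoid'(z_l)     (hidden layers)
-- and each returned pair holds
--   dC/dW_l = delta_l (outer product) a_(l-1)   and   dC/db_l = delta_l.
-- Note that `ws` pairs every layer with the *next* layer's weight matrix,
-- hence the @tail@ padded with an empty matrix at the end.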
-- | Sigmoid function
sigmoid :: Double -> Double
sigmoid x = 1/(1+exp(-x))
-- | Derivative of sigmoid function
sigmoid' :: Double -> Double
sigmoid' x = sigmoid x * (1-sigmoid x)
-- | Returns the vector of partial derivatives of the (quadratic) cost
-- function with respect to the output activations.
cost_derivative :: Vector Double -> Vector Double -> Vector Double
cost_derivative a y = a-y
-- | Train on one single sample; @r@ is the learning rate.
train :: Double -> Neuronet -> Vector Double -> Vector Double -> Neuronet
train r net x y = zipWith f net (backprop net x y)
where f :: Layer -> Layer -> Layer
f (a,b) (c,d) = (a-scale r c,b-scale r d)
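-- i.e. one plain gradient-descent step per parameter:
--   w := w - r * dC/dw ,   b := b - r * dC/db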
-- | Train on a batch of samples. The gradients of all samples are summed
-- up (not averaged) before a single gradient-descent step is taken.
trainBatch :: Double -> Neuronet -> [Vector Double] -> [Vector Double] -> Neuronet
trainBatch r net xs ys = zipWith f net bp
  where bp = foldl1' fc $ zipWith (backprop net) xs ys
        f :: Layer -> Layer -> Layer
        f  (a,b) (c,d) = (a - scale r c, b - scale r d)
        fc v a = zipWith ff v a
        ff (a,b) (c,d) = (a + c, b + d)
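-- A possible training sketch (data set, layer sizes and hyperparameters
-- are illustrative assumptions, not part of this module):
--
-- > xorDemo :: IO ()
-- > xorDemo = do
-- >   net0 <- neuronet [2,3,1]
-- >   let xs     = map fromList [[0,0],[0,1],[1,0],[1,1]]
-- >       ys     = map fromList [[0],[1],[1],[0]]
-- >       step n = trainBatch 0.5 n xs ys
-- >       net    = iterate step net0 !! 10000
-- >   mapM_ (print . asknet net) xs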