-rw-r--r--  mnist/Neuronet.hs  35
1 file changed, 18 insertions, 17 deletions
diff --git a/mnist/Neuronet.hs b/mnist/Neuronet.hs
index cdebc64..7a3a159 100644
--- a/mnist/Neuronet.hs
+++ b/mnist/Neuronet.hs
@@ -1,9 +1,9 @@
{- |
N E U R O T I C U S
-
- A small and straight forward neural network coded in Haskell.
- It uses the beatuiful backpropagation for learning.
+
+ A small and straightforward neural network coded in Haskell
+ from scratch. It uses the beautiful backpropagation algorithm for learning.
Michal Idziorek <m.i@gmx.at>
March 2019
@@ -20,10 +20,11 @@ module Neuronet
import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,Container)
import Data.List
--- | Our neural network is simply a list of layers each consisting of
--- a weight matrix with input weights and a vector holding the bias at
--- each neuron.
+-- | A layer of our network consists of a weight matrix with input weights
+-- and a vector holding the bias at each neuron.
type Layer = (Matrix Double,Vector Double)
+
+-- | Our neural network is simply a list of layers.
type Neuronet = [Layer]
-- | Initialize a fresh neuronal network given the number of neurons on
@@ -35,15 +36,15 @@ neuronet l = mapM nl $ zip l (tail l)
-- | Given the input vector calculate the `weighted inputs` and
-- `activations` for all layers of our network.
-weightin :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
-weightin [] _ = []
-weightin ((w,b):lx) x = (z,a):weightin lx a
+wghtact :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
+wghtact [] _ = []
+wghtact ((w,b):lx) x = (z,a):wghtact lx a
where z = w #> x + b
a = cmap sigmoid z
-- | Given the input vector calculate the final output
asknet :: Neuronet -> Vector Double -> Vector Double
-asknet net x = snd . last $ weightin net x
+asknet net x = snd . last $ wghtact net x
-- | Given the input and output vectors calculate the gradient of our
-- cost function, utilizing backpropagation (output list by layer and
@@ -52,7 +53,7 @@ asknet net x = snd . last $ weightin net x
backprop :: Neuronet -> Vector Double -> Vector Double -> [(Matrix Double,Vector Double)]
backprop net x y = zipWith (\a e->(wm a e,e)) (x:map snd wa) (go $ zip ws wa)
where ws = (++[fromLists []]) . tail . map fst $ net
- wa = weightin net x
+ wa = wghtact net x
wm a e = fromLists $ map (\e->map (*e) (toList a)) (toList e)
go [(w,(z,a))] = [cost_derivative a y * cmap sigmoid' z]
go ((w,(z,a)):lx) =let r@(e:_)=go lx in tr w #> e * cmap sigmoid' z:r
@@ -71,15 +72,15 @@ cost_derivative a y = a-y
-- | Train on one single sample
train :: Double -> Neuronet -> Vector Double -> Vector Double -> Neuronet
-train r net x y = zipWith f net (backprop net x y)
- where f :: Layer -> Layer -> Layer
- f (a,b) (c,d) = (a-scale r c,b-scale r d)
+train r net x y = zipWith (upd r) net (backprop net x y)
-- | Train on a batch of samples
trainBatch :: Double -> Neuronet -> [Vector Double] -> [Vector Double] -> Neuronet
-trainBatch r net xs ys = zipWith f net bp
+trainBatch r net xs ys = zipWith (upd r) net bp
where bp = foldl1' fc $ map (uncurry $ backprop net) (zip xs ys)
- f :: Layer -> Layer -> Layer
- f (a,b) (c,d) = (a-scale r c,b-scale r d)
fc v a = zipWith ff v a
ff (a,b) (c,d) = (a+c,b+d)
+
+-- | Update a Layer given the gradient `direction` and the learning rate
+upd :: Double -> Layer -> (Matrix Double,Vector Double) -> Layer
+upd r (a,b) (c,d) = (a-scale r c,b-scale r d)
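
A minimal usage sketch of the API touched by this commit (not part of the diff).
It assumes the module exports neuronet, asknet, train and trainBatch, and that
neuronet has type [Int] -> IO Neuronet (random initialization via randn runs in
IO, matching the mapM in its definition).

import Numeric.LinearAlgebra (fromList, toList)
import Neuronet

main :: IO ()
main = do
  -- a tiny 2-3-1 net: 2 inputs, one hidden layer with 3 neurons, 1 output
  net <- neuronet [2,3,1]
  let x  = fromList [0.0,1.0]            -- input sample
      y  = fromList [1.0]                -- desired output
      n1 = train 0.5 net x y             -- one gradient step, rate 0.5
      n2 = trainBatch 0.5 n1 [x,x] [y,y] -- step with summed batch gradients
  print . toList $ asknet n2 x           -- forward pass on the trained net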