| author | Miguel <m.i@gmx.at> | 2019-03-22 23:33:01 +0100 |
|---|---|---|
| committer | Miguel <m.i@gmx.at> | 2019-03-22 23:33:01 +0100 |
| commit | 8281304e3a7bea0cb1678f899e371f8d4776f34f (patch) | |
| tree | 9bf205239738d57f1c3cd546aaf131524f2163ad /mnist/Neuronet.hs | |
| parent | 9c7b00c58ae0b4ece9f46a7226b59248b8b9eba6 (diff) | |
cleanup and testing
Diffstat (limited to 'mnist/Neuronet.hs')
| -rw-r--r-- | mnist/Neuronet.hs | 33 |
|---|---|---|

1 file changed, 9 insertions, 24 deletions
```diff
diff --git a/mnist/Neuronet.hs b/mnist/Neuronet.hs
index ece288a..6c3ea32 100644
--- a/mnist/Neuronet.hs
+++ b/mnist/Neuronet.hs
@@ -1,18 +1,3 @@
-{- |
-
-   N E U R O T I C U S
-
-   A small and straightforward neural network coded in Haskell
-   from scratch. It uses gradient descent and the beauty of
-   backpropagation for learning.
-
-   TODOS: improve performance, parallelize, CUDA, FFI
-
-   Michal Idziorek <m.i@gmx.at>
-   March 2019
-
--}
-
 module Neuronet
 ( Neuronet    -- the neuronet
  ,neuronet    -- initalize neuronet
@@ -20,15 +5,16 @@ module Neuronet
  ,trainBatch  -- train with batch
  ,asknet      -- ask the neuroal net
- ,wghtact
+ ,wghtact ,backprop
 )where
 
-import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,outer)
 import Data.List
+import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,
+                              toList,fromList,toLists,fromLists,outer)
 
--- | A layer of our network consists of a weight matrix with input weights
--- and a vector holding the bias at each neuron.
+-- | A layer of our network consists of a weight matrix with input
+-- weights and a vector holding the bias at each neuron.
 type Layer = (Matrix Double,Vector Double)
 
 -- | Our neural network is simply a list of layers
@@ -39,15 +25,15 @@ type Neuronet = [Layer]
 -- using gaussian distribution with mean 0 and standard deviation 1.
 neuronet :: [Int] -> IO Neuronet
 neuronet l = mapM nl $ zip l (tail l)
-  where nl (i,l) = (,) <$> randn l i <*> (randn 1 l >>= return.fromList.head.toLists)
+  where nl (i,l) = (,) <$> randn l i <*>
+                   (randn 1 l >>= return.fromList.head.toLists)
 
 -- | Given the input vector calculate the `weighted inputs` and
 -- `activations` for all layers of our network.
 wghtact :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
 wghtact [] _ = []
-wghtact ((w,b):lx) x = (z,a):wghtact lx a
-  where z = w #> x + b
-        a = cmap sigmoid z
+wghtact ((w,b):lx) x = (z,a):wghtact lx a where z = w #> x + b
+                                                a = cmap sigmoid z
 
 -- | Given the input vector calculate the final output
 asknet :: Neuronet -> Vector Double -> Vector Double
@@ -90,4 +76,3 @@ trainBatch r net xs ys = zipWith (upd r) net bp
 -- | Update a single Layer given the `direction` and `training rate`
 upd :: Double -> Layer -> (Matrix Double,Vector Double) -> Layer
 upd r (a,b) (c,d) = (a-scale r c,b-scale r d)
-
```
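For quick reference, a minimal usage sketch of the module as it stands after this commit. It is illustrative only: the `[784,30,10]` layer sizes and the constant input vector are invented, and only `neuronet` and `asknet` are exercised, since `trainBatch`'s full type is outside the visible hunks.

```haskell
-- Hypothetical usage sketch of Neuronet after this commit.
-- Layer sizes and input data are made up for illustration; only
-- exports visible in the diff above are used.
import Neuronet (neuronet, asknet)
import Numeric.LinearAlgebra (Vector, fromList)

main :: IO ()
main = do
  -- 784-30-10 is a typical MNIST layout: 784 pixel inputs, 10 digit outputs.
  net <- neuronet [784, 30, 10]
  let x = fromList (replicate 784 0.5) :: Vector Double
  -- Forward pass: wghtact computes z = w #> a + b and a = sigmoid z per layer.
  print (asknet net x)
```

Note that `upd` in the last hunk is the plain gradient-descent step: each weight matrix and bias vector moves against its backpropagated direction, scaled by the training rate `r`.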
