{- |
   N E U R O T I C U S

   A small and straightforward neural network coded in Haskell from scratch.
   It uses gradient descent and the beautiful backpropagation for learning.

   Michal Idziorek
   March 2019
-}

module Neuronet
    ( Neuronet      -- the neural network
    ,neuronet       -- initialize a network
    ,train          -- train with one sample
    ,trainBatch     -- train with a batch
    ,asknet         -- ask the neural net
    ) where

import Numeric.LinearAlgebra
    (Matrix, Vector, tr, scale, cmap, (#>), randn, toList, fromList, toLists, fromLists)
import Data.List (foldl1')

-- | A layer of our network consists of a weight matrix with the input weights
-- and a vector holding the bias of each neuron.
type Layer = (Matrix Double, Vector Double)

-- | Our neural network is simply a list of layers.
type Neuronet = [Layer]

-- | Initialize a fresh neural network given the number of neurons on
-- each layer, as a list. Weights and biases are initialized randomly
-- using a Gaussian distribution with mean 0 and standard deviation 1.
neuronet :: [Int] -> IO Neuronet
neuronet l = mapM nl $ zip l (tail l)
  where nl (i,o) = (,) <$> randn o i <*> (fromList . head . toLists <$> randn 1 o)

-- | Given the input vector, calculate the `weighted inputs` and
-- `activations` for all layers of our network.
wghtact :: Neuronet -> Vector Double -> [(Vector Double, Vector Double)]
wghtact []         _ = []
wghtact ((w,b):lx) x = (z,a) : wghtact lx a
  where z = w #> x + b
        a = cmap sigmoid z

-- | Given the input vector, calculate the final output.
asknet :: Neuronet -> Vector Double -> Vector Double
asknet net x = snd . last $ wghtact net x

-- | Given the input and output vectors, calculate the gradient of our
-- cost function using backpropagation (the result is listed by layer and
-- split into the weight and bias partial derivatives respectively).
-- Keep the required assumptions about the cost function in mind!
backprop :: Neuronet -> Vector Double -> Vector Double -> [(Matrix Double, Vector Double)]
backprop net x y = zipWith (\a e -> (wm a e, e)) (x : map snd wa) (go $ zip ws wa)
  where ws     = (++ [fromLists []]) . tail . map fst $ net   -- each layer paired with the next layer's weights (dummy at the end)
        wa     = wghtact net x
        wm a e = fromLists $ map (\e' -> map (*e') (toList a)) (toList e)  -- outer product of the error e and the activation a
        go []             = []
        go [(_,(z,a))]    = [cost_derivative a y * cmap sigmoid' z]
        go ((w,(z,_)):lx) = let r@(e:_) = go lx
                            in  tr w #> e * cmap sigmoid' z : r

-- | Sigmoid function.
sigmoid :: Double -> Double
sigmoid x = 1 / (1 + exp (-x))

-- | Derivative of the sigmoid function.
sigmoid' :: Double -> Double
sigmoid' x = sigmoid x * (1 - sigmoid x)

-- | Returns the vector of partial derivatives of the cost function.
cost_derivative :: Vector Double -> Vector Double -> Vector Double
cost_derivative a y = a - y

-- | Train on one single sample.
train :: Double -> Neuronet -> Vector Double -> Vector Double -> Neuronet
train r net x y = zipWith (upd r) net (backprop net x y)

-- | Train on a batch of samples (the gradients of all samples are summed
-- before a single update step is taken).
trainBatch :: Double -> Neuronet -> [Vector Double] -> [Vector Double] -> Neuronet
trainBatch r net xs ys = zipWith (upd r) net bp
  where bp = foldl1' fc $ map (uncurry $ backprop net) (zip xs ys)
        fc = zipWith ff
        ff (a,b) (c,d) = (a + c, b + d)

-- | Update a layer given the gradient and the learning rate.
upd :: Double -> Layer -> (Matrix Double, Vector Double) -> Layer
upd r (a,b) (c,d) = (a - scale r c, b - scale r d)
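
{- Usage sketch (illustrative only, not part of the library): trains the net
   on XOR-style data. The network shape [2,3,1], learning rate 0.5 and the
   iteration count are assumptions picked for demonstration; outputs should
   only approach the targets, not match them exactly.

   > import Numeric.LinearAlgebra (fromList)
   > import Neuronet
   >
   > main :: IO ()
   > main = do
   >   net <- neuronet [2,3,1]                          -- 2 inputs, 3 hidden, 1 output
   >   let xs      = map fromList [[0,0],[0,1],[1,0],[1,1]]
   >       ys      = map fromList [[0],[1],[1],[0]]
   >       trained = iterate (\n -> trainBatch 0.5 n xs ys) net !! 5000
   >   mapM_ (print . asknet trained) xs                -- should tend towards 0,1,1,0
-}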