Diffstat (limited to 'mnist/main.hs')
-rw-r--r--  mnist/main.hs  62
1 file changed, 4 insertions, 58 deletions
diff --git a/mnist/main.hs b/mnist/main.hs
index ae39c85..97c76df 100644
--- a/mnist/main.hs
+++ b/mnist/main.hs
@@ -1,67 +1,13 @@
-import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,Container)
+import Neuronet
import System.Random(randomRIO)
+import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,Container)
import Data.List
--- | Our neural network is simply a list of layers, each consisting of
--- a weight matrix with the input weights and a vector holding the bias
--- of each neuron.
-type Layer = (Matrix Double,Vector Double)
-type Neuronet = [Layer]
-
--- | Initialize a fresh neural network given the number of neurons in
--- each layer, as a list. Weights and biases are initialized randomly
--- using a Gaussian distribution with mean 0 and standard deviation 1.
-neuronet :: [Int] -> IO Neuronet
-neuronet l = mapM nl $ zip l (tail l)
- where nl (i,l) = (,) <$> randn l i <*> (randn 1 l >>= return.fromList.head.toLists)
-
--- | Given the input vector, calculate the `weighted inputs` and
--- `activations` for all layers of our network.
-weightin :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
-weightin [] _ = []
-weightin ((w,b):lx) x = (z,a):weightin lx a
- where z = w #> x + b
- a = cmap sigmoid z
-
--- | Given the input and output vectors, calculate the gradient of our
--- cost function using backpropagation (the result is a list by layer,
--- split into the weight and bias partial derivatives respectively).
--- Keep the required assumptions about the cost function in mind!
-backprop :: Neuronet -> Vector Double -> Vector Double -> [(Matrix Double,Vector Double)]
-backprop net x y = zipWith (\a e->(wm a e,e)) (x:map snd wa) (go $ zip ws wa)
- where ws = (++[fromLists []]) . tail . map fst $ net
- wa = weightin net x
- wm a e = fromLists $ map (\e->map (*e) (toList a)) (toList e)
- go [(w,(z,a))] = [cost_derivative a y * cmap sigmoid' z]
- go ((w,(z,a)):lx) = let r@(e:_) = go lx in tr w #> e * cmap sigmoid' z : r
-
--- | Sigmoid function
-sigmoid :: Double -> Double
-sigmoid x = 1/(1+exp(-x))
-
--- | Derivative of sigmoid function
-sigmoid' :: Double -> Double
-sigmoid' x = sigmoid x * (1-sigmoid x)
-
--- | Returns the vector of partial derivatives of the cost function
-cost_derivative :: Vector Double -> Vector Double -> Vector Double
-cost_derivative a y = a-y
-
--- | Train on one single sample
-train :: Double -> Neuronet -> Vector Double -> Vector Double -> Neuronet
-train r net x y = zipWith f net (backprop net x y)
- where f :: Layer -> Layer -> Layer
- f (a,b) (c,d) = (a-scale r c,b-scale r d)
-
--- | Train on a batch of samples
-trainBatch :: Double -> Neuronet -> [Vector Double] -> [Vector Double] -> Neuronet
-trainBatch = undefined
-
main = do
let numtrain=300000
- easynet<-neuronet [20,15,10] :: IO Neuronet
+ easynet<-neuronet [20,15,10]
samples<-filter ((<10).uncurry (+)) <$> gensamp numtrain
let testsmp=filter ((<10).uncurry (+))[(a,b)|a<-[0..10],b<-[0..10]]
@@ -82,7 +28,7 @@ main = do
getVal x=snd . last . sort $ zip x [0..9]
training net (s1,s2) = train 0.25 net (fromList $ val s1++val s2) (fromList $ val $ s1+s2)
- test net (s1,s2) = getVal . toList . snd . last $ weightin net (fromList $ val s1++val s2)
+ test net (s1,s2) = getVal . toList $ asknet net (fromList $ val s1++val s2)
gensamp :: Int -> IO [(Int,Int)]
gensamp 0 = return []
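
Note on the refactor: the definitions deleted above now live in the new Neuronet module that main.hs imports at the top. That module is not part of this diff, so the following is only a minimal sketch of the interface it presumably exposes, inferred from the removed code and from the calls left in main (neuronet, train, asknet). In particular, asknet appears nowhere in the old code, so its body below is an assumption: it looks like a forward pass that keeps only the output activations, i.e. the `snd . last $ weightin net x` expression the old test used. neuronet, backprop, train and trainBatch are presumably moved over unchanged and are omitted from the sketch.

-- Neuronet.hs (sketch only, not the actual module from this repository)
module Neuronet (Layer, Neuronet, weightin, asknet) where

import Numeric.LinearAlgebra (Matrix, Vector, cmap, (#>))

-- Same representation that was removed from main.hs above.
type Layer    = (Matrix Double, Vector Double)
type Neuronet = [Layer]

sigmoid :: Double -> Double
sigmoid x = 1 / (1 + exp (-x))

-- Weighted inputs and activations per layer, copied from the removed code.
weightin :: Neuronet -> Vector Double -> [(Vector Double, Vector Double)]
weightin [] _ = []
weightin ((w, b) : lx) x = (z, a) : weightin lx a
  where z = w #> x + b
        a = cmap sigmoid z

-- Assumed definition: run the forward pass and keep only the activations
-- of the last layer, matching how `test` uses asknet in the new main.hs.
asknet :: Neuronet -> Vector Double -> Vector Double
asknet net = snd . last . weightin net

With an interface along these lines, main.hs only needs neuronet, train and asknet, which is exactly the set of calls that remain after this change.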