author    Miguel <m.i@gmx.at>    2019-03-22 23:33:01 +0100
committer Miguel <m.i@gmx.at>    2019-03-22 23:33:01 +0100
commit    8281304e3a7bea0cb1678f899e371f8d4776f34f (patch)
tree      9bf205239738d57f1c3cd546aaf131524f2163ad
parent    9c7b00c58ae0b4ece9f46a7226b59248b8b9eba6 (diff)
cleanup and testing
-rw-r--r--  mnist/Neuronet.hs  33
-rw-r--r--  mnist/main.hs       4
2 files changed, 11 insertions, 26 deletions
diff --git a/mnist/Neuronet.hs b/mnist/Neuronet.hs
index ece288a..6c3ea32 100644
--- a/mnist/Neuronet.hs
+++ b/mnist/Neuronet.hs
@@ -1,18 +1,3 @@
-{- |
-
- N E U R O T I C U S
-
- A small and straightforward neural network coded in Haskell
- from scratch. It uses gradient descent and the beauty of
- backpropagation for learning.
-
- TODOS: improve performance, parallelize, CUDA, FFI
-
- Michal Idziorek <m.i@gmx.at>
- March 2019
-
--}
-
module Neuronet
( Neuronet -- the neuronet
,neuronet -- initialize neuronet
@@ -20,15 +5,16 @@ module Neuronet
,trainBatch -- train with batch
,asknet -- ask the neural net
- ,wghtact
+ ,wghtact
,backprop
)where
-import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,outer)
import Data.List
+import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,
+ toList,fromList,toLists,fromLists,outer)
--- | A layer of our network consists of a weight matrix with input weights
--- and a vector holding the bias at each neuron.
+-- | A layer of our network consists of a weight matrix with input
+-- weights and a vector holding the bias at each neuron.
type Layer = (Matrix Double,Vector Double)
-- | Our neural network is simply a list of layers
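
For illustration (not part of the commit), a Layer for two neurons fed by three inputs could be built by hand; this sketch assumes hmatrix's (><) matrix builder is imported alongside the names above:

-- Hypothetical example: a 2-neuron layer, each neuron taking 3 inputs.
layerExample :: Layer
layerExample = ( (2><3) [ 0.1, 0.2, 0.3
                        , 0.4, 0.5, 0.6 ]  -- weights: one row per neuron
               , fromList [0.0, 0.1] )     -- one bias per neuron
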
@@ -39,15 +25,15 @@ type Neuronet = [Layer]
-- using a Gaussian distribution with mean 0 and standard deviation 1.
neuronet :: [Int] -> IO Neuronet
neuronet l = mapM nl $ zip l (tail l)
- where nl (i,l) = (,) <$> randn l i <*> (randn 1 l >>= return.fromList.head.toLists)
+ where nl (i,l) = (,) <$> randn l i <*>
+ (randn 1 l >>= return.fromList.head.toLists)
-- | Given the input vector calculate the `weighted inputs` and
-- `activations` for all layers of our network.
wghtact :: Neuronet -> Vector Double -> [(Vector Double,Vector Double)]
wghtact [] _ = []
-wghtact ((w,b):lx) x = (z,a):wghtact lx a
- where z = w #> x + b
- a = cmap sigmoid z
+wghtact ((w,b):lx) x = (z,a):wghtact lx a where z = w #> x + b
+ a = cmap sigmoid z
-- | Given the input vector calculate the final output
asknet :: Neuronet -> Vector Double -> Vector Double
@@ -90,4 +76,3 @@ trainBatch r net xs ys = zipWith (upd r) net bp
-- | Update a single Layer given the `direction` and `training rate`
upd :: Double -> Layer -> (Matrix Double,Vector Double) -> Layer
upd r (a,b) (c,d) = (a-scale r c,b-scale r d)
-
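
Taken together: neuronet builds the Gaussian-initialised layers, wghtact runs the forward pass (z = w #> x + b, then a = cmap sigmoid z; the sigmoid itself is defined outside this hunk, presumably the usual 1 / (1 + exp (-z))), and asknet returns the last layer's activations. A minimal driver sketch under those assumptions:

import Numeric.LinearAlgebra (Vector, fromList)
import Neuronet (neuronet, asknet)

-- Illustrative only: build an MNIST-sized net and run one forward pass.
main :: IO ()
main = do
  net <- neuronet [784, 30, 10]                          -- 784 inputs, 10 outputs
  let x = fromList (replicate 784 0.5) :: Vector Double  -- dummy input
  print (asknet net x)                                   -- 10 output activations
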
diff --git a/mnist/main.hs b/mnist/main.hs
index 2810e34..02ba6f7 100644
--- a/mnist/main.hs
+++ b/mnist/main.hs
@@ -59,9 +59,9 @@ read_samples f1 f2 = do
mainMNIST :: IO ()
mainMNIST = do
- let ep = 20 -- number of epochs
+ let ep = 10 -- number of epochs
let mbs = 10 -- mini-batch size
- let lr = 2 -- learning rate
+ let lr = 3 -- learning rate
let cap = 999999 -- cap number of training samples
putStrLn "= Init ="
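
The hunk only shows the hyperparameters; the loop that consumes them lies outside it. A sketch of how ep, mbs and lr would plausibly drive trainBatch (hypothetical: chunksOf comes from the split package, and trainBatch's argument order follows its definition in Neuronet.hs above):

import Data.List (foldl')
import Data.List.Split (chunksOf)   -- from the `split` package
import Numeric.LinearAlgebra (Vector)
import Neuronet (Neuronet, trainBatch)

-- Hypothetical training driver, not shown in this diff.
trainEpochs :: Int -> Int -> Double
            -> Neuronet -> [Vector Double] -> [Vector Double] -> Neuronet
trainEpochs ep mbs lr net0 xs ys = iterate epoch net0 !! ep
  where
    batches           = zip (chunksOf mbs xs) (chunksOf mbs ys)
    epoch net         = foldl' step net batches
    step net (bx, by) = trainBatch lr net bx by

With ep = 10, mbs = 10 and lr = 3 as set above, each epoch performs one trainBatch update per 10-sample mini-batch; a real run would also reshuffle the samples between epochs.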