path: root/mnist/Neuronet.hs
author    Miguel <m.i@gmx.at>  2019-03-22 20:11:55 +0100
committer Miguel <m.i@gmx.at>  2019-03-22 20:11:55 +0100
commit    e1826a4c5975260c784d3f6c43fd53a7092d64e4 (patch)
tree      0571ce3ddbf956d397da37bdec4b425d1808baaf /mnist/Neuronet.hs
parent    683ce9f3dc564766c2b3be3d9e186c222d843332 (diff)
hooray. fixed stupid bug. now at least 90% on MNIST after one epoch
Diffstat (limited to 'mnist/Neuronet.hs')
-rw-r--r--  mnist/Neuronet.hs | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mnist/Neuronet.hs b/mnist/Neuronet.hs
index 517f3b8..e3344c7 100644
--- a/mnist/Neuronet.hs
+++ b/mnist/Neuronet.hs
@@ -2,10 +2,12 @@
N E U R O T I C U S
- A small and straight forward neural network coded in Haskell
- from scratch. It uses gradient descent and the beatuiful
+ A small and straightforward neural network coded in Haskell
+ from scratch. It uses gradient descent and the beauty of
backpropagation for learning.
+ TODOS: improve performance, parallelize, CUDA, FFI
+
Michal Idziorek <m.i@gmx.at>
March 2019
@@ -17,6 +19,9 @@ module Neuronet
,train -- train with one sample
,trainBatch -- train with batch
 ,asknet -- ask the neural net
+
+ ,wghtact
+ ,backprop
)where
import Numeric.LinearAlgebra (Matrix,Vector,tr,scale,cmap,(#>),randn,toList,fromList,toLists,fromLists,Container)
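
The hunk above also exports wghtact and backprop, presumably so the forward pass and the gradient computation can be exercised from outside the module, e.g. in tests. As a rough, self-contained sketch of what backpropagation through a single sigmoid layer computes (the names layerGrads, sigmoid and sigmoid' are hypothetical and not the signatures used in Neuronet.hs):

    -- Illustrative sketch only: backprop through one sigmoid layer.
    -- None of these names are taken from Neuronet.hs.
    import Numeric.LinearAlgebra (Matrix, Vector, (#>), tr, cmap, outer)

    sigmoid, sigmoid' :: Vector Double -> Vector Double
    sigmoid  = cmap (\x -> 1 / (1 + exp (-x)))
    sigmoid' = cmap (\x -> let s = 1 / (1 + exp (-x)) in s * (1 - s))

    -- Given weights w, bias b, input x and dC/da from the layer above,
    -- return (dC/dW, dC/db, error to pass to the layer below).
    layerGrads :: Matrix Double -> Vector Double -> Vector Double
               -> Vector Double -> (Matrix Double, Vector Double, Vector Double)
    layerGrads w b x dCda = (dW, delta, below)
      where
        z     = w #> x + b         -- weighted input of the layer
        delta = dCda * sigmoid' z  -- dC/dz, which is also dC/db
        dW    = delta `outer` x    -- dC/dW as an outer product
        below = tr w #> delta      -- error propagated downwards

Note that delta doubles as the bias gradient, and tr w #> delta is the term that lets the error flow backwards layer by layer.
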
@@ -83,6 +88,7 @@ trainBatch r net xs ys = zipWith (upd r) net bp
fc v a = zipWith ff v a
ff (a,b) (c,d) = (a+c,b+d)
--- | Update a Layer given the `direction` and `training rate`
+-- | Update a single Layer given the `direction` and `training rate`
upd :: Double -> Layer -> (Matrix Double,Vector Double) -> Layer
upd r (a,b) (c,d) = (a-scale r c,b-scale r d)
+
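
The patched upd is exactly one gradient-descent step per layer: weights and bias move against their gradients, scaled by the learning rate r. A minimal runnable sketch, assuming GHC with the hmatrix package and Layer = (Matrix Double, Vector Double), which the signature of upd implies:

    -- A minimal sketch of one update step, assuming GHC plus hmatrix.
    import Numeric.LinearAlgebra (Matrix, Vector, matrix, vector, scale, disp)

    type Layer = (Matrix Double, Vector Double)

    -- Same shape as the patched upd: step weights and bias against
    -- the gradient, scaled by the learning rate r.
    upd :: Double -> Layer -> (Matrix Double, Vector Double) -> Layer
    upd r (a, b) (c, d) = (a - scale r c, b - scale r d)

    main :: IO ()
    main = do
      let w        = matrix 3 [0.5, -0.2, 0.1, 0.3, 0.8, -0.5]  -- 2x3 weights
          bias     = vector [0.1, -0.1]
          gradW    = matrix 3 [0.01, 0.02, 0.00, -0.01, 0.03, 0.02]
          gradB    = vector [0.05, -0.02]
          (w', b') = upd 0.1 (w, bias) (gradW, gradB)
      disp 3 w'   -- weights after one step with learning rate 0.1
      print b'

With r = 0.1 the first weight moves from 0.5 to 0.5 - 0.1 * 0.01 = 0.499, and likewise elementwise for every weight and bias entry.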