From 51b07b4d47f4bacda944b0fa0564e5f338b7aefc Mon Sep 17 00:00:00 2001
From: Miguel
Date: Fri, 22 Mar 2019 23:59:47 +0100
Subject: writing about neural networks in haskell

---
 .../00040_Haskell/00220_Neural-Networks/digits.png | Bin 0 -> 26299 bytes
 .../00040_Haskell/00220_Neural-Networks/index.md   | 55 +++++++++++---
 .../00040_Haskell/00220_Neural-Networks/mnist.cast | 82 +++++++++++++++++++++
 3 files changed, 128 insertions(+), 9 deletions(-)
 create mode 100644 00_blog/00040_Haskell/00220_Neural-Networks/digits.png
 create mode 100644 00_blog/00040_Haskell/00220_Neural-Networks/mnist.cast

diff --git a/00_blog/00040_Haskell/00220_Neural-Networks/digits.png b/00_blog/00040_Haskell/00220_Neural-Networks/digits.png
new file mode 100644
index 0000000..6b18ad6
Binary files /dev/null and b/00_blog/00040_Haskell/00220_Neural-Networks/digits.png differ
diff --git a/00_blog/00040_Haskell/00220_Neural-Networks/index.md b/00_blog/00040_Haskell/00220_Neural-Networks/index.md
index ee62d09..db69154 100644
--- a/00_blog/00040_Haskell/00220_Neural-Networks/index.md
+++ b/00_blog/00040_Haskell/00220_Neural-Networks/index.md
@@ -1,13 +1,50 @@
 - March 2019
 
 # Neural Networks
 ## Hello Sigmoid
 
-WORK IN PROGRESS
+Welcome to my **Hello World** of **Backpropagation**.
+
+This is my tiny and straightforward Haskell implementation of a basic
+neural net using **gradient descent**. I coded it from scratch while
+reading the first two chapters of _Neural Networks and Deep Learning_ [1].
+
+Be warned that the following implementation aims at clarity and
+readability, not at performance! In another article I will probably
+discuss how to optimize it heavily, using _parallel programming_ /
+_TensorFlow (CUDA)_.
+
+The source code below was auto-fetched from:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ {.haskell .numberLines}
+{BEGIN:EMBED}
+https://gitweb.softwarefools.com/?p=miguel/haskell.git;a=blob_plain;f=mnist/Neuronet.hs
+{END:EMBED}
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
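+
+Before moving on, here is a minimal sketch of the core idea in
+isolation: one gradient-descent step for a single sigmoid neuron under
+the quadratic cost. Note that this is *not* an excerpt of `Neuronet.hs`
+above; all names here (`step`, `train`, the learning rate `eta`) are
+made up for illustration.
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ {.haskell}
+-- Illustrative sketch only, not taken from Neuronet.hs.
+sigmoid :: Double -> Double
+sigmoid z = 1 / (1 + exp (negate z))
+
+-- Derivative of the sigmoid, needed for the error term below.
+sigmoid' :: Double -> Double
+sigmoid' z = sigmoid z * (1 - sigmoid z)
+
+-- One gradient-descent update of the weights and the bias for a
+-- single training sample (xs, y), following the quadratic cost
+-- C = (a - y)^2 / 2, where a = sigmoid z.
+step :: Double -> ([Double], Double) -> ([Double], Double) -> ([Double], Double)
+step eta (ws, b) (xs, y) = (zipWith update ws xs, b - eta * delta)
+  where
+    z          = sum (zipWith (*) ws xs) + b   -- weighted input
+    delta      = (sigmoid z - y) * sigmoid' z  -- output error
+    update w x = w - eta * delta * x
+
+-- Folding the step over a list of samples trains the neuron on-line.
+train :: Double -> ([Double], Double) -> [([Double], Double)] -> ([Double], Double)
+train eta = foldl (step eta)
+
+main :: IO ()
+main = print (train 3.0 ([0, 0], 0) (replicate 1000 ([1, 1], 1)))
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The full network applies the same update layer by layer, with the
+error term propagated backwards through the weight matrices.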
+
+## MNIST
+
+Even this simple _vanilla_ network can already give us some tangible
+results when run on the well-known MNIST database [2] of handwritten
+digits, which contains a set of 60,000 training samples and another
+set of 10,000 test samples.
+
+![](digits.png){}
+
+The following screencast shows the success rates of the simple network
+for the first ten epochs. (The speed of the cast was increased quite a
+bit.)
+
+
+
+## Technical Vocabulary
+
+A loose collection of notes and important terms you should familiarize
+yourself with when learning about neural networks.
 
 * sigmoid neurons vs. perceptrons
 * recurrent neural networks
 * cost / loss / objective function
+* required assumptions about the cost function
 * quadratic cost function / mean squared error
 * gradient descent
 * gradient (vector of partial derivatives)
@@ -15,17 +52,17 @@ WORK IN PROGRESS
 * on-line / incremental learning
 * deep neural networks
 * mini-batches
-* backpropagation (4 funamental equations)
+* backpropagation (based on 4 fundamental equations / check proofs)
 * weighted input
-* required assumptions about the cost function
-* hadamard / schur product
+* saturated neuron
 
+## Git Repo
 
-* saturated neuron
+* <https://gitweb.softwarefools.com/?p=miguel/haskell.git>
 
 ## Ref
 
 * [1] Michael A. Nielsen, _Neural Networks and Deep Learning_, <http://neuralnetworksanddeeplearning.com/>
-* [2] 
-* [3] 
-* [4] 
+* [2] The MNIST database of handwritten digits, <http://yann.lecun.com/exdb/mnist/>
+* [3] 
+* [4] 

diff --git a/00_blog/00040_Haskell/00220_Neural-Networks/mnist.cast b/00_blog/00040_Haskell/00220_Neural-Networks/mnist.cast
new file mode 100644
index 0000000..d003853
--- /dev/null
+++ b/00_blog/00040_Haskell/00220_Neural-Networks/mnist.cast
@@ -0,0 +1,82 @@
+{
+  "height": 17,
+  "duration": 31.546069,
+  "title": null,
+  "env": {
+    "SHELL": "/bin/bash",
+    "TERM": "screen-256color"
+  },
+  "command": null,
+  "version": 1,
+  "stdout": [
+    [
+      0.019267,
+      "miguel@megaloman:~/git/haskell/mnist$ "
+    ],
+    [
+      0.616503,
+      "./main "
+    ],
+    [
+      0.672781,
+      "\r\n"
+    ],
+    [
+      0.003816,
+      "= Init =\r\n"
+    ],
+    [
+      4.6e-05,
+      "Initializing Net.......[\u001b[32m\u0002Done\u001b[m\u0002]\r\n"
+    ],
+    [
+      0.060061,
+      "Reading Samples........[\u001b[32m\u0002Done\u001b[m\u0002]\r\n= Training =\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 1....... \u001b[32m\u000291.1\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 2....... \u001b[32m\u000292.59\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 3....... \u001b[32m\u000293.37\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 4....... \u001b[32m\u000293.54\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 5....... \u001b[32m\u000293.94\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 6....... \u001b[32m\u000294.17\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 7....... \u001b[32m\u000294.59\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 8....... \u001b[32m\u000294.74\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 9....... \u001b[32m\u000294.75\u001b[m\u0002%\r\n"
+    ],
+    [
+      3.0,
+      "Epoch 10....... \u001b[32m\u000294.85\u001b[m\u0002%\r\n= THE END =\r\n"
+    ],
+    [
+      0.172595,
+      "miguel@megaloman:~/git/haskell/mnist$ "
+    ]
+  ],
+  "width": 46
+}
\ No newline at end of file
-- 
cgit v1.2.3