
neu


  • Deep learning framework for Go, written from scratch
  • Pure Go implementation
  • Uses only the standard library

Example

MNIST
make mnistdl
go run cmd/mnist/main.go --dir ./testdata
*model.MLP
 0: *layer.Affine: W(784, 50), B(1, 50): 39250
 1: *layer.BatchNorm: G(1, 50), B(1, 50): 100
 2: *layer.ReLU
 3: *layer.Affine: W(50, 50), B(1, 50): 2550
 4: *layer.BatchNorm: G(1, 50), B(1, 50): 100
 5: *layer.ReLU
 6: *layer.Affine: W(50, 50), B(1, 50): 2550
 7: *layer.BatchNorm: G(1, 50), B(1, 50): 100
 8: *layer.ReLU
 9: *layer.Affine: W(50, 10), B(1, 10): 510
10: *layer.SoftmaxWithLoss
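
The trailing number on each layer line is its parameter count: layer 0 holds W(784, 50) plus B(1, 50), i.e. 784*50 + 50 = 39250, and each BatchNorm adds 50 + 50 = 100 for its scale and shift. In the log below, each block reports the iteration, the loss, train/test accuracy, and 20 test predictions alongside their true labels.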

   0: loss=[[2.2485]], train_acc=0.1800, test_acc=0.1300
predict: [9 4 1 4 8 3 2 4 2 4 9 4 4 3 4 2 6 8 4 8]
label  : [0 6 8 7 8 2 0 1 3 1 8 7 7 7 5 6 1 8 8 8]

 100: loss=[[0.4916]], train_acc=0.8800, test_acc=0.8900
predict: [4 3 3 6 4 8 3 1 6 7 2 9 8 1 8 7 3 0 5 6]
label  : [4 3 3 6 4 8 3 1 6 7 2 9 8 1 8 3 8 0 5 6]

 200: loss=[[0.4164]], train_acc=0.8900, test_acc=0.8600
predict: [4 4 3 1 0 0 3 3 7 6 1 9 8 2 9 5 2 6 6 6]
label  : [9 4 3 1 0 0 3 3 7 6 1 9 8 2 9 9 2 6 6 6]

 300: loss=[[0.3602]], train_acc=0.8800, test_acc=0.8800
predict: [7 9 8 5 2 6 3 6 2 6 9 4 9 8 2 1 5 7 0 9]
label  : [7 9 8 6 2 6 3 6 2 6 9 4 9 8 2 1 5 7 0 9]

 400: loss=[[0.2036]], train_acc=0.9400, test_acc=0.9500
predict: [4 1 2 6 7 1 8 7 5 5 7 6 7 5 8 4 7 4 3 2]
label  : [4 1 2 6 7 1 8 7 5 5 7 6 7 5 8 4 7 4 3 2]

...
Seq2Seq
make additiondl
go run cmd/seq2seq/main.go --dir ./testdata
*model.PeekySeq2Seq
 0: *model.Encoder
 1: *layer.TimeEmbedding: W(13, 64): 832
 2: *layer.TimeLSTM: Wx(64, 512), Wh(128, 512), B(1, 512): 98816
 3: *model.PeekyDecoder
 4: *layer.TimeEmbedding: W(13, 64): 832
 5: *layer.TimeLSTM: Wx(192, 512), Wh(128, 512), B(1, 512): 164352
 6: *layer.TimeAffine: W(256, 13), B(1, 13): 3341
 7: *layer.TimeSoftmaxWithLoss
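
The counts follow the same rule, with the 512 columns of each TimeLSTM packing the four LSTM gates (4 * 128 hidden units): the encoder's Wx(64, 512) + Wh(128, 512) + B(1, 512) gives 64*512 + 128*512 + 512 = 98816. The decoder is "peeky": presumably it concatenates the encoder's final hidden state onto every decoder input, which would explain Wx(192, 512) (embedding 64 + hidden 128) and the TimeAffine's 256 input columns (LSTM output 128 + hidden 128).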

...
[7 + 9 0 6    ] [_ 9 1 3  ]; [9 9 1  ]
[5 4 4 + 4 2  ] [_ 5 8 6  ]; [2 1    ]
[7 2 + 9 2 5  ] [_ 9 9 7  ]; [9 9    ]
[7 8 8 + 2 2 7] [_ 1 0 1 5]; [5 4    ]
[7 8 + 4 6 8  ] [_ 5 4 6  ]; [9 1    ]
[1 + 6 8 0    ] [_ 6 8 1  ]; [2 9 1  ]
[4 + 6 7 0    ] [_ 6 7 4  ]; [9 9    ]
[3 5 4 + 5 7 9] [_ 9 3 3  ]; [2 9 1  ]
[4 3 6 + 8 7 4] [_ 1 3 1 0]; [9 1    ]
[2 6 6 + 9 8 3] [_ 1 2 4 9]; [9 9    ]
20,  0: loss=0.8230, train_acc=0.0000, test_acc=0.0000

[1 9 4 + 7    ] [_ 2 0 1  ]; [2 0 1  ]
[5 4 4 + 4 2  ] [_ 5 8 6  ]; [5 8 6  ]
[7 8 8 + 2 2 7] [_ 1 0 1 5]; [1 0 1 5]
[3 0 + 2 6 1  ] [_ 2 9 1  ]; [2 9 1  ]
[7 + 9 0 6    ] [_ 9 1 3  ]; [9 1 3  ]
[4 + 8 9 6    ] [_ 9 0 0  ]; [9 9    ]
[3 5 4 + 5 7 9] [_ 9 3 3  ]; [2 0 1  ]
[2 6 6 + 9 8 3] [_ 1 2 4 9]; [2 9 2  ]
[6 8 8 + 6 7 0] [_ 1 3 5 8]; [1 0 1 5]
[1 + 6 8 0    ] [_ 6 8 1  ]; [2 0 1  ]
40,  0: loss=0.0912, train_acc=1.0000, test_acc=0.0000
...
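
Each sample line reads input, target, prediction: the first bracket holds the question, the second the true answer prefixed by the start token _, and the third the model's output. For example, [1 9 4 + 7] asks 194 + 7, whose answer [_ 2 0 1] is 201; the model gets it right at the second checkpoint, where it fits the training samples (train_acc=1.0000) while test accuracy is still 0.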

Documentation

Overview

Example (Neuralnet)
package main

import (
	"fmt"

	"github.com/itsubaki/neu/activation"
	"github.com/itsubaki/neu/math/matrix"
)

func main() {
	// weights and biases for a 3-layer network
	W1 := matrix.New([]float64{0.1, 0.3, 0.5}, []float64{0.2, 0.4, 0.6})
	B1 := matrix.New([]float64{0.1, 0.2, 0.3})
	W2 := matrix.New([]float64{0.1, 0.4}, []float64{0.2, 0.5}, []float64{0.3, 0.6})
	B2 := matrix.New([]float64{0.1, 0.2})
	W3 := matrix.New([]float64{0.1, 0.3}, []float64{0.2, 0.4})
	B3 := matrix.New([]float64{0.1, 0.2})

	// data
	x := matrix.New([]float64{1.0, 0.5})

	// forward pass: affine -> sigmoid, affine -> sigmoid, affine -> identity
	A1 := matrix.Dot(x, W1).Add(B1)
	Z1 := matrix.F(A1, activation.Sigmoid)
	A2 := matrix.Dot(Z1, W2).Add(B2)
	Z2 := matrix.F(A2, activation.Sigmoid)
	A3 := matrix.Dot(Z2, W3).Add(B3)
	y := matrix.F(A3, activation.Identity)

	// print
	fmt.Println(A1)
	fmt.Println(Z1)

	fmt.Println(A2)
	fmt.Println(Z2)

	fmt.Println(A3)
	fmt.Println(y)
}
Output:

[[0.30000000000000004 0.7 1.1]]
[[0.574442516811659 0.6681877721681662 0.7502601055951177]]
[[0.5161598377933344 1.2140269561658172]]
[[0.6262493703990729 0.7710106968556123]]
[[0.3168270764110298 0.6962790898619668]]
[[0.3168270764110298 0.6962790898619668]]
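
A quick hand check of the first line: the first entry of A1 is 1.0*0.1 + 0.5*0.2 + 0.1 = 0.3, printed as 0.30000000000000004 due to float64 rounding. The last two lines are identical because the output activation is the identity function.
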
Example (Perceptron)
package main

import (
	"fmt"
)

func main() {
	// a single perceptron: weighted sum of inputs plus bias, thresholded at zero
	f := func(x, w []float64, b float64) int {
		var sum float64
		for i := range x {
			sum = sum + x[i]*w[i]
		}

		v := sum + b
		if v <= 0 {
			return 0
		}

		return 1
	}

	AND := func(x []float64) int { return f(x, []float64{0.5, 0.5}, -0.7) }
	NAND := func(x []float64) int { return f(x, []float64{-0.5, -0.5}, 0.7) }
	OR := func(x []float64) int { return f(x, []float64{0.5, 0.5}, -0.2) }
	XOR := func(x []float64) int { return AND([]float64{float64(NAND(x)), float64(OR(x))}) }

	fmt.Println("AND")
	fmt.Println(AND([]float64{0, 0}))
	fmt.Println(AND([]float64{1, 0}))
	fmt.Println(AND([]float64{0, 1}))
	fmt.Println(AND([]float64{1, 1}))
	fmt.Println()

	fmt.Println("NAND")
	fmt.Println(NAND([]float64{0, 0}))
	fmt.Println(NAND([]float64{1, 0}))
	fmt.Println(NAND([]float64{0, 1}))
	fmt.Println(NAND([]float64{1, 1}))
	fmt.Println()

	fmt.Println("OR")
	fmt.Println(OR([]float64{0, 0}))
	fmt.Println(OR([]float64{1, 0}))
	fmt.Println(OR([]float64{0, 1}))
	fmt.Println(OR([]float64{1, 1}))
	fmt.Println()

	fmt.Println("XOR")
	fmt.Println(XOR([]float64{0, 0}))
	fmt.Println(XOR([]float64{1, 0}))
	fmt.Println(XOR([]float64{0, 1}))
	fmt.Println(XOR([]float64{1, 1}))
	fmt.Println()
}
Output:

AND
0
0
0
1

NAND
1
1
1
0

OR
0
1
1
1

XOR
0
1
1
0
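
A single perceptron draws one linear decision boundary, so it can realize AND, NAND, and OR but not XOR, which is not linearly separable. The example therefore composes two layers, XOR(x) = AND(NAND(x), OR(x)): for x = (1, 0), NAND yields 1 and OR yields 1, so AND(1, 1) = 1.
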
Example (Simplenet)
package main

import (
	"fmt"

	"github.com/itsubaki/neu/activation"
	"github.com/itsubaki/neu/loss"
	"github.com/itsubaki/neu/math/matrix"
	"github.com/itsubaki/neu/math/numerical"
)

func main() {
	// https://github.com/oreilly-japan/deep-learning-from-scratch/wiki/errata#%E7%AC%AC7%E5%88%B7%E3%81%BE%E3%81%A7

	// weight
	W := matrix.New(
		[]float64{0.47355232, 0.99773930, 0.84668094},
		[]float64{0.85557411, 0.03563661, 0.69422093},
	)

	// data
	x := matrix.New([]float64{0.6, 0.9})
	t := []float64{0, 0, 1}

	// predict
	p := matrix.Dot(x, W)
	y := activation.Softmax(p[0])
	e := loss.CrossEntropyError(y, t)

	fmt.Println(p)
	fmt.Println(e)

	// gradient
	// fW ignores its argument: the loss is recomputed from the captured W,
	// whose rows the numerical gradient perturbs in place (see the note below)
	fW := func(w ...float64) float64 {
		p := matrix.Dot(x, W)
		y := activation.Softmax(p[0])
		e := loss.CrossEntropyError(y, t)
		return e
	}

	// grad applies the numerical gradient to each row of x
	grad := func(f func(x ...float64) float64, x matrix.Matrix) matrix.Matrix {
		out := make(matrix.Matrix, 0)
		for _, r := range x {
			out = append(out, numerical.Gradient(f, r))
		}

		return out
	}

	dW := grad(fW, W)
	for _, r := range dW {
		fmt.Println(r)
	}
}
Output:

[[1.054148091 0.630716529 1.132807401]]
0.9280682857864075
[0.2192475712392561 0.14356242984070455 -0.3628100010055757]
[0.3288713569016277 0.21534364482433954 -0.5442150014750569]
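
numerical.Gradient itself is not shown here; below is a minimal sketch of a central-difference implementation consistent with how this example uses it (the step size h = 1e-4 and the exact behavior are assumptions). The key property is that it perturbs x in place and restores it afterwards: the row slices handed to it alias the rows of W, so the fW closure above can ignore its argument and still observe each perturbation through the captured W.

package main

import "fmt"

// gradient approximates each partial derivative with the central
// difference (f(x+h) - f(x-h)) / 2h, perturbing x in place and
// restoring it before moving on. Sketch only; the real
// numerical.Gradient may differ.
func gradient(f func(x ...float64) float64, x []float64) []float64 {
	const h = 1e-4 // assumed step size
	grad := make([]float64, len(x))
	for i := range x {
		tmp := x[i]

		x[i] = tmp + h
		fxh1 := f(x...)

		x[i] = tmp - h
		fxh2 := f(x...)

		grad[i] = (fxh1 - fxh2) / (2 * h)
		x[i] = tmp // restore
	}

	return grad
}

func main() {
	// d/dx (x0^2 + x1^2) = (2*x0, 2*x1); expect roughly [6 8] at (3, 4)
	f := func(x ...float64) float64 { return x[0]*x[0] + x[1]*x[1] }
	fmt.Println(gradient(f, []float64{3, 4}))
}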
