Documentation ¶
Index ¶
- func ArgMax(xx []float64) int
- func Dot(xx, yy []float64) float64
- func Logistic(x, a float64) float64
- func Max(xx []float64) float64
- func Mean(xx []float64) float64
- func Min(xx []float64) float64
- func Normal(stdDev, mean float64) float64
- func Normalize(xx []float64)
- func Round(x float64) float64
- func Sgn(x float64) float64
- func Softmax(xx []float64) []float64
- func StandardDeviation(xx []float64) float64
- func Standardize(xx []float64)
- func Sum(xx []float64) (sum float64)
- func Uniform(stdDev, mean float64) float64
- func Variance(xx []float64) float64
- type ActivationType
- type BinaryCrossEntropy
- type Config
- type CrossEntropy
- type Differentiable
- type Dump
- type Layer
- type Linear
- type Loss
- type LossType
- type MeanSquared
- type Mode
- type Neural
- func (n *Neural) ApplyWeights(weights [][][]float64)
- func (n Neural) Dump() *Dump
- func (n *Neural) Forward(input []float64) error
- func (n Neural) Marshal() ([]byte, error)
- func (n *Neural) NumWeights() (num int)
- func (n *Neural) Predict(input []float64) []float64
- func (n *Neural) String() string
- func (n Neural) Weights() [][][]float64
- type Neuron
- type ReLU
- type Sigmoid
- type Synapse
- type Tanh
- type WeightInitializer
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func Standardize ¶
func Standardize(xx []float64)
Standardize (z-score) shifts distribution to μ=0, σ=1
Types ¶
type ActivationType ¶
type ActivationType int
ActivationType represents a neuron activation function
const (
	// ActivationNone is no activation
	ActivationNone ActivationType = 0
	// ActivationSigmoid is a sigmoid activation
	ActivationSigmoid ActivationType = 1
	// ActivationTanh is hyperbolic activation
	ActivationTanh ActivationType = 2
	// ActivationReLU is rectified linear unit activation
	ActivationReLU ActivationType = 3
	// ActivationLinear is linear activation
	ActivationLinear ActivationType = 4
	// ActivationSoftmax is a softmax activation (per layer)
	ActivationSoftmax ActivationType = 5
)
func OutputActivation ¶
func OutputActivation(c Mode) ActivationType
OutputActivation returns activation corresponding to prediction mode
type BinaryCrossEntropy ¶
type BinaryCrossEntropy struct{}
BinaryCrossEntropy is binary CE loss
func (BinaryCrossEntropy) Df ¶
func (l BinaryCrossEntropy) Df(estimate, ideal, activation float64) float64
Df is CE'(...)
func (BinaryCrossEntropy) F ¶
func (l BinaryCrossEntropy) F(estimate, ideal [][]float64) float64
F is CE(...)
type Config ¶
type Config struct {
	// Number of inputs
	Inputs int
	// Defines topology:
	// For instance, [5 3 3] signifies a network with two hidden layers
	// containing 5 and 3 nodes respectively, followed by an output layer
	// containing 3 nodes.
	Layout []int
	// Activation functions: {ActivationTanh, ActivationReLU, ActivationSigmoid}
	Activation ActivationType
	// Solver modes: {ModeRegression, ModeBinary, ModeMultiClass, ModeMultiLabel}
	Mode Mode
	// Initializer for weights: {NewNormal(σ, μ), NewUniform(σ, μ)}
	Weight WeightInitializer `json:"-"`
	// Loss functions: {LossCrossEntropy, LossBinaryCrossEntropy, LossMeanSquared}
	Loss LossType
	// Apply bias nodes
	Bias bool
}
Config defines the network topology, activations, losses etc
type CrossEntropy ¶
type CrossEntropy struct{}
CrossEntropy is CE loss
func (CrossEntropy) Df ¶
func (l CrossEntropy) Df(estimate, ideal, activation float64) float64
Df is CE'(...)
type Differentiable ¶
Differentiable is an activation function and its first order derivative, where the latter is expressed as a function of the former for efficiency
func GetActivation ¶
func GetActivation(act ActivationType) Differentiable
GetActivation returns the concrete activation given an ActivationType
type Layer ¶
type Layer struct {
	Neurons []*Neuron
	A       ActivationType
}
Layer is a set of neurons and corresponding activation
func NewLayer ¶
func NewLayer(n int, activation ActivationType) *Layer
NewLayer creates a new layer with n nodes
func (*Layer) ApplyBias ¶
func (l *Layer) ApplyBias(weight WeightInitializer) []*Synapse
ApplyBias creates and returns a bias synapse for each neuron in l
func (*Layer) Connect ¶
func (l *Layer) Connect(next *Layer, weight WeightInitializer)
Connect fully connects layer l to next, and initializes each synapse with the given weight function
type Loss ¶
type Loss interface {
	F(estimate, ideal [][]float64) float64
	Df(estimate, ideal, activation float64) float64
}
Loss is satisfied by loss functions
type LossType ¶
type LossType int
LossType represents a loss function
const (
	// LossNone signifies unspecified loss
	LossNone LossType = 0
	// LossCrossEntropy is cross entropy loss
	LossCrossEntropy LossType = 1
	// LossBinaryCrossEntropy is the special case of binary cross entropy loss
	LossBinaryCrossEntropy LossType = 2
	// LossMeanSquared is MSE
	LossMeanSquared LossType = 3
)
type MeanSquared ¶
type MeanSquared struct{}
MeanSquared is MSE loss
func (MeanSquared) Df ¶
func (l MeanSquared) Df(estimate, ideal, activation float64) float64
Df is MSE'(...)
type Mode ¶
type Mode int
Mode denotes inference mode
const (
	// ModeDefault is unspecified mode
	ModeDefault Mode = 0
	// ModeMultiClass is for one-hot encoded classification, applies softmax output layer
	ModeMultiClass Mode = 1
	// ModeRegression is regression, applies linear output layer
	ModeRegression Mode = 2
	// ModeBinary is binary classification, applies sigmoid output layer
	ModeBinary Mode = 3
	// ModeMultiLabel is for multilabel classification, applies sigmoid output layer
	ModeMultiLabel Mode = 4
)
type Neural ¶
Neural is a neural network
func (*Neural) ApplyWeights ¶
ApplyWeights sets the weights from a three-dimensional slice
func (*Neural) NumWeights ¶
NumWeights returns the number of weights in the network
type Neuron ¶
type Neuron struct {
	A     ActivationType `json:"-"`
	In    []*Synapse
	Out   []*Synapse
	Value float64 `json:"-"`
}
Neuron is a neural network node
func NewNeuron ¶
func NewNeuron(activation ActivationType) *Neuron
NewNeuron returns a neuron with the given activation
type Synapse ¶
Synapse is an edge between neurons
func NewSynapse ¶
NewSynapse returns a synapse with the specified initialized weight
type WeightInitializer ¶
type WeightInitializer func() float64
A WeightInitializer returns a (random) weight
func NewNormal ¶
func NewNormal(stdDev, mean float64) WeightInitializer
NewNormal returns a normal weight generator
func NewUniform ¶
func NewUniform(stdDev, mean float64) WeightInitializer
NewUniform returns a uniform weight generator