Documentation ¶
Index ¶
- func ArgMax(xx []float32) int
- func Dot(xx, yy []float32) float32
- func Logistic(x, a float32) float32
- func Max(xx []float32) float32
- func Mean(xx []float32) float32
- func Min(xx []float32) float32
- func Normal(stdDev, mean float32) float32
- func Normalize(xx []float32)
- func Round(x float32) float32
- func SetCustomDf(Df func(float32, float32) float32)
- func SetCustomF(F func(float32) float32)
- func Sgn(x float32) float32
- func Softmax(xx []float32) []float32
- func Sqrt(N float32) float32
- func StandardDeviation(xx []float32) float32
- func Standardize(xx []float32)
- func Sum(xx []float32) (sum float32)
- func Uniform(stdDev, mean float32) float32
- func Variance(xx []float32) float32
- type ActivationType
- type BinaryCrossEntropy
- type Config
- type CrossEntropy
- type Custom
- type Differentiable
- type DivX
- type DoubleDiv
- type DoublePow
- type DoubleRoot
- type Dump
- type Layer
- type Linear
- type Loss
- type LossType
- type MeanSquared
- type Mish
- type Mode
- type Neural
- func (n *Neural) ApplyWeights(weights [][][]float32)
- func (n Neural) Dump() *Dump
- func (n *Neural) Forward(input []float32, training bool) error
- func (n Neural) Marshal() ([]byte, error)
- func (n *Neural) NumWeights() (num int)
- func (n *Neural) Predict(input []float32) []float32
- func (n *Neural) String() string
- func (n Neural) Weights() [][][]float32
- type Neuron
- type ReLU
- type RootPow
- type RootSwish
- type RootX
- type Sigmoid
- type Swish
- type Synapse
- type Tanh
- type WeightInitializer
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func SetCustomDf ¶
func SetCustomF ¶
func Standardize ¶
func Standardize(xx []float32)
Standardize (z-score) shifts distribution to μ=0 σ=1
Types ¶
type ActivationType ¶
type ActivationType int
ActivationType represents a neuron activation function
const ( // ActivationNone is no activation ActivationNone ActivationType = 0 // ActivationSigmoid is a sigmoid activation ActivationSigmoid ActivationType = 1 // ActivationTanh is hyperbolic activation ActivationTanh ActivationType = 2 // ActivationReLU is rectified linear unit activation ActivationReLU ActivationType = 3 // ActivationLinear is linear activation ActivationLinear ActivationType = 4 // ActivationSoftmax is a softmax activation (per layer) ActivationSoftmax ActivationType = 5 // ActivationELU is an ELU activation ActivationELU ActivationType = 6 // ActivationSwish is a Swish activation ActivationSwish ActivationType = 7 // ActivationMish is a Mish activation ActivationMish ActivationType = 8 // ActivationCustom is a Custom activation ActivationCustom ActivationType = 9 // ActivationDoubleRoot is a DoubleRoot activation ActivationDoubleRoot ActivationType = 10 // ActivationRootX is a RootX activation ActivationRootX ActivationType = 11 // ActivationDivX is a DivX activation ActivationDivX ActivationType = 12 // ActivationDoubleDiv is a DoubleDiv activation ActivationDoubleDiv ActivationType = 13 // ActivationRootPow is a RootPow activation ActivationRootPow ActivationType = 14 // ActivationDoublePow is a DoublePow activation ActivationDoublePow ActivationType = 15 // ActivationRootSwish is a RootSwish activation ActivationRootSwish ActivationType = 16 )
func OutputActivation ¶
func OutputActivation(c Mode) ActivationType
OutputActivation returns activation corresponding to prediction mode
type BinaryCrossEntropy ¶
type BinaryCrossEntropy struct{}
BinaryCrossEntropy is binary CE loss
func (BinaryCrossEntropy) Df ¶
func (l BinaryCrossEntropy) Df(estimate, ideal, activation float32) float32
Df is CE'(...)
func (BinaryCrossEntropy) F ¶
func (l BinaryCrossEntropy) F(estimate, ideal [][]float32) float32
F is CE(...)
type Config ¶
type Config struct { // Number of inputs Inputs int // Defines topology: // For instance, [5 3 3] signifies a network with two hidden layers // containing 5 and 3 nodes respectively, followed by an output layer // containing 3 nodes. Layout []int // Activation functions: {ActivationTanh, ActivationReLU, ActivationSigmoid} Activation []ActivationType // Solver modes: {ModeRegression, ModeBinary, ModeMultiClass, ModeMultiLabel} Mode Mode // Initializer for weights: {NewNormal(σ, μ), NewUniform(σ, μ)} Weight WeightInitializer `json:"-"` // Loss functions: {LossCrossEntropy, LossBinaryCrossEntropy, LossMeanSquared} Loss LossType // Apply bias nodes Bias bool }
Config defines the network topology, activations, losses etc
type CrossEntropy ¶
type CrossEntropy struct{}
CrossEntropy is CE loss
func (CrossEntropy) Df ¶
func (l CrossEntropy) Df(estimate, ideal, activation float32) float32
Df is CE'(...)
type Differentiable ¶
Differentiable is an activation function and its first order derivative, where the latter is expressed as a function of the former for efficiency
func GetActivation ¶
func GetActivation(act ActivationType) Differentiable
GetActivation returns the concrete activation given an ActivationType
type DoubleDiv ¶
DoubleDiv is a logistic activator in the special case of a = 1
type DoublePow ¶
DoublePow is a logistic activator in the special case of a = 1
type DoubleRoot ¶
DoubleRoot is a logistic activator in the special case of a = 1
func (DoubleRoot) Df ¶
func (a DoubleRoot) Df(x float32) float32
Df is DoubleRoot'(y), where y = DoubleRoot(x)
type Layer ¶
type Layer struct { Neurons []*Neuron A ActivationType }
Layer is a set of neurons and corresponding activation
func NewLayer ¶
func NewLayer(n int, activation ActivationType) *Layer
NewLayer creates a new layer with n nodes
func (*Layer) ApplyBias ¶
func (l *Layer) ApplyBias(weight WeightInitializer) []*Synapse
ApplyBias creates and returns a bias synapse for each neuron in l
func (*Layer) Connect ¶
func (l *Layer) Connect(next *Layer, weight WeightInitializer)
Connect fully connects layer l to next, and initializes each synapse with the given weight function
type Loss ¶
type Loss interface { F(estimate, ideal [][]float32) float32 Df(estimate, ideal, activation float32) float32 }
Loss is satisfied by loss functions
type LossType ¶
type LossType int
LossType represents a loss function
const ( // LossNone signifies unspecified loss LossNone LossType = 0 // LossCrossEntropy is cross entropy loss LossCrossEntropy LossType = 1 // LossBinaryCrossEntropy is the special case of binary cross entropy loss LossBinaryCrossEntropy LossType = 2 // LossMeanSquared is MSE LossMeanSquared LossType = 3 )
type MeanSquared ¶
type MeanSquared struct{}
MeanSquared is MSE loss
func (MeanSquared) Df ¶
func (l MeanSquared) Df(estimate, ideal, activation float32) float32
Df is MSE'(...)
type Mode ¶
type Mode int
Mode denotes inference mode
const ( // ModeDefault is unspecified mode ModeDefault Mode = 0 // ModeMultiClass is for one-hot encoded classification, applies softmax output layer ModeMultiClass Mode = 1 // ModeRegression is regression, applies linear output layer ModeRegression Mode = 2 // ModeBinary is binary classification, applies sigmoid output layer ModeBinary Mode = 3 // ModeMultiLabel is for multilabel classification, applies sigmoid output layer ModeMultiLabel Mode = 4 )
type Neural ¶
type Neural struct { // Shift []float32 // Significance []float32 Layers []*Layer Biases [][]*Synapse Config *Config }
Neural is a neural network
func (*Neural) ApplyWeights ¶
ApplyWeights sets the weights from a three-dimensional slice
func (*Neural) NumWeights ¶
NumWeights returns the number of weights in the network
type Neuron ¶
type Neuron struct { A ActivationType `json:"-"` In []*Synapse Out []*Synapse Value float32 `json:"-"` }
Neuron is a neural network node
func NewNeuron ¶
func NewNeuron(activation ActivationType) *Neuron
NewNeuron returns a neuron with the given activation
type RootPow ¶
RootPow is a logistic activator in the special case of a = 1
type RootX ¶
RootX is a logistic activator in the special case of a = 1
type Synapse ¶
Synapse is an edge between neurons
func NewSynapse ¶
NewSynapse returns a synapse with the specified initialized weight
type WeightInitializer ¶
type WeightInitializer func() float32
A WeightInitializer returns a (random) weight
func NewNormal ¶
func NewNormal(stdDev, mean float32) WeightInitializer
NewNormal returns a normal weight generator
func NewUniform ¶
func NewUniform(stdDev, mean float32) WeightInitializer
NewUniform returns a uniform weight generator