Documentation ¶
Index ¶
- Constants
- Variables
- func NeuronVarIdxByName(varNm string) (int, error)
- func OneHotUS(us US) int
- func SynapseVarByName(varNm string) (int, error)
- func Tensor(us US) etensor.Tensor
- func TensorScaled(us US, scale float32) etensor.Tensor
- func TotalAct(ly emer.Layer) float32
- func TraceVarByName(varNm string) (int, error)
- type AcqExt
- type AmygModPrjn
- type AvgMaxModLayer
- type BlAmygLayer
- type CElAmygLayer
- type CElAmygLayerType
- type Context
- func (ctx Context) Empty() bool
- func (ctx Context) FromString(s string) Inputs
- func (ctx Context) Int() int
- func (ctx Context) OneHot() int
- func (ctx Context) Parts() []int
- func (i Context) String() string
- func (ctx Context) Tensor() etensor.Tensor
- func (ctx Context) TensorScaled(scale float32) etensor.Tensor
- type DALrnRule
- type DaModParams
- type DaRType
- type DelInhState
- type DelayedInhibParams
- type IAmygPrjn
- type IBlAmygLayer
- type ICElAmygLayer
- type IMSNLayer
- type IMSNPrjn
- type IModLayer
- type INetwork
- type ISetScalePrjn
- type IUS
- type Inputs
- type LHBRMTgInternalState
- type LHbRMTgGains
- type LHbRMTgLayer
- type MSNLayer
- func (ly *MSNLayer) AlphaCycInit(updtActAvg bool)
- func (ly *MSNLayer) AsMSNLayer() *MSNLayer
- func (ly *MSNLayer) AsMod() *ModLayer
- func (ly *MSNLayer) Build() error
- func (ly *MSNLayer) ClearMSNTrace()
- func (ly *MSNLayer) Defaults()
- func (ly *MSNLayer) GetDA() float32
- func (ly *MSNLayer) GetMonitorVal(data []string) float64
- func (ly *MSNLayer) InhibFmGeAct(ltime *leabra.Time)
- func (ly *MSNLayer) InitActs()
- func (ly *MSNLayer) ModsFmInc(_ *leabra.Time)
- func (ly *MSNLayer) PoolDelayedInhib(pl *leabra.Pool)
- func (ly *MSNLayer) QuarterInitPrvs(ltime *leabra.Time)
- func (ly *MSNLayer) RecvPrjnVals(vals *[]float32, varNm string, sendLay emer.Layer, sendIdx1D int, ...) error
- func (ly *MSNLayer) SendPrjnVals(vals *[]float32, varNm string, recvLay emer.Layer, recvIdx1D int, ...) error
- func (ly *MSNLayer) SetDA(da float32)
- type MSNParams
- type MSNPrjn
- func (pj *MSNPrjn) AsMSNPrjn() *MSNPrjn
- func (pj *MSNPrjn) Build() error
- func (pj *MSNPrjn) ClearTrace()
- func (pj *MSNPrjn) DWt()
- func (pj *MSNPrjn) Defaults()
- func (pj *MSNPrjn) InitWts()
- func (pj *MSNPrjn) SynVal(varNm string, sidx, ridx int) float32
- func (pj *MSNPrjn) SynVal1D(varIdx int, synIdx int) float32
- func (pj *MSNPrjn) SynVarIdx(varNm string) (int, error)
- type MSNTraceParams
- type ModLayer
- func (ly *ModLayer) ActFmG(_ *leabra.Time)
- func (ly *ModLayer) AddModReceiver(rcvr ModReceiver, scale float32)
- func (ly *ModLayer) AsLeabra() *leabra.Layer
- func (ly *ModLayer) AsMod() *ModLayer
- func (ly *ModLayer) AvgMaxMod(_ *leabra.Time)
- func (ly *ModLayer) Build() error
- func (ly *ModLayer) ClearModActs()
- func (ly *ModLayer) ClearModLevels()
- func (ly *ModLayer) DALrnFmDA(da float32) float32
- func (ly *ModLayer) Defaults()
- func (ly *ModLayer) GScaleFmAvgAct()
- func (ly *ModLayer) GetDA() float32
- func (ly *ModLayer) GetMonitorVal(data []string) float64
- func (ly *ModLayer) Init()
- func (ly *ModLayer) InitActs()
- func (ly *ModLayer) ModSendValue(ni int32) float32
- func (ly *ModLayer) ModUnitVals(vals *[]float32, varNm string) error
- func (ly *ModLayer) ModsFmInc(_ *leabra.Time)
- func (ly *ModLayer) ReceiveMods(sender ModSender, scale float32)
- func (ly *ModLayer) SendMods(_ *leabra.Time)
- func (ly *ModLayer) SetDA(da float32)
- func (ly *ModLayer) UnitVal1D(varIdx int, idx int) float32
- func (ly *ModLayer) UnitValByIdx(vidx ModNeuronVar, idx int) float32
- func (ly *ModLayer) UnitVals(vals *[]float32, varNm string) error
- func (ly *ModLayer) UnitValsTensor(tsr etensor.Tensor, varNm string) error
- func (ly *ModLayer) UnitVarIdx(varNm string) (int, error)
- func (ly *ModLayer) UnitVarNames() []string
- func (ly *ModLayer) UpdateParams()
- type ModNeuron
- type ModNeuronVar
- type ModParams
- type ModPool
- type ModRcvrParams
- type ModReceiver
- type ModSender
- type Modulators
- type NegUS
- type Network
- func (nt *Network) AddBlAmygLayer(name string, nY, nX, nNeurY, nNeurX int, val Valence, dar DaRType, ...) *BlAmygLayer
- func (nt *Network) AddCElAmygLayer(name string, nY, nX, nNeurY, nNeurX int, acqExt AcqExt, val Valence, ...) *CElAmygLayer
- func (nt *Network) AddMSNLayer(name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
- func (nt *Network) AddVTALayer(name string, val Valence) *VTALayer
- func (nt *Network) AsLeabra() *leabra.Network
- func (nt *Network) AvgMaxMod(ltime *leabra.Time)
- func (nt *Network) ClearMSNTraces(_ *leabra.Time)
- func (nt *Network) ClearModActs(_ *leabra.Time)
- func (nt *Network) ConnectLayersActMod(sender ModSender, rcvr ModReceiver, scale float32)
- func (nt *Network) Cycle(ltime *leabra.Time)
- func (nt *Network) CycleImpl(ltime *leabra.Time)
- func (nt *Network) InitActs()
- func (nt *Network) QuarterInitPrvs(ltime *leabra.Time)
- func (nt *Network) RecvModInc(ltime *leabra.Time)
- func (nt *Network) SendMods(ltime *leabra.Time)
- func (nt *Network) SynVarNames() []string
- func (nt *Network) SynVarProps() map[string]string
- func (nt *Network) UnitVarNames() []string
- type PPTgLayer
- func (ly *PPTgLayer) ActFmG(_ *leabra.Time)
- func (ly *PPTgLayer) Build() error
- func (ly *PPTgLayer) Defaults()
- func (ly *PPTgLayer) GetDA() float32
- func (ly *PPTgLayer) GetMonitorVal(data []string) float64
- func (ly *PPTgLayer) InitActs()
- func (ly *PPTgLayer) QuarterFinal(ltime *leabra.Time)
- func (ly *PPTgLayer) SetDA(da float32)
- type PVLayer
- type PackedUSTimeState
- func (ps PackedUSTimeState) Empty() bool
- func (pus PackedUSTimeState) FromString(s string) PackedUSTimeState
- func (ps PackedUSTimeState) Shape() []int
- func (ps PackedUSTimeState) Stim() Stim
- func (ps PackedUSTimeState) String() string
- func (ps PackedUSTimeState) Tensor() etensor.Tensor
- func (ps PackedUSTimeState) TensorScaled(scale float32) etensor.Tensor
- func (ps PackedUSTimeState) US() US
- func (ps PackedUSTimeState) USTimeIn() Tick
- func (ps PackedUSTimeState) Unpack() USTimeState
- func (ps PackedUSTimeState) Valence() Valence
- type PosUS
- type Stim
- type StriatalCompartment
- type Tick
- type TraceSyn
- type US
- type USTimeState
- func (usts USTimeState) Coords() []int
- func (usts USTimeState) CoordsString() string
- func (usts USTimeState) Empty() bool
- func (usts USTimeState) EnumVal() int
- func (usts USTimeState) OneHot(scale float32) etensor.Tensor
- func (usts USTimeState) Pack() PackedUSTimeState
- func (usts USTimeState) String() string
- func (usts USTimeState) Tensor() etensor.Tensor
- func (usts USTimeState) TensorScaleAndAdd(scale float32, other USTimeState) etensor.Tensor
- func (usts USTimeState) TensorScaled(scale float32) etensor.Tensor
- func (usts USTimeState) TsrOffset() []int
- type VTADAGains
- type VTALayer
- func (ly *VTALayer) ActFmG(ltime *leabra.Time)
- func (ly *VTALayer) Build() error
- func (ly *VTALayer) CyclePost(_ *leabra.Time)
- func (ly *VTALayer) Defaults()
- func (ly *VTALayer) GetMonitorVal(data []string) float64
- func (ly *VTALayer) VTAAct(ltime *leabra.Time)
- func (ly *VTALayer) VTAActN(_ *leabra.Time)
- func (ly *VTALayer) VTAActP(_ *leabra.Time)
- type VTAState
- type Valence
Constants ¶
const NoUSTimeIn = 320
Variables ¶
var ( TraceVars = []string{"NTr", "Tr"} SynapseVarProps = map[string]string{ "NTr": `auto-scale:"+"`, "Tr": `auto-scale:"+"`, } TraceVarsMap map[string]int SynapseVarsAll []string )
var ( // ModNeuronVars are the modulator neurons plus some custom variables that sub-types use for their // algo-specific cases -- need a consistent set of overall network-level vars for display / generic // interface. ModNeuronVars = []string{ DA.String(), ACh.String(), SE.String(), ModAct.String(), ModLevel.String(), ModNet.String(), ModLrn.String(), PVAct.String(), } ModNeuronVarsMap map[string]int ModNeuronVarsAll []string )
var ContextInShape = []int{20, 3}
Context
var CtxMap = map[string]Context{ CtxA.String(): CtxA, CtxA_B.String(): CtxA_B, CtxA_C.String(): CtxA_C, CtxB.String(): CtxB, CtxB_B.String(): CtxB_B, CtxB_C.String(): CtxB_C, CtxC.String(): CtxC, CtxC_B.String(): CtxC_B, CtxC_C.String(): CtxC_C, CtxD.String(): CtxD, CtxD_B.String(): CtxD_B, CtxD_C.String(): CtxD_C, CtxE.String(): CtxE, CtxE_B.String(): CtxE_B, CtxE_C.String(): CtxE_C, CtxF.String(): CtxF, CtxF_B.String(): CtxF_B, CtxF_C.String(): CtxF_C, CtxU.String(): CtxU, CtxU_B.String(): CtxU_B, CtxU_C.String(): CtxU_C, CtxV.String(): CtxV, CtxV_B.String(): CtxV_B, CtxV_C.String(): CtxV_C, CtxW.String(): CtxW, CtxW_B.String(): CtxW_B, CtxW_C.String(): CtxW_C, CtxX.String(): CtxX, CtxX_B.String(): CtxX_B, CtxX_C.String(): CtxX_C, CtxY.String(): CtxY, CtxY_B.String(): CtxY_B, CtxY_C.String(): CtxY_C, CtxZ.String(): CtxZ, CtxZ_B.String(): CtxZ_B, CtxZ_C.String(): CtxZ_C, CtxAX.String(): CtxAX, CtxAX_B.String(): CtxAX_B, CtxAX_C.String(): CtxAX_C, CtxAB.String(): CtxAB, CtxAB_B.String(): CtxAB_B, CtxAB_C.String(): CtxAB_C, CtxBY.String(): CtxBY, CtxBY_B.String(): CtxBY_B, CtxBY_C.String(): CtxBY_C, CtxCD.String(): CtxCD, CtxCD_B.String(): CtxCD_B, CtxCD_C.String(): CtxCD_C, CtxCX.String(): CtxCX, CtxCX_B.String(): CtxCX_B, CtxCX_C.String(): CtxCX_C, CtxCY.String(): CtxCY, CtxCY_B.String(): CtxCY_B, CtxCY_C.String(): CtxCY_C, CtxCZ.String(): CtxCZ, CtxCZ_B.String(): CtxCZ_B, CtxCZ_C.String(): CtxCZ_C, CtxDU.String(): CtxDU, }
var CtxRe, _ = regexp.Compile("([ABCDEFUVWXYZ])([ABCDEFUVWXYZ]?)_?([ABCDEFUVWXYZ]?)")
var StimRe, _ = regexp.Compile("([ABCDEFUVWXYZ])([ABCDEFUVWXYZ]?)_(Rf|NR)")
var KiT_AcqExt = kit.Enums.AddEnum(NAcqExt, kit.NotBitFlag, nil)
var KiT_BlAmygLayer = kit.Types.AddType(&BlAmygLayer{}, nil)
var KiT_CElAmygLayer = kit.Types.AddType(&CElAmygLayer{}, nil)
var KiT_Context = kit.Enums.AddEnum(NContexts+1, kit.NotBitFlag, nil)
var KiT_DALrnRule = kit.Enums.AddEnum(DALrnRuleN, kit.NotBitFlag, nil)
var KiT_DaRType = kit.Enums.AddEnum(DaRTypeN, kit.NotBitFlag, nil)
var KiT_LHbRMTgLayer = kit.Types.AddType(&LHbRMTgLayer{}, leabra.LayerProps)
var KiT_MSNLayer = kit.Types.AddType(&MSNLayer{}, leabra.LayerProps)
var KiT_ModLayer = kit.Types.AddType(&ModLayer{}, nil)
var KiT_ModNeuron = kit.Types.AddType(&ModNeuron{}, nil)
var KiT_ModNeuronVar = kit.Enums.AddEnum(ModNeuronVarsN, kit.NotBitFlag, nil)
var KiT_ModParams = kit.Types.AddType(&ModParams{}, nil)
var KiT_Modulators = kit.Types.AddType(&Modulators{}, nil)
var KiT_Network = kit.Types.AddType(&Network{}, NetworkProps)
var KiT_PPTgLayer = kit.Types.AddType(&PPTgLayer{}, leabra.LayerProps)
var KiT_Stim = kit.Enums.AddEnum(StimN+1, kit.NotBitFlag, nil)
var KiT_StriatalCompartment = kit.Enums.AddEnum(NSComp, kit.NotBitFlag, nil)
var KiT_Tick = kit.Enums.AddEnum(TickN+1, kit.NotBitFlag, nil)
var KiT_Valence = kit.Enums.AddEnum(ValenceN, kit.NotBitFlag, nil)
var NegSMap = map[string]NegUS{ Shock.String(): Shock, Nausea.String(): Nausea, Sharp.String(): Sharp, OtherNeg.String(): OtherNeg, }
var NetworkProps = leabra.NetworkProps
var PosSMap = map[string]PosUS{ Water.String(): Water, Food.String(): Food, Mate.String(): Mate, OtherPos.String(): OtherPos, }
var StimInShape = []int{12, 1}
Stim : conditioned stimuli
var StimMap = map[string]Stim{ StmA.String(): StmA, StmB.String(): StmB, StmC.String(): StmC, StmD.String(): StmD, StmE.String(): StmE, StmF.String(): StmF, StmU.String(): StmU, StmV.String(): StmV, StmW.String(): StmW, StmX.String(): StmX, StmY.String(): StmY, StmZ.String(): StmZ, StmNone.String(): StmNone, "": StmNone, }
var StmGrpMap = map[Stim]int{ StmNone: 0, StmA: 1, StmB: 2, StmC: 3, StmD: 1, StmE: 2, StmF: 3, StmX: 4, StmU: 4, StmY: 5, StmV: 5, StmZ: 6, StmW: 7, }
var TickMap = map[string]Tick{ T0.String(): T0, T1.String(): T1, T2.String(): T2, T3.String(): T3, T4.String(): T4, T5.String(): T5, T6.String(): T6, T7.String(): T7, T8.String(): T8, T9.String(): T9, TckNone.String(): TckNone, }
var USInShape = []int{4}
var USNone = US(PosUSNone)
var USTRe, _ = regexp.Compile("([ABCDEFUVWXYZ]?)_?(Pos|Neg)US([0123])_t([01234])")
var USTimeInShape = []int{16, 2, 4, 5}
USTimeIn
Functions ¶
func NeuronVarIdxByName ¶
NeuronVarIdxByName returns the index of the variable in the Neuron, or error
func SynapseVarByName ¶
SynapseVarByName returns variable by name, or error
Types ¶
type AmygModPrjn ¶
type AmygModPrjn struct { leabra.Prjn SetScale bool `` /* 550-byte string literal not displayed */ SetScaleMin float32 `desc:"minimum scale value for SetScale projections"` SetScaleMax float32 `desc:"maximum scale value for SetScale projections"` InitWtVal float32 `` /* 303-byte string literal not displayed */ DALRGain float32 `desc:"gain multiplier on abs(DA) learning rate multiplier"` DALRBase float32 `` /* 176-byte string literal not displayed */ DALrnThr float32 `` /* 234-byte string literal not displayed */ ActDeltaThr float32 `` /* 207-byte string literal not displayed */ ActLrnMod bool `desc:"if true, recv unit deep_lrn value modulates learning"` ActLrnThr float32 `desc:"only ru->deep_lrn values > this get to learn - 0.05f seems to work okay"` DaMod DaModParams `desc:"parameters for dopaminergic modulation"` }
AmygModPrjn holds parameters and state variables for modulatory projections to amygdala layers
func (*AmygModPrjn) AsAmygModPrjn ¶
func (pj *AmygModPrjn) AsAmygModPrjn() *AmygModPrjn
AsAmygModPrjn returns a pointer to the modulatory variables for an amygdala projection
func (*AmygModPrjn) DWt ¶
func (pj *AmygModPrjn) DWt()
DWt computes DA-modulated weight changes for amygdala layers
func (*AmygModPrjn) Defaults ¶
func (pj *AmygModPrjn) Defaults()
func (*AmygModPrjn) GaussScale ¶
func (pj *AmygModPrjn) GaussScale(_, _ int, _, _ *etensor.Shape) float32
GaussScale returns gaussian weight value for given unit indexes in given send and recv layers according to Gaussian Sigma and MaxWt.
func (*AmygModPrjn) InitWts ¶
func (pj *AmygModPrjn) InitWts()
InitWts sets initial weights, possibly including SetScale calculations
type AvgMaxModLayer ¶
type BlAmygLayer ¶
type BlAmygLayer struct { ModLayer `desc:"modulation state"` Valence Valence `desc:"positive or negative valence"` ILI interinhib.InterInhib `desc:"inter-layer inhibition parameters and state"` }
BlAmygLayer contains values specific to BLA layers, including Interlayer Inhibition (ILI)
func (*BlAmygLayer) AsBlAmygLayer ¶
func (ly *BlAmygLayer) AsBlAmygLayer() *BlAmygLayer
AsBlAmygLayer returns a pointer to the layer specifically as a BLA layer.
func (*BlAmygLayer) Build ¶
func (ly *BlAmygLayer) Build() error
func (*BlAmygLayer) Defaults ¶
func (ly *BlAmygLayer) Defaults()
func (*BlAmygLayer) GetMonitorVal ¶
func (ly *BlAmygLayer) GetMonitorVal(data []string) float64
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*BlAmygLayer) InhibFmGeAct ¶
func (ly *BlAmygLayer) InhibFmGeAct(ltime *leabra.Time)
InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools
type CElAmygLayer ¶
type CElAmygLayer struct { ModLayer CElTyp CElAmygLayerType `desc:"basic parameters determining what type CEl layer this is"` AcqDeepMod bool `` /* 216-byte string literal not displayed */ }
func (*CElAmygLayer) AsCElAmygLayer ¶
func (ly *CElAmygLayer) AsCElAmygLayer() *CElAmygLayer
func (*CElAmygLayer) Build ¶
func (ly *CElAmygLayer) Build() error
func (*CElAmygLayer) Defaults ¶
func (ly *CElAmygLayer) Defaults()
type CElAmygLayerType ¶
type Context ¶
type Context int
const ( CtxA Context = iota // A CtxA_B // A_B CtxA_C // A_C CtxB // B CtxB_B // B_B CtxB_C // B_C CtxC // C CtxC_B // C_B CtxC_C // C_C CtxD // D CtxD_B // D_B CtxD_C // D_C CtxE // E CtxE_B // E_B CtxE_C // E_C CtxF // F CtxF_B // F_B CtxF_C // F_C CtxU // U CtxU_B // U_B CtxU_C // U_C CtxV // V CtxV_B // V_B CtxV_C // V_C CtxW // W CtxW_B // W_B CtxW_C // W_C CtxX // X CtxX_B // X_B CtxX_C // X_C CtxY // Y CtxY_B // Y_B CtxY_C // Y_C CtxZ // Z CtxZ_B // Z_B CtxZ_C // Z_C CtxAX // AX CtxAX_B // AX_B CtxAX_C // AX_C CtxAB // AB CtxAB_B // AB_B CtxAB_C // AB_C CtxBY // BY CtxBY_B // BY_B CtxBY_C // BY_C CtxCD // CD CtxCD_B // CD_B CtxCD_C // CD_C CtxCX // CX CtxCX_B // CX_B CtxCX_C // CX_C CtxCY // CY CtxCY_B // CY_B CtxCY_C // CY_C CtxCZ // CZ CtxCZ_B // CZ_B CtxCZ_C // CZ_C CtxDU // DU CtxNone // NoContext NContexts = CtxNone )
func (Context) FromString ¶
type DaModParams ¶ added in v1.1.12
type DaModParams struct { On bool `desc:"whether to use dopamine modulation"` RecepType DaRType `inactive:"+" desc:"dopamine receptor type, D1 or D2"` BurstGain float32 `` /* 173-byte string literal not displayed */ DipGain float32 `` /* 241-byte string literal not displayed */ }
DaModParams specifies parameters shared by all layers that receive dopaminergic modulatory input.
type DaRType ¶
type DaRType int
Dopamine receptor type, for D1R and D2R dopamine receptors
const ( // D1R: primarily expresses Dopamine D1 Receptors -- dopamine is excitatory and bursts of dopamine lead to increases in synaptic weight, while dips lead to decreases -- direct pathway in dorsal striatum D1R DaRType = iota // D2R: primarily expresses Dopamine D2 Receptors -- dopamine is inhibitory and bursts of dopamine lead to decreases in synaptic weight, while dips lead to increases -- indirect pathway in dorsal striatum D2R DaRTypeN )
type DelInhState ¶
type DelInhState struct { GePrvQ float32 `desc:"netin from previous quarter, used for delayed inhibition"` GePrvTrl float32 `desc:"netin from previous \"trial\" (alpha cycle), used for delayed inhibition"` }
DelInhState contains extra variables for MSNLayer neurons -- stored separately
type DelayedInhibParams ¶
type DelayedInhibParams struct { Active bool `desc:"add in a portion of inhibition from previous time period"` PrvQ float32 `desc:"proportion of per-unit net input on previous gamma-frequency quarter to add in as inhibition"` PrvTrl float32 `desc:"proportion of per-unit net input on previous trial to add in as inhibition"` }
Delayed inhibition for matrix compartment layers
type IAmygPrjn ¶
type IAmygPrjn interface {
AsAmygModPrjn() *AmygModPrjn // recast the projection as a modulatory projection
}
IAmygPrjn has one method, AsAmygModPrjn, which recasts the projection as a modulatory projection
type IBlAmygLayer ¶
type IBlAmygLayer interface {
AsBlAmygLayer() *BlAmygLayer
}
IBlAmygLayer has one method, AsBlAmygLayer, that returns a pointer to the layer specifically as a BLA layer.
type ICElAmygLayer ¶
type ICElAmygLayer interface {
AsCElAmygLayer() *CElAmygLayer
}
type ISetScalePrjn ¶
type ISetScalePrjn interface {
InitWts()
}
ISetScalePrjn initializes weights, including special scale calculations
type Inputs ¶
type LHBRMTgInternalState ¶ added in v1.1.12
type LHBRMTgInternalState struct { VSPatchPosD1 float32 VSPatchPosD2 float32 VSPatchNegD1 float32 VSPatchNegD2 float32 VSMatrixPosD1 float32 VSMatrixPosD2 float32 VSMatrixNegD1 float32 VSMatrixNegD2 float32 PosPV float32 NegPV float32 VSPatchPosNet float32 VSPatchNegNet float32 VSMatrixPosNet float32 VSMatrixNegNet float32 NetPos float32 NetNeg float32 }
type LHbRMTgGains ¶
type LHbRMTgGains struct { All float32 `desc:"final overall gain on everything"` VSPatchPosD1 float32 `desc:"patch D1 APPETITIVE pathway - versus pos PV outcomes"` VSPatchPosD2 float32 `desc:"patch D2 APPETITIVE pathway versus vspatch_pos_D1"` VSPatchPosDisinhib float32 `desc:"proportion of positive reward prediction error (RPE) to use if RPE results from a predicted omission of positive"` VSMatrixPosD1 float32 `desc:"gain on VS matrix D1 APPETITIVE guys"` VSMatrixPosD2 float32 `desc:"VS matrix D2 APPETITIVE"` VSPatchNegD1 float32 `desc:"VS patch D1 pathway versus neg PV outcomes"` VSPatchNegD2 float32 `desc:"VS patch D2 pathway versus vspatch_neg_D1"` VSMatrixNegD1 float32 `desc:"VS matrix D1 AVERSIVE"` VSMatrixNegD2 float32 `desc:"VS matrix D2 AVERSIVE"` }
Gain constants for LHbRMTg inputs
type LHbRMTgLayer ¶
type LHbRMTgLayer struct { leabra.Layer RcvFrom emer.LayNames Gains LHbRMTgGains `view:"inline"` PVNegDiscount float32 `` /* 180-byte string literal not displayed */ InternalState LHBRMTgInternalState // for debugging }
func AddLHbRMTgLayer ¶
func AddLHbRMTgLayer(nt *Network, name string) *LHbRMTgLayer
func (*LHbRMTgLayer) ActFmG ¶
func (ly *LHbRMTgLayer) ActFmG(ltime *leabra.Time)
func (*LHbRMTgLayer) Build ¶
func (ly *LHbRMTgLayer) Build() error
func (*LHbRMTgLayer) Defaults ¶
func (ly *LHbRMTgLayer) Defaults()
func (*LHbRMTgLayer) GetMonitorVal ¶
func (ly *LHbRMTgLayer) GetMonitorVal(data []string) float64
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
type MSNLayer ¶
type MSNLayer struct { ModLayer Compartment StriatalCompartment `inactive:"+" desc:"patch or matrix"` DIState []DelInhState `desc:"slice of delayed inhibition state for this layer."` DIParams DelayedInhibParams `view:"no-inline add-fields"` }
func AddMSNLayer ¶
func AddMSNLayer(nt *Network, name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
AddMSNLayer adds a MSNLayer of given size, with given name. nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY, nNeurX neurons. da gives the DaReceptor type (D1R = Go, D2R = NoGo)
func (*MSNLayer) AlphaCycInit ¶
func (*MSNLayer) AsMSNLayer ¶
func (*MSNLayer) Build ¶
Build constructs the layer state, including calling Build on the projections you MUST have properly configured the Inhib.Pool.On setting by this point to properly allocate Pools for the unit groups if necessary.
func (*MSNLayer) ClearMSNTrace ¶
func (ly *MSNLayer) ClearMSNTrace()
func (*MSNLayer) GetMonitorVal ¶
func (*MSNLayer) InhibFmGeAct ¶
InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools this is here for matrix delayed inhibition, not needed otherwise
func (*MSNLayer) PoolDelayedInhib ¶
func (*MSNLayer) QuarterInitPrvs ¶
func (*MSNLayer) RecvPrjnVals ¶
func (*MSNLayer) SendPrjnVals ¶
type MSNParams ¶
type MSNParams struct {
Compartment StriatalCompartment `inactive:"+" desc:"patch or matrix"`
}
Parameters for Dorsal Striatum Medium Spiny Neuron computation
type MSNPrjn ¶
type MSNPrjn struct { leabra.Prjn LearningRule DALrnRule Trace MSNTraceParams `view:"inline" desc:"special parameters for striatum trace learning"` TrSyns []TraceSyn `desc:"trace synaptic state values, ordered by the sending layer units which owns them -- one-to-one with SConIdx array"` SLActVar string `desc:"sending layer activation variable name"` RLActVar string `desc:"receiving layer activation variable name"` MaxVSActMod float32 `` /* 230-byte string literal not displayed */ DaMod DaModParams `desc:"parameters for dopaminergic modulation"` }
MSNPrjn does dopamine-modulated, for striatum-like layers
func (*MSNPrjn) ClearTrace ¶
func (pj *MSNPrjn) ClearTrace()
func (*MSNPrjn) DWt ¶
func (pj *MSNPrjn) DWt()
DWt computes the weight change (learning) -- on sending projections.
type MSNTraceParams ¶
type MSNTraceParams struct { Deriv bool `` /* 305-byte string literal not displayed */ Decay float32 `` /* 294-byte string literal not displayed */ GateLRScale float32 `desc:"learning rate scale factor, if "` }
Params for trace-based learning
func (*MSNTraceParams) Defaults ¶
func (tp *MSNTraceParams) Defaults()
func (*MSNTraceParams) MSNActLrnFactor ¶
func (tp *MSNTraceParams) MSNActLrnFactor(act float32) float32
MSNActLrnFactor returns the multiplicative factor for the level of MSN activation. If Deriv is true, the factor is 2 * act * (1-act) -- the factor of 2 compensates for the otherwise reduced learning from these factors. Otherwise the factor is just act.
type ModLayer ¶
type ModLayer struct { leabra.Layer ModNeurs []ModNeuron `desc:"neuron-level modulation state"` ModPools []ModPool `desc:"pools for maintaining aggregate values"` ModReceivers []ModRcvrParams `desc:"layer names and scale values for mods sent from this layer"` ModParams `desc:"parameters shared by all modulator receiver layers"` DaMod DaModParams `desc:"parameters for dopaminergic modulation"` Modulators `desc:"layer-level neuromodulator levels"` }
ModLayer is a layer that RECEIVES modulatory input
func (*ModLayer) AddModReceiver ¶
func (ly *ModLayer) AddModReceiver(rcvr ModReceiver, scale float32)
AddModReceiver adds a receiving layer to the list of modulatory target layers for a sending layer.
func (*ModLayer) AvgMaxMod ¶
AvgMaxMod runs the standard activation statistics calculation as used for other pools on a layer's ModPools.
func (*ModLayer) ClearModActs ¶
func (ly *ModLayer) ClearModActs()
ClearModActs clears modulatory activation values. This is critical for getting clean results from one trial to the next.
func (*ModLayer) ClearModLevels ¶
func (ly *ModLayer) ClearModLevels()
ClearModLevels resets modulation state variables to their default values for an entire layer.
func (*ModLayer) DALrnFmDA ¶
DALrnFmDA returns effective learning dopamine value from given raw DA value applying Burst and Dip Gain factors, and then reversing sign for D2R. GetDa in cemer
func (*ModLayer) GScaleFmAvgAct ¶
func (ly *ModLayer) GScaleFmAvgAct()
GScaleFmAvgAct sets the value of GScale on incoming projections, based on sending layer subpool activations.
func (*ModLayer) GetMonitorVal ¶
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*ModLayer) InitActs ¶
func (ly *ModLayer) InitActs()
InitActs sets modulation state variables to their default values for a layer, including its pools.
func (*ModLayer) ModSendValue ¶
ModSendValue returns the value of ModSent for one modulatory pool, specified by ni.
func (*ModLayer) ModsFmInc ¶
ModsFmInc sets ModLrn and ModLevel based on individual neuron activation and incoming ModNet values.
If ModNet is below threshold, ModLrn is set to 0, and ModLevel is set to either 0 or 1 depending on the value of the ModNetThreshold parameter.
If ModNet is above threshold, ModLrn for each neuron is set to the ratio of its ModNet input to its subpool activation value, with special cases for extreme values.
func (*ModLayer) ReceiveMods ¶
ReceiveMods computes ModNet, based on the value from the sender, times a scale value.
func (*ModLayer) SendMods ¶
SendMods calculates the level of modulation to send to receivers, based on subpool activations, and calls ReceiveMods for the receivers to process sent values.
func (*ModLayer) UnitVal1D ¶
UnitVal1D returns value of given variable index on given unit, using 1-dimensional index. returns NaN on invalid index. This is the core unit var access method used by other methods, so it is the only one that needs to be updated for derived layer types.
func (*ModLayer) UnitValByIdx ¶
func (ly *ModLayer) UnitValByIdx(vidx ModNeuronVar, idx int) float32
UnitValByIdx returns value of given variable by variable index and flat neuron index (from layer or neuron-specific one).
func (*ModLayer) UnitVals ¶
// UnitVals fills in values of given variable name on unit, // for each unit in the layer, into given float32 slice (only resized if not big enough). // Returns error on invalid var name.
func (*ModLayer) UnitValsTensor ¶
// UnitValsTensor returns values of given variable name on unit // for each unit in the layer, as a float32 tensor in same shape as layer units.
func (*ModLayer) UnitVarIdx ¶
UnitVarIdx returns the index of given variable within the Neuron, according to UnitVarNames() list (using a map to lookup index), or -1 and error message if not found.
func (*ModLayer) UnitVarNames ¶
UnitVarNames returns a list of variable names available on the units in this layer Mod returns *layer level* vars
func (*ModLayer) UpdateParams ¶
func (ly *ModLayer) UpdateParams()
UpdateParams passes on an UpdateParams call to the layer's underlying Leabra layer.
type ModNeuron ¶
type ModNeuron struct { Modulators `desc:"neuron-level modulator activation"` ModAct float32 `desc:"activity level for modulation"` ModLevel float32 `desc:"degree of full modulation to apply"` ModNet float32 `desc:"modulation input from sender"` ModLrn float32 `desc:"multiplier for DA modulation of learning rate"` PVAct float32 `desc:"direct activation from US"` }
ModNeuron encapsulates the variables used by all layers that receive modulatory input
func (*ModNeuron) InitActs ¶
func (mnr *ModNeuron) InitActs()
InitActs sets modulation state variables to their default values for one neuron.
func (*ModNeuron) VarByIndex ¶
VarByIndex returns variable using index (0 = first variable in NeuronVars list)
type ModNeuronVar ¶
type ModNeuronVar int
NeuronVars are indexes into extra neuron-level variables
const ( DA ModNeuronVar = iota ACh SE ModAct ModLevel ModNet ModLrn PVAct Cust1 ModNeuronVarsN )
func (ModNeuronVar) String ¶
func (i ModNeuronVar) String() string
type ModParams ¶
type ModParams struct { Minus float32 `` /* 145-byte string literal not displayed */ Plus float32 `` /* 144-byte string literal not displayed */ NegGain float32 `` /* 214-byte string literal not displayed */ PosGain float32 `` /* 214-byte string literal not displayed */ ActModZero bool `` /* 222-byte string literal not displayed */ ModNetThreshold float32 `` /* 348-byte string literal not displayed */ ModSendThreshold float32 `desc:"threshold for including neuron activation in total to send (for ModNet)"` IsModSender bool `desc:"does this layer send modulation to other layers?"` IsModReceiver bool `desc:"does this layer receive modulation from other layers?"` IsPVReceiver bool `desc:"does this layer receive a direct PV input?"` }
ModParams contains values that control a receiving layer's response to modulatory inputs
type ModPool ¶
type ModPool struct { ModNetStats minmax.AvgMax32 ModSent float32 `desc:"modulation level transmitted to receiver layers"` ModSendThreshold float32 `desc:"threshold for sending modulation. values below this are not added to the pool-level total"` }
ModPool is similar to a standard Pool structure, and uses the same code to compute running statistics.
type ModRcvrParams ¶
type ModRcvrParams struct { RcvName string `desc:"name of receiving layer"` Scale float32 `desc:"scale factor for modulation to this receiver"` }
ModRcvrParams specifies the name of a layer that receives modulatory input, and a scale factor--critical for inputs from large layers such as BLA.
type ModReceiver ¶
type ModReceiver interface { ReceiveMods(sender ModSender, scale float32) // copy incoming modulation values into the layer's own ModNet variable ModsFmInc(ltime *leabra.Time) // set modulation levels }
ModReceiver has one method to integrate incoming modulation (ReceiveMods), and another (ModsFmInc) to set modulation levels from the integrated input.
type Modulators ¶
type Modulators struct { DA float32 `desc:"current dopamine level for this layer"` ACh float32 `desc:"current acetylcholine level for this layer"` SE float32 `desc:"current serotonin level for this layer"` }
Modulators are modulatory neurotransmitters. Currently ACh and SE are only placeholders.
func (*Modulators) InitActs ¶
func (ml *Modulators) InitActs()
InitActs zeroes activation levels for a set of modulator variables.
type Network ¶
func (*Network) AddBlAmygLayer ¶
func (nt *Network) AddBlAmygLayer(name string, nY, nX, nNeurY, nNeurX int, val Valence, dar DaRType, lTyp emer.LayerType) *BlAmygLayer
AddBlAmygLayer adds a Basolateral Amygdala layer with specified 4D geometry, acquisition/extinction, valence, and DA receptor type
func (*Network) AddCElAmygLayer ¶
func (nt *Network) AddCElAmygLayer(name string, nY, nX, nNeurY, nNeurX int, acqExt AcqExt, val Valence, dar DaRType) *CElAmygLayer
AddCElAmygLayer adds a CentroLateral Amygdala layer with specified 4D geometry, acquisition/extinction, valence, and DA receptor type
Geometry is 4D.
nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY * nNeurX neurons. da parameter gives the DaReceptor type (D1R = Go, D2R = NoGo). acqExt (AcqExt) specifies whether this layer is involved with acquisition or extinction. val is positive (appetitive) or negative (aversive) Valence.
func (*Network) AddMSNLayer ¶
func (nt *Network) AddMSNLayer(name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
AddMSNLayer adds a MSNLayer of given size, with given name.
Geometry is 4D.
nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY * nNeurX neurons.
cpmt specifies patch or matrix StriatalCompartment
da parameter gives the DaReceptor type (DaRType) (D1R = Go, D2R = NoGo)
func (*Network) AddVTALayer ¶
AddVTALayer adds a positive or negative Valence VTA layer
func (*Network) ClearMSNTraces ¶
func (*Network) ClearModActs ¶
func (*Network) ConnectLayersActMod ¶
func (nt *Network) ConnectLayersActMod(sender ModSender, rcvr ModReceiver, scale float32)
func (*Network) QuarterInitPrvs ¶
func (*Network) RecvModInc ¶
func (*Network) SynVarNames ¶
func (*Network) SynVarProps ¶
SynVarProps returns properties for variables
func (*Network) UnitVarNames ¶
UnitVarNames returns a list of variable names available on the units in this layer
type PPTgLayer ¶
type PPTgLayer struct { leabra.Layer Ge float32 GePrev float32 SendAct float32 DA float32 DNetGain float32 `desc:"gain on input activation"` ActThreshold float32 `desc:"activation threshold for passing through"` ClampActivation bool `desc:"clamp activation directly, after applying gain"` }
The PPTg passes on a positively-rectified version of its input signal.
func AddPPTgLayer ¶
Add a Pedunculopontine Gyrus layer. Acts as a positive rectifier for its inputs.
func (*PPTgLayer) GetMonitorVal ¶
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*PPTgLayer) QuarterFinal ¶
type PVLayer ¶
Primary Value input layer. Sends activation directly to its receivers, bypassing the standard mechanisms.
func AddPVLayer ¶
func (*PVLayer) AddPVReceiver ¶
func (*PVLayer) GetMonitorVal ¶
type PackedUSTimeState ¶
type PackedUSTimeState int64
const USTimeNone PackedUSTimeState = 0
func PUSTFromString ¶
func PUSTFromString(s string) PackedUSTimeState
func (PackedUSTimeState) Empty ¶
func (ps PackedUSTimeState) Empty() bool
func (PackedUSTimeState) FromString ¶
func (pus PackedUSTimeState) FromString(s string) PackedUSTimeState
func (PackedUSTimeState) Shape ¶
func (ps PackedUSTimeState) Shape() []int
func (PackedUSTimeState) Stim ¶
func (ps PackedUSTimeState) Stim() Stim
func (PackedUSTimeState) String ¶
func (ps PackedUSTimeState) String() string
func (PackedUSTimeState) Tensor ¶
func (ps PackedUSTimeState) Tensor() etensor.Tensor
func (PackedUSTimeState) TensorScaled ¶
func (ps PackedUSTimeState) TensorScaled(scale float32) etensor.Tensor
func (PackedUSTimeState) US ¶
func (ps PackedUSTimeState) US() US
func (PackedUSTimeState) USTimeIn ¶
func (ps PackedUSTimeState) USTimeIn() Tick
func (PackedUSTimeState) Unpack ¶
func (ps PackedUSTimeState) Unpack() USTimeState
func (PackedUSTimeState) Valence ¶
func (ps PackedUSTimeState) Valence() Valence
type PosUS ¶
type PosUS US
positive and negative subtypes of US
func (PosUS) FromString ¶
func (PosUS) PosUSEmpty ¶
type StriatalCompartment ¶
type StriatalCompartment int
const ( PATCH StriatalCompartment = iota MATRIX NSComp )
func (StriatalCompartment) String ¶
func (i StriatalCompartment) String() string
type TraceSyn ¶
type TraceSyn struct { NTr float32 `` /* 136-byte string literal not displayed */ Tr float32 `` /* 183-byte string literal not displayed */ }
TraceSyn holds extra synaptic state for trace projections
func (*TraceSyn) SetVarByIndex ¶
func (*TraceSyn) SetVarByName ¶
SetVarByName sets synapse variable to given value
func (*TraceSyn) VarByIndex ¶
VarByIndex returns variable using index (0 = first variable in SynapseVars list)
type US ¶
type US int
func (US) FromString ¶
func (US) TensorScaled ¶
// TensorScaled returns a tensor for this positive-valence US, with the one-hot
// value scaled by the given factor. It delegates to the package-level
// TensorScaled, converting PosUS to its underlying US type.
// Fixed: previously passed 1.0/scale, inverting the scaling (and risking
// division by zero); the scale factor is now passed through unchanged.
func (pos PosUS) TensorScaled(scale float32) etensor.Tensor { return TensorScaled(US(pos), scale) }
// TensorScaled returns a tensor for this negative-valence US, with the one-hot
// value scaled by the given factor. It delegates to the package-level
// TensorScaled, converting NegUS to its underlying US type.
// Fixed: previously passed 1.0/scale, inverting the scaling (and risking
// division by zero); the scale factor is now passed through unchanged.
func (neg NegUS) TensorScaled(scale float32) etensor.Tensor { return TensorScaled(US(neg), scale) }
type USTimeState ¶
type USTimeState struct { Stm Stim `desc:"CS value"` US US `desc:"a US value or absent (USNone)"` Val Valence `desc:"PV d, POS, NEG, or absent (ValNone)"` Tck Tick `desc:"Within-trial timestep"` }
func USTFromString ¶
func USTFromString(uss string) USTimeState
func (USTimeState) Coords ¶
func (usts USTimeState) Coords() []int
func (USTimeState) CoordsString ¶
func (usts USTimeState) CoordsString() string
func (USTimeState) Empty ¶
func (usts USTimeState) Empty() bool
func (USTimeState) EnumVal ¶
func (usts USTimeState) EnumVal() int
func (USTimeState) Pack ¶
func (usts USTimeState) Pack() PackedUSTimeState
func (USTimeState) String ¶
func (usts USTimeState) String() string
func (USTimeState) Tensor ¶
func (usts USTimeState) Tensor() etensor.Tensor
func (USTimeState) TensorScaleAndAdd ¶
func (usts USTimeState) TensorScaleAndAdd(scale float32, other USTimeState) etensor.Tensor
func (USTimeState) TensorScaled ¶
func (usts USTimeState) TensorScaled(scale float32) etensor.Tensor
func (USTimeState) TsrOffset ¶
func (usts USTimeState) TsrOffset() []int
type VTADAGains ¶
type VTADAGains struct { DA float32 `desc:"overall multiplier for dopamine values"` PPTg float32 `desc:"gain on bursts from PPTg"` LHb float32 `desc:"gain on dips/bursts from LHbRMTg"` PV float32 `desc:"gain on positive PV component of total phasic DA signal (net after subtracting VSPatchIndir (PVi) shunt signal)"` PVIBurstShunt float32 `desc:"gain on VSPatch projection that shunts bursting in VTA (for VTAp = VSPatchPosD1, for VTAn = VSPatchNegD2)"` PVIAntiBurstShunt float32 `desc:"gain on VSPatch projection that opposes shunting of bursting in VTA (for VTAp = VSPatchPosD2, for VTAn = VSPatchNegD1)"` PVIDipShunt float32 `` /* 146-byte string literal not displayed */ PVIAntiDipShunt float32 `desc:"gain on VSPatch projection that opposes the shunting of dipping in VTA (currently only VTAp supported = VSPatchNegD1)"` }
Gain constants for inputs to the VTA
func (*VTADAGains) Defaults ¶
func (dag *VTADAGains) Defaults()
type VTALayer ¶
type VTALayer struct { rl.ClampDaLayer SendVal float32 Valence Valence `desc:"VTA layer DA valence, positive or negative"` TonicDA float32 `desc:"set a tonic 'dopamine' (DA) level (offset to add to da values)"` DAGains VTADAGains `view:"inline" desc:"gains for various VTA inputs"` RecvFrom map[string]emer.Layer InternalState VTAState `desc:"input values--for debugging only"` }
VTA internal state
func (*VTALayer) GetMonitorVal ¶
GetMonitorVal is for monitoring during run. Includes values beyond the scope of neuron fields.
type VTAState ¶ added in v1.1.12
type VTAState struct { PPTgDAp float32 LHbDA float32 PosPVAct float32 VSPosPVI float32 VSNegPVI float32 BurstLHbDA float32 DipLHbDA float32 TotBurstDA float32 TotDipDA float32 NetDipDA float32 NetDA float32 SendVal float32 }
monitoring and debugging only. Received values from all inputs