Documentation ¶
Index ¶
- Constants
- Variables
- func NeuronVarIdxByName(varNm string) (int, error)
- func OneHotUS(us US) int
- func SynapseVarByName(varNm string) (int, error)
- func Tensor(us US) etensor.Tensor
- func TensorScaled(us US, scale float32) etensor.Tensor
- func TotalAct(ly emer.Layer) float32
- func TraceVarByName(varNm string) (int, error)
- type AcqExt
- type AmygModPrjn
- type AvgMaxModLayer
- type BlAmygLayer
- type CElAmygLayer
- type CElAmygLayerType
- type Context
- func (ctx Context) Empty() bool
- func (ctx Context) FromString(s string) Inputs
- func (ctx Context) Int() int
- func (ctx Context) OneHot() int
- func (ctx Context) Parts() []int
- func (i Context) String() string
- func (ctx Context) Tensor() etensor.Tensor
- func (ctx Context) TensorScaled(scale float32) etensor.Tensor
- type DALrnRule
- type DaModParams
- type DaRType
- type DelInhState
- type DelayedInhibParams
- type IAmygPrjn
- type IBlAmygLayer
- type ICElAmygLayer
- type IMSNLayer
- type IMSNPrjn
- type IModLayer
- type INetwork
- type ISetScalePrjn
- type IUS
- type Inputs
- type LHBRMTgInternalState
- type LHbRMTgGains
- type LHbRMTgLayer
- type MSNLayer
- func (ly *MSNLayer) AlphaCycInit(updtActAvg bool)
- func (ly *MSNLayer) AsMSNLayer() *MSNLayer
- func (ly *MSNLayer) AsMod() *ModLayer
- func (ly *MSNLayer) Build() error
- func (ly *MSNLayer) ClearMSNTrace()
- func (ly *MSNLayer) Defaults()
- func (ly *MSNLayer) GetDA() float32
- func (ly *MSNLayer) GetMonitorVal(data []string) float64
- func (ly *MSNLayer) InhibFmGeAct(ltime *leabra.Time)
- func (ly *MSNLayer) InitActs()
- func (ly *MSNLayer) ModsFmInc(_ *leabra.Time)
- func (ly *MSNLayer) PoolDelayedInhib(pl *leabra.Pool)
- func (ly *MSNLayer) QuarterInitPrvs(ltime *leabra.Time)
- func (ly *MSNLayer) RecvPrjnVals(vals *[]float32, varNm string, sendLay emer.Layer, sendIdx1D int, ...) error
- func (ly *MSNLayer) SendPrjnVals(vals *[]float32, varNm string, recvLay emer.Layer, recvIdx1D int, ...) error
- func (ly *MSNLayer) SetDA(da float32)
- type MSNParams
- type MSNPrjn
- func (pj *MSNPrjn) AsMSNPrjn() *MSNPrjn
- func (pj *MSNPrjn) Build() error
- func (pj *MSNPrjn) ClearTrace()
- func (pj *MSNPrjn) DWt()
- func (pj *MSNPrjn) Defaults()
- func (pj *MSNPrjn) InitWts()
- func (pj *MSNPrjn) SynVal(varNm string, sidx, ridx int) float32
- func (pj *MSNPrjn) SynVal1D(varIdx int, synIdx int) float32
- func (pj *MSNPrjn) SynVarIdx(varNm string) (int, error)
- type MSNTraceParams
- type ModLayer
- func (ly *ModLayer) ActFmG(_ *leabra.Time)
- func (ly *ModLayer) AddModReceiver(rcvr ModReceiver, scale float32)
- func (ly *ModLayer) AsLeabra() *leabra.Layer
- func (ly *ModLayer) AsMod() *ModLayer
- func (ly *ModLayer) AvgMaxMod(_ *leabra.Time)
- func (ly *ModLayer) Build() error
- func (ly *ModLayer) ClearModActs()
- func (ly *ModLayer) ClearModLevels()
- func (ly *ModLayer) DALrnFmDA(da float32) float32
- func (ly *ModLayer) Defaults()
- func (ly *ModLayer) GScaleFmAvgAct()
- func (ly *ModLayer) GetDA() float32
- func (ly *ModLayer) GetMonitorVal(data []string) float64
- func (ly *ModLayer) Init()
- func (ly *ModLayer) InitActs()
- func (ly *ModLayer) ModSendValue(ni int32) float32
- func (ly *ModLayer) ModUnitVals(vals *[]float32, varNm string) error
- func (ly *ModLayer) ModsFmInc(_ *leabra.Time)
- func (ly *ModLayer) ReceiveMods(sender ModSender, scale float32)
- func (ly *ModLayer) SendMods(_ *leabra.Time)
- func (ly *ModLayer) SetDA(da float32)
- func (ly *ModLayer) UnitVal1D(varIdx int, idx int) float32
- func (ly *ModLayer) UnitValByIdx(vidx ModNeuronVar, idx int) float32
- func (ly *ModLayer) UnitVals(vals *[]float32, varNm string) error
- func (ly *ModLayer) UnitValsTensor(tsr etensor.Tensor, varNm string) error
- func (ly *ModLayer) UnitVarIdx(varNm string) (int, error)
- func (ly *ModLayer) UnitVarNames() []string
- func (ly *ModLayer) UpdateParams()
- type ModNeuron
- type ModNeuronVar
- type ModParams
- type ModPool
- type ModRcvrParams
- type ModReceiver
- type ModSender
- type Modulators
- type NegUS
- type Network
- func (nt *Network) AddBlAmygLayer(name string, nY, nX, nNeurY, nNeurX int, val Valence, dar DaRType, ...) *BlAmygLayer
- func (nt *Network) AddCElAmygLayer(name string, nY, nX, nNeurY, nNeurX int, acqExt AcqExt, val Valence, ...) *CElAmygLayer
- func (nt *Network) AddMSNLayer(name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
- func (nt *Network) AddVTALayer(name string, val Valence) *VTALayer
- func (nt *Network) AsLeabra() *leabra.Network
- func (nt *Network) AvgMaxMod(ltime *leabra.Time)
- func (nt *Network) ClearMSNTraces(_ *leabra.Time)
- func (nt *Network) ClearModActs(_ *leabra.Time)
- func (nt *Network) ConnectLayersActMod(sender ModSender, rcvr ModReceiver, scale float32)
- func (nt *Network) Cycle(ltime *leabra.Time)
- func (nt *Network) CycleImpl(ltime *leabra.Time)
- func (nt *Network) InitActs()
- func (nt *Network) QuarterInitPrvs(ltime *leabra.Time)
- func (nt *Network) RecvModInc(ltime *leabra.Time)
- func (nt *Network) SendMods(ltime *leabra.Time)
- func (nt *Network) SynVarNames() []string
- func (nt *Network) SynVarProps() map[string]string
- func (nt *Network) UnitVarNames() []string
- type PPTgLayer
- func (ly *PPTgLayer) ActFmG(_ *leabra.Time)
- func (ly *PPTgLayer) Build() error
- func (ly *PPTgLayer) Defaults()
- func (ly *PPTgLayer) GetDA() float32
- func (ly *PPTgLayer) GetMonitorVal(data []string) float64
- func (ly *PPTgLayer) InitActs()
- func (ly *PPTgLayer) QuarterFinal(ltime *leabra.Time)
- func (ly *PPTgLayer) SetDA(da float32)
- type PVLayer
- type PackedUSTimeState
- func (ps PackedUSTimeState) Empty() bool
- func (pus PackedUSTimeState) FromString(s string) PackedUSTimeState
- func (ps PackedUSTimeState) Shape() []int
- func (ps PackedUSTimeState) Stim() Stim
- func (ps PackedUSTimeState) String() string
- func (ps PackedUSTimeState) Tensor() etensor.Tensor
- func (ps PackedUSTimeState) TensorScaled(scale float32) etensor.Tensor
- func (ps PackedUSTimeState) US() US
- func (ps PackedUSTimeState) USTimeIn() Tick
- func (ps PackedUSTimeState) Unpack() USTimeState
- func (ps PackedUSTimeState) Valence() Valence
- type PosUS
- type Stim
- type StriatalCompartment
- type Tick
- type TraceSyn
- type US
- type USTimeState
- func (usts USTimeState) Coords() []int
- func (usts USTimeState) CoordsString() string
- func (usts USTimeState) Empty() bool
- func (usts USTimeState) EnumVal() int
- func (usts USTimeState) OneHot(scale float32) etensor.Tensor
- func (usts USTimeState) Pack() PackedUSTimeState
- func (usts USTimeState) String() string
- func (usts USTimeState) Tensor() etensor.Tensor
- func (usts USTimeState) TensorScaleAndAdd(scale float32, other USTimeState) etensor.Tensor
- func (usts USTimeState) TensorScaled(scale float32) etensor.Tensor
- func (usts USTimeState) TsrOffset() []int
- type VTADAGains
- type VTALayer
- func (ly *VTALayer) ActFmG(ltime *leabra.Time)
- func (ly *VTALayer) Build() error
- func (ly *VTALayer) CyclePost(_ *leabra.Time)
- func (ly *VTALayer) Defaults()
- func (ly *VTALayer) GetMonitorVal(data []string) float64
- func (ly *VTALayer) VTAAct(ltime *leabra.Time)
- func (ly *VTALayer) VTAActN(_ *leabra.Time)
- func (ly *VTALayer) VTAActP(_ *leabra.Time)
- type VTAState
- type Valence
Constants ¶
const NoUSTimeIn = 320
Variables ¶
var (
    TraceVars       = []string{"NTr", "Tr"}
    SynapseVarProps = map[string]string{
        "NTr": `auto-scale:"+"`,
        "Tr":  `auto-scale:"+"`,
    }
    TraceVarsMap   map[string]int
    SynapseVarsAll []string
)
var (
    // ModNeuronVars are the modulator neurons plus some custom variables that sub-types use for their
    // algo-specific cases -- need a consistent set of overall network-level vars for display / generic
    // interface.
    ModNeuronVars = []string{
        DA.String(), ACh.String(), SE.String(),
        ModAct.String(), ModLevel.String(), ModNet.String(), ModLrn.String(),
        PVAct.String(),
    }
    ModNeuronVarsMap map[string]int
    ModNeuronVarsAll []string
)
var ContextInShape = []int{20, 3}
Context
var CtxMap = map[string]Context{ CtxA.String(): CtxA, CtxA_B.String(): CtxA_B, CtxA_C.String(): CtxA_C, CtxB.String(): CtxB, CtxB_B.String(): CtxB_B, CtxB_C.String(): CtxB_C, CtxC.String(): CtxC, CtxC_B.String(): CtxC_B, CtxC_C.String(): CtxC_C, CtxD.String(): CtxD, CtxD_B.String(): CtxD_B, CtxD_C.String(): CtxD_C, CtxE.String(): CtxE, CtxE_B.String(): CtxE_B, CtxE_C.String(): CtxE_C, CtxF.String(): CtxF, CtxF_B.String(): CtxF_B, CtxF_C.String(): CtxF_C, CtxU.String(): CtxU, CtxU_B.String(): CtxU_B, CtxU_C.String(): CtxU_C, CtxV.String(): CtxV, CtxV_B.String(): CtxV_B, CtxV_C.String(): CtxV_C, CtxW.String(): CtxW, CtxW_B.String(): CtxW_B, CtxW_C.String(): CtxW_C, CtxX.String(): CtxX, CtxX_B.String(): CtxX_B, CtxX_C.String(): CtxX_C, CtxY.String(): CtxY, CtxY_B.String(): CtxY_B, CtxY_C.String(): CtxY_C, CtxZ.String(): CtxZ, CtxZ_B.String(): CtxZ_B, CtxZ_C.String(): CtxZ_C, CtxAX.String(): CtxAX, CtxAX_B.String(): CtxAX_B, CtxAX_C.String(): CtxAX_C, CtxAB.String(): CtxAB, CtxAB_B.String(): CtxAB_B, CtxAB_C.String(): CtxAB_C, CtxBY.String(): CtxBY, CtxBY_B.String(): CtxBY_B, CtxBY_C.String(): CtxBY_C, CtxCD.String(): CtxCD, CtxCD_B.String(): CtxCD_B, CtxCD_C.String(): CtxCD_C, CtxCX.String(): CtxCX, CtxCX_B.String(): CtxCX_B, CtxCX_C.String(): CtxCX_C, CtxCY.String(): CtxCY, CtxCY_B.String(): CtxCY_B, CtxCY_C.String(): CtxCY_C, CtxCZ.String(): CtxCZ, CtxCZ_B.String(): CtxCZ_B, CtxCZ_C.String(): CtxCZ_C, CtxDU.String(): CtxDU, }
var CtxRe, _ = regexp.Compile("([ABCDEFUVWXYZ])([ABCDEFUVWXYZ]?)_?([ABCDEFUVWXYZ]?)")
var StimRe, _ = regexp.Compile("([ABCDEFUVWXYZ])([ABCDEFUVWXYZ]?)_(Rf|NR)")
var KiT_AcqExt = kit.Enums.AddEnum(NAcqExt, kit.NotBitFlag, nil)
var KiT_BlAmygLayer = kit.Types.AddType(&BlAmygLayer{}, nil)
var KiT_CElAmygLayer = kit.Types.AddType(&CElAmygLayer{}, nil)
var KiT_Context = kit.Enums.AddEnum(NContexts+1, kit.NotBitFlag, nil)
var KiT_DALrnRule = kit.Enums.AddEnum(DALrnRuleN, kit.NotBitFlag, nil)
var KiT_DaRType = kit.Enums.AddEnum(DaRTypeN, kit.NotBitFlag, nil)
var KiT_LHbRMTgLayer = kit.Types.AddType(&LHbRMTgLayer{}, leabra.LayerProps)
var KiT_MSNLayer = kit.Types.AddType(&MSNLayer{}, leabra.LayerProps)
var KiT_ModLayer = kit.Types.AddType(&ModLayer{}, nil)
var KiT_ModNeuron = kit.Types.AddType(&ModNeuron{}, nil)
var KiT_ModNeuronVar = kit.Enums.AddEnum(ModNeuronVarsN, kit.NotBitFlag, nil)
var KiT_ModParams = kit.Types.AddType(&ModParams{}, nil)
var KiT_Modulators = kit.Types.AddType(&Modulators{}, nil)
var KiT_Network = kit.Types.AddType(&Network{}, NetworkProps)
var KiT_PPTgLayer = kit.Types.AddType(&PPTgLayer{}, leabra.LayerProps)
var KiT_Stim = kit.Enums.AddEnum(StimN+1, kit.NotBitFlag, nil)
var KiT_StriatalCompartment = kit.Enums.AddEnum(NSComp, kit.NotBitFlag, nil)
var KiT_Tick = kit.Enums.AddEnum(TickN+1, kit.NotBitFlag, nil)
var KiT_Valence = kit.Enums.AddEnum(ValenceN, kit.NotBitFlag, nil)
var NegSMap = map[string]NegUS{ Shock.String(): Shock, Nausea.String(): Nausea, Sharp.String(): Sharp, OtherNeg.String(): OtherNeg, }
var NetworkProps = leabra.NetworkProps
var PosSMap = map[string]PosUS{ Water.String(): Water, Food.String(): Food, Mate.String(): Mate, OtherPos.String(): OtherPos, }
var StimInShape = []int{12, 1}
Stim : conditioned stimuli
var StimMap = map[string]Stim{ StmA.String(): StmA, StmB.String(): StmB, StmC.String(): StmC, StmD.String(): StmD, StmE.String(): StmE, StmF.String(): StmF, StmU.String(): StmU, StmV.String(): StmV, StmW.String(): StmW, StmX.String(): StmX, StmY.String(): StmY, StmZ.String(): StmZ, StmNone.String(): StmNone, "": StmNone, }
var StmGrpMap = map[Stim]int{ StmNone: 0, StmA: 1, StmB: 2, StmC: 3, StmD: 1, StmE: 2, StmF: 3, StmX: 4, StmU: 4, StmY: 5, StmV: 5, StmZ: 6, StmW: 7, }
var TickMap = map[string]Tick{ T0.String(): T0, T1.String(): T1, T2.String(): T2, T3.String(): T3, T4.String(): T4, T5.String(): T5, T6.String(): T6, T7.String(): T7, T8.String(): T8, T9.String(): T9, TckNone.String(): TckNone, }
var USInShape = []int{4}
var USNone = US(PosUSNone)
var USTRe, _ = regexp.Compile("([ABCDEFUVWXYZ]?)_?(Pos|Neg)US([0123])_t([01234])")
var USTimeInShape = []int{16, 2, 4, 5}
USTimeIn
Functions ¶
func NeuronVarIdxByName ¶
NeuronVarIdxByName returns the index of the variable in the Neuron, or error
func SynapseVarByName ¶
SynapseVarByName returns the index of the named synapse variable, or an error if not found
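A minimal usage sketch of these by-name lookups. The import path github.com/emer/leabra/pvlv is an assumption (it is not shown on this page), and "ModNet" is used as an example name because it appears in ModNeuronVars below.

package main

import (
    "fmt"

    "github.com/emer/leabra/pvlv" // assumed import path
)

func main() {
    // Index of the "Tr" trace variable within the synapse variables.
    si, err := pvlv.SynapseVarByName("Tr")
    if err != nil {
        fmt.Println("unknown synapse variable:", err)
        return
    }
    // Index of the "ModNet" variable within the neuron-level variables.
    ni, err := pvlv.NeuronVarIdxByName("ModNet")
    if err != nil {
        fmt.Println("unknown neuron variable:", err)
        return
    }
    fmt.Println("Tr index:", si, "ModNet index:", ni)
}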
Types ¶
type AmygModPrjn ¶
type AmygModPrjn struct {
    leabra.Prjn

    // only for Leabra algorithm: if initializing the weights, set the connection scaling parameter in addition to initializing the weights -- for specifically-supported specs, this will for example set a gaussian scaling parameter on top of random initial weights, instead of just setting the initial weights to a gaussian weighted value -- for other specs that do not support a custom init_wts function, this will set the scale values to what the random weights would otherwise be set to, and set the initial weight value to a constant (init_wt_val)
    SetScale bool `` /* 550-byte string literal not displayed */

    // minimum scale value for SetScale projections
    SetScaleMin float32 `desc:"minimum scale value for SetScale projections"`

    // maximum scale value for SetScale projections
    SetScaleMax float32 `desc:"maximum scale value for SetScale projections"`

    // constant initial weight value for specs that do not support a custom init_wts function and have set_scale set: the scale values are set to what the random weights would otherwise be set to, and the initial weight value is set to this constant: the net actual weight value is scale * init_wt_val..
    InitWtVal float32 `` /* 303-byte string literal not displayed */

    // gain multiplier on abs(DA) learning rate multiplier
    DALRGain float32 `desc:"gain multiplier on abs(DA) learning rate multiplier"`

    // constant baseline amount of learning prior to abs(DA) factor -- should be near zero otherwise offsets in activation will drive learning in the absence of DA significance
    DALRBase float32 `` /* 176-byte string literal not displayed */

    // minimum threshold for phasic abs(da) signals to count as non-zero; useful to screen out spurious da signals due to tiny VSPatch-to-LHb signals on t2 & t4 timesteps that can accumulate over many trials - 0.02 seems to work okay
    DALrnThr float32 `` /* 234-byte string literal not displayed */

    // minimum threshold for delta activation to count as non-zero; useful to screen out spurious learning due to unintended delta activity - 0.02 seems to work okay for both acquisition and extinction guys
    ActDeltaThr float32 `` /* 207-byte string literal not displayed */

    // if true, recv unit deep_lrn value modulates learning
    ActLrnMod bool `desc:"if true, recv unit deep_lrn value modulates learning"`

    // only ru->deep_lrn values > this get to learn - 0.05f seems to work okay
    ActLrnThr float32 `desc:"only ru->deep_lrn values > this get to learn - 0.05f seems to work okay"`

    // parameters for dopaminergic modulation
    DaMod DaModParams `desc:"parameters for dopaminergic modulation"`
}
AmygModPrjn holds parameters and state variables for modulatory projections to amygdala layers
func (*AmygModPrjn) AsAmygModPrjn ¶
func (pj *AmygModPrjn) AsAmygModPrjn() *AmygModPrjn
AsAmygModPrjn returns a pointer to the modulatory variables for an amygdala projection
func (*AmygModPrjn) DWt ¶
func (pj *AmygModPrjn) DWt()
DWt computes DA-modulated weight changes for amygdala layers
func (*AmygModPrjn) Defaults ¶
func (pj *AmygModPrjn) Defaults()
func (*AmygModPrjn) GaussScale ¶
func (pj *AmygModPrjn) GaussScale(_, _ int, _, _ *etensor.Shape) float32
GaussScale returns gaussian weight value for given unit indexes in given send and recv layers according to Gaussian Sigma and MaxWt.
func (*AmygModPrjn) InitWts ¶
func (pj *AmygModPrjn) InitWts()
InitWts sets initial weights, possibly including SetScale calculations
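A hedged configuration sketch for these parameters, assuming the pvlv package import from the earlier example. Field names come from the struct above; the numeric values and the pattern of setting them right after Defaults are illustrative only, not prescribed by the package.

// Sketch: configure an AmygModPrjn before the network is built.
pj := &pvlv.AmygModPrjn{}
pj.Defaults()
pj.SetScale = true    // also set connection scaling when initializing weights
pj.SetScaleMin = 0.1  // clamp range for the SetScale values (illustrative)
pj.SetScaleMax = 0.7
pj.InitWtVal = 0.25   // constant initial weight used with SetScale
pj.DALRGain = 1.0     // gain on abs(DA) learning-rate factor
pj.DALrnThr = 0.02    // screen out tiny phasic DA signals
pj.ActDeltaThr = 0.02 // screen out tiny activation deltas
// InitWts (documented above) applies the SetScale calculations once the
// projection is connected and the network is built.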
type AvgMaxModLayer ¶
type BlAmygLayer ¶
type BlAmygLayer struct {
    // modulation state
    ModLayer `desc:"modulation state"`
    // positive or negative valence
    Valence Valence `desc:"positive or negative valence"`
    // inter-layer inhibition parameters and state
    ILI interinhib.InterInhib `desc:"inter-layer inhibition parameters and state"`
}
BlAmygLayer contains values specific to BLA layers, including Interlayer Inhibition (ILI)
func (*BlAmygLayer) AsBlAmygLayer ¶
func (ly *BlAmygLayer) AsBlAmygLayer() *BlAmygLayer
AsBlAmygLayer returns a pointer to the layer specifically as a BLA layer.
func (*BlAmygLayer) Build ¶
func (ly *BlAmygLayer) Build() error
func (*BlAmygLayer) Defaults ¶
func (ly *BlAmygLayer) Defaults()
func (*BlAmygLayer) GetMonitorVal ¶
func (ly *BlAmygLayer) GetMonitorVal(data []string) float64
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*BlAmygLayer) InhibFmGeAct ¶
func (ly *BlAmygLayer) InhibFmGeAct(ltime *leabra.Time)
InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools
type CElAmygLayer ¶
type CElAmygLayer struct {
    ModLayer
    // basic parameters determining what type CEl layer this is
    CElTyp CElAmygLayerType `desc:"basic parameters determining what type CEl layer this is"`
    // use deep_mod_net for value from acquisition / go units, instead of inhibition current (otherwise use gi_syn) -- allows simpler parameter setting without titrating inhibition and this learning modulation signal
    AcqDeepMod bool `` /* 216-byte string literal not displayed */
}
func (*CElAmygLayer) AsCElAmygLayer ¶
func (ly *CElAmygLayer) AsCElAmygLayer() *CElAmygLayer
func (*CElAmygLayer) Build ¶
func (ly *CElAmygLayer) Build() error
func (*CElAmygLayer) Defaults ¶
func (ly *CElAmygLayer) Defaults()
type CElAmygLayerType ¶
type Context ¶
type Context int
const ( CtxA Context = iota // A CtxA_B // A_B CtxA_C // A_C CtxB // B CtxB_B // B_B CtxB_C // B_C CtxC // C CtxC_B // C_B CtxC_C // C_C CtxD // D CtxD_B // D_B CtxD_C // D_C CtxE // E CtxE_B // E_B CtxE_C // E_C CtxF // F CtxF_B // F_B CtxF_C // F_C CtxU // U CtxU_B // U_B CtxU_C // U_C CtxV // V CtxV_B // V_B CtxV_C // V_C CtxW // W CtxW_B // W_B CtxW_C // W_C CtxX // X CtxX_B // X_B CtxX_C // X_C CtxY // Y CtxY_B // Y_B CtxY_C // Y_C CtxZ // Z CtxZ_B // Z_B CtxZ_C // Z_C CtxAX // AX CtxAX_B // AX_B CtxAX_C // AX_C CtxAB // AB CtxAB_B // AB_B CtxAB_C // AB_C CtxBY // BY CtxBY_B // BY_B CtxBY_C // BY_C CtxCD // CD CtxCD_B // CD_B CtxCD_C // CD_C CtxCX // CX CtxCX_B // CX_B CtxCX_C // CX_C CtxCY // CY CtxCY_B // CY_B CtxCY_C // CY_C CtxCZ // CZ CtxCZ_B // CZ_B CtxCZ_C // CZ_C CtxDU // DU CtxNone // NoContext NContexts = CtxNone )
func (Context) FromString ¶
type DaModParams ¶ added in v1.1.12
type DaModParams struct {
    // whether to use dopamine modulation
    On bool `desc:"whether to use dopamine modulation"`
    // dopamine receptor type, D1 or D2
    RecepType DaRType `inactive:"+" desc:"dopamine receptor type, D1 or D2"`
    // multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!
    BurstGain float32 `` /* 173-byte string literal not displayed */
    // multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst_da_gain for ext
    DipGain float32 `` /* 241-byte string literal not displayed */
}
DaModParams specifies parameters shared by all layers that receive dopaminergic modulatory input.
type DaRType ¶
type DaRType int
Dopamine receptor type, for D1R and D2R dopamine receptors
const (
    // D1R: primarily expresses Dopamine D1 Receptors -- dopamine is excitatory and bursts of dopamine lead to increases in synaptic weight, while dips lead to decreases -- direct pathway in dorsal striatum
    D1R DaRType = iota
    // D2R: primarily expresses Dopamine D2 Receptors -- dopamine is inhibitory and bursts of dopamine lead to decreases in synaptic weight, while dips lead to increases -- indirect pathway in dorsal striatum
    D2R
    DaRTypeN
)
type DelInhState ¶
type DelInhState struct {
    // netin from previous quarter, used for delayed inhibition
    GePrvQ float32 `desc:"netin from previous quarter, used for delayed inhibition"`
    // netin from previous "trial" (alpha cycle), used for delayed inhibition
    GePrvTrl float32 `desc:"netin from previous \"trial\" (alpha cycle), used for delayed inhibition"`
}
DelInhState contains extra variables for MSNLayer neurons -- stored separately
type DelayedInhibParams ¶
type DelayedInhibParams struct {
    // add in a portion of inhibition from previous time period
    Active bool `desc:"add in a portion of inhibition from previous time period"`
    // proportion of per-unit net input on previous gamma-frequency quarter to add in as inhibition
    PrvQ float32 `desc:"proportion of per-unit net input on previous gamma-frequency quarter to add in as inhibition"`
    // proportion of per-unit net input on previous trial to add in as inhibition
    PrvTrl float32 `desc:"proportion of per-unit net input on previous trial to add in as inhibition"`
}
Delayed inhibition for matrix compartment layers
type IAmygPrjn ¶
type IAmygPrjn interface {
AsAmygModPrjn() *AmygModPrjn // recast the projection as a modulatory projection
}
IAmygPrjn has one method, AsAmygModPrjn, which recasts the projection as a modulatory projection
type IBlAmygLayer ¶
type IBlAmygLayer interface {
AsBlAmygLayer() *BlAmygLayer
}
IBlAmygLayer has one method, AsBlAmygLayer, that returns a pointer to the layer specifically as a BLA layer.
type ICElAmygLayer ¶
type ICElAmygLayer interface {
AsCElAmygLayer() *CElAmygLayer
}
type ISetScalePrjn ¶
type ISetScalePrjn interface {
InitWts()
}
ISetScalePrjn initializes weights, including special scale calculations
type Inputs ¶
type LHBRMTgInternalState ¶ added in v1.1.12
type LHBRMTgInternalState struct {
    VSPatchPosD1   float32
    VSPatchPosD2   float32
    VSPatchNegD1   float32
    VSPatchNegD2   float32
    VSMatrixPosD1  float32
    VSMatrixPosD2  float32
    VSMatrixNegD1  float32
    VSMatrixNegD2  float32
    PosPV          float32
    NegPV          float32
    VSPatchPosNet  float32
    VSPatchNegNet  float32
    VSMatrixPosNet float32
    VSMatrixNegNet float32
    NetPos         float32
    NetNeg         float32
}
type LHbRMTgGains ¶
type LHbRMTgGains struct { // final overall gain on everything All float32 `desc:"final overall gain on everything"` // patch D1 APPETITIVE pathway - versus pos PV outcomes VSPatchPosD1 float32 `desc:"patch D1 APPETITIVE pathway - versus pos PV outcomes"` // patch D2 APPETITIVE pathway versus vspatch_pos_D1 VSPatchPosD2 float32 `desc:"patch D2 APPETITIVE pathway versus vspatch_pos_D1"` // proportion of positive reward prediction error (RPE) to use if RPE results from a predicted omission of positive VSPatchPosDisinhib float32 `desc:"proportion of positive reward prediction error (RPE) to use if RPE results from a predicted omission of positive"` // gain on VS matrix D1 APPETITIVE guys VSMatrixPosD1 float32 `desc:"gain on VS matrix D1 APPETITIVE guys"` // VS matrix D2 APPETITIVE VSMatrixPosD2 float32 `desc:"VS matrix D2 APPETITIVE"` // VS patch D1 pathway versus neg PV outcomes VSPatchNegD1 float32 `desc:"VS patch D1 pathway versus neg PV outcomes"` // VS patch D2 pathway versus vspatch_neg_D1 VSPatchNegD2 float32 `desc:"VS patch D2 pathway versus vspatch_neg_D1"` // VS matrix D1 AVERSIVE VSMatrixNegD1 float32 `desc:"VS matrix D1 AVERSIVE"` // VS matrix D2 AVERSIVE VSMatrixNegD2 float32 `desc:"VS matrix D2 AVERSIVE"` }
Gain constants for LHbRMTg inputs
type LHbRMTgLayer ¶
type LHbRMTgLayer struct { leabra.Layer RcvFrom emer.LayNames // [view: inline] Gains LHbRMTgGains `view:"inline"` // reduction in effective PVNeg net value (when positive) so that negative outcomes can never be completely predicted away -- still allows for positive da for less-bad outcomes PVNegDiscount float32 `` /* 180-byte string literal not displayed */ InternalState LHBRMTgInternalState // for debugging }
func AddLHbRMTgLayer ¶
func AddLHbRMTgLayer(nt *Network, name string) *LHbRMTgLayer
func (*LHbRMTgLayer) ActFmG ¶
func (ly *LHbRMTgLayer) ActFmG(ltime *leabra.Time)
func (*LHbRMTgLayer) Build ¶
func (ly *LHbRMTgLayer) Build() error
func (*LHbRMTgLayer) Defaults ¶
func (ly *LHbRMTgLayer) Defaults()
func (*LHbRMTgLayer) GetMonitorVal ¶
func (ly *LHbRMTgLayer) GetMonitorVal(data []string) float64
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
type MSNLayer ¶
type MSNLayer struct {
    ModLayer
    // patch or matrix
    Compartment StriatalCompartment `inactive:"+" desc:"patch or matrix"`
    // slice of delayed inhibition state for this layer.
    DIState []DelInhState `desc:"slice of delayed inhibition state for this layer."`
    // [view: no-inline add-fields]
    DIParams DelayedInhibParams `view:"no-inline add-fields"`
}
func AddMSNLayer ¶
func AddMSNLayer(nt *Network, name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
AddMSNLayer adds an MSNLayer of given size, with given name. nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY x nNeurX neurons. da gives the DaReceptor type (D1R = Go, D2R = NoGo)
func (*MSNLayer) AlphaCycInit ¶
func (*MSNLayer) AsMSNLayer ¶
func (*MSNLayer) Build ¶
Build constructs the layer state, including calling Build on the projections you MUST have properly configured the Inhib.Pool.On setting by this point to properly allocate Pools for the unit groups if necessary.
func (*MSNLayer) ClearMSNTrace ¶
func (ly *MSNLayer) ClearMSNTrace()
func (*MSNLayer) GetMonitorVal ¶
func (*MSNLayer) InhibFmGeAct ¶
InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools. This is here for matrix delayed inhibition, not needed otherwise.
func (*MSNLayer) PoolDelayedInhib ¶
func (*MSNLayer) QuarterInitPrvs ¶
func (*MSNLayer) RecvPrjnVals ¶
func (*MSNLayer) SendPrjnVals ¶
type MSNParams ¶
type MSNParams struct {
    // patch or matrix
    Compartment StriatalCompartment `inactive:"+" desc:"patch or matrix"`
}
Parameters for Dorsal Striatum Medium Spiny Neuron computation
type MSNPrjn ¶
type MSNPrjn struct { leabra.Prjn LearningRule DALrnRule // [view: inline] special parameters for striatum trace learning Trace MSNTraceParams `view:"inline" desc:"special parameters for striatum trace learning"` // trace synaptic state values, ordered by the sending layer units which owns them -- one-to-one with SConIdx array TrSyns []TraceSyn `desc:"trace synaptic state values, ordered by the sending layer units which owns them -- one-to-one with SConIdx array"` // sending layer activation variable name SLActVar string `desc:"sending layer activation variable name"` // receiving layer activation variable name RLActVar string `desc:"receiving layer activation variable name"` // [def: 0.7] [min: 0] for VS matrix TRACE_NO_THAL_VS and DA_HEBB_VS learning rules, this is the maximum value that the deep_mod_net modulatory inputs from the basal amygdala (up state enabling signal) can contribute to learning MaxVSActMod float32 `` /* 230-byte string literal not displayed */ // parameters for dopaminergic modulation DaMod DaModParams `desc:"parameters for dopaminergic modulation"` }
MSNPrjn is a dopamine-modulated projection for striatum-like (MSN) layers
func (*MSNPrjn) ClearTrace ¶
func (pj *MSNPrjn) ClearTrace()
func (*MSNPrjn) DWt ¶
func (pj *MSNPrjn) DWt()
DWt computes the weight change (learning) -- on sending projections.
type MSNTraceParams ¶
type MSNTraceParams struct { // [def: true] use the sigmoid derivative factor 2 * act * (1-act) in modulating learning -- otherwise just multiply by msn activation directly -- this is generally beneficial for learning to prevent weights from continuing to increase when activations are already strong (and vice-versa for decreases) Deriv bool `` /* 305-byte string literal not displayed */ // [def: 1] [min: 0] multiplier on trace activation for decaying prior traces -- new trace magnitude drives decay of prior trace -- if gating activation is low, then new trace can be low and decay is slow, so increasing this factor causes learning to be more targeted on recent gating changes Decay float32 `` /* 294-byte string literal not displayed */ // learning rate scale factor, if GateLRScale float32 `desc:"learning rate scale factor, if "` }
Params for trace-based learning
func (*MSNTraceParams) Defaults ¶
func (tp *MSNTraceParams) Defaults()
func (*MSNTraceParams) MSNActLrnFactor ¶
func (tp *MSNTraceParams) MSNActLrnFactor(act float32) float32
MSNActLrnFactor returns the multiplicative factor for the level of MSN activation. If Deriv is true, the factor is 2 * act * (1-act) -- the factor of 2 compensates for the otherwise reduced learning from these factors. Otherwise the factor is just act.
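A standalone sketch of the rule just described; this restates the documented behavior for illustration and is not the package's source.

// msnActLrnFactor mirrors the documented MSNTraceParams.MSNActLrnFactor rule.
func msnActLrnFactor(deriv bool, act float32) float32 {
    if !deriv {
        return act
    }
    // sigmoid-derivative form; the factor of 2 compensates for its reduced magnitude
    return 2 * act * (1 - act)
}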
type ModLayer ¶
type ModLayer struct { leabra.Layer // neuron-level modulation state ModNeurs []ModNeuron `desc:"neuron-level modulation state"` // pools for maintaining aggregate values ModPools []ModPool `desc:"pools for maintaining aggregate values"` // layer names and scale values for mods sent from this layer ModReceivers []ModRcvrParams `desc:"layer names and scale values for mods sent from this layer"` // parameters shared by all modulator receiver layers ModParams `desc:"parameters shared by all modulator receiver layers"` // parameters for dopaminergic modulation DaMod DaModParams `desc:"parameters for dopaminergic modulation"` // layer-level neuromodulator levels Modulators `desc:"layer-level neuromodulator levels"` }
ModLayer is a layer that RECEIVES modulatory input
func (*ModLayer) AddModReceiver ¶
func (ly *ModLayer) AddModReceiver(rcvr ModReceiver, scale float32)
AddModReceiver adds a receiving layer to the list of modulatory target layers for a sending layer.
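A wiring sketch, assuming sender and rcvr are *pvlv.ModLayer values obtained from the network (names and the 0.2 scale are illustrative); the network-level ConnectLayersActMod documented below pairs layers the same way.

// sender is the modulatory sending layer; rcvr receives its modulation,
// scaled by 0.2 when it is integrated into ModNet.
sender.AddModReceiver(rcvr, 0.2)
// Equivalent network-level call:
// nt.ConnectLayersActMod(sender, rcvr, 0.2)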
func (*ModLayer) AvgMaxMod ¶
AvgMaxMod runs the standard activation statistics calculation as used for other pools on a layer's ModPools.
func (*ModLayer) ClearModActs ¶
func (ly *ModLayer) ClearModActs()
ClearModActs clears modulatory activation values. This is critical for getting clean results from one trial to the next.
func (*ModLayer) ClearModLevels ¶
func (ly *ModLayer) ClearModLevels()
ClearModLevels resets modulation state variables to their default values for an entire layer.
func (*ModLayer) DALrnFmDA ¶
DALrnFmDA returns effective learning dopamine value from given raw DA value applying Burst and Dip Gain factors, and then reversing sign for D2R. GetDa in cemer
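A sketch of the documented rule (Burst/Dip gain, then a sign flip for D2R); the parameter names here are local stand-ins rather than the layer's own fields, and the actual DALrnFmDA may differ in detail.

// daLrnFmDA restates the documented DALrnFmDA behavior for illustration.
func daLrnFmDA(da, burstGain, dipGain float32, rcpt pvlv.DaRType) float32 {
    if da > 0 {
        da *= burstGain // positive (burst) DA
    } else {
        da *= dipGain // negative (dip) DA
    }
    if rcpt == pvlv.D2R {
        da = -da // D2 receptors reverse the sign of the learning signal
    }
    return da
}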
func (*ModLayer) GScaleFmAvgAct ¶
func (ly *ModLayer) GScaleFmAvgAct()
GScaleFmAvgAct sets the value of GScale on incoming projections, based on sending layer subpool activations.
func (*ModLayer) GetMonitorVal ¶
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*ModLayer) InitActs ¶
func (ly *ModLayer) InitActs()
InitActs sets modulation state variables to their default values for a layer, including its pools.
func (*ModLayer) ModSendValue ¶
ModSendValue returns the value of ModSent for one modulatory pool, specified by ni.
func (*ModLayer) ModsFmInc ¶
ModsFmInc sets ModLrn and ModLevel based on individual neuron activation and incoming ModNet values.
If ModNet is below threshold, ModLrn is set to 0, and ModLevel is set to either 0 or 1 depending on the value of the ActModZero parameter.
If ModNet is above threshold, ModLrn for each neuron is set to the ratio of its ModNet input to its subpool activation value, with special cases for extreme values.
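A per-neuron sketch of the rule just described, as a hypothetical helper; the handling of the "extreme values" special cases is simplified here and may differ from the actual ModsFmInc.

// modLrnFmNet restates the documented ModsFmInc rule for a single neuron.
func modLrnFmNet(modNet, poolAct, thr float32, actModZero bool) (modLrn, modLevel float32) {
    if modNet <= thr { // below threshold: no learning modulation
        if actModZero {
            return 0, 0
        }
        return 0, 1
    }
    modLevel = 1
    if poolAct > 0 {
        modLrn = modNet / poolAct // ratio of ModNet input to subpool activation
    } else {
        modLrn = 1 // degenerate pool activation: simplified special case
    }
    return modLrn, modLevel
}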
func (*ModLayer) ReceiveMods ¶
ReceiveMods computes ModNet, based on the value from the sender, times a scale value.
func (*ModLayer) SendMods ¶
SendMods calculates the level of modulation to send to receivers, based on subpool activations, and calls ReceiveMods for the receivers to process sent values.
func (*ModLayer) UnitVal1D ¶
UnitVal1D returns value of given variable index on given unit, using 1-dimensional index. returns NaN on invalid index. This is the core unit var access method used by other methods, so it is the only one that needs to be updated for derived layer types.
func (*ModLayer) UnitValByIdx ¶
func (ly *ModLayer) UnitValByIdx(vidx ModNeuronVar, idx int) float32
UnitValByIdx returns value of given variable by variable index and flat neuron index (from layer or neuron-specific one).
func (*ModLayer) UnitVals ¶
UnitVals fills in values of given variable name on unit, for each unit in the layer, into given float32 slice (only resized if not big enough). Returns error on invalid var name.
func (*ModLayer) UnitValsTensor ¶
UnitValsTensor returns values of given variable name on unit for each unit in the layer, as a float32 tensor in same shape as layer units.
func (*ModLayer) UnitVarIdx ¶
UnitVarIdx returns the index of given variable within the Neuron, according to UnitVarNames() list (using a map to lookup index), or -1 and error message if not found.
func (*ModLayer) UnitVarNames ¶
UnitVarNames returns a list of variable names available on the units in this layer; Mod variables are returned as *layer level* vars.
func (*ModLayer) UpdateParams ¶
func (ly *ModLayer) UpdateParams()
UpdateParams passes on an UpdateParams call to the layer's underlying Leabra layer.
type ModNeuron ¶
type ModNeuron struct {
    // neuron-level modulator activation
    Modulators `desc:"neuron-level modulator activation"`
    // activity level for modulation
    ModAct float32 `desc:"activity level for modulation"`
    // degree of full modulation to apply
    ModLevel float32 `desc:"degree of full modulation to apply"`
    // modulation input from sender
    ModNet float32 `desc:"modulation input from sender"`
    // multiplier for DA modulation of learning rate
    ModLrn float32 `desc:"multiplier for DA modulation of learning rate"`
    // direct activation from US
    PVAct float32 `desc:"direct activation from US"`
}
ModNeuron encapsulates the variables used by all layers that receive modulatory input
func (*ModNeuron) InitActs ¶
func (mnr *ModNeuron) InitActs()
InitActs sets modulation state variables to their default values for one neuron.
func (*ModNeuron) VarByIndex ¶
VarByIndex returns variable using index (0 = first variable in NeuronVars list)
type ModNeuronVar ¶
type ModNeuronVar int
ModNeuronVar is an index into the extra neuron-level (modulator) variables
const (
    DA ModNeuronVar = iota
    ACh
    SE
    ModAct
    ModLevel
    ModNet
    ModLrn
    PVAct
    Cust1
    ModNeuronVarsN
)
func (ModNeuronVar) String ¶
func (i ModNeuronVar) String() string
type ModParams ¶
type ModParams struct { // [viewif: On] how much to multiply Da in the minus phase to add to Ge input -- use negative values for NoGo/indirect pathway/D2 type neurons Minus float32 `` /* 145-byte string literal not displayed */ // [viewif: On] how much to multiply Da in the plus phase to add to Ge input -- use negative values for NoGo/indirect pathway/D2 type neurons Plus float32 `` /* 144-byte string literal not displayed */ // [viewif: DaMod.On&&ModGain] for negative dopamine, how much to change the default gain value as a function of dopamine: gain = gain * (1 + da * NegNain) -- da is multiplied by minus or plus depending on phase NegGain float32 `` /* 214-byte string literal not displayed */ // [viewif: DaMod.On&&ModGain] for positive dopamine, how much to change the default gain value as a function of dopamine: gain = gain * (1 + da * PosGain) -- da is multiplied by minus or plus depending on phase PosGain float32 `` /* 214-byte string literal not displayed */ // for modulation coming from the BLA via deep_mod_net -- when this modulation signal is below zero, does it have the ability to zero out the patch activations? i.e., is the modulation required to enable patch firing? ActModZero bool `` /* 222-byte string literal not displayed */ // threshold on deep_mod_net before deep mod is applied -- if not receiving even this amount of overall input from deep_mod sender, then do not use the deep_mod_net to drive deep_mod and deep_lrn values -- only for SUPER units -- based on LAYER level maximum for base LeabraLayerSpec, PVLV classes are based on actual deep_mod_net for each unit ModNetThreshold float32 `` /* 348-byte string literal not displayed */ // threshold for including neuron activation in total to send (for ModNet) ModSendThreshold float32 `desc:"threshold for including neuron activation in total to send (for ModNet)"` // does this layer send modulation to other layers? IsModSender bool `desc:"does this layer send modulation to other layers?"` // does this layer receive modulation from other layers? IsModReceiver bool `desc:"does this layer receive modulation from other layers?"` // does this layer receive a direct PV input? IsPVReceiver bool `desc:"does this layer receive a direct PV input?"` }
ModParams contains values that control a receiving layer's response to modulatory inputs
type ModPool ¶
type ModPool struct {
    ModNetStats minmax.AvgMax32
    // modulation level transmitted to receiver layers
    ModSent float32 `desc:"modulation level transmitted to receiver layers"`
    // threshold for sending modulation. values below this are not added to the pool-level total
    ModSendThreshold float32 `desc:"threshold for sending modulation. values below this are not added to the pool-level total"`
}
ModPool is similar to a standard Pool structure, and uses the same code to compute running statistics.
type ModRcvrParams ¶
type ModRcvrParams struct {
    // name of receiving layer
    RcvName string `desc:"name of receiving layer"`
    // scale factor for modulation to this receiver
    Scale float32 `desc:"scale factor for modulation to this receiver"`
}
ModRcvrParams specifies the name of a layer that receives modulatory input, and a scale factor--critical for inputs from large layers such as BLA.
type ModReceiver ¶
type ModReceiver interface {
    ReceiveMods(sender ModSender, scale float32) // copy incoming modulation values into the layer's own ModNet variable
    ModsFmInc(ltime *leabra.Time)                // set modulation levels
}
ModReceiver has one method, ReceiveMods, to copy incoming modulation values into the layer's own ModNet variable, and another, ModsFmInc, to set modulation levels from the integrated input.
type Modulators ¶
type Modulators struct {
    // current dopamine level for this layer
    DA float32 `desc:"current dopamine level for this layer"`
    // current acetylcholine level for this layer
    ACh float32 `desc:"current acetylcholine level for this layer"`
    // current serotonin level for this layer
    SE float32 `desc:"current serotonin level for this layer"`
}
Modulators are modulatory neurotransmitters. Currently ACh and SE are only placeholders.
func (*Modulators) InitActs ¶
func (ml *Modulators) InitActs()
InitActs zeroes activation levels for a set of modulator variables.
type Network ¶
func (*Network) AddBlAmygLayer ¶
func (nt *Network) AddBlAmygLayer(name string, nY, nX, nNeurY, nNeurX int, val Valence, dar DaRType, lTyp emer.LayerType) *BlAmygLayer
AddBlAmygLayer adds a Basolateral Amygdala layer with specified 4D geometry, acquisition/extinction, valence, and DA receptor type
func (*Network) AddCElAmygLayer ¶
func (nt *Network) AddCElAmygLayer(name string, nY, nX, nNeurY, nNeurX int, acqExt AcqExt, val Valence, dar DaRType) *CElAmygLayer
AddCElAmygLayer adds a CentroLateral Amygdala layer with specified 4D geometry, acquisition/extinction, valence, and DA receptor type
Geometry is 4D.
nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY * nNeurX neurons. da parameter gives the DaReceptor type (D1R = Go, D2R = NoGo). acqExt (AcqExt) specifies whether this layer is involved with acquisition or extinction. val is positive (appetitive) or negative (aversive) Valence.
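A call sketch, given a *pvlv.Network nt (see the fuller construction sketch under AddMSNLayer below). The value names Acq and PosV for the AcqExt and Valence arguments are hypothetical, since this page does not list them; D1R is documented above.

// One 1x1 pool of 1x6 neurons; acquisition, positive valence, D1 receptors.
cel := nt.AddCElAmygLayer("CElAcqPosD1", 1, 1, 1, 6, pvlv.Acq, pvlv.PosV, pvlv.D1R)
_ = cel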
func (*Network) AddMSNLayer ¶
func (nt *Network) AddMSNLayer(name string, nY, nX, nNeurY, nNeurX int, cpmt StriatalCompartment, da DaRType) *MSNLayer
AddMSNLayer adds an MSNLayer of given size, with given name.
Geometry is 4D.
nY = number of pools in Y dimension, nX is pools in X dimension, and each pool has nNeurY * nNeurX neurons.
cpmt specifies patch or matrix StriatalCompartment
da parameter gives the DaReceptor type (DaRType) (D1R = Go, D2R = NoGo)
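A minimal construction sketch using the documented constants PATCH/MATRIX and D1R/D2R. The import path and the InitName/Defaults/Build/InitWts sequence follow the usual leabra pattern and are assumptions here; layer names are illustrative.

package main

import (
    "github.com/emer/leabra/pvlv" // assumed import path
)

func main() {
    var nt pvlv.Network
    nt.InitName(&nt, "PVLVSketch")

    // 1x4 pools of 1x1 neurons; patch compartment, D1-dominant (Go-like).
    vsPatchPosD1 := nt.AddMSNLayer("VSPatchPosD1", 1, 4, 1, 1, pvlv.PATCH, pvlv.D1R)
    // Matrix compartment, D2-dominant (NoGo-like).
    vsMatrixPosD2 := nt.AddMSNLayer("VSMatrixPosD2", 1, 4, 1, 1, pvlv.MATRIX, pvlv.D2R)
    _ = vsPatchPosD1
    _ = vsMatrixPosD2

    // A positive-valence VTA layer could be added here with nt.AddVTALayer,
    // given the appropriate Valence constant (not listed on this page).

    nt.Defaults()
    if err := nt.Build(); err != nil {
        panic(err)
    }
    nt.InitWts()
}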
func (*Network) AddVTALayer ¶
AddVTALayer adds a positive or negative Valence VTA layer
func (*Network) ClearMSNTraces ¶
func (*Network) ClearModActs ¶
func (*Network) ConnectLayersActMod ¶
func (nt *Network) ConnectLayersActMod(sender ModSender, rcvr ModReceiver, scale float32)
func (*Network) QuarterInitPrvs ¶
func (*Network) RecvModInc ¶
func (*Network) SynVarNames ¶
func (*Network) SynVarProps ¶
SynVarProps returns properties for variables
func (*Network) UnitVarNames ¶
UnitVarNames returns a list of variable names available on the units in this layer
type PPTgLayer ¶
type PPTgLayer struct {
    leabra.Layer
    Ge      float32
    GePrev  float32
    SendAct float32
    DA      float32
    // gain on input activation
    DNetGain float32 `desc:"gain on input activation"`
    // activation threshold for passing through
    ActThreshold float32 `desc:"activation threshold for passing through"`
    // clamp activation directly, after applying gain
    ClampActivation bool `desc:"clamp activation directly, after applying gain"`
}
The PPTg passes on a positively-rectified version of its input signal.
func AddPPTgLayer ¶
AddPPTgLayer adds a Pedunculopontine Tegmental (PPTg) layer, which acts as a positive rectifier for its inputs.
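A sketch of the rectification described above, using the Ge, GePrev, DNetGain, and ActThreshold fields from the struct; the layer's actual ActFmG and QuarterFinal bookkeeping is more involved.

// pptgAct restates the positive-rectifier idea: only increases in net input
// over the previous value pass through, scaled by gain and thresholded.
func pptgAct(ge, gePrev, gain, thr float32) float32 {
    act := gain * (ge - gePrev)
    if act < thr {
        return 0
    }
    return act
}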
func (*PPTgLayer) GetMonitorVal ¶
GetMonitorVal retrieves a value for a trace of some quantity, possibly more than just a variable
func (*PPTgLayer) QuarterFinal ¶
type PVLayer ¶
Primary Value input layer. Sends activation directly to its receivers, bypassing the standard mechanisms.
func AddPVLayer ¶
func (*PVLayer) AddPVReceiver ¶
func (*PVLayer) GetMonitorVal ¶
type PackedUSTimeState ¶
type PackedUSTimeState int64
const USTimeNone PackedUSTimeState = 0
func PUSTFromString ¶
func PUSTFromString(s string) PackedUSTimeState
func (PackedUSTimeState) Empty ¶
func (ps PackedUSTimeState) Empty() bool
func (PackedUSTimeState) FromString ¶
func (pus PackedUSTimeState) FromString(s string) PackedUSTimeState
func (PackedUSTimeState) Shape ¶
func (ps PackedUSTimeState) Shape() []int
func (PackedUSTimeState) Stim ¶
func (ps PackedUSTimeState) Stim() Stim
func (PackedUSTimeState) String ¶
func (ps PackedUSTimeState) String() string
func (PackedUSTimeState) Tensor ¶
func (ps PackedUSTimeState) Tensor() etensor.Tensor
func (PackedUSTimeState) TensorScaled ¶
func (ps PackedUSTimeState) TensorScaled(scale float32) etensor.Tensor
func (PackedUSTimeState) US ¶
func (ps PackedUSTimeState) US() US
func (PackedUSTimeState) USTimeIn ¶
func (ps PackedUSTimeState) USTimeIn() Tick
func (PackedUSTimeState) Unpack ¶
func (ps PackedUSTimeState) Unpack() USTimeState
func (PackedUSTimeState) Valence ¶
func (ps PackedUSTimeState) Valence() Valence
type PosUS ¶
type PosUS US
positive and negative subtypes of US
func (PosUS) FromString ¶
func (PosUS) PosUSEmpty ¶
type StriatalCompartment ¶
type StriatalCompartment int
const (
    PATCH StriatalCompartment = iota
    MATRIX
    NSComp
)
func (StriatalCompartment) String ¶
func (i StriatalCompartment) String() string
type TraceSyn ¶
type TraceSyn struct {
    // new trace -- drives updates to trace value -- su * (1-ru_msn) for gated, or su * ru_msn for not-gated (or for non-thalamic cases)
    NTr float32 `` /* 136-byte string literal not displayed */
    // current ongoing trace of activations, which drive learning -- adds ntr and clears after learning on current values -- includes both thal gated (+ and other nongated, - inputs)
    Tr float32 `` /* 183-byte string literal not displayed */
}
TraceSyn holds extra synaptic state for trace projections
func (*TraceSyn) SetVarByIndex ¶
func (*TraceSyn) SetVarByName ¶
SetVarByName sets synapse variable to given value
func (*TraceSyn) VarByIndex ¶
VarByIndex returns variable using index (0 = first variable in SynapseVars list)
type US ¶
type US int
func (US) FromString ¶
func (US) TensorScaled ¶
func (pos PosUS) TensorScaled(scale float32) etensor.Tensor { return TensorScaled(US(pos), 1.0/scale) }
func (neg NegUS) TensorScaled(scale float32) etensor.Tensor { return TensorScaled(US(neg), 1.0/scale) }
type USTimeState ¶
type USTimeState struct {
    // CS value
    Stm Stim `desc:"CS value"`
    // a US value or absent (USNone)
    US US `desc:"a US value or absent (USNone)"`
    // PV valence, POS, NEG, or absent (ValNone)
    Val Valence `desc:"PV valence, POS, NEG, or absent (ValNone)"`
    // Within-trial timestep
    Tck Tick `desc:"Within-trial timestep"`
}
func USTFromString ¶
func USTFromString(uss string) USTimeState
func (USTimeState) Coords ¶
func (usts USTimeState) Coords() []int
func (USTimeState) CoordsString ¶
func (usts USTimeState) CoordsString() string
func (USTimeState) Empty ¶
func (usts USTimeState) Empty() bool
func (USTimeState) EnumVal ¶
func (usts USTimeState) EnumVal() int
func (USTimeState) Pack ¶
func (usts USTimeState) Pack() PackedUSTimeState
func (USTimeState) String ¶
func (usts USTimeState) String() string
func (USTimeState) Tensor ¶
func (usts USTimeState) Tensor() etensor.Tensor
func (USTimeState) TensorScaleAndAdd ¶
func (usts USTimeState) TensorScaleAndAdd(scale float32, other USTimeState) etensor.Tensor
func (USTimeState) TensorScaled ¶
func (usts USTimeState) TensorScaled(scale float32) etensor.Tensor
func (USTimeState) TsrOffset ¶
func (usts USTimeState) TsrOffset() []int
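A round-trip sketch of packing and unpacking, assuming the pvlv and fmt imports from the earlier examples. StmA and T1 appear in the StimMap and TickMap variables above and USNone is the documented empty US; POS as a Valence value name is an assumption.

usts := pvlv.USTimeState{Stm: pvlv.StmA, US: pvlv.USNone, Val: pvlv.POS, Tck: pvlv.T1}
packed := usts.Pack()        // compact PackedUSTimeState (int64) encoding
fmt.Println(usts.String())   // readable form of the unpacked state
fmt.Println(packed.String()) // readable form of the packed state
back := packed.Unpack()      // recover the USTimeState fields
fmt.Println(back.CoordsString())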
type VTADAGains ¶
type VTADAGains struct { // overall multiplier for dopamine values DA float32 `desc:"overall multiplier for dopamine values"` // gain on bursts from PPTg PPTg float32 `desc:"gain on bursts from PPTg"` // gain on dips/bursts from LHbRMTg LHb float32 `desc:"gain on dips/bursts from LHbRMTg"` // gain on positive PV component of total phasic DA signal (net after subtracting VSPatchIndir (PVi) shunt signal) PV float32 `desc:"gain on positive PV component of total phasic DA signal (net after subtracting VSPatchIndir (PVi) shunt signal)"` // gain on VSPatch projection that shunts bursting in VTA (for VTAp = VSPatchPosD1, for VTAn = VSPatchNegD2) PVIBurstShunt float32 `desc:"gain on VSPatch projection that shunts bursting in VTA (for VTAp = VSPatchPosD1, for VTAn = VSPatchNegD2)"` // gain on VSPatch projection that opposes shunting of bursting in VTA (for VTAp = VSPatchPosD2, for VTAn = VSPatchNegD1) PVIAntiBurstShunt float32 `desc:"gain on VSPatch projection that opposes shunting of bursting in VTA (for VTAp = VSPatchPosD2, for VTAn = VSPatchNegD1)"` // gain on VSPatch projection that shunts dipping of VTA (currently only VTAp supported = VSPatchNegD2) -- optional and somewhat controversial PVIDipShunt float32 `` /* 146-byte string literal not displayed */ // gain on VSPatch projection that opposes the shunting of dipping in VTA (currently only VTAp supported = VSPatchNegD1) PVIAntiDipShunt float32 `desc:"gain on VSPatch projection that opposes the shunting of dipping in VTA (currently only VTAp supported = VSPatchNegD1)"` }
Gain constants for inputs to the VTA
func (*VTADAGains) Defaults ¶
func (dag *VTADAGains) Defaults()
type VTALayer ¶
type VTALayer struct {
    rl.ClampDaLayer
    SendVal float32
    // VTA layer DA valence, positive or negative
    Valence Valence `desc:"VTA layer DA valence, positive or negative"`
    // set a tonic 'dopamine' (DA) level (offset to add to da values)
    TonicDA float32 `desc:"set a tonic 'dopamine' (DA) level (offset to add to da values)"`
    // [view: inline] gains for various VTA inputs
    DAGains VTADAGains `view:"inline" desc:"gains for various VTA inputs"`
    RecvFrom map[string]emer.Layer
    // input values--for debugging only
    InternalState VTAState `desc:"input values--for debugging only"`
}
VTA internal state
func (*VTALayer) GetMonitorVal ¶
GetMonitorVal is for monitoring during run. Includes values beyond the scope of neuron fields.
type VTAState ¶ added in v1.1.12
type VTAState struct {
    PPTgDAp    float32
    LHbDA      float32
    PosPVAct   float32
    VSPosPVI   float32
    VSNegPVI   float32
    BurstLHbDA float32
    DipLHbDA   float32
    TotBurstDA float32
    TotDipDA   float32
    NetDipDA   float32
    NetDA      float32
    SendVal    float32
}
monitoring and debugging only. Received values from all inputs