add docs for bg, pfc, rl layers
rcoreilly committed Oct 19, 2024
1 parent 930965b commit 53ba0f6
Showing 2 changed files with 23 additions and 7 deletions.
11 changes: 11 additions & 0 deletions leabra/pbwm_net.go
@@ -102,6 +102,12 @@ func (nt *Network) AddDorsalBG(prefix string, nY, nMaint, nOut, nNeurY, nNeurX i
pt.AddClass("BgFixed")
pt = nt.ConnectLayers(gpe, gpi, one2one, GPiThalPath)
pt.AddClass("BgFixed")

mtxGo.Doc = "Matrisome (Matrix) striatum medium spiny neuron (MSN), which is the input layer of the basal ganglia (BG), with more D1 than D2 dopamine receptors, that drives the direct pathway to disinhibit BG outputs, favoring a 'Go' response"
mtxNoGo.Doc = "Matrisome (Matrix) striatum medium spiny neuron (MSN), which is the input layer of the basal ganglia (BG), with more D2 than D1 dopamine receptors, that drives the indirect pathway through the globus pallidus external segment (GPe) to net inhibit BG outputs, favoring a 'NoGo' response"
gpe.Doc = "Globus pallidus external segment (GPe) of the BG that is tonically active and inhibited by the Matrix NoGo pathway, causing disinhibition of the GPi, and net inhibition of overall BG output responding."
gpi.Doc = "Globus pallidus internal segment (GPi) of the BG that is tonically active and inhibited by the Matrix Go pathway (and disinhibited by the GPe via NoGo), which then inhibits the thalamus (Thal), with the net effect of disinhibiting cortical areas on BG Go pathway activation. This layer summarizes both GPi and Thal in a net excitatory, activity-positive manner."
cin.Doc = "Cholinergic interneurons (CIN) that represent a positively rectified, non-prediction-discounted reward and overall sensory salience signal, that modulates overall BG activity and learning around salient events."
return
}
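
For orientation, the sketch below shows how the dorsal BG layers documented above might be added to a model. It is a minimal sketch, not taken from this repository: the import path, the ConfigBG helper name, the stripe and neuron counts, and the assumption that AddDorsalBG returns the five layers whose Doc strings are set in this hunk are all assumptions, since the full signature is truncated in the hunk header.

package model

// import path assumed for the v2 module of this repository
import "github.com/emer/leabra/v2/leabra"

// ConfigBG is a hypothetical helper: 1 stripe row, 4 maintenance and
// 2 output stripes, 7x7 neurons per stripe (sizes are illustrative).
func ConfigBG(net *leabra.Network) {
	// assumed return values: the five layers documented above
	mtxGo, mtxNoGo, gpe, gpi, cin := net.AddDorsalBG("BG", 1, 4, 2, 7, 7)
	// mtxGo / mtxNoGo drive the direct (Go) and indirect (NoGo) pathways;
	// gpe / gpi are the pallidal stages; cin carries the salience signal.
	_ = []*leabra.Layer{mtxGo, mtxNoGo, gpe, gpi, cin}
}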

@@ -141,9 +147,13 @@ func (nt *Network) AddPFC(prefix string, nY, nMaint, nOut, nNeurY, nNeurX int, d
}
if nMaint > 0 {
pfcMnt, pfcMntD = nt.AddPFCLayer(prefix+"mnt", nY, nMaint, nNeurY, nNeurX, false, dynMaint)
pfcMnt.Doc = "Prefrontal Cortex (PFC) maintenance (mnt) superficial layer, which receives inputs from other brain areas and drives BG (basal ganglia) gated input into the robust maintenance deep layers"
pfcMntD.Doc = "Prefrontal Cortex (PFC) maintenance (mnt) deep layer, which has special intrinsic circuits and channels supporting robust active firing even in the absence of other inputs, and holds on to information relevant for behavioral responses, but does not directly drive those outputs"
}
if nOut > 0 {
pfcOut, pfcOutD = nt.AddPFCLayer(prefix+"out", nY, nOut, nNeurY, nNeurX, true, dynMaint)
pfcOut.Doc = "Prefrontal Cortex (PFC) output (out) superficial layer, which receives inputs from PFC maintenance and other brain areas and drives BG (basal ganglia) gated input into the output deep layers"
pfcOutD.Doc = "Prefrontal Cortex (PFC) output (out) deep layer, which drives behavioral output pathways, either as direct motor outputs, or top-down modulation of pathways that then drive outputs"
}

// todo: need a Rect pathway from MntD -> out if !dynMaint, or something else..
@@ -153,6 +163,7 @@ func (nt *Network) AddPFC(prefix string, nY, nMaint, nOut, nNeurY, nNeurX int, d
pt := nt.ConnectLayers(pfcMntD, pfcOut, paths.NewOneToOne(), ForwardPath)
pt.AddClass("PFCMntDToOut")
}

return
}
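
A similar sketch for the PFC layers documented above, continuing the same hypothetical package and import as the BG sketch. It assumes the truncated final parameter is the dynMaint bool used in the function body, and that AddPFC returns the four PFC layers in the order shown; the helper name and sizes are again illustrative only.

// ConfigPFC is a hypothetical helper; the return order is assumed.
func ConfigPFC(net *leabra.Network) {
	pfcMnt, pfcMntD, pfcOut, pfcOutD := net.AddPFC("PFC", 1, 4, 2, 7, 7, true)
	// superficial layers (pfcMnt, pfcOut) receive inputs and BG-gated drive;
	// deep layers do robust maintenance (pfcMntD) or drive outputs (pfcOutD).
	_ = []*leabra.Layer{pfcMnt, pfcMntD, pfcOut, pfcOutD}
}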

19 changes: 12 additions & 7 deletions leabra/rl.go
@@ -102,6 +102,11 @@ func (nt *Network) AddRWLayers(prefix string, space float32) (rew, rp, da *Layer
da.RW.RewLay = rew.Name
rp.PlaceBehind(rew, space)
da.PlaceBehind(rp, space)

rew.Doc = "Reward input, activated by external rewards, e.g., the US = unconditioned stimulus"
rp.Doc = "Reward Prediction according to the Rescorla-Wagner (RW) model, representing a learned estimate of Rew layer activity on each trial, using a linear activation function"
da.Doc = "Dopamine (DA)-like signal reflecting the difference Rew - RWPred, or reward prediction error (RPE), on each trial"

return
}
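
The AddRWLayers signature is shown in full in the hunk header, so a usage sketch is more direct; it continues the same hypothetical package and import as above, and the "RW" prefix and spacing of 2 are arbitrary example values.

// ConfigRW adds the Rescorla-Wagner dopamine layers documented above.
func ConfigRW(net *leabra.Network) (rew, rp, da *leabra.Layer) {
	rew, rp, da = net.AddRWLayers("RW", 2) // prefix, space between layers
	// rew: external reward (US) input; rp: learned prediction of rew;
	// da: dopamine-like reward prediction error (Rew - RWPred)
	return
}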

@@ -285,6 +290,7 @@ func (nt *Network) AddTDLayers(prefix string, space float32) (rew, rp, ri, td *L
td = nt.AddLayer2D(prefix+"TD", 1, 1, TDDaLayer)
ri.TD.PredLay = rp.Name
td.TD.IntegLay = ri.Name

rp.PlaceBehind(rew, space)
ri.PlaceBehind(rp, space)
td.PlaceBehind(ri, space)
@@ -295,12 +301,11 @@ func (nt *Network) AddTDLayers(prefix string, space float32) (rew, rp, ri, td *L
pt.WtInit.Mean = 1
pt.WtInit.Var = 0
pt.WtInit.Sym = false
// {Sel: ".TDToInteg", Desc: "rew to integ",
// Params: params.Params{
// "Path.Learn.Learn": "false",
// "Path.WtInit.Mean": "1",
// "Path.WtInit.Var": "0",
// "Path.WtInit.Sym": "false",
// }},

rew.Doc = "Reward input, activated by external rewards, e.g., the US = unconditioned stimulus"
rp.Doc = "Reward Prediction, representing estimated value V(t) in the minus phase, and estimated V(t+1) in the plus phase, computed from learned weights"
ri.Doc = "Integration of Pred + Rew, representing estimated value V(t) in the minus phase, and estimated V(t+1) + r(t) in the plus phase"
td.Doc = "Temporal Difference (TD) computes a dopamine (DA)-like signal as difference between the Integ activations across plus - minus phases: [V(t+1) + r(t)] - V(t), where V are estimated cumulative discounted future reward values"

return
}
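
The same pattern applies to the TD layers documented above, whose signature is also shown in full (same hypothetical package and import; prefix and spacing are arbitrary):

// ConfigTD adds the temporal-difference dopamine layers documented above.
func ConfigTD(net *leabra.Network) (rew, rp, ri, td *leabra.Layer) {
	rew, rp, ri, td = net.AddTDLayers("TD", 2) // prefix, space between layers
	// rew: external reward; rp: estimated V(t) / V(t+1);
	// ri: integration of prediction and reward; td: [V(t+1)+r(t)] - V(t)
	return
}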
