sir2 actually 2, using last v2 params
rcoreilly committed Oct 29, 2024
1 parent ba32fb2 commit adfd213
Showing 4 changed files with 57 additions and 43 deletions.
10 changes: 5 additions & 5 deletions examples/sir2/enumgen.go

Some generated files are not rendered by default.

37 changes: 15 additions & 22 deletions examples/sir2/sir2.go
@@ -80,23 +80,16 @@ var ParamSets = params.Sets{
"Layer.Act.Clamp.Range.Min": "-1",
"Layer.Act.Clamp.Range.Max": "1",
}},
{Sel: ".PFCMntDToOut", Desc: "PFC Deep -> PFC fixed",
Params: params.Params{
"Path.Learn.Learn": "false",
"Path.WtInit.Mean": "0.8",
"Path.WtInit.Var": "0",
"Path.WtInit.Sym": "false",
}},
{Sel: ".PFCMntDToOut", Desc: "PFC MntD -> PFC Out fixed",
Params: params.Params{
"Path.Learn.Learn": "false",
"Path.WtInit.Mean": "0.8",
"Path.WtInit.Var": "0",
"Path.WtInit.Sym": "false",
}},
{Sel: ".FmPFCOutD", Desc: "If multiple stripes, PFC OutD needs to be strong b/c avg act says weak",
{Sel: ".FmPFCOutD", Desc: "PFC OutD needs to be strong b/c avg act says weak",
Params: params.Params{
"Path.WtScale.Abs": "1", // increase in proportion to number of stripes
"Path.WtScale.Abs": "4",
}},
{Sel: ".PFCFixed", Desc: "Input -> PFC",
Params: params.Params{
@@ -107,7 +100,7 @@ var ParamSets = params.Sets{
}},
{Sel: ".MatrixPath", Desc: "Matrix learning",
Params: params.Params{
"Path.Learn.Lrate": "0.04", // .04 > .1
"Path.Learn.Lrate": "0.04", // .04 > .1 > .02
"Path.WtInit.Var": "0.1",
"Path.Trace.GateNoGoPosLR": "1", // 0.1 default
"Path.Trace.NotGatedLR": "0.7", // 0.7 default
@@ -118,8 +111,8 @@
{Sel: ".MatrixLayer", Desc: "exploring these options",
Params: params.Params{
"Layer.Act.XX1.Gain": "100",
"Layer.Inhib.Layer.Gi": "1.9",
"Layer.Inhib.Layer.FB": "0.5",
"Layer.Inhib.Layer.Gi": "2.2", // 2.2 > 1.8 > 2.4
"Layer.Inhib.Layer.FB": "1", // 1 > .5
"Layer.Inhib.Pool.On": "true",
"Layer.Inhib.Pool.Gi": "2.1", // def 1.9
"Layer.Inhib.Pool.FB": "0",
@@ -130,19 +123,19 @@
}},
{Sel: "#GPiThal", Desc: "defaults also set automatically by layer but included here just to be sure",
Params: params.Params{
"Layer.Inhib.Layer.Gi": "1.8",
"Layer.Inhib.Layer.FB": "1", // was 0.5
"Layer.Inhib.Layer.Gi": "1.8", // 1.8 > 2.0
"Layer.Inhib.Layer.FB": "1", // 1.0 > 0.5
"Layer.Inhib.Pool.On": "false",
"Layer.Inhib.ActAvg.Init": ".2",
"Layer.Inhib.ActAvg.Fixed": "true",
"Layer.Act.Dt.GTau": "3",
"Layer.GPiGate.GeGain": "3",
"Layer.GPiGate.NoGo": "1", // 1.25?
"Layer.GPiGate.Thr": "0.2", // 0.25?
"Layer.GPiGate.NoGo": "1.25", // was 1 default
"Layer.GPiGate.Thr": "0.25", // .2 default
}},
{Sel: "#GPeNoGo", Desc: "GPe is a regular layer -- needs special params",
Params: params.Params{
"Layer.Inhib.Layer.Gi": "2.4", // 2.4 > 2.2 > 1.8
"Layer.Inhib.Layer.Gi": "2.4", // 2.4 > 2.2 > 1.8 > 2.6
"Layer.Inhib.Layer.FB": "0.5",
"Layer.Inhib.Layer.FBTau": "3", // otherwise a bit jumpy
"Layer.Inhib.Pool.On": "false",
@@ -186,8 +179,8 @@ }},
}},
{Sel: "#RWPred", Desc: "keep it guessing",
Params: params.Params{
"Layer.RW.PredRange.Min": "0.01", // increasing to .05, .95 can be useful for harder tasks
"Layer.RW.PredRange.Max": "0.99",
"Layer.RW.PredRange.Min": "0.05", // single most important param! was .01 -- need penalty..
"Layer.RW.PredRange.Max": "0.95",
}},
},
}
@@ -325,12 +318,12 @@ func (ss *Sim) ConfigNet(net *leabra.Network) {
da.Name = "SNc"

inp := net.AddLayer2D("Input", 1, 4, leabra.InputLayer)
ctrl := net.AddLayer2D("CtrlInput", 1, 3, leabra.InputLayer)
ctrl := net.AddLayer2D("CtrlInput", 1, 5, leabra.InputLayer)
out := net.AddLayer2D("Output", 1, 4, leabra.TargetLayer)
hid := net.AddLayer2D("Hidden", 7, 7, leabra.SuperLayer)

// args: nY, nMaint, nOut, nNeurBgY, nNeurBgX, nNeurPfcY, nNeurPfcX
mtxGo, mtxNoGo, gpe, gpi, cin, pfcMnt, pfcMntD, pfcOut, pfcOutD := net.AddPBWM("", 1, 1, 1, 1, 3, 1, 4)
mtxGo, mtxNoGo, gpe, gpi, cin, pfcMnt, pfcMntD, pfcOut, pfcOutD := net.AddPBWM("", 4, 2, 2, 1, 5, 1, 4)
_ = gpe
_ = gpi
_ = pfcMnt
@@ -539,7 +532,7 @@ func (ss *Sim) ApplyReward(train bool) {
} else {
en = ss.Envs.ByMode(etime.Test).(*SIREnv)
}
if en.Act != Recall { // only reward on recall trials!
if en.Act != Recall1 && en.Act != Recall2 { // only reward on recall trials!
return
}
out := ss.Net.LayerByName("Output")
51 changes: 36 additions & 15 deletions examples/sir2/sir2_env.go
@@ -17,9 +17,11 @@ import (
type Actions int32 //enums:enum

const (
Store Actions = iota
Store1 Actions = iota
Store2
Ignore
Recall
Recall1
Recall2
)

// SIREnv implements the store-ignore-recall task
@@ -43,9 +45,12 @@ type SIREnv struct {
Stim int

// current stimulus being maintained
Maint int
Maint1 int

// input pattern with stim
// current stimulus being maintained
Maint2 int

// stimulus input pattern
Input tensor.Float64

// input pattern with action
@@ -58,7 +63,7 @@ type SIREnv struct {
Reward tensor.Float64

// trial is the step counter within epoch
Trial env.Counter `view:"inline"`
Trial env.Counter `display:"inline"`
}

func (ev *SIREnv) Label() string { return ev.Name }
@@ -89,29 +94,34 @@ func (ev *SIREnv) State(element string) tensor.Tensor {
return nil
}

func (ev *SIREnv) Actions() env.Elements {
return nil
}

// StimStr returns a letter string rep of stim (A, B...)
func (ev *SIREnv) StimStr(stim int) string {
return string([]byte{byte('A' + stim)})
}

// String returns the current state as a string
func (ev *SIREnv) String() string {
return fmt.Sprintf("%s_%s_mnt_%s_rew_%g", ev.Act, ev.StimStr(ev.Stim), ev.StimStr(ev.Maint), ev.Reward.Values[0])
return fmt.Sprintf("%s_%s_mnt1_%s_mnt2_%s_rew_%g", ev.Act, ev.StimStr(ev.Stim), ev.StimStr(ev.Maint1), ev.StimStr(ev.Maint2), ev.Reward.Values[0])
}

func (ev *SIREnv) Init(run int) {
ev.Trial.Scale = etime.Trial
ev.Trial.Init()
ev.Trial.Cur = -1 // init state -- key so that first Step() = 0
ev.Maint = -1
ev.Maint1 = -1
ev.Maint2 = -1
}

// SetState sets the input, output states
func (ev *SIREnv) SetState() {
ev.CtrlInput.SetZeros()
ev.CtrlInput.Values[ev.Act] = 1
ev.Input.SetZeros()
if ev.Act != Recall {
if ev.Act != Recall1 && ev.Act != Recall2 {
ev.Input.Values[ev.Stim] = 1
}
ev.Output.SetZeros()
@@ -134,22 +144,33 @@ func (ev *SIREnv) SetReward(netout int) bool {
func (ev *SIREnv) StepSIR() {
for {
ev.Act = Actions(rand.Intn(int(ActionsN)))
if ev.Act == Store && ev.Maint >= 0 { // already full
if ev.Act == Store1 && ev.Maint1 >= 0 { // already full
continue
}
if ev.Act == Recall1 && ev.Maint1 < 0 { // nothing
continue
}
if ev.Act == Store2 && ev.Maint2 >= 0 { // already full
continue
}
if ev.Act == Recall && ev.Maint < 0 { // nothign
if ev.Act == Recall2 && ev.Maint2 < 0 { // nothing
continue
}
break
}
ev.Stim = rand.Intn(ev.NStim)
switch ev.Act {
case Store:
ev.Maint = ev.Stim
case Store1:
ev.Maint1 = ev.Stim
case Store2:
ev.Maint2 = ev.Stim
case Ignore:
case Recall:
ev.Stim = ev.Maint
ev.Maint = -1
case Recall1:
ev.Stim = ev.Maint1
ev.Maint1 = -1
case Recall2:
ev.Stim = ev.Maint2
ev.Maint2 = -1
}
ev.SetState()
}
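The revised StepSIR above generalizes the original single maintenance slot to two independent slots, resampling the action until it is valid: a Store into an already-occupied slot or a Recall from an empty slot is rejected and redrawn. The following is a minimal standalone sketch of that control logic, for illustration only; it does not use the repository's SIREnv type, and the names `step` and `maint` are invented here, not part of the package API.

```go
package main

import (
	"fmt"
	"math/rand"
)

// Action mirrors the five-way action space used by the two-slot task.
type Action int

const (
	Store1 Action = iota
	Store2
	Ignore
	Recall1
	Recall2
	ActionsN
)

// step samples an action that is valid given the two maintenance slots
// (-1 means empty), applies it, and returns the action and the stimulus.
func step(maint *[2]int, nStim int) (Action, int) {
	var act Action
	for {
		act = Action(rand.Intn(int(ActionsN)))
		if act == Store1 && maint[0] >= 0 { // slot 1 already full
			continue
		}
		if act == Store2 && maint[1] >= 0 { // slot 2 already full
			continue
		}
		if act == Recall1 && maint[0] < 0 { // nothing to recall from slot 1
			continue
		}
		if act == Recall2 && maint[1] < 0 { // nothing to recall from slot 2
			continue
		}
		break
	}
	stim := rand.Intn(nStim)
	switch act {
	case Store1:
		maint[0] = stim
	case Store2:
		maint[1] = stim
	case Ignore:
		// stimulus is presented but nothing is stored
	case Recall1:
		stim = maint[0] // recalled item becomes the target
		maint[0] = -1
	case Recall2:
		stim = maint[1]
		maint[1] = -1
	}
	return act, stim
}

func main() {
	maint := [2]int{-1, -1}
	for i := 0; i < 10; i++ {
		act, stim := step(&maint, 4)
		fmt.Printf("trial %d: act=%d stim=%c maint=[%d %d]\n", i, act, 'A'+stim, maint[0], maint[1])
	}
}
```

In the actual environment the recalled stimulus also drives the target Output pattern, and reward is delivered only on Recall1/Recall2 trials, matching the ApplyReward change in sir2.go above.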
2 changes: 1 addition & 1 deletion examples/sir2/typegen.go

Some generated files are not rendered by default.
