hip pathways
rcoreilly committed Oct 15, 2024
1 parent fdd4f17 commit f1b85eb
Showing 4 changed files with 244 additions and 0 deletions.
204 changes: 204 additions & 0 deletions leabra/hip.go
@@ -0,0 +1,204 @@
// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package leabra

import (
	"cogentcore.org/core/math32"
)

// CHLParams are the parameters for Contrastive Hebbian Learning (CHL).
type CHLParams struct {

	// if true, use CHL learning instead of standard XCAL learning -- allows easy exploration of CHL vs. XCAL
	On bool

	// amount of Hebbian learning (should be relatively small, can be effective at .0001)
	Hebb float32 `default:"0.001" min:"0" max:"1"`

	// amount of error-driven learning, automatically computed to be 1-Hebb
	Err float32 `default:"0.999" min:"0" max:"1" edit:"-"`

	// if true, use ActQ1 as the minus phase -- otherwise ActM
	MinusQ1 bool

	// proportion of correction to apply to the sending average activation for the Hebbian learning component (0=none, 1=all, .5=half, etc)
	SAvgCor float32 `default:"0.4:0.8" min:"0" max:"1"`

	// threshold of sending average activation below which learning does not occur (prevents learning when there is no input)
	SAvgThr float32 `default:"0.001" min:"0"`
}

// Defaults sets default CHL parameter values.
func (ch *CHLParams) Defaults() {
	ch.On = true
	ch.Hebb = 0.001
	ch.SAvgCor = 0.4
	ch.SAvgThr = 0.001
	ch.Update()
}

// Update updates derived parameters: Err = 1 - Hebb.
func (ch *CHLParams) Update() {
	ch.Err = 1 - ch.Hebb
}

// MinusAct returns the minus-phase activation to use based on settings (ActM vs. ActQ1)
func (ch *CHLParams) MinusAct(actM, actQ1 float32) float32 {
	if ch.MinusQ1 {
		return actQ1
	}
	return actM
}

// HebbDWt computes the Hebbian DWt value from sending and recv acts, savgCor, and the linear Wt
func (ch *CHLParams) HebbDWt(sact, ract, savgCor, linWt float32) float32 {
	return ract * (sact*(savgCor-linWt) - (1-sact)*linWt)
}

// ErrDWt computes the error-driven DWt value from sending and
// recv acts in both phases, and the linear Wt, which is used
// for soft weight bounding (always applied here, separate from the Hebbian
// term, which has its own soft weight bounding dynamic).
func (ch *CHLParams) ErrDWt(sactP, sactM, ractP, ractM, linWt float32) float32 {
	err := (ractP * sactP) - (ractM * sactM)
	if err > 0 {
		err *= (1 - linWt)
	} else {
		err *= linWt
	}
	return err
}

// DWt computes the overall DWt from the Hebbian and error-driven terms
func (ch *CHLParams) DWt(hebb, err float32) float32 {
	return ch.Hebb*hebb + ch.Err*err
}

// CHLDefaults sets pathway parameter defaults for CHL learning.
func (pt *Path) CHLDefaults() {
	pt.Learn.Norm.On = false     // off by default
	pt.Learn.Momentum.On = false // off by default
	pt.Learn.WtBal.On = false    // todo: experiment
}

// SAvgCor computes the sending average activation, corrected according to the SAvgCor
// correction factor (typically makes the layer appear more sparse than it is).
func (pt *Path) SAvgCor(slay *Layer) float32 {
	savg := .5 + pt.CHL.SAvgCor*(slay.Pools[0].ActAvg.ActPAvgEff-0.5)
	savg = math32.Max(pt.CHL.SAvgThr, savg) // keep this computed value within bounds
	return 0.5 / savg
}

// DWtCHL computes the weight change (learning) for CHL
func (pt *Path) DWtCHL() {
	slay := pt.Send
	rlay := pt.Recv
	if slay.Pools[0].ActP.Avg < pt.CHL.SAvgThr { // inactive, no learn
		return
	}
	for si := range slay.Neurons {
		sn := &slay.Neurons[si]
		nc := int(pt.SConN[si])
		st := int(pt.SConIndexSt[si])
		syns := pt.Syns[st : st+nc]
		scons := pt.SConIndex[st : st+nc]
		snActM := pt.CHL.MinusAct(sn.ActM, sn.ActQ1)

		savgCor := pt.SAvgCor(slay)

		for ci := range syns {
			sy := &syns[ci]
			ri := scons[ci]
			rn := &rlay.Neurons[ri]
			rnActM := pt.CHL.MinusAct(rn.ActM, rn.ActQ1)

			hebb := pt.CHL.HebbDWt(sn.ActP, rn.ActP, savgCor, sy.LWt)
			err := pt.CHL.ErrDWt(sn.ActP, snActM, rn.ActP, rnActM, sy.LWt)

			dwt := pt.CHL.DWt(hebb, err)
			norm := float32(1)
			if pt.Learn.Norm.On {
				norm = pt.Learn.Norm.NormFromAbsDWt(&sy.Norm, math32.Abs(dwt))
			}
			if pt.Learn.Momentum.On {
				dwt = norm * pt.Learn.Momentum.MomentFromDWt(&sy.Moment, dwt)
			} else {
				dwt *= norm
			}
			sy.DWt += pt.Learn.Lrate * dwt
		}
		// aggregate max DWtNorm over sending synapses
		if pt.Learn.Norm.On {
			maxNorm := float32(0)
			for ci := range syns {
				sy := &syns[ci]
				if sy.Norm > maxNorm {
					maxNorm = sy.Norm
				}
			}
			for ci := range syns {
				sy := &syns[ci]
				sy.Norm = maxNorm
			}
		}
	}
}

// EcCa1Defaults sets pathway parameter defaults for EC <-> CA1 learning.
func (pt *Path) EcCa1Defaults() {
	pt.Learn.Norm.On = false     // off by default
	pt.Learn.Momentum.On = false // off by default
	pt.Learn.WtBal.On = false    // todo: experiment
}

// DWtEcCa1 computes the weight change (learning) on sending pathways,
// using the error-driven (delta rule) difference between ActP and ActQ1.
func (pt *Path) DWtEcCa1() {
	if !pt.Learn.Learn {
		return
	}
	slay := pt.Send
	rlay := pt.Recv
	for si := range slay.Neurons {
		sn := &slay.Neurons[si]
		nc := int(pt.SConN[si])
		st := int(pt.SConIndexSt[si])
		syns := pt.Syns[st : st+nc]
		scons := pt.SConIndex[st : st+nc]

		for ci := range syns {
			sy := &syns[ci]
			ri := scons[ci]
			rn := &rlay.Neurons[ri]

			err := (sn.ActP * rn.ActP) - (sn.ActQ1 * rn.ActQ1)
			bcm := pt.Learn.BCMdWt(sn.AvgSLrn, rn.AvgSLrn, rn.AvgL)
			bcm *= pt.Learn.XCal.LongLrate(rn.AvgLLrn)
			err *= pt.Learn.XCal.MLrn
			dwt := bcm + err

			norm := float32(1)
			if pt.Learn.Norm.On {
				norm = pt.Learn.Norm.NormFromAbsDWt(&sy.Norm, math32.Abs(dwt))
			}
			if pt.Learn.Momentum.On {
				dwt = norm * pt.Learn.Momentum.MomentFromDWt(&sy.Moment, dwt)
			} else {
				dwt *= norm
			}
			sy.DWt += pt.Learn.Lrate * dwt
		}
		// aggregate max DWtNorm over sending synapses
		if pt.Learn.Norm.On {
			maxNorm := float32(0)
			for ci := range syns {
				sy := &syns[ci]
				if sy.Norm > maxNorm {
					maxNorm = sy.Norm
				}
			}
			for ci := range syns {
				sy := &syns[ci]
				sy.Norm = maxNorm
			}
		}
	}
}
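A minimal standalone sketch of how the CHLParams pieces above combine for a single synapse, using made-up activation values (the import path is an assumption about the module layout, not something this commit specifies):

package main

import (
	"fmt"

	"github.com/emer/leabra/v2/leabra" // assumed import path for the package in this diff
)

func main() {
	ch := leabra.CHLParams{}
	ch.Defaults() // On=true, Hebb=0.001, Err=0.999, SAvgCor=0.4, SAvgThr=0.001

	// Hypothetical per-synapse values, for illustration only:
	sActP, sActM := float32(0.9), float32(0.2) // sending plus / minus phase activations
	rActP, rActM := float32(0.8), float32(0.1) // receiving plus / minus phase activations
	linWt := float32(0.5)                      // current linear weight (sy.LWt)
	savgCor := float32(1.0)                    // corrected sending average from Path.SAvgCor

	hebb := ch.HebbDWt(sActP, rActP, savgCor, linWt)     // Hebbian term, soft-bounded toward savgCor
	errv := ch.ErrDWt(sActP, sActM, rActP, rActM, linWt) // error-driven term, soft weight bounding on linWt
	dwt := ch.DWt(hebb, errv)                            // Hebb*hebb + (1-Hebb)*err
	fmt.Printf("hebb=%g err=%g dwt=%g\n", hebb, errv, dwt)
}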
12 changes: 12 additions & 0 deletions leabra/path.go
Original file line number Diff line number Diff line change
@@ -258,6 +258,18 @@ func (pt *Path) DWt() {
	if !pt.Learn.Learn {
		return
	}
	switch {
	case pt.Type == CHLPath && pt.CHL.On:
		pt.DWtCHL()
	case pt.Type == EcCa1Path:
		pt.DWtEcCa1()
	default:
		pt.DWtStd()
	}
}

// DWtStd computes the standard (XCAL) weight change (learning) -- on sending pathways
func (pt *Path) DWtStd() {
	slay := pt.Send
	rlay := pt.Recv
	for si := range slay.Neurons {
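In equation form, the CHL branch of this dispatch applies, per synapse and ignoring the optional Norm/Momentum factors (this is only a restatement of the hip.go code above, with $\lambda$ = Hebb and $\epsilon$ = Lrate):

$$\Delta w_{ij} = \epsilon \Big[ \lambda \, y_j^{+}\big( x_i^{+}(s - w_{ij}) - (1 - x_i^{+})\, w_{ij} \big) + (1 - \lambda)\, f_{\mathrm{bnd}}\big( x_i^{+} y_j^{+} - x_i^{-} y_j^{-} \big) \Big]$$

where $x_i$, $y_j$ are the sending and receiving activations (plus phase = ActP; minus phase = ActM or ActQ1 per MinusQ1), $w_{ij}$ is the linear weight LWt, $s$ is the corrected sending average from SAvgCor, and $f_{\mathrm{bnd}}$ scales positive values by $(1 - w_{ij})$ and negative values by $w_{ij}$ (the soft bounding in ErrDWt).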
16 changes: 16 additions & 0 deletions leabra/pathbase.go
@@ -48,6 +48,10 @@ type Path struct {
	// synaptic-level learning parameters
	Learn LearnSynParams `display:"add-fields"`

	// CHL are the parameters for CHL learning. If CHL is On, then
	// WtSig.SoftBound is automatically turned off, as it is incompatible.
	CHL CHLParams `display:"inline"`

	// synaptic state values, ordered by the sending layer
	// units which own them -- one-to-one with SConIndex array.
	Syns []Synapse
@@ -117,6 +121,14 @@ func (pt *Path) Defaults() {
	pt.WtInit.Defaults()
	pt.WtScale.Defaults()
	pt.Learn.Defaults()
	pt.CHL.Defaults()
	switch pt.Type {
	case CHLPath:
		pt.CHLDefaults()
	case EcCa1Path:
		pt.EcCa1Defaults()
	default:
	}
	pt.GScale = 1
}

@@ -125,6 +137,10 @@ func (pt *Path) UpdateParams() {
	pt.WtScale.Update()
	pt.Learn.Update()
	pt.Learn.LrateInit = pt.Learn.Lrate
	if pt.CHL.On {
		pt.Learn.WtSig.SoftBound = false
	}
	pt.CHL.Update()
}

// AllParams returns a listing of all parameters in the Layer
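A minimal sketch of how a pathway might be switched onto these new defaults, assuming the same import path as in the earlier sketch; in real use the Path is created and wired up by network-building code that is not part of this diff, so constructing one directly is purely illustrative:

package main

import "github.com/emer/leabra/v2/leabra" // assumed import path, as above

func main() {
	pt := &leabra.Path{}
	pt.Type = leabra.CHLPath
	pt.Defaults()      // for CHLPath this also runs CHLDefaults(); CHL.Defaults() sets CHL.On = true
	pt.CHL.Hebb = 0.01 // hypothetical override of the Hebbian proportion
	pt.UpdateParams()  // recomputes CHL.Err = 1 - Hebb and turns off WtSig.SoftBound while CHL.On
}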
12 changes: 12 additions & 0 deletions leabra/pathtypes.go
@@ -35,4 +35,16 @@ const (
	// takes into account the temporal delays in the activation states.
	// Can also add self context from CT for deeper temporal context.
	CTCtxtPath

	// CHLPath implements Contrastive Hebbian Learning.
	CHLPath

	// EcCa1Path implements special learning for EC <-> CA1 pathways
	// in the hippocampus, to perform error-driven learning of this
	// encoder pathway according to the ThetaPhase algorithm.
	// Uses Contrastive Hebbian Learning (CHL) on ActP - ActQ1:
	// Q1: ECin -> CA1 -> ECout : ActQ1 = minus phase for auto-encoder
	// Q2, Q3: CA3 -> CA1 -> ECout : ActM = minus phase for recall
	// Q4: ECin -> CA1, ECin -> ECout : ActP = plus phase for everything
	EcCa1Path
)
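Written out against this quarter schedule, the error-driven term of DWtEcCa1 (in hip.go above) contrasts the end-of-trial plus phase with the end of Q1, per synapse and ignoring the optional Norm/Momentum factors (a restatement of the code, not notation introduced by the commit):

$$err_{ij} = x_i^{+} y_j^{+} - x_i^{Q1} y_j^{Q1}, \qquad \Delta w_{ij} = \epsilon\,\big( \ell_j\, bcm_{ij} + m\, err_{ij} \big)$$

where $x$/$y$ are sending/receiving activations, the plus phase is ActP at the end of Q4, ActQ1 is the activation at the end of Q1 (the auto-encoder minus phase), $bcm_{ij}$ is the Hebbian term from Learn.BCMdWt, $\ell_j$ = XCal.LongLrate(AvgLLrn), $m$ = XCal.MLrn, and $\epsilon$ = Lrate.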
