# clamp ratorIn and ratorOut for the last 100 ms (Tclamp) of each trial (Tperiod)
# the error is clamped over the same end-of-trial window further below
clampValsZeros = np.zeros(Nexc)
clampValsNegs = -100.*np.ones(Nexc)
endTrialClamp = nengo.Node(lambda t: clampValsZeros if (t%Tperiod)<(Tperiod-Tclamp) else clampValsNegs)
nengo.Connection(endTrialClamp,ratorIn.neurons,synapse=1e-3)
nengo.Connection(endTrialClamp,ratorOut.neurons,synapse=1e-3)
# fast synapse for fast-reacting clamp
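# --- A minimal, self-contained sketch of the clamping pattern above (not part of the
# --- original model; the trial length, clamp window and ensemble size are assumed toy
# --- values): a Node injects a large negative current into every neuron during the
# --- last part of each trial, silencing the ensemble so activity resets between trials.
import numpy as np
import nengo

demoTperiod, demoTclamp, demoN = 1.0, 0.1, 50
with nengo.Network() as demoClampNet:
    demoStim = nengo.Node(lambda t: np.sin(2 * np.pi * t))
    demoEns = nengo.Ensemble(demoN, dimensions=1)
    nengo.Connection(demoStim, demoEns)
    demoClamp = nengo.Node(lambda t: np.zeros(demoN)
                           if (t % demoTperiod) < (demoTperiod - demoTclamp)
                           else -100. * np.ones(demoN))
    nengo.Connection(demoClamp, demoEns.neurons, synapse=1e-3)  # fast clamp synapse
    demoProbe = nengo.Probe(demoEns, synapse=0.01)              # goes silent in the clamp window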
if plastDecoders:
# don't use the same seed across these connections,
# else they are all evaluated at the same values of the low-dimensional variables,
# and the similar frozen noise across connections can cause seed-dependent convergence issues
if initLearned:
# default transform is unity
InEtoE = nengo.Connection(ratorIn, ratorOut, synapse=tau)
else:
InEtoE = nengo.Connection(ratorIn, ratorOut, transform=Wdyn2, synapse=tau)
#np.random.seed(1)
#InEtoE = nengo.Connection(nodeIn, ratorOut, synapse=None,
# transform=np.random.uniform(-20,20,size=(N,N))+np.eye(N))
EtoE = nengo.Connection(ratorOut, ratorOut,
transform=Wdyn2, synapse=tau) # synapse is tau_syn for filtering
else:
# If initLearned==True, these weights will be reset below using InEtoEfake and EtoEfake
if copycatLayer and not copycatPreLearned: # if the copycat layer is driven from Wdesired, we don't learn the FF transform;
# otherwise we cannot compare to the copycat weights, since a constant factor is arbitrary between the feedforward and recurrent parts
InEtoE = nengo.Connection(ratorIn.neurons, ratorOut.neurons, synapse=tau)
# the system didn't learn in this case;
# possibly because this connection is neurons-to-neurons while InEtoEexpect is ensemble-to-ensemble
else:
InEtoE = nengo.Connection(ratorIn.neurons, ratorOut.neurons,
transform=Wdyn2/20., synapse=tau)
# Wdyn2 same as for EtoE, but mean(InEtoE) = mean(EtoE)/20
EtoE = nengo.Connection(ratorOut.neurons, ratorOut.neurons,
transform=Wdyn2, synapse=tau) # synapse is tau_syn for filtering
# initLearned
if initLearned and not inhibition: # initLearned=True will set bidirectional weights
# thus only useful if inhibition=False
InEtoEfake = nengo.Connection(ratorIn, ratorOut, synapse=tau)
EtoEfake = nengo.Connection(ratorOut, ratorOut,
function=Wdesired, synapse=tau) # synapse is tau_syn for filtering
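# --- Hedged illustration (toy sizes and names, not from the original model) of the
# --- three connection styles used above: a decoded ensemble-to-ensemble connection
# --- with a low-dimensional transform, a direct neurons-to-neurons connection with a
# --- full synaptic weight matrix, and a decoded recurrent connection whose function=
# --- argument implements the desired mapping.
import numpy as np
import nengo

demoDims, demoNexc, demoTau = 2, 100, 0.02
with nengo.Network() as demoConnNet:
    demoA = nengo.Ensemble(demoNexc, dimensions=demoDims)
    demoB = nengo.Ensemble(demoNexc, dimensions=demoDims)
    # decoded connection: transform acts in the represented (low-dimensional) space
    nengo.Connection(demoA, demoB, transform=np.eye(demoDims), synapse=demoTau)
    # neuron-level connection: transform is a full Nexc x Nexc weight matrix
    demoW = np.random.uniform(-1e-4, 1e-4, size=(demoNexc, demoNexc))
    nengo.Connection(demoA.neurons, demoB.neurons, transform=demoW, synapse=demoTau)
    # decoded recurrent connection computing a function of the represented value
    nengo.Connection(demoB, demoB, function=lambda x: 0.9 * x, synapse=demoTau)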
# probes
nodeIn_probe = nengo.Probe(nodeIn, synapse=None)
ratorIn_probe = nengo.Probe(ratorIn, synapse=tau)
# don't probe what is encoded in ratorIn, but rather what is sent to ratorOut:
# in nengo 2.2.1.dev0, probing 'output' reads out the output of the connection InEtoE,
# whereas in older nengo ~2.0 it gave the full variable encoded in ratorOut (the post ensemble of InEtoE)
# NOTE: InEtoE is a neurons-to-neurons connection, so its 'output' is Nexc-dim, not N-dim!
#ratorIn_probe = nengo.Probe(InEtoE, 'output')
#ratorIn_probe = nengo.Probe(InEtoE, 'input', synapse=tau)
# don't probe ratorOut here as this calls build_decoders() separately for this;
# just call build_decoders() once for ratorOut2error, and probe 'output' of that connection below
#ratorOut_probe = nengo.Probe(ratorOut, synapse=tau)
# synapse is tau for filtering
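# --- For reference, a hedged toy sketch (not the original model) of the probing
# --- strategy described in the comments above: probe the Connection's decoded
# --- 'output' rather than the post ensemble, so decoders are solved only once.
import nengo

with nengo.Network() as demoProbeNet:
    demoStim = nengo.Node(0.5)
    demoPre = nengo.Ensemble(50, dimensions=1)
    demoPost = nengo.Ensemble(50, dimensions=1)
    nengo.Connection(demoStim, demoPre)
    demoConn = nengo.Connection(demoPre, demoPost, synapse=0.005)
    demoConnProbe = nengo.Probe(demoConn, 'output', synapse=0.02)  # decoded output of the connection
    demoPostProbe = nengo.Probe(demoPost, synapse=0.02)            # would solve a separate set of decoders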
if i == 0:
nengo.Connection(input_images, layer.neurons,
transform=W.T, synapse=pstc)
else:
nengo.Connection(layers[-1].neurons, layer.neurons,
transform=W.T * amp, synapse=pstc)
layers.append(layer)
# --- make code layer
W, b = weights[-1], biases[-1]
code_layer = nengo.networks.EnsembleArray(50, b.size, label='code', radius=15)
code_bias = nengo.Node(output=b)
nengo.Connection(code_bias, code_layer.input, synapse=0)
nengo.Connection(layers[-1].neurons, code_layer.input,
transform=W.T * amp * 1000, synapse=pstc)
# --- make cleanup
class_layer = nengo.networks.EnsembleArray(100, 10, label='class', radius=15)
class_bias = nengo.Node(output=bc)
nengo.Connection(class_bias, class_layer.input, synapse=0)
nengo.Connection(code_layer.output, class_layer.input,
transform=Wc.T, synapse=pstc)
test = nengo.Node(output=test_dots, size_in=n_labels)
nengo.Connection(class_layer.output, test)
probe_code = nengo.Probe(code_layer.output, synapse=0.03)
probe_class = nengo.Probe(class_layer.output, synapse=0.03)
probe_test = nengo.Probe(test, synapse=0.01)
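# --- Hedged sketch (placeholder data, not the original pipeline) of how an
# --- input_images node like the one above is typically driven:
# --- nengo.processes.PresentInput cycles through flattened images, one per
# --- presentation_time.
import numpy as np
import nengo

demoPresentationTime = 0.1
demoImages = np.random.rand(10, 28 * 28)   # stand-in for flattened MNIST digits
with nengo.Network() as demoInputNet:
    demoInputImages = nengo.Node(
        nengo.processes.PresentInput(demoImages, demoPresentationTime))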
import numpy as np
import nengo
from time import time as t

def Nengo(n_neurons, time):
    t0 = t()
    t1 = t()
    model = nengo.Network()
    with model:
        X = nengo.Ensemble(n_neurons, dimensions=1, neuron_type=nengo.LIF())
        Y = nengo.Ensemble(n_neurons, dimensions=2, neuron_type=nengo.LIF())
        # connect at the neuron level so the (n_neurons x n_neurons) random weight
        # matrix has a valid shape (a decoded X->Y connection would need a 2x1 transform)
        nengo.Connection(X.neurons, Y.neurons,
                         transform=np.random.rand(n_neurons, n_neurons))
    with nengo.Simulator(model) as sim:
        sim.run(time / 1000)  # `time` is given in milliseconds
    return t() - t0, t() - t1
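# Example use of the benchmark above (assumed units: `time` in milliseconds,
# matching the /1000 conversion inside the function).
demoBuildRunTimes = Nengo(n_neurons=100, time=1000)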
nengo.Connection(bias, layer.neurons, transform=np.eye(n), synapse=0)
nengo_probe.attr == constants.RECORD_OUTPUT_FLAG):
# create new vertex and add to probe map.
app_vertex = ValueSinkApplicationVertex(
rng=random_number_generator,
label="Sink vertex for neurons {} for probeable "
"attribute {}".format(nengo_probe.label,
nengo_probe.attr),
size_in=nengo_probe.size_in,
seed=helpful_functions.get_seed(nengo_probe))
nengo_to_app_graph_map[nengo_probe] = app_vertex
nengo_operator_graph.add_vertex(app_vertex)
# build the connection and let the connection conversion do the rest
with host_network:
nengo_connection = nengo.Connection(
nengo_probe.target, nengo_probe,
synapse=nengo_probe.synapse,
solver=nengo_probe.solver,
seed=nengo_to_app_graph_map[nengo_probe].seed)
self._connection_conversion(
nengo_connection, nengo_operator_graph,
nengo_to_app_graph_map, random_number_generator,
host_network, decoder_cache, live_io_receivers,
live_io_senders)
else:
raise NotProbeableException(
"operator {} does not support probing {}".format(
app_vertex, nengo_probe.attr))
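# --- For context, a hedged plain-Nengo sketch (toy network, not part of the SpiNNaker
# --- conversion code above) of the host-side pattern it mirrors: probing a decoded
# --- value with an explicit synapse and solver, which the builder turns into a
# --- Connection much like the one constructed above.
import nengo

with nengo.Network() as demoSinkNet:
    demoEns = nengo.Ensemble(40, dimensions=1)
    demoDecodedProbe = nengo.Probe(demoEns, synapse=0.01,
                                   solver=nengo.solvers.LstsqL2(reg=0.1))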
transform=W.T, synapse=pstc)
else:
nengo.Connection(layers[-1].neurons, layer.neurons,
transform=W.T * amp / dt, synapse=pstc)
layers.append(layer)
# --- make cleanup
class_layer = nengo.networks.EnsembleArray(Nclass, 10, label='class', radius=5)
class_bias = nengo.Node(output=bc)
nengo.Connection(class_bias, class_layer.input, synapse=0)
nengo.Connection(layers[-1].neurons, class_layer.input,
transform=Wc.T * amp / dt, synapse=pstc)
test = nengo.Node(output=test_dots, size_in=n_labels)
nengo.Connection(class_layer.output, test)
# --- make centroid classifier node
def centroid_test_fn(t, x):
i = int(t / presentation_time)
d = ((x - code_means)**2).sum(1)
return test_labels[i] == labels[np.argmin(d)]
centroid_test = nengo.Node(centroid_test_fn, size_in=layers[-1].n_neurons)
nengo.Connection(layers[-1].neurons, centroid_test,
transform=amp / dt, synapse=pstc)
# --- make dot classifier node
def dot_test_fn(t, x):
i = int(t / presentation_time)
# d = np.dot(code_means, x)
d = np.dot(code_means - code_mean, x - code_mean)
seed=seedR1) )
nengo.Connection(errNoiseNode,error)
###
### Add the relevant pre signal to the error ensemble ###
###
if recurrentLearning: # L2 rec learning
if copycatLayer:
# Error = post - desired_output
rateEvolve2error = nengo.Connection(expectOut,error,synapse=tau,transform=-np.eye(N))
# - desired output here (post above)
# tau-filtered expectOut must be compared to tau-filtered ratorOut (post above)
else:
rateEvolve = nengo.Node(rateEvolveFn)
# Error = post - desired_output
rateEvolve2error = nengo.Connection(rateEvolve,error,synapse=tau,transform=-np.eye(N))
#rateEvolve2error = nengo.Connection(rateEvolve,error,synapse=None,transform=-np.eye(N))
# - desired output here (post above)
# unfiltered non-spiking reference is compared to tau-filtered spiking ratorOut (post above)
plasticConnEE = EtoE
rateEvolve_probe = nengo.Probe(rateEvolve2error, 'output')
# save the filtered/unfiltered reference as this is the 'actual' reference
###
### Add the exc learning rules to the connection, and the error ensemble to the learning rule ###
###
EtoERulesDict = { 'PES' : nengo.PES(learning_rate=PES_learning_rate_rec,
pre_tau=tau) }
plasticConnEE.learning_rule_type = EtoERulesDict
#plasticConnEE.learning_rule['PES'].learning_rate=0
# changing learning_rate here has no effect;
# even when set to zero, learning still works fine!
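# --- A minimal, self-contained PES sketch (toy sizes and learning rate, not the
# --- original model) showing the same wiring as above: the error population computes
# --- post - desired, and connecting it into the plastic connection's learning rule
# --- drives the decoders toward the desired output.
import numpy as np
import nengo

demoTau = 0.02
with nengo.Network() as demoPESNet:
    demoStim = nengo.Node(lambda t: np.sin(2 * np.pi * t))
    demoPre = nengo.Ensemble(100, dimensions=1)
    demoPost = nengo.Ensemble(100, dimensions=1)
    demoError = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(demoStim, demoPre)
    demoConn = nengo.Connection(demoPre, demoPost, synapse=demoTau,
                                function=lambda x: [0.0])  # start far from the target mapping
    demoConn.learning_rule_type = {'PES': nengo.PES(learning_rate=1e-4)}
    nengo.Connection(demoPost, demoError, synapse=demoTau)                # + post
    nengo.Connection(demoStim, demoError, synapse=demoTau, transform=-1)  # - desired
    nengo.Connection(demoError, demoConn.learning_rule['PES'])
    demoPostProbe = nengo.Probe(demoPost, synapse=0.02)
with nengo.Simulator(demoPESNet) as demoSim:
    demoSim.run(5.0)   # demoPost gradually tracks the sine reference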