How to use the psyneulink.core module in psyneulink

To help you get started, we’ve selected a few psyneulink.core examples, based on popular ways it is used in public projects.

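Before the excerpts, here is a minimal, self-contained sketch of the pattern most of them share: build mechanisms whose function comes from psyneulink.core, wire them with a MappingProjection, and run. It only uses calls that appear in the excerpts below (the older Process/System API); the layer sizes, matrix values, and names are illustrative.

import numpy as np
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions

# two-unit input layer feeding a three-unit logistic hidden layer (sizes are illustrative)
input_layer = pnl.TransferMechanism(size=2, name='INPUT')
hidden_layer = pnl.TransferMechanism(size=3,
                                     function=psyneulink.core.components.functions.transferfunctions.Logistic,
                                     name='HIDDEN')

# rows index sender units, columns index receiver units
input_to_hidden = pnl.MappingProjection(matrix=np.random.randn(2, 3) * 0.1,
                                        sender=input_layer,
                                        receiver=hidden_layer)

# compose into a Process/System (as in the Multitasking excerpt below) and run a single trial
net_process = pnl.Process(pathway=[input_layer, input_to_hidden, hidden_layer])
net_system = pnl.System(processes=[net_process])
results = net_system.run(inputs={input_layer: [[1.0, 0.0]]})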

PrincetonUniversity/PsyNeuLink · tests/models/test_botvinick.py (view on GitHub)
    colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                         function=psyneulink.core.components.Logistic(x_0=4.0),  # bias 4.0 is -4.0 in the paper; see Docs for description
                                                         integrator_mode=True,
                                                         hetero=-2,
                                                         integration_rate=0.01,  # cohen-huston text says 0.01
                                                         name='COLORS_HIDDEN')

    words_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                        function=psyneulink.core.components.Logistic(x_0=4.0),
                                                        integrator_mode=True,
                                                        hetero=-2,
                                                        integration_rate=0.01,
                                                        name='WORDS_HIDDEN')

    #   Response layer, responses: ('red', 'green')
    response_layer = pnl.RecurrentTransferMechanism(size=2,
                                                    function=psyneulink.core.components.Logistic,
                                                    hetero=-2.0,
                                                    integrator_mode=True,
                                                    integration_rate=0.01,
                                                    output_ports = [pnl.RESULT,
                                                                     {pnl.NAME: 'DECISION_ENERGY',
                                                                      pnl.VARIABLE: (pnl.OWNER_VALUE,0),
                                                                      pnl.FUNCTION: psyneulink.core.components.Stability(
                                                                          default_variable = np.array([0.0, 0.0]),
                                                                          metric = pnl.ENERGY,
                                                                          matrix = np.array([[0.0, -4.0],
                                                                                            [-4.0, 0.0]]))}],
                                                    name='RESPONSE', )

    # Mapping projections---------------------------------------------------------------------------------------------------

    color_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0, 0.0],

PrincetonUniversity/PsyNeuLink · tests/documentation/core/test_state_docs.py (view on GitHub)
def test_gating_signal_docs():
    fail, total = doctest.testmod(pnl.core.components.ports.modulatorysignals.gatingsignal)

    if fail > 0:
        pytest.fail("{} out of {} examples failed".format(fail, total))

PrincetonUniversity/PsyNeuLink · tests/documentation/core/test_mechanism_docs.py (view on GitHub)
def test_mechanisms():
    fail, total = doctest.testmod(pnl.core.components.mechanisms.mechanism)
    if fail > 0:
        pytest.fail("{} out of {} examples failed".format(fail, total),
                    pytrace=False)
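
doctest.testmod returns a (failed, attempted) pair for the examples embedded in a module's docstrings, which is what the two tests above unpack. The same pattern can be pointed at any psyneulink.core submodule; a sketch, using an import path that appears elsewhere on this page (the test name is just an example):

import doctest
import pytest
import psyneulink as pnl

def test_transfer_function_docs():
    # run the doctest examples embedded in the transferfunctions module's docstrings
    fail, total = doctest.testmod(pnl.core.components.functions.transferfunctions)
    if fail > 0:
        pytest.fail("{} out of {} examples failed".format(fail, total), pytrace=False)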

PrincetonUniversity/PsyNeuLink · Scripts/Misc/multitasking_nn_model.py (view on GitHub)
        self.learning_rate = learning_rate
        self.bias = bias
        self.weight_init_scale = weight_init_scale
        self.decay = decay
        self.hidden_path_size = hidden_path_size
        self.output_path_size = output_path_size

        # implement equivalents of setData, configure, and constructor
        self.num_tasks = self.num_dimensions ** 2

        # Here we would initialize the layers; instead, we initialize the PNL model:
        self.task_layer = pnl.TransferMechanism(size=self.num_tasks,
                                                name='task_input')
        self.hidden_layer = pnl.TransferMechanism(size=self.hidden_layer_size,
                                                  name='hidden',
                                                  function=psyneulink.core.components.functions.transferfunctions
                                                  .Logistic)
        self.hidden_bias = pnl.TransferMechanism(default_variable=np.ones((self.hidden_layer_size,)),
                                                 name='hidden bias')
        self.input_layers = self._generate_io_layers('input')
        self.output_layers = self._generate_io_layers('output')
        self._generate_output_bias_layers()
        self._generate_processes()
        self._generate_system()
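
The _generate_* helpers called at the end of this excerpt are not shown on this page. Purely as an illustration of what a per-dimension layer builder might look like (a hypothetical sketch, not the repository's actual implementation; self.num_features is an assumed attribute):

    def _generate_io_layers(self, name):
        # hypothetical: one TransferMechanism per stimulus dimension, named 'input-0', 'input-1', ...
        return [pnl.TransferMechanism(size=self.num_features,
                                      name='{}-{}'.format(name, i))
                for i in range(self.num_dimensions)]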

PrincetonUniversity/PsyNeuLink · Scripts/To Debug/Markus Stroop.py (view on GitHub)
        response_layer.integrator_mode = False
        colors_hidden_layer.noise = 0
        words_hidden_layer.noise = 0
        response_layer.noise = 0

        my_Stroop.run(inputs=initialize_trial_input)
        # we don't want the accumulators to retain activity from the initialization trial, so reset them to zero
        respond_green_accumulator.reinitialize(0)
        respond_red_accumulator.reinitialize(0)

        # now put back in integrator mode and noise
        colors_hidden_layer.integrator_mode = True
        words_hidden_layer.integrator_mode = True
        response_layer.integrator_mode = True
        colors_hidden_layer.noise = psyneulink.core.components.functions.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function
        words_hidden_layer.noise = psyneulink.core.components.functions.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function
        response_layer.noise = psyneulink.core.components.functions.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function

        # run system with test pattern
        my_Stroop.run(inputs=test_trial_input, termination_processing=terminate_trial)

        # store results
        my_red_accumulator_results = respond_red_accumulator.log.nparray_dictionary()
        # print('respond_red_accumulator.log.nparray_dictionary(): ',respond_red_accumulator.log.nparray_dictionary())
        # how many cycles to run? count the length of the log
        num_timesteps = np.asarray(np.size(my_red_accumulator_results['value'])).reshape(1, 1)
        # print('num_timesteps; ', num_timesteps)
        # value of parts of the system
        red_activity = np.asarray(respond_red_accumulator.value).reshape(1, 1)
        green_activity = np.asarray(respond_green_accumulator.value).reshape(1, 1)
        colors_hidden_layer_value = np.asarray(colors_hidden_layer.value).reshape(2, 1)
        # print('colors_hidden_layer_value: ', colors_hidden_layer_value)
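
The readout above relies on PsyNeuLink's logging. As a sketch of the full pattern (assuming logging of 'value' was enabled with set_log_conditions before running, as a later excerpt on this page does for response_layer; names are those used in this excerpt):

        # enable logging before running, so the accumulator's value is recorded each time step
        respond_red_accumulator.set_log_conditions('value')

        my_Stroop.run(inputs=test_trial_input, termination_processing=terminate_trial)

        # pull the log into a dict of numpy arrays and count the logged entries
        log_dict = respond_red_accumulator.log.nparray_dictionary()
        num_timesteps = np.size(log_dict['value'])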

PrincetonUniversity/PsyNeuLink · Scripts/Misc/Markus Stroop.py (view on GitHub)
#   Response layer, responses: ('red', 'green')
# tau = 0.1 (here, the smoothing factor)
# randomly distributed noise should be added to the net input of each unit (except the input units)

# Uses a RecurrentTransferMechanism, in contrast to Lauda's Stroop model
response_layer = pnl.RecurrentTransferMechanism(size=2,  #Recurrent
                                                function=psyneulink.core.components.functions.transferfunctions.Logistic,  #pnl.Stability(matrix=np.matrix([[0.0, -1.0], [-1.0, 0.0]])),
                                                name='RESPONSE',
                                                output_ports = [pnl.RESULT,
                                                                {pnl.NAME: 'DECISION_ENERGY',
                                                                 pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
                                                                 pnl.FUNCTION: psyneulink.core.components.functions.objectivefunctions
                                                                 .Stability(default_variable=np.array([0.0, -1.0]),
                                                                            metric=pnl.ENERGY,
                                                                            matrix=np.array([[0.0, -1.0], [-1.0, 0.0]]))}],
                                                integrator_mode=True,  #)
                                                # noise=pnl.NormalDist(mean=0.0, standard_deviation=.01).function)
                                                integration_rate=0.1)

#response_layer.set_log_conditions('value')
#response_layer.set_log_conditions('gain')


#   SET UP CONNECTIONS
#   rows correspond to sender units
#   columns correspond to receiver units: each entry weights the contribution that a given sender makes to that receiver
#   in linear algebra terms, the connections can be written out as a matrix (see the sketch after this comment block)
#   Input to hidden
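
As a concrete illustration of that convention (the weight values are made up, and the layer names are taken from the excerpts on this page): a 3-unit colors_hidden_layer projecting to the 2-unit response_layer, where row i holds the weights from sender unit i to each receiver unit.

# rows = sender units ('red', 'green', 'neutral'); columns = receiver units ('red', 'green'); values are illustrative
color_response_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0],
                                                                [0.0, 1.5],
                                                                [0.0, 0.0]]),
                                               sender=colors_hidden_layer,
                                               receiver=response_layer)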

PrincetonUniversity/PsyNeuLink · Scripts/Misc/xor_in_psyneulink_and_pytorch.py (view on GitHub)
# learning rate (determines the size of learning updates during training)
# higher learning rates speed up training but may reduce accuracy or prevent convergence
learning_rate = 10


# XOR in PsyNeuLink System ------------------------------------------------------------------------

system_start_time = time.time()  # used to time how long the system takes to train
# Create mechanisms and projections to represent the layers and parameters:

xor_in = pnl.TransferMechanism(name='input_layer',
                           default_variable=np.zeros(2))

xor_hid = pnl.TransferMechanism(name='hidden_layer',
                                default_variable=np.zeros(10),
                                function=pnl.core.components.functions.transferfunctions.Logistic())

xor_out = pnl.TransferMechanism(name='output_layer',
                                default_variable=np.zeros(1),
                                function=pnl.core.components.functions.transferfunctions.Logistic())

# projection that takes the signal from the input layer and transforms it to get an input for
# the hidden layer (the xor_hid mechanism)
hid_map = pnl.MappingProjection(name='input_to_hidden',
                            matrix=np.random.randn(2,10) * 0.1,
                            sender=xor_in,
                            receiver=xor_hid)

# projection that takes the signal from the hidden layer and transforms it to get an input for
# the output layer (the xor_out mechanism)
out_map = pnl.MappingProjection(name='hidden_to_output',
                            matrix=np.random.randn(10,1) * 0.1,

PrincetonUniversity/PsyNeuLink · psyneulink/library/models/Cohen_Huston1994_horse_race.py (view on GitHub)
task_input_layer = pnl.TransferMechanism(size=2,
                                         function=psyneulink.core.components.functions.transferfunctions.Linear,
                                         name='TASK_INPUT')

#   Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(size=2,
                                            function=psyneulink.core.components.functions.transferfunctions.Logistic(),
                                            hetero=-2,
                                            integrator_mode=True,
                                            integration_rate=0.1,
                                            name='TASK')

#   Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                     function=psyneulink.core.components.functions.transferfunctions
                                                     .Logistic(x_0=4.0),
                                                     integrator_mode=True,
                                                     hetero=-2.0,
                                                     # noise=pnl.NormalDist(mean=0.0, standard_deviation=.0).function,
                                                     integration_rate=0.1,  # cohen-huston text says 0.01
                                                     name='COLORS HIDDEN')

words_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                    function=psyneulink.core.components.functions.transferfunctions.Logistic(x_0=4.0),
                                                    hetero=-2,
                                                    integrator_mode=True,
                                                    # noise=pnl.NormalDist(mean=0.0, standard_deviation=.05).function,
                                                    integration_rate=0.1,
                                                    name='WORDS HIDDEN')
#   Response layer, responses: ('red', 'green'); a RecurrentTransferMechanism provides the self-inhibition matrix
response_layer = pnl.RecurrentTransferMechanism(size=2,

PrincetonUniversity/PsyNeuLink · Scripts/Examples/System/Multitasking.py (view on GitHub)
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions

stimulus_layer = pnl.TransferMechanism(size=4)
task_layer = pnl.TransferMechanism(size=4)
hidden_layer = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic)
output_layer = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic)

network_process = pnl.Process(pathway=[stimulus_layer, hidden_layer, output_layer])
hidden_control_process = pnl.Process(pathway=[task_layer, hidden_layer])
output_control_process = pnl.Process(pathway=[task_layer, output_layer])

multitasking_system = pnl.System(processes=[network_process, hidden_control_process, output_control_process])

# WEIGHTS TO COME FROM SEBASTIAN

example_stimulus_inputs = [[1,0,0,1],[1,0,1,0]]
example_task_inputs = [[0,0,0,1],[1,0,0,0]]
example_training_pattern = [[0,0,0,1],[1,0,0,0]]

# RUN THIS TO GET SPACE OF INPUTS ON WHICH TO OPTIMIZE LCAMechanism PARAMS:
inputs_to_LCA = multitasking_system.run(inputs={stimulus_layer:example_stimulus_inputs,
                                                task_layer:example_task_inputs})
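
The comment above says these outputs provide the input space over which LCAMechanism parameters will be optimized. As a sketch of that next step (the leak and competition values are placeholders, not tuned values):

# leaky competing accumulator over the 4 output units; parameter values are placeholders
lca = pnl.LCAMechanism(size=4,
                       leak=0.5,
                       competition=1.0,
                       name='LCA')

# drive it with one illustrative 4-element pattern of the kind collected in inputs_to_LCA
lca_result = lca.execute([0.5, 0.1, 0.1, 0.8])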

PrincetonUniversity/PsyNeuLink · Scripts/Examples/System/Gating-Mechanism. with UDF.py (view on GitHub)
                      params={pnl.ADDITIVE_PARAM:'phase',
                              pnl.MULTIPLICATIVE_PARAM:'amplitude'}):
    frequency = input[0]
    t = input[1]
    return amplitude * np.sin(2 * np.pi * frequency * t + phase)

Input_Layer = pnl.TransferMechanism(
    name='Input_Layer',
    default_variable=np.zeros((2,)),
    function=psyneulink.core.components.functions.transferfunctions.Logistic
)

Output_Layer = pnl.TransferMechanism(
        name='Output_Layer',
        default_variable=[0, 0, 0],
        function=psyneulink.core.components.functions.transferfunctions.Linear,
        # function=pnl.Logistic,
        # output_ports={pnl.NAME: 'RESULTS USING UDF',
        #                pnl.VARIABLE: [(pnl.OWNER_VALUE,0), pnl.TIME_STEP],
        #                pnl.FUNCTION: my_sinusoidal_fct}
        output_ports={pnl.NAME: 'RESULTS USING UDF',
                       # pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
                       pnl.FUNCTION: psyneulink.core.components.functions.transferfunctions.Linear(slope=pnl.GATING)
                       # pnl.FUNCTION: pnl.Logistic(gain=pnl.GATING)
                       # pnl.FUNCTION: my_linear_fct
                       # pnl.FUNCTION: my_exp_fct
                       # pnl.FUNCTION:pnl.UserDefinedFunction(custom_function=my_simple_linear_fct,
                       #                                      params={pnl.ADDITIVE_PARAM:'b',
                       #                                              pnl.MULTIPLICATIVE_PARAM:'m',
                       #                                              },
                                                            # m=pnl.GATING,
                                                            # b=2.0