How to use the psyneulink.core.llvm module in psyneulink

To help you get started, we've selected a few psyneulink.core.llvm examples, based on popular ways it is used in public projects.

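The snippets below come from PsyNeuLink's own test suite and compiled-mode internals. All of them assume the package's usual import preamble, shown here as a minimal sketch (pnlvm is the conventional alias for psyneulink.core.llvm):

import numpy as np
import psyneulink as pnl
from psyneulink.core import llvm as pnlvm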

From PrincetonUniversity/PsyNeuLink: tests/functions/test_optimization.py
def test_llvm(obj_func, metric, normalize, direction, selection, benchmark):
    # test_var, results and search_space are defined elsewhere in this test module
    variable = test_var
    result = results[obj_func][metric][normalize][direction][selection]
    benchmark.group = "OptimizationFunction " + str(obj_func) + " " + metric

    of = obj_func(default_variable=variable, metric=metric, normalize=normalize)
    f = OPTFunctions.GridSearch(objective_function=of, default_variable=variable,
                                search_space=search_space, direction=direction,
                                select_randomly_from_optimal_values=(selection == 'RANDOM'),
                                seed=0)
    # Wrap the function in a compiled executor and run it
    e = pnlvm.execution.FuncExecution(f)
    res = e.execute(variable)
    benchmark(e.execute, variable)

    assert np.allclose(res[0], result[0])
    assert np.allclose(res[1], result[1])
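
The pattern to note is in the last few lines: any PsyNeuLink Function can be wrapped in pnlvm.execution.FuncExecution and run in compiled form. A minimal standalone sketch (pnl.Linear is used here purely for illustration):

import psyneulink as pnl
from psyneulink.core import llvm as pnlvm

f = pnl.Linear(slope=2.0)                  # any PsyNeuLink Function
ex = pnlvm.execution.FuncExecution(f)      # compiled (LLVM) wrapper
print(ex.execute([1.0, 2.0, 3.0]))         # runs the compiled version of f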

From PrincetonUniversity/PsyNeuLink: tests/mechanisms/test_ddm_mechanism.py
def test_DDM_noise(mode, benchmark, noise, expected):
    T = DDM(
        name='DDM',
        function=DriftDiffusionIntegrator(
            noise=noise,
            rate=1.0,
            time_step_size=1.0
        )
    )
    if mode == "Python":
        ex = T.execute
    elif mode == "LLVM":
        ex = pnlvm.execution.MechExecution(T).execute
    elif mode == "PTX":
        ex = pnlvm.execution.MechExecution(T).cuda_execute

    val = ex([10])
    assert np.allclose(val[0][0][0], expected)
    benchmark(ex, [10])
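
The Python/LLVM/PTX three-way dispatch above recurs in most of these tests. A hypothetical helper (get_executor is not part of PsyNeuLink) that captures the pattern:

def get_executor(mech, mode):
    # Return a callable that runs the mechanism in the requested mode.
    if mode == "Python":
        return mech.execute                  # interpreted execution
    ex = pnlvm.execution.MechExecution(mech)
    # "LLVM" runs the compiled function on the CPU; "PTX" runs it on a CUDA GPU
    return ex.execute if mode == "LLVM" else ex.cuda_execute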

From PrincetonUniversity/PsyNeuLink: tests/mechanisms/test_recurrent_transfer_mechanism.py
def test_recurrent_mech_no_inputs(self, benchmark, mode):
    R = RecurrentTransferMechanism(
        name='R'
    )
    np.testing.assert_allclose(R.defaults.variable, [[0]])
    if mode == 'Python':
        EX = R.execute
    elif mode == 'LLVM':
        e = pnlvm.execution.MechExecution(R)
        EX = e.execute
    elif mode == 'PTX':
        e = pnlvm.execution.MechExecution(R)
        EX = e.cuda_execute

    val = EX([10])
    benchmark(EX, [1])
    np.testing.assert_allclose(val, [[10.]])

From PrincetonUniversity/PsyNeuLink: tests/mechanisms/test_control_mechanism.py
def test_lc_control_mech_basic(self, benchmark, mode):
    LC = pnl.LCControlMechanism(
        base_level_gain=3.0,
        scaling_factor_gain=0.5,
        default_variable=10.0
    )
    if mode == 'Python':
        EX = LC.execute
    elif mode == 'LLVM':
        e = pnlvm.execution.MechExecution(LC)
        EX = e.execute
    elif mode == 'PTX':
        e = pnlvm.execution.MechExecution(LC)
        EX = e.cuda_execute

    val = EX([10.0])

    # LLVM returns the combination of all output ports, so do the same for
    # Python as well
    if mode == 'Python':
        val = [s.value for s in LC.output_ports]

    benchmark(EX, [10.0])

    # All values are the same because LCControlMechanism assigns all of its
    # ControlSignals the same value (the 1st item of its function's value).
    # FIX: 6/6/19 - Python returns a 3d array but LLVM returns a 2d array
    #               (np.allclose passes for LLVM anyway because all the values are the same)
    assert np.allclose(val, [[[3.00139776]], [[3.00139776]], [[3.00139776]], [[3.00139776]]])

From PrincetonUniversity/PsyNeuLink: psyneulink/core/components/mechanisms/processing/transfermechanism.py
def _gen_llvm_is_finished_cond(self, ctx, builder, params, state):
    current = pnlvm.helpers.get_state_ptr(builder, self, state, "value")
    threshold_ptr = pnlvm.helpers.get_param_ptr(builder, self, params,
                                                "termination_threshold")
    if isinstance(threshold_ptr.type.pointee, pnlvm.ir.LiteralStructType):
        # Threshold is not defined; return the old value of the finished flag
        assert len(threshold_ptr.type.pointee) == 0
        is_finished_ptr = pnlvm.helpers.get_state_ptr(builder, self, state,
                                                      "is_finished_flag")
        is_finished_flag = builder.load(is_finished_ptr)
        # The flag is stored as a float, so compare against 0 to get an i1 boolean
        return builder.fcmp_ordered("!=", is_finished_flag,
                                    is_finished_flag.type(0))

    # If modulated, the termination threshold is a single-element array
    if isinstance(threshold_ptr.type.pointee, pnlvm.ir.ArrayType):
        assert len(threshold_ptr.type.pointee) == 1
        threshold_ptr = builder.gep(threshold_ptr, [ctx.int32_ty(0),
                                                    ctx.int32_ty(0)])

From PrincetonUniversity/PsyNeuLink: psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py
def _get_evaluate_param_struct_type(self, ctx):
    num_estimates = ctx.int32_ty
    intensity_cost = (ctx.get_param_struct_type(op.intensity_cost_function) for op in self.output_ports)
    intensity_cost_struct = pnlvm.ir.LiteralStructType(intensity_cost)
    return pnlvm.ir.LiteralStructType([intensity_cost_struct, num_estimates])
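
The result is a two-field struct: one intensity-cost parameter struct per output port, plus a 32-bit estimate count. An illustrative reconstruction in plain llvmlite, assuming (purely for the example) two ports whose cost functions each take a single double parameter:

import llvmlite.ir as ir

per_port = ir.LiteralStructType([ir.DoubleType()])   # one cost function's params
evaluate_params = ir.LiteralStructType([
    ir.LiteralStructType([per_port, per_port]),      # intensity_cost_struct
    ir.IntType(32),                                  # num_estimates
])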

From PrincetonUniversity/PsyNeuLink: psyneulink/library/compositions/compiledloss.py
def _gen_inject_loss_differential(self, ctx, builder, value, target, output=None, sum_loss=False):
    dim = len(value.type.pointee)
    assert len(target.type.pointee) == dim
    if output is None:
        output = builder.alloca(pnlvm.ir.types.ArrayType(ctx.float_ty, dim))
        # zero the output vector
        builder.store(output.type.pointee(None), output)
    assert len(output.type.pointee) == dim

    if not sum_loss:
        # mean loss: take the elementwise difference ...
        gen_inject_vec_sub(ctx, builder, value, target, output)
        # ... then multiply each element i by 2/n to get dC/da_i
        scalar_mult = builder.fdiv(ctx.float_ty(2), ctx.float_ty(dim))
        with pnlvm.helpers.for_loop_zero_inc(builder, ctx.int32_ty(dim), "mse_mean_mult_loop") as (b1, index):
            element_ptr = b1.gep(output, [ctx.int32_ty(0), index])
            b1.store(b1.fmul(b1.load(element_ptr), scalar_mult), element_ptr)
    else:
        # sum loss: accumulate the raw difference into the output vector
        tmp = gen_inject_vec_sub(ctx, builder, value, target)
        gen_inject_vec_add(ctx, builder, output, tmp, output)
    return output
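
For intuition: the IR above computes the gradient of a mean-squared-error loss, dC/da_i = (2/n)(a_i - t_i), or accumulates the raw difference when losses are summed. A NumPy sketch of the same math (not part of the library):

import numpy as np

def loss_differential(value, target, sum_loss=False):
    diff = np.asarray(value, dtype=float) - np.asarray(target, dtype=float)
    # mean loss: dC/da_i = (2/n) * (a_i - t_i); sum loss: the raw difference
    return diff if sum_loss else (2.0 / diff.size) * diff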

From PrincetonUniversity/PsyNeuLink: psyneulink/library/compositions/pytorchllvmhelper.py
def gen_inject_mat_binop(ctx, builder, op, m1, m2, output_mat=None):
    x = len(m1.type.pointee)
    y = len(m1.type.pointee.element)
    assert len(m2.type.pointee) == x and len(m2.type.pointee.element) == y

    if output_mat is None:
        output_mat = builder.alloca(
            pnlvm.ir.types.ArrayType(
                pnlvm.ir.types.ArrayType(ctx.float_ty, y), x))
    assert len(output_mat.type.pointee) == x
    assert len(output_mat.type.pointee.element) == y

    # Call the compiled builtin; the matrices are passed as flat float pointers
    builtin = ctx.import_llvm_function(op)
    builder.call(builtin, [builder.bitcast(m1, ctx.float_ty.as_pointer()),
                           builder.bitcast(m2, ctx.float_ty.as_pointer()),
                           ctx.int32_ty(x), ctx.int32_ty(y),
                           builder.bitcast(output_mat, ctx.float_ty.as_pointer())])
    return output_mat
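
In NumPy terms the helper is just an elementwise binary operation over two x-by-y matrices; the actual arithmetic happens inside the imported LLVM builtin, which sees both operands as flat float buffers. A rough sketch for intuition only:

import numpy as np

def mat_binop(op, m1, m2, out=None):
    # op is a ufunc such as np.add or np.multiply; m1 and m2 have shape (x, y)
    if out is None:
        out = np.empty_like(m1)
    return op(m1, m2, out=out)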

From PrincetonUniversity/PsyNeuLink: psyneulink/core/components/functions/selectionfunctions.py
def _gen_llvm_function_body(self, ctx, builder, _, state, arg_in, arg_out, *, tags: frozenset):
    idx_ptr = builder.alloca(ctx.int32_ty)
    builder.store(ctx.int32_ty(0), idx_ptr)

    if self.mode in {PROB, PROB_INDICATOR}:
        # Draw a random number to select an element by probability
        rng_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_double")
        dice_ptr = builder.alloca(ctx.float_ty)
        mt_state_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state")
        builder.call(rng_f, [mt_state_ptr, dice_ptr])
        dice = builder.load(dice_ptr)
        sum_ptr = builder.alloca(ctx.float_ty)
        builder.store(ctx.float_ty(-0.0), sum_ptr)
        prob_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)])
        arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)])

    with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, index):
        idx = b1.load(idx_ptr)
        prev_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx])
        current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), index])
        prev = b1.load(prev_ptr)
        current = b1.load(current_ptr)

        prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx])
        cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), index])
        if self.mode not in {PROB, PROB_INDICATOR}:
            fabs = ctx.get_builtin("fabs", [current.type])
        if self.mode == MAX_VAL:
            cmp_op = ">="
            cmp_prev = prev
            cmp_curr = current
            val = current
        elif self.mode == MAX_ABS_VAL:
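
For intuition, the loop above implements, for MAX_VAL mode, the compiled analogue of a one-hot argmax. A NumPy sketch of the behavior, not the library's API:

import numpy as np

def one_hot_max_val(x):
    # MAX_VAL: keep the maximum value at its position, zero everywhere else
    out = np.zeros_like(x)
    out[np.argmax(x)] = np.max(x)
    return out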

From PrincetonUniversity/PsyNeuLink: psyneulink/library/compositions/pytorchmodelcreator.py
                        if proj_idx == 0:
                            gen_inject_vxm_transposed(
                                ctx, builder, efferent_node_error, weights_llvmlite, error_val)
                        else:
                            new_val = gen_inject_vxm_transposed(
                                ctx, builder, efferent_node_error, weights_llvmlite)

                            gen_inject_vec_add(
                                ctx, builder, new_val, error_val, error_val)

                    gen_inject_vec_hadamard(
                        ctx, builder, activation_func_derivative, error_val, error_val)

                pnlvm.helpers.printf_float_array(
                    builder, activation_func_derivative, prefix=f"{node}\tdSigma:\t")
                pnlvm.helpers.printf_float_array(
                    builder, error_val, prefix=f"{node}\terror:\t")

        # 4) compute weight gradients
        for (node, err_val) in error_dict.items():
            if node in input_nodes:
                continue
            for proj in node.afferents:
                # get a_(l-1)
                afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(proj.sender._idx), ctx.int32_ty(0)])

                # get dimensions of weight matrix
                weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params)
                pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, prefix=f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False)
                # update delta_W
                node_delta_w = builder.gep(delta_w, [ctx.int32_ty(0), ctx.int32_ty(proj._idx)])
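
The fragment above is the heart of the compiled backward pass: each node's error is its activation-function derivative times the sum of its efferent errors propagated back through the transposed weight matrices, and each afferent projection's gradient is the outer product of the presynaptic activation with that error. A NumPy sketch of the same recursion (illustrative names, not the module's API):

import numpy as np

def backprop_step(act_deriv, efferents, afferent_activation):
    # efferents: list of (W, downstream_error) pairs; W has shape (n_this, n_next)
    error = act_deriv * sum(W @ err for W, err in efferents)
    # gradient contribution for one afferent projection: outer(a_(l-1), error_l)
    delta_w = np.outer(afferent_activation, error)
    return error, delta_w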