How to use the pyglm.utils.basis.project_onto_basis function in pyglm

To help you get started, we’ve selected a few pyglm examples based on popular ways project_onto_basis is used in public projects.
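
Every snippet below calls project_onto_basis(filter, basis), where filter is a 1D signal sampled on the same axis as the rows of basis; the callers immediately ravel the result into a flat vector of basis weights. As a rough mental model only (this is not the pyglm implementation), the projection behaves like a least-squares fit of the signal onto the basis columns, as in the toy NumPy sketch below.

import numpy as np

# Toy illustration (not pyglm): approximate a signal as a combination of
# basis columns via least squares -- one weight per basis function.
T, B = 100, 5
t = np.linspace(0, 1, T)
basis = np.column_stack([np.cos(np.pi * b * t) for b in range(B)])   # (T, B)
signal = 0.8 * basis[:, 1] - 0.3 * basis[:, 3] + 0.05 * np.random.randn(T)

w, _, _, _ = np.linalg.lstsq(basis, signal, rcond=None)
print(w.shape)   # (B,): the same role as project_onto_basis(...)'s output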

github slinderman / theano_pyglm / pyglm / inference / smart_init.py View on Github
    # Compute the initial weights for each neuron
    for i,n in enumerate(Ns):
        sn = np.squeeze(s[i,:,:])
        if sn.ndim == 1:
            sn = np.reshape(sn, [sn.size, 1])

        if spatiotemporal:
            # Factorize the STA into a spatiotemporal filter using SVD
            # CAUTION! Numpy svd returns V transpose whereas Matlab svd returns V!
            U,Sig,V = np.linalg.svd(sn)
            f_t = U[:,0] * np.sqrt(Sig[0])
            f_x = V[0,:] * np.sqrt(Sig[0])

            # Project this onto the spatial and temporal bases
            w_t = project_onto_basis(f_t, population.glm.bkgd_model.ibasis_t.get_value())
            w_x = project_onto_basis(f_x, population.glm.bkgd_model.ibasis_x.get_value())

            # Flatten into 1D vectors
            w_t = np.ravel(w_t)
            w_x = np.ravel(w_x)

            x0['glms'][n]['bkgd']['w_x'] = w_x
            x0['glms'][n]['bkgd']['w_t'] = w_t
        elif temporal:
            # Only using a temporal filter
            D_stim = sn.shape[1]
            B = population.glm.bkgd_model.ibasis.get_value().shape[1]
            
            # Project this onto the spatial and temporal bases
            w_t = np.zeros((B*D_stim,1))
            for d in np.arange(D_stim):
                w_t[d*B:(d+1)*B] = project_onto_basis(sn[:,d],
                                                      population.glm.bkgd_model.ibasis.get_value())
            # Flatten into a 1D vector
            w_t = np.ravel(w_t)
            x0['glms'][n]['bkgd']['w_stim'] = w_t
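
In the spatiotemporal branch above, the STA sn (time x space) is first reduced to a rank-1 filter: the leading left singular vector becomes the temporal factor and the leading right singular vector the spatial factor, and each factor is then projected onto its own interpolated basis (ibasis_t, ibasis_x). The self-contained sketch below shows just that factorization with invented shapes; it does not require pyglm.

import numpy as np

# Rank-1 factorization of an STA-like matrix (time x space).
# NOTE: np.linalg.svd returns V transposed (Vh), so the first spatial
# component is the first *row* of Vh -- the same convention used above.
sta = np.random.randn(50, 20)
U, Sig, Vh = np.linalg.svd(sta, full_matrices=False)
f_t = U[:, 0] * np.sqrt(Sig[0])       # temporal factor, length 50
f_x = Vh[0, :] * np.sqrt(Sig[0])      # spatial factor, length 20
rank1 = np.outer(f_t, f_x)            # best rank-1 approximation of sta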
github slinderman / theano_pyglm / pyglm / models / model_factory.py View on Github
    print('Filter cluster labels from kmeans: ', Y)

    # Initialize type based on stimulus filter
    to_vars['latent']['sharedtuningcurve_provider']['Y'] = Y

    # Initialize shared tuning curves (project onto the bases)
    from pyglm.utils.basis import project_onto_basis
    for r in range(R):
        mean_filter_xr = flattened_filters_x[Y==r].mean(axis=0)
        mean_filter_tr = flattened_filters_t[Y==r].mean(axis=0)

        # TODO: Make sure the filters are being normalized properly!

        # Project the mean filters onto the basis
        to_vars['latent']['sharedtuningcurve_provider']['w_x'][:,r] = \
            project_onto_basis(mean_filter_xr,
                               to_popn.glm.bkgd_model.spatial_basis).ravel()

        # Temporal part of the filter
        temporal_basis = to_popn.glm.bkgd_model.temporal_basis
        t_temporal_basis = np.arange(temporal_basis.shape[0])
        t_mean_filter_tr = np.linspace(0, temporal_basis.shape[0]-1, mean_filter_tr.shape[0])
        interp_mean_filter_tr = np.interp(t_temporal_basis, t_mean_filter_tr, mean_filter_tr)
        to_vars['latent']['sharedtuningcurve_provider']['w_t'][:,r] = \
            project_onto_basis(interp_mean_filter_tr, temporal_basis).ravel()

    # Initialize locations based on stimulus filters
    to_vars['latent']['location_provider']['L'] = locs.ravel().astype(int)
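
Because the mean temporal filter generally has a different number of samples than the temporal basis has rows, the snippet resamples it onto the basis timebase with np.interp before calling project_onto_basis. The fragment below isolates that resampling step with invented shapes (temporal_basis stands in for bkgd_model.temporal_basis).

import numpy as np

# Resample a coarse temporal filter onto the basis timebase before projection.
temporal_basis = np.random.randn(60, 4)                    # (T_basis, B), stand-in
mean_filter_tr = np.random.randn(25)                       # coarser mean filter

t_basis = np.arange(temporal_basis.shape[0])
t_filter = np.linspace(0, temporal_basis.shape[0] - 1, mean_filter_tr.size)
interp_filter = np.interp(t_basis, t_filter, mean_filter_tr)    # length T_basis

# interp_filter now lines up with the basis rows and can be passed to
# project_onto_basis(interp_filter, temporal_basis) as in the snippet above.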