if n_jobs == 1:
    # Use a separate branch because joblib does not play well with the
    # Python debugger, so run with --n-jobs=1 (n_jobs=1) when debugging.
    progress('\tIterating over {} events ...\n'.format(n_events))
    summaries = []
    for event_id, event_df in grouped:
        summary = _single_event_psi(
            event_id, event_df, reads2d,
            isoform1_junctions, isoform2_junctions,
            min_reads=min_reads,
            uneven_coverage_multiplier=uneven_coverage_multiplier,
            method=method)
        summaries.append(summary)
else:
    processors = n_jobs if n_jobs > 0 else joblib.cpu_count()
    progress("\tParallelizing {} events' Psi calculation across {} "
             "CPUs ...\n".format(n_events, processors))
    summaries = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(_single_event_psi)(
            event_id, event_df, reads2d,
            isoform1_junctions, isoform2_junctions,
            min_reads=min_reads,
            uneven_coverage_multiplier=uneven_coverage_multiplier,
            method=method)
        for event_id, event_df in grouped)
return summaries
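# A minimal, self-contained sketch of the joblib pattern used above: the same
# worker is either looped over serially (n_jobs=1, which is easier to step
# through in a debugger) or fanned out with joblib.Parallel and joblib.delayed.
# The `square` function and `items` data are toy placeholders, not part of the
# original code.
import joblib

def square(x):
    return x * x

items = range(8)
n_jobs = 2

if n_jobs == 1:
    results = [square(x) for x in items]
else:
    results = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(square)(x) for x in items)
# results == [0, 1, 4, 9, 16, 25, 36, 49]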
def electre1(nmtx, ncriteria, nweights, p, q, njobs=None):
    # determine the njobs
    njobs = njobs or joblib.cpu_count()

    # get the concordance and discordance info in a
    # multiprocessing environment
    with joblib.Parallel(n_jobs=njobs) as jobs:
        mtx_concordance = concordance(nmtx, ncriteria, nweights, jobs)
        mtx_discordance = discordance(nmtx, ncriteria, jobs)

    with np.errstate(invalid='ignore'):
        outrank = (
            (mtx_concordance >= p) & (mtx_discordance <= q))

    # the kernel contains every alternative not outranked by any other
    kernel_mask = ~outrank.any(axis=0)
    kernel = np.where(kernel_mask)[0]

    return kernel, outrank, mtx_concordance, mtx_discordance
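# A small NumPy-only illustration (with made-up values, not scikit-criteria
# data) of how the kernel falls out of the outranking matrix above:
# outrank[i, j] means "alternative i outranks alternative j", so the kernel is
# every column that is never outranked by any row.
import numpy as np

outrank = np.array([
    [False, True,  False],   # alternative 0 outranks alternative 1
    [False, False, False],
    [False, True,  False],   # alternative 2 also outranks alternative 1
])

kernel_mask = ~outrank.any(axis=0)   # columns with no incoming outranking edge
kernel = np.where(kernel_mask)[0]    # -> array([0, 2])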
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
    raise ValueError('Coding method %r not supported as a fit algorithm.'
                     % method)
method = 'lasso_' + method

t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)

if n_jobs == -1:
    n_jobs = cpu_count()

if sample_weights is None:
    sample_weights = np.ones(X.shape[0])

# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
    code = np.array(code_init, order='F')
    # Don't copy V, it will happen below
    dictionary = dict_init
else:
    code, S, dictionary = linalg.svd(X, full_matrices=False)
    dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
# slicing with n_components=None keeps everything; comparing None with "<="
# would raise a TypeError on Python 3, so handle it explicitly
if n_components is None or n_components <= r:
    code = code[:, :n_components]
    dictionary = dictionary[:n_components, :]
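# A toy illustration (random data, arbitrary shapes) of the SVD-based
# initialisation above: the left singular vectors become the initial code,
# the singular values scale the right singular vectors into the initial
# dictionary, and both are truncated to n_components atoms.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(6, 4)                        # (n_samples, n_features)
n_components = 3

code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary

code = code[:, :n_components]              # (n_samples, n_components)
dictionary = dictionary[:n_components, :]  # (n_components, n_features)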
def simus(
    nmtx, ncriteria, nweights,
    rank_by=1, b=None, solver="pulp", njobs=None
):
    # determine the njobs
    njobs = njobs or joblib.cpu_count()

    t_nmtx = nmtx.T

    # check the b array and complete the missing values
    b = np.asarray(b)
    if None in b:
        mins = np.min(t_nmtx, axis=1)
        maxs = np.max(t_nmtx, axis=1)

        auto_b = np.where(ncriteria == MAX, maxs, mins)
        b = np.where(b.astype(bool), b, auto_b)

    # multiprocessing environment
    with joblib.Parallel(n_jobs=njobs) as jobs:
        # create and execute the stages
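# A standalone NumPy sketch (toy matrix; MAX/MIN are stand-ins for the
# scikit-criteria constants) of the "complete the missing values of b" step
# above: wherever b is None, take the column maximum for criteria being
# maximised and the column minimum otherwise.
import numpy as np

MAX, MIN = 1, -1
nmtx = np.array([[250, 120], [130, 200], [350, 340]])
ncriteria = np.array([MAX, MIN])
b = np.array([None, 300])

t_nmtx = nmtx.T
mins = np.min(t_nmtx, axis=1)
maxs = np.max(t_nmtx, axis=1)
auto_b = np.where(ncriteria == MAX, maxs, mins)
b = np.where(b.astype(bool), b, auto_b)   # -> array([350, 300], dtype=object)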
self.subsample = np.clip(subsample, 0.0, 1.0)
self.random_state = random_state
self.z_type = z_type
self.leaves = []
self.feature_importances_ = None
self.n_features_ = 0
# np.int and np.float are deprecated aliases (removed in NumPy 1.24+);
# use the builtin dtypes instead
self.tree_ind = np.zeros((1, 6), dtype=int)
self.tree_val = np.zeros((1, 2), dtype=float)
self.mask = None
self.xdim = None
self.cnvs = None
self.cnvsn = None
self.n_jobs = n_jobs
if self.n_jobs < 0:
    self.n_jobs = cpu_count()
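# A hedged sketch of the n_jobs normalisation used above: any negative value
# falls back to the machine's CPU count via joblib.cpu_count(). (joblib also
# provides effective_n_jobs(), which maps -1 to all cores and -2 to all but
# one, if that convention is preferred.) resolve_n_jobs is a hypothetical
# helper, not part of the original class.
import joblib

def resolve_n_jobs(n_jobs):
    if n_jobs is None or n_jobs < 0:
        return joblib.cpu_count()
    return n_jobs

print(resolve_n_jobs(-1))   # number of available CPUs
print(resolve_n_jobs(4))    # 4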