import cv2
import numpy as np

def _equalize_pil(img, mask=None):
    # Histogram of the single-channel image, optionally restricted to a mask
    histogram = cv2.calcHist([img], [0], mask, [256], (0, 256)).ravel()
    h = [_f for _f in histogram if _f]
    if len(h) <= 1:
        return img.copy()
    # Step used to spread the non-empty bins evenly over 0..255
    step = np.sum(h[:-1]) // 255
    if not step:
        return img.copy()
    lut = np.empty(256, dtype=np.uint8)
    n = step // 2
    for i in range(256):
        lut[i] = min(n // step, 255)
        n += histogram[i]
    return cv2.LUT(img, np.array(lut))
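A minimal usage sketch, assuming an 8-bit grayscale image on disk (the file names are illustrative):

import cv2

gray = cv2.imread('photo.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
equalized = _equalize_pil(gray)  # PIL-style histogram equalization
cv2.imwrite('photo_eq.png', equalized)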
# Build one "meaning vector" per sentence: a weighted average of word
# embeddings, where vocab maps each word to a (count, embedding) pair and
# the weight is a smooth-inverse-frequency style factor.
meaning_vectors = []
for sent in unique_sentences:
    line_as_list = sent.split()
    vectors = []
    for word in line_as_list:
        vector = vocab.get(word)
        if vector is not None:
            # vector[0] is the word count, vector[1] its embedding;
            # frequent words receive smaller weights
            vectors.append(vector[1] * 0.001 /
                           (0.001 + vector[0] / word_count))
    if len(vectors) == 0:
        meaning_vectors.append(np.zeros(embedding_dim))
    else:
        meaning_vectors.append(np.sum(np.array(vectors), axis=0) /
                               len(vectors))

np.save(output_file_path,
        np.array(meaning_vectors).reshape(-1, embedding_dim))
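A toy sketch of the inputs this loop expects, assuming vocab maps each word to a (count, embedding) pair (all names and values here are illustrative):

import numpy as np

embedding_dim = 4
word_count = 6          # total number of word tokens in the corpus
unique_sentences = ['the cat sat']
vocab = {
    'the': (3, np.ones(embedding_dim)),
    'cat': (1, np.full(embedding_dim, 2.0)),
    'sat': (2, np.full(embedding_dim, 0.5)),
}
# Weight applied to an embedding: 0.001 / (0.001 + count / word_count)
print(0.001 / (0.001 + 3 / word_count))  # ~0.002 for the frequent word 'the'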
# Fragment of a thresholding routine: zmap is a z-score map, zth the z
# threshold, and xyz the voxel coordinates (all from the enclosing scope).
pth = sp_stats.norm.sf(zth)
above_th = zmap > zth
if len(np.where(above_th)[0]) == 0:
    return None, None  ## FIXME
zmap_th = zmap[above_th]
xyz_th = xyz[above_th]

# Clustering
## Extract local maxima and connected components above the threshold
ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18), zmap_th)
maxima, depth = ff.get_local_maxima(th=zth)
labels = ff.cc()

## Make a list of clusters, each cluster being a dictionary
clusters = []
for k in range(labels.max() + 1):
    s = np.sum(labels == k)
    if s >= cluster_th:
        in_cluster = labels[maxima] == k
        m = maxima[in_cluster]
        d = depth[in_cluster]
        order = d.argsort()[::-1]  # renamed to avoid shadowing builtin sorted
        clusters.append({'size': s,
                         'maxima': m[order],
                         'depth': d[order]})

## Sort clusters by descending size
clusters.sort(key=lambda c: c['size'], reverse=True)

# FDR-corrected p-values
fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th]
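For reference, sp_stats.norm.sf converts the z threshold into its one-sided p-value (sp_stats is assumed to be scipy.stats):

from scipy import stats as sp_stats

print(sp_stats.norm.sf(3.09))  # ~0.001: the one-sided p-value for z > 3.09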
# Default "nulls"
#winnoi_a = np.array(self.corr.data[i0-nw:i0]) + \
# np.array(self.corr.data[i1:i1+nw])
winnoi_a = np.array(self.corr.data[i0-2*nw:i0-nw])
winsig_c = self.corr.data[i2:i3]
#winnoi_c = list(self.corr.data[i2-nw:i2]) + \
# list(self.corr.data[i3:i3+nw])
winnoi_c = np.array(self.corr.data[i3+nw:i3+2*nw])
# Test: the winsig and winnoi must have the same length
snrc = np.sum(np.power(winsig_c,2))/ \
np.sum(np.power(winnoi_c,2))
snra = np.sum(np.power(winsig_a,2))/ \
np.sum(np.power(winnoi_a,2))
return (snrc,snra,win_ind)
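A self-contained sketch of the same energy-ratio SNR on synthetic data (the signal and noise windows here are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
winsig = np.sin(np.linspace(0, 10 * np.pi, 200)) + 0.1 * rng.standard_normal(200)
winnoi = 0.1 * rng.standard_normal(200)  # same length as the signal window
snr = np.sum(np.power(winsig, 2)) / np.sum(np.power(winnoi, 2))
print(snr)  # ~50: the signal energy dominates the noise energy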
def _calc_message_order(self):
    '''Given the clique list and their adjacency matrix, calculate a
    message-passing order.'''
    unmessaged = np.copy(self.clique_edges)
    message_ordering = list()
    reverse_messages = list()
    while True:
        # Find all cliques that can pass a message immediately
        # (exactly one unmessaged neighbour left)
        next_cliques = np.nonzero(np.sum(unmessaged, axis=1) == 1)[0]
        if len(next_cliques) == 0:
            break
        for src_clique in next_cliques:
            # Index of the one unmessaged neighbour
            target_cliques = np.nonzero(unmessaged[:, src_clique])[0]
            if len(target_cliques) == 0:
                break  # This happens when this is the last clique remaining
            target_clique = target_cliques[0]
            message_ordering.append((src_clique, target_clique))
            reverse_messages.append((target_clique, src_clique))
            unmessaged[src_clique, target_clique] = 0
            unmessaged[target_clique, src_clique] = 0
    assert np.all(unmessaged == 0)
    # Forward pass followed by the reversed backward pass
    return list(message_ordering) + list(reversed(reverse_messages))
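A quick check on a chain of three cliques, reusing the function above as a method of a tiny stub class (the stub is illustrative and assumes _calc_message_order is in scope as a plain function, as in this listing):

import numpy as np

class _ChainStub:
    clique_edges = np.array([[0, 1, 0],
                             [1, 0, 1],
                             [0, 1, 0]])
    _calc_message_order = _calc_message_order  # reuse the function above

order = _ChainStub()._calc_message_order()
# Forward messages (0, 1) and (2, 1) from the leaves inward, then the
# reversed backward messages (1, 2) and (1, 0) back outward
print(order)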
def DurbinWatson(residuals):
    '''Durbin-Watson statistic: the sum of squared successive differences
    of the residuals divided by their sum of squares. Values near 2
    indicate no first-order autocorrelation.'''
    residuals = np.asarray(residuals)
    residual_terms = np.diff(residuals)  # residuals[i+1] - residuals[i]
    numerator = np.sum(residual_terms ** 2)
    denominator = np.sum(residuals ** 2)
    return numerator / denominator
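A quick sanity check: for uncorrelated residuals the statistic should be close to 2, and close to 0 for strongly positively autocorrelated ones:

import numpy as np

rng = np.random.default_rng(0)
white = rng.standard_normal(10_000)
print(DurbinWatson(white))             # ~2.0: no autocorrelation
print(DurbinWatson(np.cumsum(white)))  # ~0.0: strong positive autocorrelation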
def dice_coef_np(y_true, y_pred, smooth=1.0):
    # `smooth` avoids division by zero on empty masks; it was presumably a
    # module-level constant in the original and is exposed here as a keyword
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
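For example, identical binary masks score 1 and partial overlaps score in between (assuming the smooth=1.0 default above):

import numpy as np

a = np.array([[1, 1, 0, 0]], dtype=float)
b = np.array([[1, 0, 1, 0]], dtype=float)
print(dice_coef_np(a, a))  # 1.0: identical masks
print(dice_coef_np(a, b))  # (2*1 + 1) / (2 + 2 + 1) = 0.6: one pixel overlaps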
def contains(self, x):
    # x belongs to the space if it has the right shape and every slice along
    # the last axis sums to 1 (i.e. lies on the probability simplex)
    return x.shape == self.shape and np.allclose(
        np.sum(x, axis=-1), np.ones_like(x[..., 0]))
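A usage sketch with a minimal stub holding the expected shape (the stub class is illustrative and reuses contains as defined above):

import numpy as np

class Simplex:
    def __init__(self, shape):
        self.shape = shape
    contains = contains  # reuse the function above

space = Simplex((2, 3))
print(space.contains(np.array([[0.2, 0.3, 0.5],
                               [1.0, 0.0, 0.0]])))  # True: rows sum to 1
print(space.contains(np.array([[0.2, 0.3, 0.4],
                               [1.0, 0.0, 0.0]])))  # False: first row sums to 0.9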
def step(self, action):
    # action is an index selecting a permutation of the identity matrix
    assert self.action_space.contains(action)
    # look up the corresponding input-to-output port permutation
    port_mapping = self.all_mappings[action]
    # assign one packet from each input queue to its output port
    for (i, p) in enumerate(port_mapping):
        self.queue_occupancy[i, p] = max(self.queue_occupancy[i, p] - 1, 0)
    # remaining total queue length
    reward = -np.sum(self.queue_occupancy)
    # never-ending environment
    done = False
    # sample new traffic
    incoming_traffic = self.sample_from_bistochastic_matrix()
    self.queue_occupancy += incoming_traffic
    # cap the observation
    if np.any(self.queue_occupancy > config.ss_state_max_queue):
        obs_queue = np.minimum(
            self.queue_occupancy, config.ss_state_max_queue)
        logger.warn('Queue occupancy is clipped since it exceeds max queue value ' +
                    str(config.ss_state_max_queue))
    else:
        obs_queue = self.queue_occupancy
    # gym-style return; the snippet was truncated before the return, so the
    # empty info dict here is an assumption
    return obs_queue, reward, done, {}
def _compute_separator(self, K):
    # Support-vector mask: alphas strictly above the cutoff
    sv = (self._alphas.flat > self.sv_cutoff)
    # Sparse diagonal matrix of the labels (spdiag assumed in scope)
    D = spdiag(self._y)
    # Bias term, averaged over the support vectors
    self._b = (np.sum(D * sv) - np.sum(self._alphas.T * D * self._V * K)) / np.sum(sv)
    self._dotprods = (self._alphas.T * D * self._V * K).T
    self._predictions = self._b + self._dotprods