# Check threshold
if threshold is None:
    raise TypeError("No threshold specified. Please specify a [ms] threshold.")
if threshold <= 0:
    raise ValueError("Invalid value for 'threshold'. Value must not be <= 0.")

# Count NNI differences greater than the threshold (NNXX)
nnd = tools.nni_diff(nn)
nnxx = sum(i > threshold for i in nnd)
pnnxx = nnxx / len(nnd) * 100

# Output
args = (nnxx, pnnxx)
names = ('nn%i' % threshold, 'pnn%i' % threshold)
return utils.ReturnTuple(args, names)
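# --- Usage sketch (hedged): a standalone NumPy re-implementation of the NNXX /
# --- pNNXX computation above, not the library call itself; 'nn' is a toy
# --- NN-interval series in milliseconds.
import numpy as np

nn = np.array([800., 810., 785., 900., 905., 870.])    # toy NN intervals [ms]
threshold = 50                                          # [ms], e.g. NN50/pNN50

nnd = np.abs(np.diff(nn))                               # successive NNI differences
nnxx = int(np.sum(nnd > threshold))                     # differences greater than the threshold
pnnxx = nnxx / len(nnd) * 100                           # percentage of such differences

print('nn%i = %i, pnn%i = %.2f%%' % (threshold, nnxx, threshold, pnnxx))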
if kmax is None:
    kmax = int(round(np.sqrt(N)))

# initialization grid: one random k per ensemble member, drawn inclusively from [kmin, kmax]
grid = {
    'k': np.random.randint(low=kmin, high=kmax + 1, size=nensemble)
}

# run consensus
clusters, = consensus(data=data,
                      k=k,
                      linkage=linkage,
                      fcn=kmeans,
                      grid=grid)

return utils.ReturnTuple((clusters,), ('clusters',))
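# --- Usage sketch (hedged): illustrates the ensemble idea only, using
# --- scikit-learn's KMeans instead of the library's internal kmeans/consensus
# --- helpers; the data, nensemble and kmin values are assumptions.
import numpy as np
from sklearn.cluster import KMeans

data = np.random.rand(200, 2)               # toy feature matrix (N samples x 2 features)
N = len(data)
nensemble = 10
kmin = 2                                    # assumed lower bound for k
kmax = int(round(np.sqrt(N)))               # upper bound as in the snippet above

# one random k per ensemble member, drawn inclusively from [kmin, kmax]
ks = np.random.randint(low=kmin, high=kmax + 1, size=nensemble)

# one k-means partition per ensemble member; a consensus step (e.g. evidence
# accumulation followed by hierarchical linkage) would then merge these partitions
partitions = [KMeans(n_clusters=int(k), n_init=10).fit_predict(data) for k in ks]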
    # Output
    args = (fig, tri_index,)
    names = ('tri_histogram', 'tri_index',)
# If histogram should not be plotted
else:
    D, bins = _get_histogram(nn, figsize=figsize, binsize=binsize, legend=legend, plot=plot)

    # Compute Triangular index: number of NN intervals / maximum value of the distribution
    tri_index = nn.size / D.max()

    # Output
    args = (tri_index, )
    names = ('tri_index', )

return utils.ReturnTuple(args, names)
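# --- Usage sketch (hedged): NumPy-only version of the triangular index, using
# --- the conventional 7.8125 ms histogram bin width; 'nn' is a toy series.
import numpy as np

nn = np.random.normal(850, 60, 1000)                      # toy NN intervals [ms]
binsize = 7.8125                                          # 1/128 s, the customary HRV bin width
bins = np.arange(nn.min(), nn.max() + binsize, binsize)
D, edges = np.histogram(nn, bins)

tri_index = nn.size / D.max()                             # total NNIs / height of the histogram mode
print('Triangular index: %.3f' % tri_index)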
l3 = mpl.patches.Patch(facecolor='g', alpha=0.0, label='D(X): %i' % D.max())
l4 = mpl.patches.Patch(facecolor='g', alpha=0.0, label='X: %.3f$ms$' % bins[np.argmax(D)])
l5 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='N: %.3f$ms$' % tinn_vals['tinn_n'])
l6 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='M: %.3f$ms$' % tinn_vals['tinn_m'])
l7 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='TINN: %.3f$ms$' % tinn_vals['tinn'])
l8 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='Tri. Index: %.3f' % trindex)
ax.legend(handles=[l1, l2, l3, l4, l5, l6, l7, l8], loc=0, ncol=1)
# Show plot
if show:
    plt.show()
# Output
args = (fig, tinn_vals['tinn_n'], tinn_vals['tinn_m'], tinn_vals['tinn'], trindex)
names = ('nni_histogram', 'tinn_n', 'tinn_m', 'tinn', 'tri_index')
return utils.ReturnTuple(args, names)
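# --- Usage sketch (hedged): a brute-force approximation of the TINN idea, not
# --- the library routine; it fits a triangle to the NNI histogram and reports
# --- N, M and TINN = M - N in milliseconds.
import numpy as np

nn = np.random.normal(850, 60, 1000)                     # toy NN intervals [ms]
binsize = 7.8125
edges = np.arange(nn.min(), nn.max() + binsize, binsize)
D, edges = np.histogram(nn, edges)
centers = edges[:-1] + binsize / 2.
X = centers[np.argmax(D)]                                # position of the histogram mode

best_err, tinn_n, tinn_m = np.inf, centers[0], centers[-1]
for n in centers[centers < X]:                           # candidate left foot N
    for m in centers[centers > X]:                       # candidate right foot M
        # triangular interpolation: 0 outside [n, m], peak D.max() at X
        q = np.interp(centers, [n, X, m], [0, D.max(), 0], left=0, right=0)
        err = np.sum((D - q) ** 2)
        if err < best_err:
            best_err, tinn_n, tinn_m = err, n, m

print('N = %.3f ms, M = %.3f ms, TINN = %.3f ms' % (tinn_n, tinn_m, tinn_m - tinn_n))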
# mean absolute deviation (around the median)
ad = np.mean(np.abs(signal - median))

# kurtosis
kurt = stats.kurtosis(signal, bias=False)

# skewness
skew = stats.skew(signal, bias=False)

# output
args = (mean, median, minVal, maxVal, maxAmp, sigma2, sigma, ad, kurt, skew)
names = ('mean', 'median', 'min', 'max', 'max_amp', 'var', 'std_dev',
         'abs_dev', 'kurtosis', 'skewness')
return utils.ReturnTuple(args, names)
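# --- Usage sketch (hedged): standalone NumPy/SciPy version of the same
# --- descriptive statistics; the max_amp definition (maximum deviation from
# --- the mean) is an assumption.
import numpy as np
from scipy import stats

signal = np.random.randn(500)

mean = np.mean(signal)
median = np.median(signal)
minVal, maxVal = np.min(signal), np.max(signal)
maxAmp = np.max(np.abs(signal - mean))           # assumed: maximum amplitude around the mean
sigma2 = signal.var(ddof=1)                      # sample variance
sigma = signal.std(ddof=1)                       # sample standard deviation
ad = np.mean(np.abs(signal - median))            # mean absolute deviation around the median
kurt = stats.kurtosis(signal, bias=False)
skew = stats.skew(signal, bias=False)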
# Compute PSD according to the Lomb-Scargle method
# Specify frequency grid
frequencies = np.linspace(0, 0.41, nfft)

# Compute angular frequencies (omega = 2 * pi * f)
a_frequencies = np.asarray(2 * np.pi * frequencies)
powers = np.asarray(lombscargle(t, nn, a_frequencies, normalize=True))

# Fix power = inf at f=0
powers[0] = 2

# Apply moving average filter
if ma_size is not None:
    powers = biosppy.signals.tools.smoother(powers, size=ma_size)['signal']

# Define metadata
meta = utils.ReturnTuple((nfft, ma_size, ), ('lomb_nfft', 'lomb_ma'))

if mode not in ['normal', 'dev', 'devplot']:
    warnings.warn("Unknown mode '%s'. Will proceed with 'normal' mode." % mode, stacklevel=2)
    mode = 'normal'

# Normal Mode:
# Returns frequency parameters, PSD plot figure and no frequency & power series/arrays
if mode == 'normal':
    # s^2 to ms^2
    powers = powers * 10 ** 6

    # Compute frequency parameters
    params, freq_i = _compute_parameters('lomb', frequencies, powers, fbands)

    # Plot parameters
    figure = _plot_psd('lomb', frequencies, powers, freq_i, params, show, show_param, legend)
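# --- Usage sketch (hedged): standalone SciPy Lomb-Scargle PSD of an unevenly
# --- sampled NNI series, plus a trapezoidal LF-band power; the grid limits and
# --- band edges follow common HRV conventions, not the library internals.
import numpy as np
from scipy.signal import lombscargle

nn = np.random.normal(850, 60, 300)                 # toy NN intervals [ms]
t = np.cumsum(nn) / 1000.                           # sample instants [s] (uneven grid)

nfft = 2 ** 8
frequencies = np.linspace(0.001, 0.41, nfft)        # [Hz]; start above 0 to avoid the f = 0 singularity
a_frequencies = 2 * np.pi * frequencies             # lombscargle expects angular frequencies
powers = lombscargle(t, nn - nn.mean(), a_frequencies, normalize=True)

# absolute power in the LF band (0.04-0.15 Hz) via trapezoidal integration
lf = (frequencies >= 0.04) & (frequencies < 0.15)
lf_power = np.trapz(powers[lf], frequencies[lf])
print('LF power: %.6f (normalized units)' % lf_power)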
"""
# Check input data and load JSON file content
if hrv_file is None:
    raise TypeError("No input data provided. Please specify input data.")
elif isinstance(hrv_file, str):
    with open(hrv_file, 'r') as f:
        data = json.load(f)
else:
    # file-like object (e.g. an already opened file handle)
    data = json.load(hrv_file)

results = dict()
for key in data.keys():
    # JSON strings are already 'str' under Python 3; no unicode conversion needed
    results[str(key)] = data[key]

# Create utils.ReturnTuple object from imported data
return utils.ReturnTuple(list(results.values()), list(results.keys()))
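# --- Usage sketch (hedged): reading previously exported HRV parameters back
# --- from JSON; 'hrv_results.json' and the parameter keys shown are hypothetical.
import json

with open('hrv_results.json', 'r') as f:
    data = json.load(f)

results = {str(key): value for key, value in data.items()}
print(results.get('sdnn'), results.get('rmssd'))    # hypothetical time-domain keys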
# most frequent class
predMax = counts.argmax()

if random:
    # check for repeats
    ind = np.nonzero(counts == counts[predMax])[0]
    length = len(ind)
    if length > 1:
        predMax = ind[np.random.randint(0, length)]

decision = unq[predMax]
cnt = counts[predMax]

out = utils.ReturnTuple((decision, cnt), ('decision', 'count'))

return out
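# --- Usage sketch (hedged): standalone NumPy majority vote with random
# --- tie-breaking, mirroring the counting logic above.
import numpy as np

labels = np.array(['A', 'B', 'B', 'A', 'C'])        # toy ensemble predictions
unq, counts = np.unique(labels, return_counts=True)

predMax = counts.argmax()
ties = np.nonzero(counts == counts[predMax])[0]     # classes tied with the current winner
if len(ties) > 1:
    predMax = np.random.choice(ties)                # break ties at random

print('decision: %s (count: %d)' % (unq[predMax], counts[predMax]))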
label = r'$ \alpha_{2}: %0.2f$' % alpha2
ax.plot(vals, flucts, 'go', markersize=1)
ax.plot(vals, poly, 'g', label=label, alpha=0.7)

# Add legend
if legend:
    ax.legend()
ax.grid()

# Show plot
if show:
    plt.show()

# Output
args = (fig, alpha1, alpha2,)
return utils.ReturnTuple(args, ('dfa_plot', 'dfa_alpha1', 'dfa_alpha2', ))
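# --- Usage sketch (hedged): a compact NumPy detrended fluctuation analysis to
# --- estimate the short-term exponent alpha1 over box sizes 4-16; this is an
# --- illustrative re-implementation, not the library routine.
import numpy as np

nn = np.random.normal(850, 60, 1000)                # toy NN intervals [ms]
y = np.cumsum(nn - np.mean(nn))                     # integrated, mean-centered profile

def fluctuation(profile, n):
    """Root-mean-square fluctuation after linearly detrending boxes of length n."""
    n_boxes = len(profile) // n
    x = np.arange(n)
    f2 = []
    for i in range(n_boxes):
        seg = profile[i * n:(i + 1) * n]
        trend = np.polyval(np.polyfit(x, seg, 1), x)
        f2.append(np.mean((seg - trend) ** 2))
    return np.sqrt(np.mean(f2))

vals = np.arange(4, 17)                             # short-term box sizes
flucts = np.array([fluctuation(y, n) for n in vals])
alpha1 = np.polyfit(np.log10(vals), np.log10(flucts), 1)[0]
print('alpha1: %.2f' % alpha1)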
aux = float(np.sum(res == unq[i]))
w = weights.get(n, 1.)
counts[i] += ((aux / ns) * w)
# most frequent class
predMax = counts.argmax()
counts /= counts.sum()
decision = unq[predMax]
confidence = counts[predMax]
# output
args = (decision, confidence, counts, unq)
names = ('decision', 'confidence', 'counts', 'classes')
return utils.ReturnTuple(args, names)
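# --- Usage sketch (hedged): standalone weighted vote over per-source label
# --- arrays, mirroring the counting and normalization logic above; the
# --- classifier names and weights are toy values.
import numpy as np

results = {'clf_a': np.array([0, 0, 1]),            # toy per-classifier label outputs
           'clf_b': np.array([1, 1, 1])}
weights = {'clf_a': 2.0, 'clf_b': 1.0}              # per-classifier weights (default 1.0)

unq = np.unique(np.concatenate(list(results.values())))
counts = np.zeros(len(unq), dtype=float)

for name, res in results.items():
    ns = float(len(res))
    w = weights.get(name, 1.)
    for i, cls in enumerate(unq):
        counts[i] += (np.sum(res == cls) / ns) * w  # weighted fraction of votes for this class

counts /= counts.sum()                              # normalize to a confidence distribution
predMax = counts.argmax()
print('decision: %s, confidence: %.2f' % (unq[predMax], counts[predMax]))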