# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): the line above is a web-scrape banner, not program text; it has
# been commented out so it no longer breaks Python syntax.
# NOTE(review): CORRUPTED FRAGMENT. This chunk is the interior of a larger
# function whose `def` line is outside the visible range. The same
# percentile/bootstrap/BCa region is pasted several times (markers below),
# indentation has been stripped (the bodies of `if paired:`, `else:` and the
# `for` loop are not indented), the validation prologue appears *after* later
# code, and one statement is truncated mid-expression. The code cannot run
# as-is; the comments below describe apparent intent only. Recover the
# original upstream file before attempting any behavioral change.
#
# Apparent intent: compare two 1-D samples x and y at a set of quantiles via
# a robust quantile estimator `hd` (presumably Harrell-Davis — TODO confirm),
# bootstrap the per-quantile differences, and build bias-corrected (BCa)
# confidence intervals with the helper `_bca` (defined elsewhere — not
# visible here).

# Tail of the input-validation prologue (its first lines are above this chunk).
assert ny >= 10, 'y must have at least 10 samples.'
assert 0 < ci < 1, 'ci must be between 0 and 1.'
if paired:
# NOTE(review): indentation stripped — this assert belongs inside `if paired:`.
assert nx == ny, 'x and y must have the same size when paired=True.'
# Robust percentile
# Per-quantile estimates for each sample; `percentiles` is presumably already
# scaled to the 0-1 range (see the `/ 100` conversion further down — TODO confirm).
x_per = hd(x, percentiles)
y_per = hd(y, percentiles)
# Quantile-by-quantile difference between the two samples.
delta = y_per - x_per
# Compute bootstrap distribution of differences
# Seeded RNG for reproducible resampling.
rng = np.random.RandomState(seed)
if paired:
# Paired case: resample a single set of row indices so x/y pairs stay aligned.
bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
bootstat = (hd(y[bootsam], percentiles, axis=0) -
hd(x[bootsam], percentiles, axis=0))
else:
# Independent case: resample each sample separately with replacement.
x_list = rng.choice(x, size=(nx, n_boot), replace=True)
y_list = rng.choice(y, size=(ny, n_boot), replace=True)
bootstat = (hd(y_list, percentiles, axis=0) -
hd(x_list, percentiles, axis=0))
# Find upper and lower confidence interval for each quantiles
# Bias-corrected confidence interval
lower, median_per, upper = [], [], []
for i, d in enumerate(delta):
# NOTE(review): `ci` is reassigned here to the interval returned by `_bca`,
# shadowing the confidence level validated above — verify this is intentional
# in the original source.
ci = _bca(bootstat[i, :], d, n_boot)
# `alpha=1` presumably collapses the interval to a point estimate (the
# bias-corrected median of the bootstrap differences) — TODO confirm
# against `_bca`'s definition.
median_per.append(_bca(bootstat[i, :], d, n_boot, alpha=1)[0])
lower.append(ci[0])
upper.append(ci[1])
lower = np.asarray(lower)
# NOTE(review): DUPLICATE REGION #1 — the following lines repeat the
# percentile/bootstrap/BCa computation above verbatim (paste corruption).
# Robust percentile
x_per = hd(x, percentiles)
y_per = hd(y, percentiles)
delta = y_per - x_per
# Compute bootstrap distribution of differences
rng = np.random.RandomState(seed)
if paired:
bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
bootstat = (hd(y[bootsam], percentiles, axis=0) -
hd(x[bootsam], percentiles, axis=0))
else:
x_list = rng.choice(x, size=(nx, n_boot), replace=True)
y_list = rng.choice(y, size=(ny, n_boot), replace=True)
bootstat = (hd(y_list, percentiles, axis=0) -
hd(x_list, percentiles, axis=0))
# Find upper and lower confidence interval for each quantiles
# Bias-corrected confidence interval
lower, median_per, upper = [], [], []
for i, d in enumerate(delta):
ci = _bca(bootstat[i, :], d, n_boot)
median_per.append(_bca(bootstat[i, :], d, n_boot, alpha=1)[0])
lower.append(ci[0])
upper.append(ci[1])
lower = np.asarray(lower)
median_per = np.asarray(median_per)
upper = np.asarray(upper)
# Create long-format dataFrame for use with Seaborn
# NOTE(review): DUPLICATE REGION #2 — the computation repeats again below.
# Robust percentile
x_per = hd(x, percentiles)
y_per = hd(y, percentiles)
delta = y_per - x_per
# Compute bootstrap distribution of differences
rng = np.random.RandomState(seed)
if paired:
bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
bootstat = (hd(y[bootsam], percentiles, axis=0) -
hd(x[bootsam], percentiles, axis=0))
else:
x_list = rng.choice(x, size=(nx, n_boot), replace=True)
y_list = rng.choice(y, size=(ny, n_boot), replace=True)
bootstat = (hd(y_list, percentiles, axis=0) -
hd(x_list, percentiles, axis=0))
# Find upper and lower confidence interval for each quantiles
# Bias-corrected confidence interval
lower, median_per, upper = [], [], []
for i, d in enumerate(delta):
ci = _bca(bootstat[i, :], d, n_boot)
median_per.append(_bca(bootstat[i, :], d, n_boot, alpha=1)[0])
lower.append(ci[0])
upper.append(ci[1])
lower = np.asarray(lower)
median_per = np.asarray(median_per)
upper = np.asarray(upper)
# Create long-format dataFrame for use with Seaborn
# NOTE(review): TRUNCATED STATEMENT — this dict literal is cut off
# mid-expression; the rest of the DataFrame construction is missing from
# this chunk.
data = pd.DataFrame({'value': np.concatenate([x, y]),
# NOTE(review): OUT-OF-ORDER PASTE — the lines below are the function's
# validation prologue (normally the first statements of the body), pasted
# here after later code.
x = np.asarray(x)
y = np.asarray(y)
percentiles = np.asarray(percentiles) / 100  # Convert to 0 - 1 range
assert x.ndim == 1, 'x must be 1D.'
assert y.ndim == 1, 'y must be 1D.'
nx, ny = x.size, y.size
assert not np.isnan(x).any(), 'Missing values are not allowed.'
assert not np.isnan(y).any(), 'Missing values are not allowed.'
assert nx >= 10, 'x must have at least 10 samples.'
assert ny >= 10, 'y must have at least 10 samples.'
assert 0 < ci < 1, 'ci must be between 0 and 1.'
if paired:
assert nx == ny, 'x and y must have the same size when paired=True.'
# NOTE(review): DUPLICATE REGION #3 — computation repeats yet again.
# Robust percentile
x_per = hd(x, percentiles)
y_per = hd(y, percentiles)
delta = y_per - x_per
# Compute bootstrap distribution of differences
rng = np.random.RandomState(seed)
if paired:
bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
bootstat = (hd(y[bootsam], percentiles, axis=0) -
hd(x[bootsam], percentiles, axis=0))
else:
x_list = rng.choice(x, size=(nx, n_boot), replace=True)
y_list = rng.choice(y, size=(ny, n_boot), replace=True)
bootstat = (hd(y_list, percentiles, axis=0) -
hd(x_list, percentiles, axis=0))
# Find upper and lower confidence interval for each quantiles
# NOTE(review): another out-of-order paste of the prologue follows.
y = np.asarray(y)
percentiles = np.asarray(percentiles) / 100  # Convert to 0 - 1 range
assert x.ndim == 1, 'x must be 1D.'
assert y.ndim == 1, 'y must be 1D.'
nx, ny = x.size, y.size
assert not np.isnan(x).any(), 'Missing values are not allowed.'
assert not np.isnan(y).any(), 'Missing values are not allowed.'
assert nx >= 10, 'x must have at least 10 samples.'
assert ny >= 10, 'y must have at least 10 samples.'
assert 0 < ci < 1, 'ci must be between 0 and 1.'
if paired:
assert nx == ny, 'x and y must have the same size when paired=True.'
# NOTE(review): DUPLICATE REGION #4 — final repeat; the chunk ends
# mid-region (the fragment continues past the visible range).
# Robust percentile
x_per = hd(x, percentiles)
y_per = hd(y, percentiles)
delta = y_per - x_per
# Compute bootstrap distribution of differences
rng = np.random.RandomState(seed)
if paired:
bootsam = rng.choice(np.arange(nx), size=(nx, n_boot), replace=True)
bootstat = (hd(y[bootsam], percentiles, axis=0) -
hd(x[bootsam], percentiles, axis=0))
else:
x_list = rng.choice(x, size=(nx, n_boot), replace=True)
y_list = rng.choice(y, size=(ny, n_boot), replace=True)
bootstat = (hd(y_list, percentiles, axis=0) -
hd(x_list, percentiles, axis=0))
# Find upper and lower confidence interval for each quantiles
# Bias-corrected confidence interval