>>> meta = MKDAChi2()
>>> result = meta.fit(dset)
>>> corrector = FDRCorrector(method='bh', alpha=0.05)
>>> cresult = corrector.transform(result)
"""
pAgF_p_vals = result.get_map('consistency_p', return_type='array')
pFgA_p_vals = result.get_map('specificity_p', return_type='array')
pAgF_z_vals = result.get_map('consistency_z', return_type='array')
pFgA_z_vals = result.get_map('specificity_z', return_type='array')
pAgF_sign = np.sign(pAgF_z_vals)
pFgA_sign = np.sign(pFgA_z_vals)
_, pAgF_p_FDR, _, _ = multipletests(pAgF_p_vals, alpha=alpha,
method='fdr_bh',
is_sorted=False,
returnsorted=False)
pAgF_z_FDR = p_to_z(pAgF_p_FDR, tail='two') * pAgF_sign
_, pFgA_p_FDR, _, _ = multipletests(pFgA_p_vals, alpha=alpha,
method='fdr_bh',
is_sorted=False,
returnsorted=False)
pFgA_z_FDR = p_to_z(pFgA_p_FDR, tail='two') * pFgA_sign
images = {
'consistency_z_FDR': pAgF_z_FDR,
'specificity_z_FDR': pFgA_z_FDR,
}
return images
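# Illustrative sketch, not part of the original module: the block above follows
# a common pattern of BH FDR correction followed by a sign-preserving,
# two-tailed p-to-z conversion. p_to_z is approximated here with
# scipy.stats.norm.isf; the library's own utility may differ in detail.
import numpy as np
from scipy import stats
from statsmodels.stats.multitest import multipletests

p_vals = np.array([0.001, 0.04, 0.20, 0.0005])
z_vals = np.array([3.29, -2.05, 1.28, -3.48])  # hypothetical signed z-values
_, p_fdr, _, _ = multipletests(p_vals, alpha=0.05, method='fdr_bh')
# Two-tailed z for the corrected p-values, re-signed by the original direction.
z_fdr = stats.norm.isf(p_fdr / 2) * np.sign(z_vals)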
null_distribution = np.cumsum(null_distribution[::-1, :], axis=0)[::-1, :]
null_distribution /= np.max(null_distribution, axis=0)
# Get the hist_bins associated with each voxel's ale value, in order to
# get the p-value from the associated bin in the null distribution.
n_bins = len(hist_bins)
ale_bins = round2(ale_values * step).astype(int)
# Clip to the last valid bin so values beyond the histogram range cannot
# index past the end of the null distribution.
ale_bins[ale_bins >= n_bins] = n_bins - 1
# Look up each voxel's p-value from the ale_bin-th entry of its null
# distribution.
p_values = np.empty_like(ale_bins).astype(float)
for i, (x, y) in enumerate(zip(null_distribution.transpose(), ale_bins)):
p_values[i] = x[y]
z_values = p_to_z(p_values, tail='one')
return p_values, z_values
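# Illustrative sketch, not part of the original module: how a single voxel's
# histogram becomes a survival-function-style null distribution and how a
# value is mapped onto a bin for p-value lookup. Toy numbers only.
import numpy as np

hist_bins = np.arange(0, 1.0, 0.1)                 # bin edges for one voxel
counts = np.array([50, 20, 10, 8, 5, 3, 2, 1, 1, 0], dtype=float)
null_dist = counts / counts.sum()
# Reverse cumulative sum: probability of a value at or above each bin.
null_dist = np.cumsum(null_dist[::-1])[::-1]
null_dist /= null_dist.max()

step = 1 / np.mean(np.diff(hist_bins))             # 10 bins per unit here
value = 0.34
value_bin = min(int(np.round(value * step)), len(hist_bins) - 1)
p_value = null_dist[value_bin]                     # one-sided p for this voxel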
p_ri = special.chdtrc(1, chi2_ri)
sign_ri = np.sign(p_selected_g_term - p_selected_g_noterm).ravel() # pylint: disable=no-member
# Multiple comparisons correction across terms, done separately for forward inference (FI) and reverse inference (RI).
if correction is not None:
_, p_corr_fi, _, _ = multipletests(p_fi, alpha=u, method=correction,
returnsorted=False)
_, p_corr_ri, _, _ = multipletests(p_ri, alpha=u, method=correction,
returnsorted=False)
else:
p_corr_fi = p_fi
p_corr_ri = p_ri
# Compute z-values
z_corr_fi = p_to_z(p_corr_fi, 'two') * sign_fi
z_corr_ri = p_to_z(p_corr_ri, 'two') * sign_ri
# Effect size
# Prior-weighted (marginal) probability of activation in the ROI under the
# prior on term presence.
p_selected_g_term_g_prior = prior * p_selected_g_term + (1 - prior) * p_selected_g_noterm
# Estimated probability that activation in the ROI reflects the brain state
# described by the term (Bayes' rule).
p_term_g_selected_g_prior = p_selected_g_term * prior / p_selected_g_term_g_prior
arr = np.array([p_corr_fi, z_corr_fi, p_selected_g_term_g_prior, # pylint: disable=no-member
p_corr_ri, z_corr_ri, p_term_g_selected_g_prior]).T
out_df = pd.DataFrame(data=arr, index=features,
columns=['pForward', 'zForward', 'probForward',
'pReverse', 'zReverse', 'probReverse'])
out_df.index.name = 'Term'
return out_df
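# Illustrative sketch, not part of the original module: the "effect size" block
# above is Bayes' rule under a prior on term presence. Hypothetical numbers:
# P(activation | term) = 0.30, P(activation | no term) = 0.10, prior = 0.5.
prior = 0.5
p_act_g_term = 0.30
p_act_g_noterm = 0.10
# Prior-weighted (marginal) probability of activation in the ROI.
p_act = prior * p_act_g_term + (1 - prior) * p_act_g_noterm   # 0.20
# Posterior probability that the term's brain state underlies the activation.
p_term_g_act = p_act_g_term * prior / p_act                   # 0.75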
def _ale_to_p(self, ale_values):
"""
Compute p- and z-values for an array of ALE values, using the
precomputed histogram-based null distribution.
"""
step = 1 / np.mean(np.diff(self.null_distributions['histogram_bins']))
# Determine p- and z-values from ALE values and null distribution.
p_values = np.ones(ale_values.shape)
idx = np.where(ale_values > 0)[0]
ale_bins = round2(ale_values[idx] * step)
p_values[idx] = self.null_distributions['histogram_weights'][ale_bins]
z_values = p_to_z(p_values, tail='one')
return p_values, z_values
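# Illustrative sketch, not part of the original module: `step` converts an ALE
# value into an index into the histogram-based null distribution. With bins
# spaced 0.001 apart, step is ~1000, so an ALE value of 0.0234 maps to bin 23.
import numpy as np

histogram_bins = np.arange(0, 1.0, 0.001)
step = 1 / np.mean(np.diff(histogram_bins))   # ~1000
ale_value = 0.0234
ale_bin = int(np.round(ale_value * step))     # -> 23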
p_fi[n_selected_term < 5] = 1.
p_ri[n_selected_term < 5] = 1.
# Multiple comparisons correction across features, done separately for forward inference (FI) and reverse inference (RI).
if correction is not None:
_, p_corr_fi, _, _ = multipletests(p_fi, alpha=u, method=correction,
returnsorted=False)
_, p_corr_ri, _, _ = multipletests(p_ri, alpha=u, method=correction,
returnsorted=False)
else:
p_corr_fi = p_fi
p_corr_ri = p_ri
# Compute z-values
z_corr_fi = p_to_z(p_corr_fi, 'two') * sign_fi
z_corr_ri = p_to_z(p_corr_ri, 'two') * sign_ri
# Effect size
arr = np.array([p_corr_fi, z_corr_fi, l_selected_g_term, # pylint: disable=no-member
p_corr_ri, z_corr_ri, p_term_g_selected]).T
out_df = pd.DataFrame(data=arr, index=features,
columns=['pForward', 'zForward', 'likelihoodForward',
'pReverse', 'zReverse', 'probReverse'])
out_df.index.name = 'Term'
return out_df
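# Illustrative usage sketch, not part of the original module: the decoder
# returns one row per term, so significant reverse-inference terms can be
# pulled out directly from the DataFrame built above. The 0.05 threshold and
# the top-10 cut are arbitrary choices for the example.
significant = out_df[out_df['pReverse'] < 0.05]
top_terms = significant.sort_values('zReverse', ascending=False).head(10)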
iter_grp1_ale_values *= (1. - red_ma_arr[j_exp, :])
iter_grp1_ale_values = 1 - iter_grp1_ale_values
iter_grp2_ale_values = np.ones(np.sum(grp2_voxel))
for j_exp in id_idx[n_grp1:]:
iter_grp2_ale_values *= (1. - red_ma_arr[j_exp, :])
iter_grp2_ale_values = 1 - iter_grp2_ale_values
iter_diff_values[i_iter, :] = iter_grp2_ale_values - iter_grp1_ale_values
for voxel in range(np.sum(grp2_voxel)):
# TODO: Check that upper is appropriate
grp2_p_arr[voxel] = null_to_p(diff_ale_values[voxel],
iter_diff_values[:, voxel],
tail='upper')
grp2_z_arr = p_to_z(grp2_p_arr, tail='one')
# Unmask
grp2_z_map = np.full(grp2_voxel.shape[0], np.nan)
grp2_z_map[grp2_voxel] = grp2_z_arr
# Fill in output map
diff_z_map = np.zeros(image1.shape[0])
diff_z_map[grp2_voxel] = -1 * grp2_z_map[grp2_voxel]
# This may overwrite some of the group-2 values at overlapping voxels, which is acceptable.
diff_z_map[grp1_voxel] = grp1_z_map[grp1_voxel]
images = {'grp1-grp2_z': diff_z_map}
self.results = MetaResult(self, self.mask, maps=images)
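# Illustrative sketch, not part of the original module: a minimal stand-in for
# the null_to_p(..., tail='upper') calls above, assuming it returns the
# proportion of the permutation null at or above the observed value. The
# library's own implementation may handle ties and zero counts differently.
import numpy as np

def _null_to_p_upper(observed, null_array):
    """Upper-tailed empirical p-value from a permutation null."""
    null_array = np.asarray(null_array)
    return (null_array >= observed).sum() / null_array.size

rng = np.random.default_rng(0)
observed_diff = 0.12
null_diffs = rng.normal(0, 0.05, size=10000)   # hypothetical permutation null
p_upper = _null_to_p_upper(observed_diff, null_diffs)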
iter_t_maps[np.isnan(iter_t_maps)] = 0
for voxel in range(iter_t_maps.shape[1]):
p_map[voxel] = null_to_p(t_map[voxel], iter_t_maps[:, voxel])
# Clip p-values of exactly 0 or 1 to the nearest values that will not
# evaluate to 0 or 1, preventing infinite z-values.
p_map[p_map < 1e-16] = 1e-16
p_map[p_map > (1. - 1e-16)] = 1. - 1e-16
elif null != 'theoretical':
raise ValueError('Input null must be "theoretical" or "empirical".')
# Convert p to z, preserving signs
sign = np.sign(t_map)
sign[sign == 0] = 1
z_map = p_to_z(p_map, tail='two') * sign
log_p_map = -np.log10(p_map)
images = {'t': t_map,
'z': z_map,
'p': p_map,
'log_p': log_p_map}
return images
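# Illustrative sketch, not part of the original module: why p-values are
# clipped to [1e-16, 1 - 1e-16] above. A p-value of exactly 0 converts to an
# infinite z-statistic, while the clipped minimum stays finite (~8.3).
from scipy import stats

print(stats.norm.isf(0.0 / 2))     # inf
print(stats.norm.isf(1e-16 / 2))   # ~8.3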