print('Positive predictive value %0.2f' % evaluation['ppv'])
print('Negative predictive value %0.2f \n' % evaluation['npv'])

# Save the dual coefficients, support-vector indices and intercept of the fitted classifier
if save_dual_coefficients:
    np.save(join(output_directory, classification_str + '__dual_coefficients'), dual_coefficients[0])
    np.save(join(output_directory, classification_str + '__sv_indices'), sv_indices)
    np.save(join(output_directory, classification_str + '__intersect'), intersect)

# Recover the voxel-wise weights from the dual coefficients only when they are needed
if save_original_weights or save_features_image:
    weights_orig = features_weights(current_subjects, dual_coefficients[0], sv_indices, scaler, data_mask)
if save_original_weights:
    np.save(join(output_directory, classification_str + '__weights'), weights_orig)
if save_features_image:
    weights_to_nifti(weights_orig, image_list[0], join(output_directory, classification_str + '__features_image.nii'))
if save_subject_classification:
    save_subjects_prediction(current_subjects, current_diagnosis, y, y_hat, join(output_directory, classification_str + '__subjects.tsv'))

results[(dx1, dx2)] = evaluation  # same values as evaluate_prediction(y, y_hat)
results_to_tsv(results, dx_filter, join(output_directory, 'resume' + ('_balanced' if balanced else '_not_balanced') + '.tsv'))

# Release the shared feature matrix and force garbage collection
shared_x = None
gc.collect()
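
# A minimal sketch of the metrics an evaluate_prediction-style helper could compute
# from binary labels y and predictions y_hat encoded as 0/1; the key names mirror the
# dictionary read above, but this is an illustration, not the pipeline's actual code.
import numpy as np

def evaluate_prediction_sketch(y, y_hat):
    y, y_hat = np.asarray(y), np.asarray(y_hat)
    tp = int(np.sum((y == 1) & (y_hat == 1)))
    tn = int(np.sum((y == 0) & (y_hat == 0)))
    fp = int(np.sum((y == 0) & (y_hat == 1)))
    fn = int(np.sum((y == 1) & (y_hat == 0)))
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    return {'accuracy': (tp + tn) / len(y),
            'balanced_accuracy': (sensitivity + specificity) / 2,
            'sensitivity': sensitivity,
            'specificity': specificity,
            'ppv': tp / (tp + fp) if (tp + fp) else 0.0,
            'npv': tn / (tn + fn) if (tn + fn) else 0.0}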
print('Accuracy %0.2f' % evaluation['accuracy'])
print('Balanced accuracy %0.2f' % evaluation['balanced_accuracy'])
print('Sensitivity %0.2f' % evaluation['sensitivity'])
print('Specificity %0.2f' % evaluation['specificity'])
print('Positive predictive value %0.2f' % evaluation['ppv'])
print('Negative predictive value %0.2f \n' % evaluation['npv'])

# Map the flat coefficient vector back onto the original volume only when an output needs it
if save_weights or save_features_image:
    weights_orig = revert_mask(coefficients, data_mask, orig_shape)
if save_weights:
    np.save(join(output_directory, classification_str + '__intersect'), intersect)
    np.save(join(output_directory, classification_str + '__weights'), weights_orig)
if save_features_image:
    weights_to_nifti(weights_orig, image_list[0], join(output_directory, classification_str + '__features_image.nii'))
if save_subject_classification:
    save_subjects_prediction(current_subjects, current_diagnosis, y, y_hat, join(output_directory, classification_str + '__subjects.csv'))

results[(dx1, dx2)] = evaluate_prediction(y, y_hat)
results_to_csv(results, dx_filter, join(output_directory, 'resume' + ('_positive' if positive else '') + '.csv'))
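
# A minimal sketch of a revert_mask-style helper, assuming data_mask is a boolean mask
# with one entry per voxel of the original volume and weights holds one value per voxel
# kept by the mask (an illustration, not the actual implementation used above).
import numpy as np

def revert_mask_sketch(weights, data_mask, orig_shape):
    flat = np.zeros(int(np.prod(orig_shape)))
    flat[np.asarray(data_mask).ravel()] = weights  # scatter the flat weights back to their voxel positions
    return flat.reshape(orig_shape)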
print('False negative %0.2f' % len(evaluation['predictions'][3]))
print('Accuracy %0.2f' % evaluation['accuracy'])
print('Balanced accuracy %0.2f' % evaluation['balanced_accuracy'])
print('Sensitivity %0.2f' % evaluation['sensitivity'])
print('Specificity %0.2f' % evaluation['specificity'])
print('Positive predictive value %0.2f' % evaluation['ppv'])
print('Negative predictive value %0.2f \n' % evaluation['npv'])

# Map the weight vector w back onto the original volume before saving
weights_orig = revert_mask(w, data_mask, orig_shape)
if save_original_weights:
    np.save(join(output_directory, classification_str + '__weights'), weights_orig)
if save_features_image:
    weights_to_nifti(weights_orig, image_list[0], join(output_directory, classification_str + '__features_image.nii'))
if save_subject_classification:
    save_subjects_prediction(current_subjects, current_diagnosis, y, y_hat, join(output_directory, classification_str + '__subjects.csv'))

results[(dx1, dx2)] = evaluate_prediction(y, y_hat)
results_to_csv(results, dx_filter, join(output_directory, 'resume.csv'))
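
# A minimal sketch of a results_to_csv-style writer, assuming results maps (dx1, dx2)
# task pairs to metric dictionaries like the one printed above; the column order and the
# dx_filter argument of the real helper are not reproduced here (hypothetical layout).
import pandas as pd

def results_to_csv_sketch(results, output_path):
    rows = [dict(dx1=dx1, dx2=dx2, **metrics) for (dx1, dx2), metrics in results.items()]
    pd.DataFrame(rows).to_csv(output_path, index=False)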
def save_weights_as_nifti(self, weights, output_dir):
    # Convert the flat weight vector back to image space and write it as a NIfTI volume
    if self._images is None:
        self.get_images()
    output_filename = path.join(output_dir, 'weights.nii.gz')
    data = vbio.revert_mask(weights, self._data_mask, self._orig_shape)
    vbio.weights_to_nifti(data, self._images[0], output_filename)
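
# A minimal sketch of a weights_to_nifti-style helper, assuming nibabel is available and
# that the reference image argument is a path whose affine can be reused for the output
# volume (an illustration of the idea, not the vbio module's actual code).
import nibabel as nib

def weights_to_nifti_sketch(data, reference_image_path, output_filename):
    ref = nib.load(reference_image_path)
    nib.save(nib.Nifti1Image(data, ref.affine), output_filename)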