How to use the nibabel.save function in nibabel

To help you get started, we've selected a few nibabel.save examples based on popular ways it is used in public projects.

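At its simplest, nibabel.save(img, filename) writes any nibabel image object to disk, choosing the on-disk format from the filename extension. A minimal sketch of the pattern the examples below build on (the array, affine, and output name here are placeholders):

import numpy as np
import nibabel as nib

data = np.zeros((64, 64, 32), dtype=np.float32)  # placeholder volume
affine = np.eye(4)                               # identity voxel-to-world transform
img = nib.Nifti1Image(data, affine)
nib.save(img, 'example.nii.gz')                  # .nii.gz -> compressed NIfTI-1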

github nipy / dipy / 1.0.0 / _downloads / e9d79d89e545a2e65e6b29151ae27c1e / snr_in_cc.py
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff

CC_box[bounds_min[0]:bounds_max[0],
       bounds_min[1]:bounds_max[1],
       bounds_min[2]:bounds_max[2]] = 1

mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box, threshold,
                                     return_cfa=True)

cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')

import matplotlib.pyplot as plt
region = 40
fig = plt.figure('Corpus callosum segmentation')
plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))

plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')
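
In the excerpt above only the corpus-callosum mask is written to disk; the colour-FA image cfa_img could be saved the same way. A quick sanity check that the write round-trips, assuming the code above has already produced mask_CC_part.nii.gz:

import nibabel as nib

reloaded = nib.load('mask_CC_part.nii.gz')   # file written by the excerpt above
assert reloaded.get_fdata().max() <= 1       # the binary mask survives the round trip
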
github neuropoly / spinalcordtoolbox / spinalcordtoolbox / deepseg_sc / core.py
def heatmap2optic(fname_heatmap, lambda_value, fname_out, z_max, algo='dpdt'):
    """Run OptiC on the heatmap computed by CNN_1."""
    import nibabel as nib
    os.environ["FSLOUTPUTTYPE"] = "NIFTI_PAIR"

    optic_input = fname_heatmap.split('.nii')[0]

    cmd_optic = 'isct_spine_detect -ctype="%s" -lambda="%s" "%s" "%s" "%s"' % \
                (algo, str(lambda_value), "NONE", optic_input, optic_input)
    sct.run(cmd_optic, verbose=1)

    optic_hdr_filename = optic_input + '_ctr.hdr'
    img = nib.load(optic_hdr_filename)
    nib.save(img, fname_out)

    # crop the centerline if z_max < data.shape[2] and -brain == 1
    if z_max is not None:
        sct.printv('Cropping brain section.')
        ctr_nii = Image(fname_out)
        ctr_nii.data[:, :, z_max:] = 0
        ctr_nii.save()
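
Note that the nib.load / nib.save pair above also acts as a format converter: the FSLOUTPUTTYPE setting asks the detector to write a .hdr/.img pair, and saving the loaded image under fname_out lets nibabel pick the output format from that filename's extension. A standalone sketch of the same idea, with hypothetical filenames:

import nibabel as nib

img = nib.load('centerline_ctr.hdr')     # header of a .hdr/.img pair
nib.save(img, 'centerline_ctr.nii.gz')   # rewritten as a single compressed NIfTI
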
github nistats / nistats / examples / plot_mixed_gambles.py
import os
from os import path

import numpy as np
import pandas as pd
import nibabel as nib

from nilearn.datasets import fetch_mixed_gambles
from nistats.second_level_model import SecondLevelModel

# write directory
write_dir = 'results'
first_level_paths = os.path.join('results', 'first_level_imgs')
if not path.exists(first_level_paths):
    os.makedirs(first_level_paths)

# get data ###################################################
data = fetch_mixed_gambles(16)
zmap_paths = []
for zidx, zmap in enumerate(data.zmaps):
    zmap_path = os.path.join(first_level_paths, 'map_%03d.nii' % zidx)
    nib.save(zmap, zmap_path)
    zmap_paths.append(zmap_path)
behavioral_target = np.ravel(['gain_%d' % (i + 1) for i in data.gain]).tolist()
subjects_id = np.ravel([['sub_%02d' % (i + 1)] * 48 for i in range(16)])
subjects_id = subjects_id.tolist()
mask_filename = data.mask_img


# Second level model #########################################
# create input for second-level analysis
df_columns = [subjects_id, behavioral_target, zmap_paths]
df_columns = zip(*df_columns)
df_column_names = ['model_id', 'map_name', 'effects_map_path']
first_level_df = pd.DataFrame(df_columns, columns=df_column_names)

# estimate second level model
second_level_model = SecondLevelModel(mask=mask_filename, smoothing_fwhm=3.0)
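
The loop above shows a common use of nib.save: the z-maps returned by fetch_mixed_gambles are nibabel image objects, and writing each one to a numbered path lets the second-level DataFrame reference files rather than in-memory objects. Stripped of the nistats specifics, the pattern is roughly as follows (imgs and out_dir are placeholders):

import os
import nibabel as nib

paths = []
for i, img in enumerate(imgs):                     # any iterable of nibabel images
    p = os.path.join(out_dir, 'map_%03d.nii' % i)
    nib.save(img, p)
    paths.append(p)
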
github neurodata / ndstore / webservices / ndwsnifti.py
    # coerce the data type
    if ch.channel_datatype in DTYPE_uint8:
      # the dtype string is cut off in the source snippet; '<u1' (little-endian
      # uint8) is an assumed completion, consistent with the DTYPE_uint8 branch
      niidata = np.array(niidata, dtype='<u1')

github neurodata / m2g / ndmg / preproc / preproc_func.py
        # determine the TR (repetition time), i.e. the period
        # of radiofrequency excitation
        func_im = nb.load(self.func)
        tr = func_im.header.get_zooms()[3]
        if tr == 0:
            raise ZeroDivisionError(
                "Failed to determine number of frames to" " trim due to tr=0."
            )
        nvol_trim = int(np.floor(15 / float(tr)))
        # remove the first nvol_trim timesteps
        mssg = "Scrubbing first 15 seconds ({0:d} volumes due" " to tr={1: .3f}s)"
        print((mssg.format(nvol_trim, tr)))
        trimmed_dat = func_im.get_data()[:, :, :, nvol_trim:]
        trimmed_im = nb.Nifti1Image(
            dataobj=trimmed_dat, header=func_im.header, affine=func_im.affine
        )
        nb.save(img=trimmed_im, filename=trim_func)

        # use slicetimer if user passes slicetiming information
        if stc is not None:
            self.slice_time_correct(trim_func, stc_func, tr, stc)
        else:
            stc_func = trim_func
        # motion correct using the mean volume (FSL default)
        self.motion_correct(stc_func, self.motion_func, None)
        self.mc_params = "{}.par".format(self.motion_func)
        cmd = "cp {} {}".format(self.motion_func, self.preproc_func)
        mgu.execute_cmd(cmd, verb=True)
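
The keyword call nb.save(img=..., filename=...) matches nibabel's save(img, filename) signature. Distilled into a standalone helper, the trim-and-save step looks roughly like the sketch below (a hypothetical function, not part of m2g):

import numpy as np
import nibabel as nb

def trim_leading_volumes(in_path, out_path, seconds=15.0):
    """Drop the first `seconds` worth of volumes from a 4D NIfTI and save the rest."""
    img = nb.load(in_path)
    tr = img.header.get_zooms()[3]                  # repetition time in seconds
    n_trim = int(np.floor(seconds / float(tr)))
    trimmed = img.get_fdata()[..., n_trim:]
    nb.save(nb.Nifti1Image(trimmed, affine=img.affine, header=img.header), out_path)
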
github nipy / dipy / scratch / very_scratch / registration_example.py
def register_FA_same_subj_diff_sessions(dname_grid,dname_shell):


    print('create temporary directory')
    tmp_dir='/tmp'

    print('load dicom data')
    data_gr,affine_gr,bvals_gr,gradients_gr=dp.load_dcm_dir(dname_grid)
    data_sh,affine_sh,bvals_sh,gradients_sh=dp.load_dcm_dir(dname_shell)

    print('save DWI reference as nifti')
    tmp_grid=os.path.join(tmp_dir,os.path.basename(dname_grid)+'_ref.nii')
    tmp_shell=os.path.join(tmp_dir,os.path.basename(dname_shell)+'_ref.nii')    
    ni.save(ni.Nifti1Image(data_gr[...,0],affine_gr),tmp_grid)    
    ni.save(ni.Nifti1Image(data_sh[...,0],affine_sh),tmp_shell)

    print('prepare filenames for haircut (bet)')
    tmp_grid_bet=os.path.join(os.path.dirname(tmp_grid),\
                                  os.path.splitext(os.path.basename(dname_grid))[0]+\
                                  '_ref_bet.nii.gz')    
    tmp_shell_bet=os.path.join(os.path.dirname(tmp_shell),\
                                   os.path.splitext(os.path.basename(dname_shell))[0]+\
                                   '_ref_bet.nii.gz')

    print('bet is running')
    haircut_dwi_reference(tmp_grid,tmp_grid_bet)
    haircut_dwi_reference(tmp_shell,tmp_shell_bet)

    print('load nii.gz reference (s0) volumes')
    img_gr_bet=ni.load(tmp_grid_bet)
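
The two ni.save calls above write only the first volume of each 4D DWI series (typically the b=0 image) as a 3D reference for bet. The same slicing-and-saving step, starting from a NIfTI file rather than a DICOM directory, would look roughly like this (filenames are hypothetical):

import nibabel as ni

dwi = ni.load('dwi_4d.nii.gz')
b0 = dwi.get_fdata()[..., 0]                  # first volume of the 4D series
ni.save(ni.Nifti1Image(b0, dwi.affine), 'dwi_ref_b0.nii.gz')
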
github dPys / PyNets / pynets / registration / reg_utils.py
    # The excerpt opens mid-call; in dipy's affine-registration idiom the full call is
    # presumably something like the following (affreg, transform, and params0 are the
    # registration object and initial parameters set up earlier in the source file):
    affine_opt = affreg.optimize(static, moving, transform, params0,
                                 static_affine, moving_affine,
                                 starting_affine=rigid_map.affine)

    # We now perform the non-rigid deformation using the Symmetric Diffeomorphic Registration(SyN) Algorithm:
    metric = CCMetric(3)
    level_iters = [10, 10, 5]
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

    mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                           affine_opt.affine)
    warped_moving = mapping.transform(moving)

    # Save warped FA image
    run_uuid = '%s_%s' % (strftime('%Y%m%d_%H%M%S'), uuid.uuid4())
    warped_fa = '{}/warped_fa_{}.nii.gz'.format(working_dir, run_uuid)
    nib.save(nib.Nifti1Image(warped_moving, affine=static_affine), warped_fa)

    # We show the registration result with:
    regtools.overlay_slices(static, warped_moving, None, 0, "Static", "Moving",
                            "%s%s%s%s" % (working_dir, "/transformed_sagittal_", run_uuid, ".png"))
    regtools.overlay_slices(static, warped_moving, None, 1, "Static", "Moving",
                            "%s%s%s%s" % (working_dir, "/transformed_coronal_", run_uuid, ".png"))
    regtools.overlay_slices(static, warped_moving, None, 2, "Static", "Moving",
                            "%s%s%s%s" % (working_dir, "/transformed_axial_", run_uuid, ".png"))

    return mapping, affine_map, warped_fa

github trislett / TFCE_mediation / STEP_1_mediation_vertexTFCE.py
img_data_lh = nib.freesurfer.mghformat.load("lh.all.%s.%s.mgh" % (surface,FWHM))
data_full_lh = img_data_lh.get_data()
data_lh = np.squeeze(data_full_lh)
affine_mask_lh = img_data_lh.get_affine()
n = data_lh.shape[1]
outdata_mask_lh = np.zeros_like(data_full_lh[:,:,:,1])
img_data_rh = nib.freesurfer.mghformat.load("rh.all.%s.%s.mgh" % (surface,FWHM))
data_full_rh = img_data_rh.get_data()
data_rh = np.squeeze(data_full_rh)
affine_mask_rh = img_data_rh.get_affine()
outdata_mask_rh = np.zeros_like(data_full_rh[:,:,:,1])
if not os.path.exists("lh.mean.%s.%s.mgh" % (surface,FWHM)):
	mean_lh = np.sum(data_lh,axis=1)/data_lh.shape[1]
	outmean_lh = np.zeros_like(data_full_lh[:,:,:,1])
	outmean_lh[:,0,0] = mean_lh
	nib.save(nib.freesurfer.mghformat.MGHImage(outmean_lh,affine_mask_lh),"lh.mean.%s.%s.mgh" % (surface,FWHM))
	mean_rh = np.sum(data_rh,axis=1)/data_rh.shape[1]
	outmean_rh = np.zeros_like(data_full_rh[:,:,:,1])
	outmean_rh[:,0,0] = mean_rh
	nib.save(nib.freesurfer.mghformat.MGHImage(outmean_rh,affine_mask_rh),"rh.mean.%s.%s.mgh" % (surface,FWHM))
else:
	img_mean_lh = nib.freesurfer.mghformat.load("lh.mean.%s.%s.mgh" % (surface,FWHM))
	mean_full_lh = img_mean_lh.get_data()
	mean_lh = np.squeeze(mean_full_lh)
	img_mean_rh = nib.freesurfer.mghformat.load("rh.mean.%s.%s.mgh" % (surface,FWHM))
	mean_full_rh = img_mean_rh.get_data()
	mean_rh = np.squeeze(mean_full_rh)

#create masks
if opts.fmri:
	maskthresh = opts.fmri
	bin_mask_lh = np.logical_or(mean_lh > maskthresh, mean_lh < (-1*maskthresh))
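
nib.save is not limited to NIfTI: it dispatches on the image class and filename, so handing it an MGHImage, as above, writes a FreeSurfer .mgh file. A minimal sketch with placeholder data:

import numpy as np
import nibabel as nib

surf_data = np.zeros((10242, 1, 1), dtype=np.float32)    # placeholder per-vertex values
mgh = nib.freesurfer.mghformat.MGHImage(surf_data, np.eye(4))
nib.save(mgh, 'lh.example.mgh')
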
github nipy / nilabels / labels_manager / helpers / manipulations.py
            input_im_path = arrange_path(self.input_data_folder, data_in)
            im_labels = nib.load(input_im_path)
            data_labels = im_labels.get_data()
            data_symmetrised = symmetrise_data(data_labels,
                                               axis=axis,
                                               plane_intercept=plane_intercept,
                                               side_to_copy=side_to_copy,
                                               keep_in_data_dimensions=keep_in_data_dimensions)

            if data_out is None:
                return data_symmetrised

            else:
                output_im_path = arrange_path(self.output_data_folder, data_out)
                im_symmetrised = set_new_data(im_labels, data_symmetrised)
                nib.save(im_symmetrised, output_im_path)
                print('Symmetrised image of {0} saved in {1}.'.format(input_im_path, output_im_path))
                return None
        else:
            raise IOError

github scilus / scilpy / scripts / scil_run_nlmeans.py
    if sigma is not None:
        log.info('User supplied noise standard deviation is {}'.format(sigma))
        # Broadcast the single value to a whole 3D volume for nlmeans
        sigma = np.ones(data.shape[:3]) * sigma
    else:
        log.info('Estimating noise')
        sigma = _get_basic_sigma(vol.get_fdata(dtype=np.float32), log)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        data_denoised = nlmeans(
            data, sigma, mask=mask, rician=args.N > 0,
            num_threads=args.nbr_processes)

    nb.save(nb.Nifti1Image(
        data_denoised, vol.affine, vol.header), args.output)
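
Passing vol.header alongside vol.affine, as in the final save above, carries the input's metadata (voxel sizes, units, and so on) over to the denoised output; only the data block changes. The same save-with-original-header idiom in isolation, with hypothetical filenames:

import nibabel as nb

vol = nb.load('dwi.nii.gz')
processed = vol.get_fdata() * 2.0             # stand-in for the real processing step
nb.save(nb.Nifti1Image(processed, vol.affine, vol.header), 'dwi_denoised.nii.gz')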