# coding: utf8
import clinica.pipelines.engine as cpe
__author__ = "Jorge Samper-Gonzalez"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Jorge Samper-Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Jorge Samper-Gonzalez"
__email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"
class T1VolumeExistingDartel(cpe.Pipeline):
"""T1VolumeExistingDartel - Reuse existing Dartel template.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).
Returns:
A clinica pipeline object containing the T1VolumeExistingDartel pipeline.
"""
def __init__(self,
bids_directory=None,
caps_directory=None,
tsv_file=None,
base_dir=None,
name=None,
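# A minimal usage sketch (not part of the excerpt above): the import path is an
# assumption, the paths are placeholders, and the run() call assumes the
# standard nipype Workflow.run() interface inherited by Clinica pipelines.
# Check the Clinica documentation for the authoritative invocation.
from clinica.pipelines.t1_volume_existing_dartel.t1_volume_existing_dartel_pipeline import T1VolumeExistingDartel  # assumed module path

pipeline = T1VolumeExistingDartel(
    bids_directory='/path/to/bids',
    caps_directory='/path/to/caps',
    tsv_file='/path/to/subjects_sessions.tsv',
    base_dir='/path/to/working_dir',
    name='t1-volume-existing-dartel')
pipeline.run(plugin='MultiProc')  # assumed: standard nipype plugin interface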
# coding: utf8
import clinica.pipelines.engine as cpe
__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"
class StatisticsSurface(cpe.Pipeline):
"""
Based on the Matlab toolbox [SurfStat](http://www.math.mcgill.ca/keith/surfstat/), which performs statistical
analyses of univariate and multivariate surface and volumetric data using the generalized linear model (GLM),
this pipeline performs analyses including group comparison and correlation on surface-based features.
Currently, this pipeline fits the normalised cortical thickness on FsAverage produced by the `t1-freesurfer` pipeline.
New features will be added in the future.
Args:
caps_directory: str, the output folder of recon-all which will contain the result files: ?h.thickness.fwhm**.mgh.
tsv_file: str, path to the TSV file containing the information for the GLM.
design_matrix: str, the linear model to be fitted by the GLM, for example '1+group'.
contrast: str, the contrast for the GLM. If the factor you choose is a categorical variable, clinica_surfstat creates two contrasts:
for example, contrast = 'Label' creates contrastpos = Label.AD - Label.CN and contrastneg = Label.CN - Label.AD. If the factor
you choose is a continuous variable, clinica_surfstat creates a single contrast, for example contrast = 'Age'. Note that
the string you choose must exactly match a column name in your subjects_visits_tsv.
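# Illustration of the design_matrix / contrast conventions described in the
# docstring above. Only the argument strings are built here; the commented
# instantiation is an assumption about how the pipeline is configured and
# should be checked against the Clinica documentation.
design_matrix = '1 + group'   # covariate names must match TSV column names
contrast = 'Label'            # categorical factor: clinica_surfstat derives
                              # Label.AD - Label.CN and Label.CN - Label.AD
# contrast = 'Age'            # continuous factor: a single contrast is derived

# Hypothetical configuration of the pipeline defined above:
# pipeline = StatisticsSurface(caps_directory='/path/to/caps',
#                              tsv_file='/path/to/subjects_visits.tsv')
# pipeline.parameters = {'design_matrix': design_matrix, 'contrast': contrast}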
# coding: utf8
import clinica.pipelines.engine as cpe
__author__ = "Jorge Samper-Gonzalez"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Jorge Samper-Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Jorge Samper-Gonzalez"
__email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"
class T1VolumeCreateDartel(cpe.Pipeline):
"""T1VolumeCreateDartel - Create new Dartel template.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).
Returns:
A clinica pipeline object containing the T1VolumeCreateDartel pipeline.
"""
def __init__(self,
bids_directory=None,
caps_directory=None,
tsv_file=None,
base_dir=None,
name=None,
# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe
__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"
class SpatialSVM(cpe.Pipeline):
"""SpatialSVM - Prepare input data for SVM with spatial and anatomical regularization.
Todos:
- [ ] Final version of CAPS.
- [ ] Remove --voxel_size flag and detect automatically this parameter.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).
Returns:
A clinica pipeline object containing the SpatialSVM pipeline.
Raises:
"""
import clinica.pipelines.engine as cpe
from nipype import config
__author__ = ["Alexandre Routier", "Thomas Jacquemont"]
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Nipype"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__status__ = "Development"
# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
cfg = dict(execution={'parameterize_dirs': False})
config.update_config(cfg)
class DwiDti(cpe.Pipeline):
"""DTI-based processing of DWI datasets.
Args:
input_dir(str): Input directory in a CAPS hierarchy.
output_dir(str): Output directory in a CAPS hierarchy.
subjects_sessions_list(str): The Subjects-Sessions list file (in .tsv
format).
Returns:
A clinica pipeline object containing the DwiDti pipeline.
Raises:
"""
def check_custom_dependencies(self): pass
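# Sketch of how the configuration in this excerpt is meant to be used:
# 'parameterize_dirs': False makes nipype hash iterable folder names instead
# of embedding parameter values, which keeps working-directory paths short.
# The commented run() call assumes the standard nipype Workflow.run()
# interface inherited by Clinica pipelines; its arguments are placeholders.
from nipype import config

config.update_config({'execution': {'parameterize_dirs': False}})

# pipeline = DwiDti(caps_directory='/path/to/caps',
#                   tsv_file='/path/to/subjects_sessions.tsv')
# pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})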
# coding: utf8
# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe
# Use hash instead of parameters for iterables folder names
from nipype import config
cfg = dict(execution={'parameterize_dirs': False})
config.update_config(cfg)
class DwiConnectome(cpe.Pipeline):
"""Connectome-based processing of corrected DWI datasets.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will
be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv
format).
Returns:
A clinica pipeline object containing the DwiConnectome pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file.
"""
# coding: utf8
__author__ = "Alexis Guyot"
__copyright__ = "Copyright 2016-2019, The Aramis Lab Team"
__credits__ = ["Alexis Guyot"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Alexis Guyot"
__email__ = "alexis.guyot@icm-institute.org"
__status__ = "Development"
import clinica.pipelines.engine as cpe
class T1FreeSurferLongitudinalCorrection(cpe.Pipeline):
"""FreeSurfer Longitudinal correction class
Creates a pipeline that runs the FreeSurfer longitudinal
(correction) processing module for each subject in a .tsv-defined
list of subjects/sessions. This requires a prior run of
t1-freesurfer on the TSV file, followed by a run of
t1-freesurfer-template on the same TSV file. For each subject, all the
timepoints (sessions) are re-processed based on a template computed
with t1-freesurfer-template for that specific subject.
Todos: N/A
Returns:
A clinica pipeline object containing the T1FreeSurferLongitudinalCorrection pipeline
Raises:
# coding: utf8
import clinica.pipelines.engine as cpe
__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Simona Bottani"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"
class T1VolumeParcellation(cpe.Pipeline):
"""T1VolumeParcellation - Computation of mean GM concentration for a set of regions
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).
Returns:
A clinica pipeline object containing the T1VolumeParcellation pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file.
"""
pass
def get_input_fields(self):
# coding: utf-8
__author__ = "Arnaud Marcoux"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Arnaud Marcoux", "Michael Bacci"]
__license__ = "See LICENSE.txt file"
__version__ = "1.0.0"
__maintainer__ = "Arnaud Marcoux"
__email__ = "arnaud.marcoux@inria.fr"
__status__ = "Development"
import clinica.pipelines.engine as cpe
class PetSurface(cpe.Pipeline):
"""Project PET signal onto the surface of the cortex.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be
written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv
format).
Returns:
A clinica pipeline object containing the PetSurface pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file.
# coding: utf8
__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"
# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe
class DwiPreprocessingNoddi(cpe.Pipeline):
"""dwi_preprocessing_noddi SHORT DESCRIPTION.
Warnings:
- A WARNING.
Todos:
- [x] A FILLED TODO ITEM.
- [ ] AN ON-GOING TODO ITEM.
Args:
input_dir: A BIDS directory.
output_dir: An empty output directory where CAPS structured data will be written.
subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).
Returns:
A clinica pipeline object containing the dwi_preprocessing_noddi pipeline.