estimator_data = pd.Series([X, y, self.scoring,
False,
self.fit_params, self.return_train_score,
True, True, True,
self.error_score],
index=estimator_labels,
name='estimator Data')
fname = 'estimatordata.hdf5'
estimatorname = os.path.join(tempfolder, fname)
estimator_data.to_hdf(estimatorname, 'Estimator Data')
estimatordata = f"vfs://tmp/GS/{name}/{fname}"
# Create the fastr network
network = fastr.create_network('WORC_GridSearch_' + name)
estimator_data = network.create_source('HDF5', id='estimator_source')
traintest_data = network.create_source('HDF5', id='traintest')
parameter_data = network.create_source('JsonFile', id='parameters')
sink_output = network.create_sink('HDF5', id='output')
fitandscore = network.create_node('worc/fitandscore:1.0', tool_version='1.0', id='fitandscore', resources=ResourceLimit(memory='2G'))
fitandscore.inputs['estimatordata'].input_group = 'estimator'
fitandscore.inputs['traintest'].input_group = 'traintest'
fitandscore.inputs['parameters'].input_group = 'parameters'
fitandscore.inputs['estimatordata'] = estimator_data.output
fitandscore.inputs['traintest'] = traintest_data.output
fitandscore.inputs['parameters'] = parameter_data.output
sink_output.input = fitandscore.outputs['fittedestimator']
source_data = {'estimator_source': estimatordata,
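# NOTE: a sketch, not the original code. The two remaining source entries, the
# sink mapping, and the execute call below are illustrative assumptions;
# 'traintest_files' and 'parameter_files' are hypothetical names for the vfs://
# paths of the train/test splits and sampled parameter settings written earlier
# in this routine, and the sink URL uses the usual fastr {sample_id}/{ext}
# placeholders.
'traintest': traintest_files,
'parameters': parameter_files}
sink_data = {'output': f"vfs://tmp/GS/{name}/output_{{sample_id}}{{ext}}"}
network.execute(source_data, sink_data)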
def create_network(self):
'''
Add evaluate components to network.
'''
# Create all nodes
self.node_slicer =\
self.network.create_node('worc/Slicer:1.0', tool_version='1.0', id='Slicer', resources=ResourceLimit(memory='20G'))
# Create sinks
self.sink_PNG =\
self.network.create_sink('PNGFile', id='PNG')
self.sink_PNGZoomed =\
self.network.create_sink('PNGFile', id='PNGZoomed')
# Create links to sinks
self.sink_PNG.input = self.node_slicer.outputs['out']
self.sink_PNGZoomed.input = self.node_slicer.outputs['outzoom']
# Create sources if not supplied by a WORC network
if self.mode == 'StandAlone':
self.source_images = self.network.create_source('ITKImageFile', id='Images')
self.source_segmentations = self.network.create_source('ITKImageFile', id='Segmentations')
self.transformix_seg_nodes_train[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_seg_train_' + label,
resources=ResourceLimit(memory=memory_transformix))
self.transformix_im_nodes_train[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_im_train_' + label,
resources=ResourceLimit(memory=memory_transformix))
if self.TrainTest:
self.elastix_nodes_test[label] =\
self.network.create_node(elastix_node,
tool_version='0.2',
id='elastix_test_' + label,
resources=ResourceLimit(memory=memory_elastix))
self.transformix_seg_nodes_test[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_seg_test_' + label,
resources=ResourceLimit(memory=memory_transformix))
self.transformix_im_nodes_test[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_im_test_' + label,
resources=ResourceLimit(memory=memory_transformix))
# Create sources_segmentation
# M1 = moving, others = fixed
self.elastix_nodes_train[label].inputs['fixed_image'] =\
self.converters_seg_train[label].outputs['image']
# Input the parameters
self.nodes_segmentix_train[label].inputs['parameters'] =\
self.sources_parameters[label].output
self.sinks_segmentations_segmentix_train[label].input =\
self.nodes_segmentix_train[label].outputs['segmentation_out']
if self.TrainTest:
self.sinks_segmentations_segmentix_test[label] =\
self.network.create_sink('ITKImageFile',
id='segmentations_out_segmentix_test_' + label)
self.nodes_segmentix_test[label] =\
self.network.create_node('segmentix/Segmentix:1.0',
tool_version='1.0',
id='segmentix_test_' + label, resources=ResourceLimit(memory=memory))
self.nodes_segmentix_test[label].inputs['image'] =\
self.converters_im_test[label].outputs['image']
if hasattr(self, 'transformix_seg_nodes_test'):
if label in self.transformix_seg_nodes_test.keys():
# Use output of registration in segmentix
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.transformix_seg_nodes_test[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.converters_seg_test[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.converters_seg_test[label].outputs['image']
# Assume provided segmentation is on first modality
if nmod > 0:
# Use elastix and transformix for registration
# NOTE: Assume elastix node type is on first configuration
elastix_node =\
str(self.configs[0]['General']['RegistrationNode'])
transformix_node =\
str(self.configs[0]['General']['TransformationNode'])
memory_elastix = self.fastr_memory_parameters['Elastix']
self.elastix_nodes_train[label] =\
self.network.create_node(elastix_node,
tool_version='0.2',
id='elastix_train_' + label,
resources=ResourceLimit(memory=memory_elastix))
memory_transformix = self.fastr_memory_parameters['Elastix']
self.node_Decomposition =\
self.network.create_node('worc/Decomposition:1.0',
tool_version='1.0',
id='decomposition',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Ranked_Percentages =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_percentages',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Ranked_Posteriors =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_posteriors',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Boxplots_Features =\
self.network.create_node('worc/PlotBoxplotFeatures:1.0',
tool_version='1.0',
id='plot_boxplot_features',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
# Create sinks
self.sink_ROC_PNG =\
self.network.create_sink('PNGFile', id='ROC_PNG',
step_id='general_sinks')
self.sink_ROC_Tex =\
self.network.create_sink('TexFile', id='ROC_Tex',
step_id='general_sinks')
self.sink_ROC_CSV =\
self.network.create_sink('CSVFile', id='ROC_CSV',
step_id='general_sinks')
message = f'Toolbox {calcfeat_node} not recognized!'
raise WORCexceptions.WORCKeyError(message)
self.source_toolbox_name[label] =\
self.network.create_constant('String', toolbox,
id=f'toolbox_name_{toolbox}_{label}')
conv_train.inputs['toolbox'] = self.source_toolbox_name[label].output
conv_train.inputs['config'] = self.sources_parameters[label].output
if self.TrainTest:
conv_test =\
self.network.create_node('worc/FeatureConverter:1.0',
tool_version='1.0',
id='featureconverter_test_' + node_ID,
resources=ResourceLimit(memory='4G'))
conv_test.inputs['feat_in'] = node_test.outputs['features']
conv_test.inputs['toolbox'] = self.source_toolbox_name[label].output
conv_test.inputs['config'] = self.sources_parameters[label].output
# Append nodes to lists
self.calcfeatures_train[label].append(node_train)
self.featureconverter_train[label].append(conv_train)
if self.TrainTest:
self.calcfeatures_test[label].append(node_test)
self.featureconverter_test[label].append(conv_test)
def add_preprocessing(self, preprocess_node, label, nmod):
"""Add nodes required for preprocessing of images."""
memory = self.fastr_memory_parameters['Preprocessing']
self.preprocessing_train[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_train_' + label, resources=ResourceLimit(memory=memory))
if self.TrainTest:
self.preprocessing_test[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_test_' + label, resources=ResourceLimit(memory=memory))
# Create required links
self.preprocessing_train[label].inputs['parameters'] = self.sources_parameters[label].output
self.preprocessing_train[label].inputs['image'] = self.converters_im_train[label].outputs['image']
if self.TrainTest:
self.preprocessing_test[label].inputs['parameters'] = self.sources_parameters[label].output
self.preprocessing_test[label].inputs['image'] = self.converters_im_test[label].outputs['image']
if self.metadata_train and len(self.metadata_train) >= nmod + 1:
self.preprocessing_train[label].inputs['metadata'] = self.sources_metadata_train[label].output
if self.metadata_test and len(self.metadata_test) >= nmod + 1:
self.preprocessing_test[label].inputs['metadata'] = self.sources_metadata_test[label].output
# If there are masks to use in normalization, add them here
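# Sketch only: one way a normalization mask could be wired in, assuming the
# preprocessing tool exposes a 'mask' input (this input name is an assumption)
# and that the mask converter created elsewhere in this file exists for this label.
self.preprocessing_train[label].inputs['mask'] =\
self.converters_masks_train[label].outputs['image']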
def add_ComBat(self):
"""Add ComBat harmonization to the network.
Note: applied on all objects, not in a train-test or cross-val setting.
"""
memory = self.fastr_memory_parameters['ComBat']
self.ComBat =\
self.network.create_node('combat/ComBat:1.0',
tool_version='1.0',
id='ComBat',
resources=ResourceLimit(memory=memory))
# Create sink for ComBat output
self.sinks_features_train_ComBat = self.network.create_sink('HDF5', id='features_train_ComBat')
# Create links for inputs
self.link_combat_1 = self.network.create_link(self.source_class_config.output, self.ComBat.inputs['config'])
self.link_combat_2 = self.network.create_link(self.source_patientclass_train.output, self.ComBat.inputs['patientclass_train'])
self.link_combat_1.collapse = 'conf'
self.link_combat_2.collapse = 'pctrain'
self.links_Combat1_train = dict()
self.links_Combat1_test = dict()
# Link Combat output to both sink and classify node
self.links_Combat_out_train = self.network.create_link(self.ComBat.outputs['features_train_out'], self.classify.inputs['features_train'])
self.links_Combat_out_train.collapse = 'ComBat'
self.sinks_features_train_ComBat.input = self.ComBat.outputs['features_train_out']
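# Sketch only: the symmetric test-side wiring, assuming the ComBat tool also
# exposes a 'features_test_out' output and the classifier a 'features_test'
# input; both names are assumptions inferred from the train-side pattern above.
self.links_Combat_out_test = self.network.create_link(self.ComBat.outputs['features_test_out'], self.classify.inputs['features_test'])
self.links_Combat_out_test.collapse = 'ComBat'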
self.converters_masks_train[label].inputs['image'] = self.sources_masks_train[label].output
if self.masks_test and len(self.masks_test) >= nmod + 1:
# Create mask source and convert
self.sources_masks_test[label] = self.network.create_source('ITKImageFile', id='mask_test_' + label, node_group='test')
memory = self.fastr_memory_parameters['WORCCastConvert']
self.converters_masks_test[label] = self.network.create_node('worc/WORCCastConvert:0.3.2', tool_version='0.1', id='convert_mask_test_' + label, node_group='test', resources=ResourceLimit(memory=memory))
self.converters_masks_test[label].inputs['image'] = self.sources_masks_test[label].output
# First convert the images
if any(modality in mod for modality in ['MR', 'CT', 'MG', 'PET']):
# Use WORC PXCastConvert for converting image formats
memory = self.fastr_memory_parameters['WORCCastConvert']
self.converters_im_train[label] = self.network.create_node('worc/WORCCastConvert:0.3.2', tool_version='0.1', id='convert_im_train_' + label, resources=ResourceLimit(memory=memory))
if self.TrainTest:
self.converters_im_test[label] = self.network.create_node('worc/WORCCastConvert:0.3.2', tool_version='0.1', id='convert_im_test_' + label, resources=ResourceLimit(memory=memory))
else:
raise WORCexceptions.WORCTypeError(('No valid image type for modality {}: {} provided.').format(str(nmod), mod))
# Create required links
self.converters_im_train[label].inputs['image'] = self.sources_images_train[label].output
if self.TrainTest:
self.converters_im_test[label].inputs['image'] = self.sources_images_test[label].output
# -----------------------------------------------------
# Preprocessing
preprocess_node = str(self.configs[nmod]['General']['Preprocessing'])
print('\t - Adding preprocessing node for image preprocessing.')
self.add_preprocessing(preprocess_node, label, nmod)
# -----------------------------------------------------