# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def update_config(config):
    """
    Return the default configuration merged with values from *config*.

    Starts from ``make_default_config()`` and overlays any values the user
    supplied in *config*.  Also migrates the legacy ``java_mem`` key to the
    newer ``mem`` key when only the old one is present.
    """
    # Older config files used 'java_mem'; carry it over to the new 'mem' key
    # unless the user already set 'mem' explicitly.
    if 'java_mem' in config and 'mem' not in config:
        config['mem'] = config['java_mem']
    # Layer user-specified values on top of the defaults.
    merged = make_default_config()
    utils.update_config(merged, config)
    return merged
# ---- Command-line interface (ChIP-seq track hub) ------------------------
ap = argparse.ArgumentParser()
ap.add_argument('config', help='Main config.yaml file')
ap.add_argument('hub_config', help='Track hub config YAML file')
ap.add_argument('--additional-configs', nargs='+',
                help='Additional config files with which to update the main '
                'config')
args = ap.parse_args()
# Access configured options. See comments in example hub_config.yaml for
# details
config = yaml.load(open(args.config), Loader=yaml.FullLoader)
if args.additional_configs:
    for cfg in args.additional_configs:
        # NOTE(review): called with two arguments, but the update_config
        # defined earlier in this file accepts only one — confirm whether
        # utils.update_config (two-arg, in-place merge) was intended here.
        update_config(config, yaml.load(open(cfg), Loader=yaml.FullLoader))
hub_config = yaml.load(open(args.hub_config), Loader=yaml.FullLoader)
# Scaffold an empty UCSC track hub from the hub_config metadata.
# `default_hub` is defined elsewhere (not visible in this excerpt).
hub, genomes_file, genome, trackdb = default_hub(
    hub_name=hub_config['hub']['name'],
    short_label=hub_config['hub']['short_label'],
    long_label=hub_config['hub']['long_label'],
    email=hub_config['hub']['email'],
    genome=hub_config['hub']['genome']
)
# Pattern-aware config; the patterns file is expected to live next to the
# main config file.
c = ChIPSeqConfig(config, os.path.join(os.path.dirname(args.config), 'chipseq_patterns.yaml'))
# Set up subgroups based on unique values from columns specified in the config
df = pandas.read_csv(config['sampletable'], comment='#', sep='\t')
cols = hub_config['subgroups']['columns']
# NOTE(review): this fragment ends with a bare `return config`, so it is the
# tail of a function whose `def` line is not visible in this excerpt;
# `metadata_file` is presumably a parameter or earlier local — confirm.
try:
    # Load the sample metadata table; assumes a CSV with at least 'Run' and
    # 'SampleName' columns — TODO confirm against the producing rule.
    with open(metadata_file, "r") as fh:
        reader = csv.DictReader(fh.readlines())
        metadata_list = [row for row in reader]
    # Map each run accession to its sample name for downstream lookups.
    run2sample = {row["Run"]:row["SampleName"] for row in metadata_list}
    # Default configuration seeded from the metadata file location/contents.
    config_default = {
        'bio.ngs.settings' : {
            'sampleinfo' : metadata_file
        },
        'bio.ngs.tools.sratools': {
            '_datadir': os.path.dirname(metadata_file),
            '_run2sample' : run2sample,
            '_metadata' : metadata_list
        },
    }
    # Overlay user-supplied config on the defaults (two-argument, in-place
    # merge — a different update_config than the one-arg version above).
    update_config(config_default, config)
    config = config_default
except Exception:
    # NOTE(review): catching Exception masks the real cause (e.g. a KeyError
    # from a malformed CSV would also report "no metadata file"); presumably
    # FileNotFoundError is what is meant — confirm and narrow.
    raise Exception("""
no metadata file '{metadata}' found
please initiate analysis by running 'snakemake {metadata}'
""".format(metadata=metadata_file))
return config
# ---- Command-line interface (RNA-seq track hub) -------------------------
ap = argparse.ArgumentParser()
ap.add_argument('config', help='Main config.yaml file')
ap.add_argument('hub_config', help='Track hub config YAML file')
ap.add_argument('--additional-configs', nargs='+',
                help='Additional config files with which to update the main '
                'config',)
args = ap.parse_args()
# Access configured options. See comments in example hub_config.yaml for
# details
config = yaml.load(open(args.config), Loader=yaml.FullLoader)
hub_config = yaml.load(open(args.hub_config), Loader=yaml.FullLoader)
if args.additional_configs:
    for cfg in args.additional_configs:
        # NOTE(review): called with two arguments, but the update_config
        # defined earlier in this file accepts only one — confirm whether
        # utils.update_config (two-arg, in-place merge) was intended here.
        update_config(config, yaml.load(open(cfg), Loader=yaml.FullLoader))
# Pattern-aware config; the patterns file is expected to live next to the
# main config file.
c = RNASeqConfig(config, os.path.join(os.path.dirname(args.config), 'rnaseq_patterns.yaml'))
# Scaffold an empty UCSC track hub from the hub_config metadata.
# `default_hub` is defined elsewhere (not visible in this excerpt).
hub, genomes_file, genome, trackdb = default_hub(
    hub_name=hub_config['hub']['name'],
    short_label=hub_config['hub']['short_label'],
    long_label=hub_config['hub']['long_label'],
    email=hub_config['hub']['email'],
    genome=hub_config['hub']['genome']
)
# Set up subgroups based on the configured columns
df = pandas.read_csv(config['sampletable'], comment='#', sep='\t')
cols = hub_config['subgroups']['columns']
subgroups = []
# NOTE(review): the body of this loop continues beyond this excerpt.
for col in cols: