# Scrape residue (Snyk banner), kept as a comment so the file parses:
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
# One-time module-level setup: fetch the PEtab benchmark-model collection,
# build the AMICI model for the Boehm (J. Proteome Res. 2014) problem, and
# derive the pyABC objects (prior, model wrapper, kernel) used below.
benchmark_dir = "doc/examples/tmp/benchmark-models-petab"
if not os.path.exists(benchmark_dir):
    # Shallow clone (depth=1) is sufficient — only the model files are needed.
    git.Repo.clone_from(
        "https://github.com/benchmarking-initiative"
        "/benchmark-models-petab.git",
        benchmark_dir, depth=1)
g = git.Git(benchmark_dir)
# update repo if online
try:
    g.pull()
except git.exc.GitCommandError:
    # Offline or transient git failure — fall back to the existing checkout.
    pass
# create problem
petab_problem = petab.Problem.from_yaml(os.path.join(
    benchmark_dir, "Benchmark-Models",
    "Boehm_JProteomeRes2014", "Boehm_JProteomeRes2014.yaml"))
# compile amici
model = amici.petab_import.import_petab_problem(petab_problem)
solver = model.getSolver()
# import to pyabc
importer = pyabc.petab.AmiciPetabImporter(petab_problem, model, solver)
# extract required objects
prior = importer.create_prior()
# NOTE(review): this rebinds `model` from the AMICI model object to the
# pyABC model wrapper — intentional-looking, but confirm downstream users.
model = importer.create_model()
kernel = importer.create_kernel()
# (end of setup; test definitions follow)
def test_preeq_guesses():
    """
    Test whether optimization with preequilibration guesses works.

    Asserts that steadystate guesses are written by a single optimizer
    start and that the recorded guess value is finite afterwards.
    """
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)
    # assert that initial guess is uninformative
    assert problem.objective.steadystate_guesses['fval'] == np.inf
    optimizer = pypesto.ScipyOptimizer('L-BFGS-B', options={'maxiter': 50})
    # The return value is not needed here: minimize() mutates the
    # objective's steadystate guesses as a side effect, which is what
    # this test inspects (previously bound to an unused local `result`).
    pypesto.minimize(
        problem=problem, optimizer=optimizer, n_starts=1,
    )
    # After one start, a finite steadystate guess must have been recorded.
    assert problem.objective.steadystate_guesses['fval'] < np.inf
    assert len(obj.steadystate_guesses['data']) == 1
def test_pipeline_forward():
    """Run forward selection twice and check the compared-model sets.

    The second run reuses the selector's accumulated selection history,
    so models already tested are not retested but still appear in it.
    """
    petab_problem = petab.Problem.from_yaml(EXAMPLE_YAML)
    selector = ModelSelector(petab_problem, EXAMPLE_MODELS)
    # Exercise the model generator for smoke coverage; the resulting
    # list itself is unused (previously bound to an unused local).
    list(selector.model_generator())
    # Only the history is inspected; the selected models are not.
    _, _, selection_history = selector.select('forward', 'AIC')
    assert models_compared_with(INITIAL_VIRTUAL_MODEL, selection_history) == \
        {'M5_0', 'M6_0', 'M7_0'}
    assert models_compared_with('M6_0', selection_history) == \
        {'M3_0', 'M4_0'}
    # Second forward run: history accumulates across runs.
    _, _, selection_history = \
        selector.select('forward', 'AIC')
    # includes models compared to `INITIAL_VIRTUAL_MODEL` in first run, as
    # `selection_history` includes them (they were not retested)
    assert models_compared_with(INITIAL_VIRTUAL_MODEL, selection_history) == \
        {'M5_0', 'M6_0', 'M7_0', 'M2_0'}
-------
problem:
The problem containing correctly fixed parameter values.
"""
# overwrite petab_problem by problem in case it refers to yaml
# TODO if yaml is specified in the model spec file, then a new problem
# might be created for each model row. This may be undesirable as the same
# model might be compiled for each model row with the same YAML value
if petab_problem is None and YAML_FILENAME in row.keys():
raise NotImplementedError()
# TODO untested
# YAML_FILENAME_COLUMN is not currently specified in the model
# specifications file (instead, the SBML .xml file is)
petab_problem = row[YAML_FILENAME]
if isinstance(petab_problem, str):
petab_problem = petab.Problem.from_yaml(petab_problem)
## drop row entries not referring to parameters
## TODO switch to just YAML_FILENAME
#for key in [YAML_FILENAME, SBML_FILENAME, MODEL_ID]:
# if key in row.keys():
# row.pop(key)
row_parameters = {k: row[k] for k in row if k not in NOT_PARAMETERS}
for par_id, par_val in row_parameters.items():
#for par_id, par_val in row.items():
if par_id not in petab_problem.x_ids:
print(Fore.YELLOW + f'Warning: parameter {par_id} is not defined '
f'in PETab model. It will be ignored.')
continue
if not np.isnan(par_val):
petab_problem.parameter_df[ESTIMATE].loc[par_id] = 0
A single, unambiguous model selection row.
petab_problem:
The petab problem for which to perform model selection.
obj:
The objective to modify for model selection.
Returns
-------
problem:
The problem containing correctly fixed parameter values.
"""
# overwrite petab_problem by problem in case it refers to yaml
if petab_problem is None and YAML_FILENAME_COLUMN in row.keys():
petab_problem = row[YAML_FILENAME_COLUMN].str
if isinstance(petab_problem, str):
petab_problem = petab.Problem.from_yaml(petab_problem)
importer = PetabImporter(petab_problem)
# drop row entries not referring to parameters
for key in [YAML_FILENAME_COLUMN, MODEL_NAME_COLUMN]:
if key in row.keys():
row.pop(key)
for par_id, par_val in row.items():
if par_id not in petab_problem.x_ids:
print(Fore.YELLOW + f'Warning: parameter {par_id} is not defined '
f'in PETab model. It will be ignored.')
continue
if not np.isnan(par_val):
petab_problem.parameter_df[ESTIMATE].loc[par_id] = 0
petab_problem.parameter_df[NOMINAL_VALUE].loc[par_id] = par_val
# petab_problem.parameter_df.lowerBound.loc[par_id] = float("NaN")