Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing function header and the opening of
# the `vary` dict are outside this view; `cp` (chaospy), `uq` (EasyVVUQ),
# `my_campaign` and `output_columns` are defined elsewhere. TODO confirm.
"f": cp.Uniform(0.95, 1.05)
}
# different orders for the 2 parameters
# Stochastic-collocation sampler with per-parameter polynomial orders [2, 5]
# and quadrature rule "G" (presumably Gaussian — verify against EasyVVUQ docs).
my_sampler = uq.sampling.SCSampler(vary=vary, polynomial_order=[2, 5],
quadrature_rule="G")
# Associate the sampler with the campaign
my_campaign.set_sampler(my_sampler)
# Will draw all (of the finite set of samples)
my_campaign.draw_samples()
# Create one run directory per drawn sample.
my_campaign.populate_runs_dir()
# Use this instead to run the samples using EasyVVUQ on the localhost
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
"tests/sc/sc_model.py ade_in.json"))
# Gather per-run outputs into the campaign's collation store.
my_campaign.collate()
# Post-processing analysis
# SC analysis over the quantities of interest listed in `output_columns`.
analysis = uq.analysis.SCAnalysis(sampler=my_sampler, qoi_cols=output_columns)
my_campaign.apply_analysis(analysis)
#import pickle
#pickle.dump(analysis, open('analysis.p', 'wb'))
results = my_campaign.get_last_analysis()
# Top-level `return` — this fragment is the tail of a function whose `def`
# line is not visible here.
return results, my_sampler, analysis
# NOTE(review): fragment using EasyVVUQ's older `uq.elements.*` API
# (RandomSampler/AggregateSamples/BasicStats take the campaign object
# directly). `my_campaign` and `number_of_samples` come from out-of-view
# context — TODO confirm.
random_sampler = uq.elements.sampling.RandomSampler(my_campaign)
# Draw exactly `number_of_samples` random runs into the campaign.
my_campaign.add_runs(random_sampler, max_num=number_of_samples)
assert(len(my_campaign.runs) == number_of_samples)
print(my_campaign.log)
# Create the per-run input directories, then sanity-check they exist.
my_campaign.populate_runs_dir()
assert(len(my_campaign.runs_dir) > 0)
assert(os.path.exists(my_campaign.runs_dir))
assert(os.path.isdir(my_campaign.runs_dir))
# Execute the cannonsim binary locally in each run directory.
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
"tests/cannonsim/bin/cannonsim input.cannon output.csv"))
# Columns expected in each run's output.csv (cannonsim quantities of interest).
output_filename = 'output.csv'
output_columns = ['Dist', 'lastvx', 'lastvy']
# Aggregate the selected columns from every run; `header=0` reads the first
# CSV row as the header.
aggregate = uq.elements.collate.AggregateSamples(
my_campaign,
output_filename=output_filename,
output_columns=output_columns,
header=0)
aggregate.apply()
assert(len(my_campaign.data) > 0)
# Basic statistics element over the collated value columns.
stats = uq.elements.analysis.BasicStats(
my_campaign, value_cols=output_columns)
# NOTE(review): the `}` below closes a dict (presumably `sweep`) whose opening
# lies outside this view; `params`, `encoder`, `decoder`, `collater`, `tmpdir`
# and `output_cols` are also defined elsewhere — TODO confirm.
}
sampler = uq.sampling.BasicSweep(sweep=sweep)
# In-memory SQLite campaign database (db_location='sqlite:///').
my_campaign = uq.Campaign(name='aggregate_by_var', work_dir=tmpdir, db_location='sqlite:///')
# Register the cannonsim app (encoder/decoder/collater supplied by the caller)
# and make it the active app.
my_campaign.add_app(name="cannon_test",
params=params,
encoder=encoder,
decoder=decoder,
collater=collater)
my_campaign.set_app("cannon_test")
# NOTE(review): `sampler` is rebuilt here, shadowing the identical one above.
sampler = uq.sampling.BasicSweep(sweep=sweep)
my_campaign.set_sampler(sampler)
# Draw the full sweep, create run dirs, execute locally, and collate.
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
actions = uq.actions.ExecuteLocal("tests/cannonsim/bin/cannonsim in.cannon output.csv")
my_campaign.apply_for_each_run_dir(actions)
my_campaign.collate()
results = my_campaign.get_collation_result()
# Long-format collation: one row per (run, variable) pair, hence the
# 'Variable'/'Value' columns and the n_runs * n_vars row count.
assert 'Variable' in results.columns
assert 'Value' in results.columns
n_runs = sampler.count
n_vars = len(output_cols)
assert len(results) == n_runs * n_vars
# NOTE(review): fragment exercising campaign save/reload; `state_file` and
# `tmpdir` are defined outside this view — TODO confirm. Dropping the old
# reference before reloading from the saved state file.
my_campaign = None
# Load state in new campaign object
reloaded_campaign = uq.Campaign(state_file=state_file, work_dir=tmpdir)
reloaded_campaign.set_app("cannonsim")
# Draw remaining samples, execute and collate
print("Processing remaining samples...")
reloaded_campaign.draw_samples()
print("List of runs added:")
pprint(reloaded_campaign.list_runs())
print("---")
reloaded_campaign.populate_runs_dir()
# Run cannonsim locally in every run directory of the reloaded campaign.
reloaded_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
"tests/cannonsim/bin/cannonsim in.cannon output.csv"))
print("Completed runs:")
pprint(reloaded_campaign.scan_completed())
print("All completed?", reloaded_campaign.all_complete())
reloaded_campaign.collate()
print("data:\n", reloaded_campaign.get_collation_result())
# Print the campaign log
# NOTE(review): reaches into the private `_log` attribute — works, but is
# an internal of the Campaign class.
pprint(reloaded_campaign._log)
# NOTE(review): the first line below is the dangling tail of an
# `apply_for_each_run_dir(uq.actions.ExecuteLocal(` call whose start is
# outside this view. `my_sampler`, `output_columns`, `number_of_adaptations`,
# `d` and `np` are defined elsewhere — TODO confirm.
"tests/sc/poly_model_anisotropic.py poly_in.json"))
my_campaign.collate()
data_frame = my_campaign.get_collation_result()
# Post-processing analysis
analysis = uq.analysis.SCAnalysis(sampler=my_sampler, qoi_cols=output_columns)
my_campaign.apply_analysis(analysis)
# Dimension-adaptive SC loop: each iteration proposes candidate refinements
# (look_ahead), runs the new samples, and adapts along parameter 'f'.
for i in range(number_of_adaptations):
my_sampler.look_ahead(analysis.l_norm)
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
"tests/sc/poly_model_anisotropic.py poly_in.json"))
my_campaign.collate()
data_frame = my_campaign.get_collation_result()
analysis.adapt_dimension('f', data_frame)
my_campaign.apply_analysis(analysis)
results = my_campaign.get_last_analysis()
# Visualize the final (adapted) sparse grid.
analysis.plot_grid()
# analytic mean and standard deviation
# Coefficients for the analytic reference solution: only the first
# `effective_d` dimensions are "active" (weight 1.0), the rest are 0.01.
a = np.ones(d) * 0.01
effective_d = 1
a[0:effective_d] = 1.0
# NOTE(review): fragment using EasyVVUQ's older API (`vary_param`,
# `uq.elements.*`); `my_campaign` and `number_of_samples` come from
# out-of-view context — TODO confirm.
assert("seed" in my_campaign.params_info)
# Vary the RNG seed uniformly over [0, 10000000) to produce replicas.
my_campaign.vary_param(
"seed", dist=uq.distributions.uniform_integer(
0, 10000000))
assert("seed" in my_campaign.vars)
random_sampler = uq.elements.sampling.RandomSampler(my_campaign)
my_campaign.add_runs(random_sampler, max_num=number_of_samples)
assert(len(my_campaign.runs) == number_of_samples)
my_campaign.populate_runs_dir()
# Run LAMMPS in each run directory.
# NOTE(review): hard-coded absolute path — only works where /usr/bin/lammps
# is installed.
my_campaign.apply_for_each_run_dir(
uq.actions.ExecuteLocal("/usr/bin/lammps -i in.CG.lammps"))
assert(len(my_campaign.runs_dir) > 0)
assert(os.path.exists(my_campaign.runs_dir))
assert(os.path.isdir(my_campaign.runs_dir))
# Collate LAMMPS thermo columns; `average=True` averages over replicas
# (presumably — verify against the AggregateSamples element).
output_filename = 'output_replica.csv'
output_columns = ['pe', 'temp', 'pres']
aggregate = uq.elements.collate.AggregateSamples(
my_campaign, average=True,
output_filename=output_filename,
output_columns=output_columns)
aggregate.apply()
assert(len(my_campaign.data) > 0)
# NOTE(review): fragment from an advection-diffusion (ADE) SC demo using the
# older `uq.elements.*` API; `m` (SC order/level) and `my_campaign` are
# defined outside this view — TODO confirm.
my_campaign.vary_param("f", dist=cp.distributions.Uniform(-1, 1))
# 3. Select the SC sampler to create a tensor grid
sc_sampler = uq.elements.sampling.SCSampler(my_campaign, m)
# The SC grid is finite; draw exactly all of its points.
number_of_samples = sc_sampler.number_of_samples
my_campaign.add_runs(sc_sampler, max_num=number_of_samples)
# 4. Create directories containing inputs for each run containing the
# parameters determined by the `Sampler`(s).
# This makes use of the `Encoder` specified in the input file.
my_campaign.populate_runs_dir()
# 5. Run execution - note this method of running all jobs is just for demo
# purposes.
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal("run_ADE.py ade_in.json"))
# 6. Aggregate the results from all runs.
# This makes use of the `Decoder` selected in the input file to interpret the
# run output and produce data that can be integrated in a summary pandas
# dataframe.
# Output filename is taken from the campaign's own parameter defaults.
output_filename = my_campaign.params_info['out_file']['default']
output_columns = ['u']
aggregate = uq.elements.collate.AggregateSamples(
my_campaign,
output_filename=output_filename,
output_columns=output_columns,
header=0,
)
aggregate.apply()
# NOTE(review): fragment of a gauss test with a file fixture; `my_campaign`
# and `output_json` are defined outside this view — TODO confirm.
my_campaign.populate_runs_dir()
assert(len(my_campaign.runs_dir) > 0)
assert(os.path.exists(my_campaign.runs_dir))
assert(os.path.isdir(my_campaign.runs_dir))
# Check the first run directory ("Run_0") was created and that the fixture
# file was copied into it.
run0dir = os.path.join(my_campaign.runs_dir, "Run_0/")
assert(os.path.exists(run0dir))
# check the fixture got transferred correctly
assert(os.path.exists(os.path.join(run0dir, "bias1.txt")))
# Execute the gauss model in each run directory.
my_campaign.apply_for_each_run_dir(
uq.actions.ExecuteLocal("tests/gauss/gauss_json.py gauss_in.json"))
# Collate with replica averaging, then bootstrap statistics over the result.
aggregate = uq.elements.collate.AggregateSamples(my_campaign, average=True)
aggregate.apply()
assert(len(my_campaign.data) > 0)
ensemble_boot = uq.elements.analysis.EnsembleBoot(my_campaign)
results, output_file = ensemble_boot.apply()
print(my_campaign)
# Persist campaign state to JSON and verify the file exists.
my_campaign.save_state(output_json)
assert(os.path.exists(output_json))
assert(os.path.isfile(output_json))
# NOTE(review): fragment — `vary` and `cmd` (the gauss command line) are
# defined outside this view; numbered comments continue a sequence that
# starts before this fragment. TODO confirm.
my_sampler = uq.sampling.RandomSampler(vary=vary)
my_campaign.set_sampler(my_sampler)
# 5. Get run parameters
# 3 parameter samples, 5 replicas each => 15 runs.
my_campaign.draw_samples(num_samples=3,
replicas=5)
# 6. Create run input directories
my_campaign.populate_runs_dir()
print(my_campaign)
# 7. Run Application
# - gauss is executed for each sample
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(cmd,
interpret='python3'))
# 8. Collate output
my_campaign.collate()
# 9. Run Analysis
# - Calculate bootstrap statistics for collated data
# Bootstrap grouped by the varied parameter "mu", over the "Value" column.
stats = uq.analysis.EnsembleBoot(groupby=["mu"], qoi_cols=["Value"])
my_campaign.apply_analysis(stats)
print("stats:\n", my_campaign.get_last_analysis())
# NOTE(review): fragment — the opening of the `vary` dict and the `cmd`
# definition lie outside this view. TODO confirm.
"mu": cp.Uniform(1.0, 100.0),
}
my_sampler = uq.sampling.RandomSampler(vary=vary)
my_campaign.set_sampler(my_sampler)
# 5. Get run parameters
# 3 samples x 5 replicas = 15 runs.
my_campaign.draw_samples(num_samples=3, replicas=5)
# 6. Create run input directories
my_campaign.populate_runs_dir()
# 7. Run Application
# - gauss is executed for each sample
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(cmd, interpret='python3'))
# 8. Collate output
my_campaign.collate()
# 9. Print the list of runs
pprint(my_campaign.list_runs())
# 10. Save the Campaign
# NOTE(review): relative path — the state file lands in the current working
# directory, not in the campaign's work_dir.
my_campaign.save_state("campaign_state.json")
# 11. Load state in new campaign object
print("Reloading campaign...")
reloaded_campaign = uq.Campaign(state_file="campaign_state.json", work_dir=".")
# 12. Draw some more samples, execute the runs, and collate onto existing dataframe
reloaded_campaign.draw_samples(num_samples=1, replicas=5)