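# These fragments omit their imports. A minimal preamble, assuming the standard
# aliases used in the EasyVVUQ examples (easyvvuq as uq, chaospy as cp) and
# that Client and SLURMCluster are provided by dask.distributed and
# dask_jobqueue:
import os
import time

import chaospy as cp
import easyvvuq as uq
from dask.distributed import Client
from dask_jobqueue import SLURMCluster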
encoder = uq.encoders.GenericEncoder(
    template_fname='tests/cooling/cooling.template',
    delimiter='$',
    target_filename='cooling_in.json')
decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                output_columns=output_columns,
                                header=0)
# Create a collation element for this campaign
collater = uq.collate.AggregateSamples(average=False)
# Create the sampler
vary = {
"kappa": cp.Uniform(0.025, 0.075),
"t_env": cp.Uniform(15, 25)
}
sampler = uq.sampling.PCESampler(vary=vary,
                                 polynomial_order=3)
actions = uq.actions.ExecuteLocal("tests/cooling/cooling_model.py cooling_in.json")
stats = uq.analysis.PCEAnalysis(sampler=sampler,
                                qoi_cols=output_columns)
campaign(tmpdir, 'pce', 'pce', params, encoder, decoder, sampler,
         collater, actions, stats, vary, 0, 1)
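# The fragments on this page reference `params`, `output_filename`, and
# `output_columns` without defining them. A minimal sketch of what those
# definitions might look like for the cooling model, assuming EasyVVUQ's usual
# {"type", "min", "max", "default"} parameter schema; the names follow the
# `vary` dictionary above, but the bounds and defaults are illustrative only.
params = {
    "temp_init": {"type": "float", "min": 0.0, "max": 100.0, "default": 95.0},
    "kappa": {"type": "float", "min": 0.0, "max": 0.1, "default": 0.025},
    "t_env": {"type": "float", "min": 0.0, "max": 40.0, "default": 15.0},
    "out_file": {"type": "string", "default": "output.csv"}
}
output_filename = params["out_file"]["default"]
output_columns = ["te"]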
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=4)
my_campaign = uq.Campaign(name='gauss_vector', work_dir=tmpdir)
my_campaign.add_app(name="gauss_vector",
                    params=params,
                    encoder=encoder,
                    decoder=decoder,
                    collater=collater)
my_campaign.set_sampler(sampler)
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
my_campaign.apply_for_each_run_dir(actions)
my_campaign.collate()
data = my_campaign.get_collation_result()
print("===== DATA:\n ", data)
analysis = uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=["numbers"])
my_campaign.apply_analysis(analysis)
results = my_campaign.get_last_analysis()
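# The analysis result can be queried per quantity of interest, as the other
# examples on this page do for 'te'. A minimal sketch for the 'numbers' column,
# assuming the same statistical_moments / sobols_first layout applies:
numbers_stats = results['statistical_moments']['numbers']
numbers_sobols = results['sobols_first']['numbers']
print(numbers_stats)
print(numbers_sobols)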
encoder = uq.encoders.GenericEncoder(
    template_fname='tests/cooling/cooling.template',
    delimiter='$',
    target_filename='cooling_in.json')
decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                output_columns=output_columns,
                                header=0)
collater = uq.collate.AggregateSamples(average=False)
vary = {
"kappa": cp.Uniform(0.025, 0.075),
"t_env": cp.Uniform(15, 25)
}
cooling_sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
cooling_action = uq.actions.ExecuteLocal("tests/cooling/cooling_model.py cooling_in.json")
cooling_stats = uq.analysis.PCEAnalysis(sampler=cooling_sampler, qoi_cols=output_columns)
return params, encoder, decoder, collater, cooling_sampler, cooling_action, cooling_stats
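# A possible way a test could wire the pieces returned above into a campaign,
# mirroring the add_app / set_sampler / collate sequence used elsewhere on this
# page. The fixture name `cooling_app` is only a placeholder; the original name
# is not shown in the fragment.
params, encoder, decoder, collater, sampler, action, stats = cooling_app()
my_campaign = uq.Campaign(name='cooling_test', work_dir=tmpdir)
my_campaign.add_app(name='cooling', params=params, encoder=encoder,
                    decoder=decoder, collater=collater)
my_campaign.set_sampler(sampler)
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
my_campaign.apply_for_each_run_dir(action)
my_campaign.collate()
my_campaign.apply_analysis(stats)
results = my_campaign.get_last_analysis()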
client.close()
client.shutdown()
time_end = time.time()
print('Time for phase 4 = %.3f' % (time_end-time_start))
time_start = time.time()
my_campaign.collate()
time_end = time.time()
print('Time for phase 5 = %.3f' % (time_end-time_start))
time_start = time.time()
# Post-processing analysis
my_campaign.apply_analysis(uq.analysis.PCEAnalysis(
    sampler=my_campaign.get_active_sampler(),
    qoi_cols=["te", "ne", "rho", "rho_norm"]))
time_end = time.time()
print('Time for phase 6 = %.3f' % (time_end-time_start))
time_start = time.time()
# Get Descriptive Statistics
results = my_campaign.get_last_analysis()
stats = results['statistical_moments']['te']
per = results['percentiles']['te']
sobols = results['sobols_first']['te']
rho = results['statistical_moments']['rho']['mean']
rho_norm = results['statistical_moments']['rho_norm']['mean']
time_end = time.time()
print('Time for phase 7 = %.3f' % (time_end-time_start))
time_start = time.time()
cmd = f"{cwd}/fusion_model.py fusion_in.json"
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(cmd, interpret='python3'))
time_end = time.time()
print('Time for phase 4 = %.3f' % (time_end-time_start))
time_start = time.time()
# Collate the results
my_campaign.collate()
time_end = time.time()
print('Time for phase 5 = %.3f' % (time_end-time_start))
time_start = time.time()
# Post-processing analysis
my_campaign.apply_analysis(uq.analysis.PCEAnalysis(
    sampler=my_campaign.get_active_sampler(),
    qoi_cols=["te", "ne", "rho", "rho_norm"]))
time_end = time.time()
print('Time for phase 6 = %.3f' % (time_end-time_start))
time_start = time.time()
# Get Descriptive Statistics
results = my_campaign.get_last_analysis()
stats = results['statistical_moments']['te']
per = results['percentiles']['te']
sobols = results['sobols_first']['te']
rho = results['statistical_moments']['rho']['mean']
rho_norm = results['statistical_moments']['rho_norm']['mean']
time_end = time.time()
print('Time for phase 7 = %.3f' % (time_end-time_start))
time_start = time.time()
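# The fusion examples above reduce the PCE analysis to the moments of 'te' on
# the 'rho' grid. A minimal visualization sketch, assuming matplotlib is
# available and that the statistical-moments table exposes 'mean' and 'std'
# columns (an assumption based on typical EasyVVUQ output, not shown above):
import matplotlib.pyplot as plt

plt.figure()
plt.plot(rho, stats['mean'], label='mean te')
plt.fill_between(rho,
                 stats['mean'] - stats['std'],
                 stats['mean'] + stats['std'],
                 alpha=0.3, label='+/- 1 std')
plt.xlabel('rho')
plt.ylabel('te')
plt.legend()
plt.savefig('te_uncertainty.png')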
my_sampler = uq.sampling.PCESampler(vary=vary,
                                    polynomial_order=3)
# Associate the sampler with the campaign
my_campaign.set_sampler(my_sampler)
# Will draw all (of the finite set of samples)
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
    "cooling_model.py cooling_in.json"))
my_campaign.collate()
# Post-processing analysis
my_analysis = uq.analysis.PCEAnalysis(sampler=my_sampler,
                                      qoi_cols=["te", "ti"])
my_campaign.apply_analysis(my_analysis)
# Get Descriptive Statistics
results = my_campaign.get_last_analysis()
stats = results['statistical_moments']['te']
per = results['percentiles']['te']
sobols = results['sobols_first']['te']
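# The first-order Sobol indices retrieved above are keyed by input parameter.
# A minimal sketch of reporting them, assuming the dictionary maps the names
# from the `vary` dictionary (e.g. 'kappa', 't_env') to index values:
for param, index in sobols.items():
    print(f"First-order Sobol index of {param}: {index}")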
cluster = SLURMCluster(job_extra=['--cluster=mpp2'], queue='mpp2_batch',
                       cores=28, memory='1 GB')
cluster.scale(1)
print(cluster.job_script())
client = Client(cluster)
my_campaign.populate_runs_dir()
cwd = os.getcwd()
cmd = f"{cwd}/cooling_model.py cooling_in.json"
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(cmd, interpret='python3'), client)
my_campaign.collate()
# Post-processing analysis
my_analysis = uq.analysis.PCEAnalysis(sampler=my_sampler,
                                      qoi_cols=["te"])
my_campaign.apply_analysis(my_analysis)
# Get Descriptive Statistics
results = my_campaign.get_last_analysis()
stats = results['statistical_moments']['te']
per = results['percentiles']['te']
sobols = results['sobols_first']['te']
print(stats)
print(per)
print(sobols)
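# For quick local testing of the same workflow without a SLURM allocation, a
# Dask LocalCluster can stand in for the SLURMCluster above. A minimal sketch,
# assuming Client and SLURMCluster here come from dask.distributed and
# dask_jobqueue; the worker counts are illustrative.
from dask.distributed import Client, LocalCluster

local_cluster = LocalCluster(n_workers=2, threads_per_worker=1)
local_client = Client(local_cluster)
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(cmd, interpret='python3'),
                                   local_client)
local_client.close()
local_cluster.close()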