# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
overrides = [str(i) for i in scenarios[scenario]]
logger.info(
'Using scenario `{}` leading to the application of '
'overrides `{}`.'.format(scenario, overrides)
)
else:
overrides = str(scenario).split(',')
logger.info(
'Applying the following overrides without a '
'specific scenario name: {}'.format(overrides)
)
overrides_from_scenario = combine_overrides(config_model, overrides)
warning_messages = checks.check_overrides(config_model, overrides_from_scenario)
exceptions.print_warnings_and_raise_errors(warnings=warning_messages)
# FutureWarning: If neither config nor override_dict includes an explicit objective cost class, check the scenario dict.
# Added in 0.6.4-dev, to be removed in v0.7.0-dev
if has_explicit_cost_class is False:
has_explicit_cost_class = isinstance(overrides_from_scenario.get_key('run.objective_options.cost_class', None), dict)
config_model.union(
overrides_from_scenario, allow_override=True, allow_replacement=True
)
for k, v in overrides_from_scenario.as_dict_flat().items():
debug_comments.set_key(
'{}'.format(k),
'Applied from override')
else:
overrides = []
if isinstance(filenames.index, pd.MultiIndex):
filenames.index = filenames.index.remove_unused_levels()
# 6) Get all timeseries data from dataframes stored in model_run
timeseries_data = []
key_errors = []
for loc_tech, (filename, column) in filenames.iteritems():
try:
timeseries_data.append(model_run.timeseries_data[filename].loc[:, column].values)
except KeyError:
key_errors.append(
'column `{}` not found in file `{}`, but was requested by '
'loc::tech `{}`.'.format(column, filename, loc_tech)
)
if key_errors:
exceptions.print_warnings_and_raise_errors(errors=key_errors)
timeseries_data_series = pd.DataFrame(index=filenames.index,
columns=data.timesteps.values,
data=timeseries_data).stack()
timeseries_data_series.index.rename('timesteps', -1, inplace=True)
# 7) Add time dimension to the relevant DataArray and update the dataset's
# dimensions with the time varying data (static data is just duplicated
# at each timestep)
timeseries_data_array = xr.broadcast(data[variable], data.timesteps)[0].copy()
timeseries_data_array.loc[
xr.DataArray.from_series(timeseries_data_series).coords
] = xr.DataArray.from_series(timeseries_data_series).values
# 8) assign correct dtype (might be string/object accidentally)
# string 'nan' to NaN:
debug_comments : AttrDict
"""
model_run = AttrDict()
model_run['scenario'] = scenario
model_run['applied_overrides'] = ';'.join(applied_overrides)
# 1) Initial checks on model configuration
warning_messages, errors = checks.check_initial(config)
exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)
# 2) Fully populate techs
# Raises ModelError if necessary
model_run['techs'], debug_techs, errors = process_techs(config)
debug_comments.set_key('model_run.techs', debug_techs)
exceptions.print_warnings_and_raise_errors(errors=errors)
# 3) Fully populate tech_groups
model_run['tech_groups'] = process_tech_groups(config, model_run['techs'])
# 4) Fully populate locations
model_run['locations'], debug_locs, warning_messages, errors = locations.process_locations(
config, model_run['techs']
)
debug_comments.set_key('model_run.locations', debug_locs)
exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)
# 5) Fully populate timeseries data
# Raises ModelErrors if there are problems with timeseries data at this stage
model_run['timeseries_data'], model_run['timesteps'] = (
process_timeseries_data(config, model_run)
)
def final_timedimension_processing(model_data):
    """
    Run the last round of validation and post-processing on the built dataset.

    The data is checked via ``checks.check_model_data`` (any warnings/errors
    are surfaced immediately, raising on errors), then passed through the
    final post-processing steps before being handed back to the caller.
    The per-variable check comments are intentionally discarded here.
    """
    checked_data, _comments, check_warns, check_errors = checks.check_model_data(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=check_warns, errors=check_errors)
    # Apply each post-processing step in sequence, feeding the result forward.
    for postprocess in (
        add_max_demand_timesteps,
        add_zero_carrier_ratio_sets,
        reorganise_xarray_dimensions,
    ):
        checked_data = postprocess(checked_data)
    return checked_data
# Operated units are carried over between iterations, so they must be defined in a MILP model
if ('loc_techs_milp' in model_data.dims.keys() and
'operated_units' not in model_data.data_vars.keys()):
model_data['operated_units'] = (
xr.DataArray([0 for loc_tech in model_data.loc_techs_milp.values],
dims='loc_techs_milp')
)
model_data['operated_units'].attrs['is_result'] = 1
model_data['operated_units'].attrs['operate_param'] = 1
exceptions.warn(
'daily operated units not defined, set to zero for all '
'loc::techs in loc_techs_milp, for use in iterative optimisation'
)
comments, warnings, errors = checks.check_operate_params(model_data)
exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)
# Initialize our variables
solver = run_config['solver']
solver_io = run_config.get('solver_io', None)
solver_options = run_config.get('solver_options', None)
save_logs = run_config.get('save_logs', None)
window = run_config['operation']['window']
horizon = run_config['operation']['horizon']
window_to_horizon = horizon - window
# get the cumulative sum of timestep resolution, to find where we hit our window and horizon
timestep_cumsum = model_data.timestep_resolution.cumsum('timesteps').to_pandas()
# get the timesteps at which we start and end our windows
window_ends = timestep_cumsum.where(
(timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1])
)
# 6) Grab additional relevant bits from run and model config
model_run['run'] = config['run']
model_run['model'] = config['model']
model_run['group_constraints'] = config.get('group_constraints', {})
# 7) Initialize sets
all_sets = sets.generate_simple_sets(model_run)
all_sets.union(sets.generate_loc_tech_sets(model_run, all_sets))
all_sets = AttrDict({k: list(v) for k, v in all_sets.items()})
model_run['sets'] = all_sets
model_run['constraint_sets'] = constraint_sets.generate_constraint_sets(model_run)
# 8) Final sense-checking
final_check_comments, warning_messages, errors = checks.check_final(model_run)
debug_comments.union(final_check_comments)
exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)
# 9) Build a debug data dict with comments and the original configs
debug_data = AttrDict({
'comments': debug_comments,
'config_initial': config,
})
return model_run, debug_data