def generate_uninitialized(self):
"""Generate an uninitialized ensemble by bootstrapping the
initialized prediction ensemble.
Returns:
Bootstrapped (uninitialized) ensemble as a Dataset.
"""
has_dataset(
self._datasets['control'], 'control', 'generate an uninitialized ensemble.'
)
uninit = bootstrap_uninit_pm_ensemble_from_control_cftime(
self._datasets['initialized'], self._datasets['control']
)
datasets = self._datasets.copy()
datasets.update({'uninitialized': uninit})
return self._construct_direct(datasets, kind='perfect')
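    # Usage sketch (hypothetical variable names; assumes a PerfectModelEnsemble
    # `pm` that already has a control run attached via `add_control`):
    #
    #     pm = pm.generate_uninitialized()
    #     uninit = pm.get_uninitialized()  # bootstrapped ensemble as a Dataset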
    # NOTE: method name and signature reconstructed from the docstring below;
    # argument defaults are assumptions.
    def verify(self, metric='pearson_r', comparison='m2e', reference=None,
               **metric_kwargs):
"""Compares the initialized ensemble to the control run.
Args:
            metric (str or Metric, default 'pearson_r'):
                Metric to apply in the comparison.
            comparison (str, default 'm2e'):
                How to compare the climate prediction ensemble to the control.
            reference (str or list of str): Reference forecasts to compare
                against.
            **metric_kwargs (optional): arguments passed to `metric`.
        Returns:
            Dataset of comparison results with a `skill` dimension for the
            different references compared against.
"""
has_dataset(self._datasets['control'], 'control', 'compute a metric')
input_dict = {
'ensemble': self._datasets['initialized'],
'control': self._datasets['control'],
'init': True,
}
init_skill = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
**metric_kwargs,
)
        if self._temporally_smoothed:
            init_skill = _reset_temporal_axis(
                init_skill, self._temporally_smoothed, dim='lead'
            )
        # NOTE: assumed completion; the original snippet breaks off here, and
        # reference-forecast skill would be appended to `init_skill` before
        # returning in the full source.
        return init_skill
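    # Usage sketch (hypothetical; relies on the reconstructed signature above):
    #
    #     skill = pm.verify(metric='rmse', comparison='m2e',
    #                       reference='persistence')
    #     # `skill` carries a `skill` coordinate ('init', 'persistence', ...)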
    # NOTE: method name, signature, and docstring head reconstructed from the
    # body below; the parameter defaults are assumptions.
    def bootstrap(self, metric='pearson_r', comparison='m2m', sig=95,
                  iterations=500, pers_sig=None, **metric_kwargs):
        """Bootstrap the initialized ensemble skill with replacement.
        Returns, among other quantities:
            * p_uninit_over_init: p value of the hypothesis that the
                difference of skill between the initialized and
                uninitialized simulations is smaller than or equal to zero
                based on bootstrapping with replacement.
            * p_pers_over_init: p value of the hypothesis that the
                difference of skill between the initialized and persistence
                simulations is smaller than or equal to zero based on
                bootstrapping with replacement.
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
"""
has_dataset(self._datasets['control'], 'control', 'iteration')
input_dict = {
'ensemble': self._datasets['initialized'],
'control': self._datasets['control'],
'init': True,
}
return self._apply_climpred_function(
bootstrap_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
**metric_kwargs,
)
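    # Usage sketch (hypothetical; keyword values are illustrative only):
    #
    #     bs = pm.bootstrap(metric='pearson_r', comparison='m2m',
    #                       iterations=500, sig=95)
    #     # p values such as p_uninit_over_init test whether initialized skill
    #     # significantly beats the uninitialized baseline.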
    # NOTE: def line and docstring opening reconstructed; the metric default
    # is taken from the Args text below.
    def compute_persistence(self, metric='pearson_r'):
        """Compute a persistence forecast for the control run.
        Args:
metric (str, default 'pearson_r'):
Metric to apply to the persistence forecast.
Returns:
            Dataset of persistence forecast results (if ``refname`` is
            declared), or a dictionary of Datasets keyed by verification data
            name.
Reference:
* Chapter 8 (Short-Term Climate Prediction) in
Van den Dool, Huug. Empirical methods in short-term climate
prediction. Oxford University Press, 2007.
"""
has_dataset(
self._datasets['control'], 'control', 'compute a persistence forecast'
)
input_dict = {
'ensemble': self._datasets['initialized'],
'control': self._datasets['control'],
'init': True,
}
res = self._apply_climpred_function(
compute_persistence,
input_dict=input_dict,
metric=metric,
alignment='same_inits',
)
if self._temporally_smoothed:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim='lead')
return res
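    # Usage sketch (hypothetical):
    #
    #     persistence_skill = pm.compute_persistence(metric='pearson_r')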
    # NOTE: method name and signature reconstructed from the docstring below.
    def compute_uninitialized(self, metric='pearson_r', comparison='m2m',
                              running=None, **metric_kwargs):
"""Compares the bootstrapped uninitialized run to the control run.
Args:
metric (str, default 'pearson_r'):
Metric to apply in the comparison.
comparison (str, default 'm2m'):
How to compare to the control run.
running (int, default None):
Size of the running window for variance smoothing.
**metric_kwargs (optional): arguments passed to `metric`.
Returns:
Result of the comparison as a Dataset.
"""
has_dataset(
self._datasets['uninitialized'],
'uninitialized',
'compute an uninitialized metric',
)
input_dict = {
'ensemble': self._datasets['uninitialized'],
'control': self._datasets['control'],
'init': False,
}
res = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
**metric_kwargs,
        )
        # NOTE: assumed completion mirroring compute_persistence above; the
        # original snippet is truncated after this call.
        if self._temporally_smoothed:
            res = _reset_temporal_axis(res, self._temporally_smoothed, dim='lead')
        return res
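    # Usage sketch (hypothetical; requires an uninitialized ensemble, e.g. via
    # generate_uninitialized(), before calling):
    #
    #     uninit_skill = pm.compute_uninitialized(metric='pearson_r',
    #                                             comparison='m2m')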
reference=r,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
for lead in forecast['lead'].data
]
ref = xr.concat(metric_over_leads, dim='lead', **CONCAT_KWARGS)
ref['lead'] = forecast['lead']
result = xr.concat([result, ref], dim='skill', **CONCAT_KWARGS)
# Add dimension/coordinate for different references.
result = result.assign_coords(skill=['init'] + reference)
return result
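        # Standalone sketch of the concat pattern above: each reference skill
        # is stacked onto the initialized skill along a new `skill` dimension
        # and then labeled. All names below are illustrative.
        #
        #     import xarray as xr
        #     init = xr.Dataset({'acc': ('lead', [0.9, 0.7])},
        #                       coords={'lead': [1, 2]})
        #     pers = xr.Dataset({'acc': ('lead', [0.8, 0.5])},
        #                       coords={'lead': [1, 2]})
        #     out = xr.concat([init, pers], dim='skill')
        #     out = out.assign_coords(skill=['init', 'persistence'])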
has_dataset(
self._datasets['observations'], 'observational', 'verify a forecast'
)
if 'historical' in reference or 'uninitialized' in reference:
has_dataset(
self._datasets['uninitialized'],
'uninitialized',
'compute an uninitialized reference forecast',
)
hist = self._datasets['uninitialized']
else:
hist = None
# TODO: Get rid of this somehow. Might use attributes.
input_dict = {
'name': name,
'init': True,
}
res = self._apply_climpred_function(
_verify,
            input_dict=input_dict,
            # NOTE: the remaining arguments are an assumed completion; the
            # original snippet is truncated at this point.
            metric=metric,
            comparison=comparison,
            dim=dim,
            hist=hist,
            reference=reference,
            **metric_kwargs,
        )
        return res
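        # Usage sketch (hypothetical object names; the metric, comparison,
        # alignment, and reference values are examples of options climpred
        # accepts):
        #
        #     skill = hindcast.verify(metric='acc', comparison='e2o',
        #                             alignment='same_verifs',
        #                             reference=['persistence', 'uninitialized'])
        #     skill.sel(skill='init')  # select the initialized skill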