def _get_metric_comparison_dim(metric, comparison, dim, kind):
"""Returns `metric`, `comparison` and `dim` for compute functions.
Args:
metric (str): metric or alias string
        comparison (str): comparison or alias string.
dim (list of str or str): dimension to apply metric to.
kind (str): experiment type from ['hindcast', 'PM'].
Returns:
metric (Metric): metric class
comparison (Comparison): comparison class.
dim (list of str or str): corrected dimension to apply metric to.
"""
# check kind allowed
is_in_list(kind, ['hindcast', 'PM'], 'kind')
# set default dim
if dim is None:
dim = 'init' if kind == 'hindcast' else ['init', 'member']
# check allowed dims
if kind == 'hindcast':
is_in_list(dim, ['member', 'init'], 'dim')
elif kind == 'PM':
is_in_list(dim, ['member', 'init', ['init', 'member']], 'dim')
# get metric and comparison strings incorporating alias
metric = METRIC_ALIASES.get(metric, metric)
comparison = COMPARISON_ALIASES.get(comparison, comparison)
METRICS = HINDCAST_METRICS if kind == 'hindcast' else PM_METRICS
COMPARISONS = HINDCAST_COMPARISONS if kind == 'hindcast' else PM_COMPARISONS
metric = get_metric_class(metric, METRICS)
    comparison = get_comparison_class(comparison, COMPARISONS)
    # check whether combination of metric and comparison works
    PROBABILISTIC_COMPARISONS = (
        PROBABILISTIC_HINDCAST_COMPARISONS
        if kind == 'hindcast'
        else PROBABILISTIC_PM_COMPARISONS
    )
    if metric.probabilistic and not comparison.probabilistic:
        raise ValueError(
            f'Probabilistic metric `{metric.name}` requires a comparison that '
            f'accepts multiple members, e.g. `{PROBABILISTIC_COMPARISONS}`, '
            f'found `{comparison.name}`.'
        )
    return metric, comparison, dim


def get_metric_class(metric, list_):
    """Convert a string representing the desired metric to the corresponding
    metric class.

    Currently compatible with functions:
    * compute_persistence()
* compute_perfect_model()
* compute_hindcast()
Args:
metric (str): name of metric.
        list_ (list): allowed metrics to check membership against.
Returns:
metric (Metric): class object of the metric.
"""
if isinstance(metric, metrics.Metric):
return metric
elif isinstance(metric, str):
# check if metric allowed
is_in_list(metric, list_, 'metric')
metric = METRIC_ALIASES.get(metric, metric)
return getattr(metrics, '__' + metric)
else:
raise ValueError(
f'Please provide metric as str or Metric class, found {type(metric)}'
)
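
# Usage sketch (illustrative only; assumes 'rmse' and 'm2e' are valid
# perfect-model metric and comparison strings in this module):
def _example_metric_comparison_dim():
    # Resolve strings to classes and fill in the default dim for kind='PM'.
    metric, comparison, dim = _get_metric_comparison_dim(
        'rmse', 'm2e', dim=None, kind='PM'
    )
    # With dim=None and kind='PM', dim defaults to ['init', 'member'].
    return metric, comparison, dim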

def return_inits_and_verif_dates(forecast, verif, alignment, reference=None, hist=None):
    """Returns initializations and verification dates for an arbitrary number of
    leads under a given alignment strategy.

    Args:
        forecast (xarray object): Forecast ensemble with a ``lead`` dimension and
            ``init`` renamed to ``time``.
        verif (xarray object): Verification data with a ``time`` dimension.
        alignment (str): Strategy for initialization-verification alignment; one of
            ``VALID_ALIGNMENTS``, e.g.:
            * 'maximize': Use all available initializations at each lead that verify
                against the observations provided. This changes both the set of
                initializations and the verification window used at each lead.
        reference (str or list of str, optional): Reference forecast(s), if any.
        hist (xarray object, optional): Historical simulation whose temporal
            coverage constrains the verification dates.
Returns:
inits (dict): Keys are the lead time integer, values are an ``xr.DataArray`` of
initialization dates.
verif_dates (dict): Keys are the lead time integer, values are an
``xr.CFTimeIndex`` of verification dates.
"""
if isinstance(reference, str):
reference = [reference]
elif reference is None:
reference = []
is_in_list(alignment, VALID_ALIGNMENTS, 'alignment')
units = forecast['lead'].attrs['units']
leads = forecast['lead'].values
# `init` renamed to `time` in compute functions.
all_inits = forecast['time']
all_verifs = verif['time']
# If aligning historical reference, need to account for potential differences in its
# temporal coverage. Note that the historical reference only aligns verification
# dates and doesn't care about inits.
if hist is not None:
all_verifs = np.sort(list(set(all_verifs.data) & set(hist['time'].data)))
all_verifs = xr.DataArray(all_verifs, dims=['time'], coords=[all_verifs])
# Construct list of `n` offset over all leads.
n, freq = get_multiple_lead_cftime_shift_args(units, leads)
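
# A minimal sketch (illustrative, not this module's implementation) of the
# structure documented above: ``verif_dates`` maps each lead to the
# initialization dates shifted forward by that lead. Assumes annual leads
# ('YS' frequency) and xarray's CFTimeIndex.
def _example_verif_dates():
    import xarray as xr

    inits = xr.cftime_range('1990', '1993', freq='YS')
    # Shift each initialization forward by `lead` years to get verification dates.
    return {lead: inits.shift(lead, 'YS') for lead in (1, 2, 3)}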

def get_comparison_class(comparison, list_):
    """Convert a string representing the desired comparison to the corresponding
    comparison class.

    Hindcast:
    * e2o: Compare the ensemble mean to the verification data.
* m2o: Compare each ensemble member to the verification data.
Args:
        comparison (str): name of comparison.
        list_ (list): allowed comparisons to check membership against.
Returns:
comparison (Comparison): comparison class.
"""
if isinstance(comparison, comparisons.Comparison):
return comparison
elif isinstance(comparison, str):
# check if comparison allowed
is_in_list(comparison, list_, 'comparison')
comparison = COMPARISON_ALIASES.get(comparison, comparison)
return getattr(comparisons, '__' + comparison)
    else:
        raise ValueError(
            f'Please provide comparison as str or Comparison class, '
            f'found {type(comparison)}'
        )
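
# Usage sketch (illustrative only; assumes 'm2o' is a registered hindcast
# comparison implemented as `comparisons.__m2o`):
def _example_get_comparison_class():
    # Resolve the string (or any alias in COMPARISON_ALIASES) to its class.
    return get_comparison_class('m2o', HINDCAST_COMPARISONS)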