# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
if not all(arg.units == ref_unit for arg in args):
raise ValueError('All units must be identical.')
def __check_interval_compatibility__(forecast, observation):
    """Validate that *observation* may be evaluated against *forecast*.

    Raises
    ------
    ValueError
        If the observation's interval_length exceeds the forecast's, or
        if the forecast is instantaneous but the observation is not.
    """
    if forecast.interval_length < observation.interval_length:
        raise ValueError('observation.interval_length cannot be greater than '
                         'forecast.interval_length.')
    fx_is_instant = 'instant' in forecast.interval_label
    obs_is_instant = 'instant' in observation.interval_label
    if fx_is_instant and not obs_is_instant:
        raise ValueError('Instantaneous forecasts cannot be evaluated against '
                         'interval average observations.')
@dataclass(frozen=True)
class ForecastObservation(BaseModel):
"""
Class for pairing Forecast and Observation objects for evaluation.
Parameters
----------
forecast: :py:class:`solarforecastarbiter.datamodel.Forecast`
observation: :py:class:`solarforecastarbiter.datamodel.Observation`
reference_forecast: :py:class:`solarforecastarbiter.datamodel.Forecast` or None
normalization: float or None
If None, determined by __set_normalization__
uncertainty: None, float, or str
If None, uncertainty is not accounted for. Float specifies the
uncertainty as a percentage from 0 to 100%. If str, may be
'observation_uncertainty' to indicate that the value should be
set to ``observation.uncertainty``, or may be coerceable to a
float.
if not set(metrics) <= ALLOWED_EVENT_METRICS.keys():
raise ValueError("Metrics must be in "
"ALLOWED_EVENT_METRICS.")
elif isinstance(fx, Forecast):
if not set(metrics) <= ALLOWED_DETERMINISTIC_METRICS.keys():
raise ValueError("Metrics must be in "
"ALLOWED_DETERMINISTIC_METRICS.")
def __check_categories__(categories):
    """Raise ValueError if any category is not a key of ALLOWED_CATEGORIES."""
    unknown = set(categories) - set(ALLOWED_CATEGORIES.keys())
    if unknown:
        raise ValueError('Categories must be in ALLOWED_CATEGORIES')
@dataclass(frozen=True)
class ValidationResult(BaseModel):
    """Stores the validation result for a single flag for a forecast and
    observation pair.

    Parameters
    ----------
    flag: str
        The quality flag being recorded. See
        :py:mod:`solarforecastarbiter.validation.quality_mapping`.
    count: int
        The number of timestamps that were flagged.
    """
    flag: str   # quality flag name being recorded
    count: int  # how many timestamps carried this flag
@dataclass(frozen=True)
):
raise ValueError(
'One of observation_id OR aggregate_id must be set')
def __check_plot_spec__(plot_spec):
    """Ensure that the provided plot specification is a valid JSON object.

    Parameters
    ----------
    plot_spec : str
        JSON-encoded figure specification.

    Raises
    ------
    ValueError
        If ``plot_spec`` cannot be parsed as JSON or does not represent
        a JSON object.
    """
    try:
        spec_dict = json.loads(plot_spec)
        validate(instance=spec_dict, schema={'type': 'object'})
    except (json.JSONDecodeError, ValidationError) as e:
        # chain the original parse/validation error so the root cause
        # remains visible in tracebacks
        raise ValueError('Figure spec must be a valid json object.') from e
@dataclass(frozen=True)
class ReportFigure(BaseModel):
    """Parent class for different types of Report Figures"""
    def __post_init__(self):
        # ReportFigure is effectively abstract: reject direct instantiation
        # so that only the concrete figure subclasses may be created.
        if type(self) == ReportFigure:
            raise ValueError("Invalid Report Figure. Figures must be of class "
                             "PlotlyReportFigure or BokehReportFigure.")

    @classmethod
    def from_dict(model, input_dict, raise_on_extra=False):
        # Dispatch construction to the proper subclass based on the
        # 'figure_class' key of the input dict.
        dict_ = input_dict.copy()
        if model != ReportFigure:
            # called on a subclass: defer to the normal BaseModel machinery
            return super().from_dict(dict_, raise_on_extra)
        figure_class = dict_.get('figure_class')
        if figure_class == 'plotly':
            return PlotlyReportFigure.from_dict(dict_, raise_on_extra)
        elif figure_class == 'bokeh':
            return BokehReportFigure.from_dict(dict_, raise_on_extra)
        # NOTE(review): no fallback for an unrecognized figure_class is
        # visible in this chunk -- confirm against the full source whether
        # an error is raised for other values.
raise TypeError(
"'cost_function_parameters' must be of type DatetimeCost "
"for 'datetime' cost function.")
elif self.cost_function == 'constant':
if not isinstance(self.cost_function_parameters, ConstantCost):
raise TypeError(
"'cost_function_parameters' must be of type ConstantCost "
"for 'constant' cost function.")
else:
raise ValueError(
"'cost_function' must be one of 'timeofday', 'datetime', or"
" 'constant'")
@dataclass(frozen=True)
class ErrorBandCost(BaseModel):
"""Cost that varies based on the error value. For each error band,
one of the other cost functions is applied to the errors within the band.
If an error value does not fall within any band ranges, no cost is
calculated for that error.
Parameters
----------
bands : tuple of :py:class:`solarforecastarbiter.datamodel.CostBand`
Specification of the error bands and associated cost functions.
Notes
-----
Each error is restricted to a single band/cost function, so the
order in bands determines which band is applied in ascending
priority. For example, if ``bands[0].error_range = (0, 2)``
and ``bands[1].error_range == (1, 3)``, the cost function of
processed_forecasts_observations: Tuple[ProcessedForecastObservation, ...]
messages: Tuple[ReportMessage, ...] = ()
data_checksum: Union[str, None] = None
def __check_cost_consistency__(object_pairs, available_costs):
    """Verify every pair's cost name refers to one of the available costs.

    Raises
    ------
    ValueError
        If a pair specifies a cost name not present in *available_costs*.
    """
    known_names = {cost.name for cost in available_costs}
    for pair in object_pairs:
        if pair.cost is None:
            continue
        if pair.cost not in known_names:
            raise ValueError(
                f'Object pair cost, {pair.cost}, not present in cost '
                'parameters specified here')
@dataclass(frozen=True)
class ReportParameters(BaseModel):
"""Parameters required to define and generate a Report.
Parameters
----------
name : str
Name of the report.
start : pandas.Timestamp
Start time of the reporting period.
end : pandas.Timestamp
End time of the reporting period.
forecast_fill_method : {'drop', 'forward', float}
Indicates what process to use for handling missing forecasts.
object_pairs: Tuple of ForecastObservation or ForecastAggregate
Paired Forecasts and Observations or Aggregates to be analyzed
in the report.
metrics : Tuple of str
timezone : str, default None
IANA timezone string to use when constructing datetimes. If None,
the timezone of the observations is used, which is the report
timezone when calculated in a report.
"""
times: Tuple[datetime.time, ...]
cost: Tuple[float, ...]
aggregation: str
net: bool
fill: str
timezone: str = None
__post_init__ = __validate_cost__('times')
@dataclass(frozen=True)
class DatetimeCost(BaseModel):
"""Cost values based on datetimes.
Parameters
----------
datetimes : tuple/iterable of datetime-like objects
The datetimes to associate with each cost value
cost : tuple of float
The cost per unit error of the forecasted variable for each datetime.
Must have the same length as `datetimes`.
aggregation : str
Aggregation method to use after calculating cost for the error series.
Currently only 'sum' or 'mean' are available.
net : bool
If True, compute the 'net' aggregate error instead of first calcuating
the absolute error before performing the aggregation.
fill : str
if not all(arg.variable == variable for arg in args):
raise ValueError('All variables must be identical.')
def __check_aggregate_interval_compatibility__(interval, *args):
    """Validate observations' intervals against an aggregate's interval.

    Raises
    ------
    ValueError
        If any object's interval_length exceeds *interval*, or if any
        object has an interval_value_type other than 'interval_mean'
        or 'instantaneous'.
    """
    acceptable_types = ('interval_mean', 'instantaneous')
    for obj in args:
        if obj.interval_length > interval:
            raise ValueError('observation.interval_length cannot be greater than '
                             'aggregate.interval_length.')
    for obj in args:
        if obj.interval_value_type not in acceptable_types:
            raise ValueError('Only observations with interval_value_type of '
                             'interval_mean or instantaneous are acceptable')
@dataclass(frozen=True)
class Aggregate(BaseModel):
"""
Class for keeping track of Aggregate metadata. Aggregates always
have interval_value_type of 'interval_mean'.
Parameters
----------
name : str
Name of the Aggregate, e.g. Utility X Solar PV
description : str
A description of what the aggregate is.
variable : str
Variable name, e.g. power, GHI. Each allowed variable has an
associated pre-defined unit. All observations that make up the
Aggregate must also have this variable.
aggregate_type : str
The aggregation function that will be applied to observations.
uncertainty: Union[float, None] = None
cost: Union[str, None] = None
data_object: Aggregate = field(init=False)
def __post_init__(self):
    """Finalize the forecast/aggregate pair: fill in defaults and
    validate unit and interval compatibility."""
    if self.normalization is None:
        # derive a default normalization from the aggregate metadata
        __set_aggregate_normalization__(self)
    if self.uncertainty is not None:
        # coerce to float; object.__setattr__ is required because the
        # dataclass is frozen
        object.__setattr__(self, 'uncertainty', float(self.uncertainty))
    # data_object aliases the aggregate so downstream code can treat
    # observation- and aggregate-based pairs uniformly
    object.__setattr__(self, 'data_object', self.aggregate)
    __check_units__(self.forecast, self.data_object)
    __check_interval_compatibility__(self.forecast, self.data_object)
@dataclass(frozen=True)
class BaseFilter(BaseModel):
"""
Base class for filters to be applied in a report.
"""
@classmethod
def from_dict(model, input_dict, raise_on_extra=False):
dict_ = input_dict.copy()
if model != BaseFilter:
return super().from_dict(dict_, raise_on_extra)
if 'quality_flags' in dict_:
return QualityFlagFilter.from_dict(dict_, raise_on_extra)
elif 'time_of_day_range' in dict_:
return TimeOfDayFilter.from_dict(dict_, raise_on_extra)
elif 'value_range' in dict_:
return ValueFilter.from_dict(dict_, raise_on_extra)
else:
:py:class:`solarforecastarbiter.datamodel.Site`
:py:class:`solarforecastarbiter.datamodel.SingleAxisModelingParameters`
:py:class:`solarforecastarbiter.datamodel.FixedTiltModelingParameters`
"""
modeling_parameters: PVModelingParameters = field(
default_factory=PVModelingParameters)
def __set_units__(cls):
    """Set ``units`` on a (frozen) instance according to its variable.

    Raises
    ------
    ValueError
        If the instance's variable is not in ALLOWED_VARIABLES.
    """
    variable = cls.variable
    if variable not in ALLOWED_VARIABLES:
        raise ValueError('variable %s is not allowed' % variable)
    # object.__setattr__ bypasses the frozen-dataclass immutability
    units = ALLOWED_VARIABLES[variable]
    object.__setattr__(cls, 'units', units)
@dataclass(frozen=True)
class Observation(BaseModel):
"""
A class for keeping track of metadata associated with an observation.
Units are set according to the variable type.
Parameters
----------
name : str
Name of the Observation
variable : str
Variable name, e.g. power, GHI. Each allowed variable has an
associated pre-defined unit.
interval_value_type : str
The type of the data in the observation. Typically interval_mean or
instantaneous, but additional types may be defined for events.
interval_length : pandas.Timedelta
The length of time between consecutive data points, e.g. 5 minutes,
valid_point_count: int
forecast_values: Union[pd.Series, str, None]
observation_values: Union[pd.Series, str, None]
reference_forecast_values: Union[pd.Series, str, None] = None
validation_results: Tuple[ValidationResult, ...] = ()
preprocessing_results: Tuple[PreprocessingResult, ...] = ()
# This may need to be a series, e.g. normalize by the average
# observed value per day. Hence, repeat here instead of
# only in original
normalization_factor: Union[pd.Series, float] = 1.0
uncertainty: Union[None, float] = None
cost: Union[Cost, None] = None
@dataclass(frozen=True)
class MetricValue(BaseModel):
"""Class for storing the result of a single metric calculation.
Parameters
----------
category: str
The category of the metric value, e.g. total, monthly, hourly.
metric: str
The metric that was calculated.
index: str
The index of the metric value, e.g. '1-12' for monthly metrics or
0-23 for hourly.
value: float
The value calculated for the metric.
"""
category: str
metric: str