import json
import logging
from collections import defaultdict

import pandas as pd

from solarforecastarbiter import api
from solarforecastarbiter.api import APISession

# Other helpers referenced below (run_persistence, _split_validation,
# _preload_load_data, capture_report_failure, get_data_for_report,
# create_raw_report_from_data,
# generate_reference_persistence_forecast_parameters,
# find_reference_nwp_forecasts, process_nwp_forecast_groups, reports,
# set_log_level, cli_access_token, and mock_raw_report_endpoints) come
# from the surrounding solarforecastarbiter modules.

logger = logging.getLogger(__name__)
def fetch_and_validate_observation(access_token, observation_id, start, end,
                                   only_missing=False, base_url=None):
    """Fetch values for the observation between start and end and
    validate them.

    Parameters
    ----------
    access_token : str
Token to access the API
observation_id : str
ID of the observation to fetch values and validate
start : datetime-like
Start time to limit observation fetch
end : datetime-like
End time to limit observation fetch
only_missing : boolean, default False
If True, only periods that have not had daily validation applied
are fetched and validated. Otherwise all data between start and end
is validated.
base_url : str, default None
URL for the API to fetch and post data
"""
session = APISession(access_token, base_url=base_url)
observation = session.get_observation(observation_id)
_split_validation(session, observation, start, end, only_missing)
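
# Illustrative usage only (hypothetical values): validate the last day of
# data for a single observation. The token and observation ID below are
# placeholders, not real credentials or identifiers.
def _example_validate_one_observation():
    end = pd.Timestamp.now(tz='UTC').floor('1h')
    start = end - pd.Timedelta('1d')
    fetch_and_validate_observation(
        'ACCESS_TOKEN', 'OBSERVATION_ID', start, end, only_missing=False)
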
def get_apisession(token, base_url=None):
return APISession(token, base_url=base_url)
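
# Illustrative usage only: one session from get_apisession can serve many
# API calls, avoiding repeated authentication setup.
def _example_list_observations(token):
    session = get_apisession(token)
    return session.list_observations()
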
def compute_report(access_token, report_id, base_url=None):
    """Create a raw report from data fetched from the API and post the
    result back to the API.

    Failures will attempt to post a message for the failure in an
    empty RawReport to the API.

    Parameters
    ----------
    access_token : str
        Token to access the API
    report_id : str
        ID of the report to fetch from the API and generate the raw
        report for
    base_url : str, default None
        URL of the API to fetch and post data
Returns
-------
raw_report : :py:class:`solarforecastarbiter.datamodel.RawReport`
"""
session = APISession(access_token, base_url=base_url)
fail_wrapper = capture_report_failure(report_id, session)
report = fail_wrapper(session.get_report, err_msg=(
'Failed to retrieve report. Perhaps the report does not exist, '
'the user does not have permission, or the connection failed.')
)(report_id)
data = fail_wrapper(get_data_for_report, err_msg=(
'Failed to retrieve data for report which may indicate a lack '
'of permissions or that an object does not exist.')
)(session, report)
raw_report = fail_wrapper(create_raw_report_from_data, err_msg=(
'Unhandled exception when computing report.')
)(report, data)
    fail_wrapper(session.post_raw_report, err_msg=(
        'Computation of report completed, but failed to upload result to '
        'the API.')
    )(report.report_id, raw_report)
    return raw_report
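
# A minimal sketch of the wrapper pattern used above. It assumes
# capture_report_failure returns a factory that wraps a callable so any
# exception is logged with err_msg before being handled; how the real
# implementation builds and posts the empty failed RawReport is not shown
# here, and this sketch is not the library's implementation.
def _capture_report_failure_sketch(report_id, session):
    def fail_wrapper(func, err_msg='Report computation failed.'):
        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # a real implementation would post a failed RawReport
                # containing err_msg to the API under report_id
                logger.exception(err_msg)
                raise
        return wrapped
    return fail_wrapper
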
def make_latest_persistence_forecasts(token, max_run_time, base_url=None):
"""Make all reference persistence forecasts that need to be made up to
*max_run_time*.
Parameters
----------
token : str
Access token for the API
max_run_time : pandas.Timestamp
Last possible run time of the forecast generation
base_url : str or None, default None
Alternate base_url of the API
"""
session = api.APISession(token, base_url=base_url)
forecasts = session.list_forecasts()
observations = session.list_observations()
params = generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time)
for fx, obs, index, data_start, issue_times in params:
load_data = _preload_load_data(session, obs, data_start, max_run_time)
serlist = []
logger.info('Making persistence forecast for %s:%s from %s to %s',
fx.name, fx.forecast_id, issue_times[0], issue_times[-1])
for issue_time in issue_times:
run_time = issue_time
try:
fx_ser = run_persistence(
session, obs, fx, run_time, issue_time,
index=index, load_data=load_data)
            except ValueError as e:
                # assumed handling: log and skip issue times that cannot
                # be forecast
                logger.error('Unable to make persistence forecast: %s', e)
            else:
                serlist.append(fx_ser)
        if len(serlist) > 0:
            # post all successfully generated values for this forecast
            session.post_forecast_values(fx.forecast_id, pd.concat(serlist))


def make_latest_nwp_forecasts(token, run_time, issue_buffer, base_url=None):
    """Make all reference NWP forecasts for *run_time* that are within
    *issue_buffer* of the next issue time for the forecast run. Only
    forecasts that belong to the same provider/organization as the token
    user will be updated.
Parameters
----------
token : str
Access token for the API
run_time : pandas.Timestamp
Run time of the forecast generation
issue_buffer : pandas.Timedelta
Maximum time between *run_time* and the next initialization time of
each forecast that will be updated
base_url : str or None, default None
Alternate base_url of the API
"""
session = api.APISession(token, base_url=base_url)
user_info = session.get_user_info()
forecasts = session.list_forecasts()
forecasts += session.list_probabilistic_forecasts()
forecasts = [fx for fx in forecasts
if fx.provider == user_info['organization']]
forecast_df = find_reference_nwp_forecasts(forecasts, run_time)
execute_for = forecast_df[
forecast_df.next_issue_time <= run_time + issue_buffer]
if execute_for.empty:
logger.info('No forecasts to be made at %s', run_time)
return
process_nwp_forecast_groups(session, run_time, execute_for)
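
# Illustrative usage only: issue any NWP reference forecasts whose next
# issue time falls within one hour of now. The token is a placeholder.
def _example_make_nwp_forecasts():
    run_time = pd.Timestamp.now(tz='UTC')
    make_latest_nwp_forecasts(
        'ACCESS_TOKEN', run_time, issue_buffer=pd.Timedelta('1h'))
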
def report(verbose, user, password, base_url, report_file, output_file,
format, serialization_roundtrip, orca_server_url):
"""
Make a report. See API documentation's POST reports section for
REPORT_FILE requirements.
"""
set_log_level(verbose)
token = cli_access_token(user, password)
with open(report_file) as f:
metadata = json.load(f)
session = APISession(token, base_url=base_url)
report = session.process_report_dict(metadata)
if orca_server_url is not None:
import plotly.io as pio
pio.orca.config.server_url = orca_server_url
if serialization_roundtrip:
with mock_raw_report_endpoints(base_url):
session.create_report(report)
reports.compute_report(token, 'no_id', base_url)
full_report = session.get_report('no_id')
else:
data = reports.get_data_for_report(session, report)
raw_report = reports.create_raw_report_from_data(report, data)
full_report = report.replace(raw_report=raw_report, status='complete')
# assumed dashboard url based on api url
dash_url = base_url.replace('api', 'dashboard')
if (
def fetch_and_validate_all_observations(access_token, start, end,
                                        only_missing=True, base_url=None):
    """Fetch and validate data between start and end for all observations
    that the token user's organization provides.

    Parameters
    ----------
access_token : str
Token to access the API
start : datetime-like
Start time to limit observation fetch
end : datetime-like
End time to limit observation fetch
only_missing : boolean, default True
If True, only periods that have not had daily validation applied
are fetched and validated. Otherwise all data between start and end
is validated.
base_url : str, default None
URL for the API to fetch and post data
"""
session = APISession(access_token, base_url=base_url)
user_info = session.get_user_info()
observations = [obs for obs in session.list_observations()
if obs.provider == user_info['organization']]
for observation in observations:
_split_validation(session, observation, start, end, only_missing)
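
# Illustrative usage only: a daily-validation style call that checks
# yesterday's data for every observation in the user's organization,
# skipping periods already validated. The token is a placeholder.
def _example_daily_validation():
    end = pd.Timestamp.now(tz='UTC').floor('1D')
    start = end - pd.Timedelta('1D')
    fetch_and_validate_all_observations(
        'ACCESS_TOKEN', start, end, only_missing=True)
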
def make_latest_probabilistic_persistence_forecasts(
token, max_run_time, base_url=None):
"""Make all reference probabilistic persistence forecasts that need to
be made up to *max_run_time*.
Parameters
----------
token : str
Access token for the API
max_run_time : pandas.Timestamp
Last possible run time of the forecast generation
base_url : str or None, default None
Alternate base_url of the API
"""
session = api.APISession(token, base_url=base_url)
forecasts = session.list_probabilistic_forecasts()
observations = session.list_observations()
params = generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time)
for fx, obs, index, data_start, issue_times in params:
load_data = _preload_load_data(session, obs, data_start, max_run_time)
out = defaultdict(list)
logger.info('Making persistence forecast for %s:%s from %s to %s',
fx.name, fx.forecast_id, issue_times[0], issue_times[-1])
for issue_time in issue_times:
run_time = issue_time
try:
fx_list = run_persistence(
session, obs, fx, run_time, issue_time,
index=index, load_data=load_data)
            except ValueError as e:
                # assumed handling: log and skip issue times that cannot
                # be forecast
                logger.error('Unable to make persistence forecast: %s', e)
            else:
                # assumed collection: run_persistence returns one series
                # per constant value; group them by constant value forecast
                for cv, fx_ser in zip(fx.constant_values, fx_list):
                    out[cv.forecast_id].append(fx_ser)
        for cv_id, serlist in out.items():
            if len(serlist) > 0:
                # assumed posting method for constant value series
                session.post_probabilistic_forecast_constant_value_values(
                    cv_id, pd.concat(serlist))
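
# Illustrative usage only: generate any probabilistic persistence forecasts
# due up to the top of the current hour. The token is a placeholder.
def _example_probabilistic_persistence():
    max_run_time = pd.Timestamp.now(tz='UTC').floor('1h')
    make_latest_probabilistic_persistence_forecasts(
        'ACCESS_TOKEN', max_run_time)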