Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_it_raises_exception_when_timeout_reached(self, *_):
    """Verify delete_namespace surfaces RetryError when retries are exhausted."""
    # Collapse the retry policy to one immediate attempt so the test does
    # not actually sit through the configured timeout.
    await_no_resources_found.retry.stop = stop_after_attempt(1)
    await_no_resources_found.retry.wait = wait_none()
    with pytest.raises(RetryError):
        delete_namespace("a_namespace")
async def test_fetch_and_parse_00(empty_page):
    """Ensure an empty page raises an exception.

    The fetch is limited to a single attempt so the RetryError surfaces
    immediately instead of after the full retry schedule.
    """
    # Configure the retry policy *before* entering the raises block so the
    # assertion covers only the awaited call, not the test setup.
    apd.fetch_and_parse.retry.stop = stop_after_attempt(1)
    with pytest.raises(RetryError):
        await apd.fetch_and_parse(None, 'url')
# NOTE(review): fragment of a tenacity-style retry loop — the enclosing
# function (and the bindings of fut/tb/attempt_number/start_time) begins
# before this chunk, so full context cannot be reconstructed here.
try:
_capture(fut, tb)
finally:
# Drop the traceback reference to break the frame reference cycle.
del tb
# A falsy retry predicate means this attempt is accepted: return its result.
if not self.retry(fut):
return fut.result()
if self._after_attempts:
self._after_attempts(attempt_number)
# Wall-clock time elapsed since the first attempt, in milliseconds.
delay_since_first_attempt_ms = int(
round(now() * 1000)
) - start_time
# Stop condition met: wrap the failed future in RetryError, chaining the
# original exception (six.raise_from for py2/py3 compatibility).
if self.stop(attempt_number, delay_since_first_attempt_ms):
six.raise_from(RetryError(fut), fut.exception())
if self.wait:
sleep = self.wait(attempt_number, delay_since_first_attempt_ms)
else:
sleep = 0
# Wait strategies return milliseconds; self.sleep takes seconds.
self.sleep(sleep / 1000.0)
attempt_number += 1
# NOTE(review): fragment of a responsiveness-audit method; the enclosing
# def begins before this chunk.
results = {}
for url in self.config.urls:
# Pre-seed each URL with an empty entry so a failure below still leaves
# a well-formed result for that URL.
results[url] = {
'sizes': None,
'min_document_width': None,
}
# responsive check
try:
result = self.check_responsiveness(url)
results[url] = result
# Best-effort per URL: log the failure and keep the pre-seeded entry.
# NOTE(review): logging.warn is deprecated — prefer logging.warning.
except TimeoutException as e:
logging.warn("TimeoutException when checking responsiveness for %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError when checking responsiveness for %s: %s" % (url, re))
pass
# Shut the webdriver down before returning the collected results.
self.driver.quit()
return results
async def main():
    """Block until RedisDB and PostgreSQL are reachable, then report ready.

    Exits the process with status 1 if either backend cannot be reached
    within its retry budget (tenacity.RetryError).
    """
    # Local import keeps this block self-contained.
    import sys

    logger.info("Wait for RedisDB...")
    try:
        await wait_redis()
    except tenacity.RetryError:
        logger.error("Failed to establish connection with RedisDB.")
        # sys.exit instead of the interactive-only exit() builtin, which is
        # injected by the site module and may be absent (e.g. python -S).
        sys.exit(1)

    logger.info("Wait for PostgreSQL...")
    try:
        await wait_postgres()
    except tenacity.RetryError:
        logger.error("Failed to establish connection with PostgreSQL.")
        sys.exit(1)

    logger.info("Ready.")
# NOTE(review): fragment — the dict literal below is opened (and the
# enclosing loop/method defined) before this chunk begins.
'logs': None,
'font_families': None,
}
# responsive check
try:
sizes = self.check_responsiveness(url)
results[url] = {
'sizes': sizes,
# Narrowest document width observed across all tested viewport sizes.
'min_document_width': min([s['document_width'] for s in sizes]),
'logs': self.capture_log(),
}
# Best-effort: log and keep the pre-seeded empty entry for this URL.
# NOTE(review): logging.warn is deprecated — prefer logging.warning.
except TimeoutException as e:
logging.warn("TimeoutException when checking responsiveness for %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError when checking responsiveness for %s: %s" % (url, re))
pass
# Scrolling is likewise best-effort; failures must not abort the audit.
try:
self.scroll_to_bottom()
except TimeoutException as e:
logging.warn("TimeoutException in scroll_to_bottom for %s: %s" % (url, e))
pass
except tenacity.RetryError as re:
logging.warn("RetryError in scroll_to_bottom for %s: %s" % (url, re))
pass
# CSS collection
font_families = None
# NOTE(review): fragment of a retrying HTTP helper (_inner_fetch) plus its
# caller; both enclosing defs begin before this chunk.
try:
logging.info(u'%s %s' % (verb, url))
# Resolve e.g. requests.get / requests.post from the verb name.
http_method = getattr(requests, verb)
resp = http_method(url, headers=headers, **kwargs)
logging.info(u'shippable status code: %s' % resp.status_code)
logging.info(u'shippable reason: %s' % resp.reason)
# Any status other than OK / redirect / bad-request schedules another
# attempt via tenacity's TryAgain.
if resp.status_code not in [200, 302, 400]:
logging.error(u'RC: %s' % resp.status_code)
raise TryAgain
return resp
try:
logging.debug(u'%s' % url)
return _inner_fetch(verb=verb)
except RetryError as e:
# Retries exhausted: log and fall through (implicitly returns None).
logging.error(e)
def update_health_monitor(self, health_monitor_id, health_monitor_updates):
"""Updates a health monitor.
:param health_monitor_id: ID of the health monitor to update
:param health_monitor_updates: Dict containing updated health monitor
:returns: None
:raises HMNotFound: The referenced health monitor was not found
"""
health_mon = None
try:
# Poll the DB until the object goes into PENDING_UPDATE, retrying via
# tenacity (per the warning below, for up to ~60 seconds).
health_mon = self._get_db_obj_until_pending_update(
self._health_mon_repo, health_monitor_id)
except tenacity.RetryError as e:
# Retries exhausted: assume an in-flight upgrade and continue with the
# object returned by the last attempt instead of failing the update.
LOG.warning('Health monitor did not go into %s in 60 seconds. '
'This either due to an in-progress Octavia upgrade '
'or an overloaded and failing database. Assuming '
'an upgrade is in progress and continuing.',
constants.PENDING_UPDATE)
health_mon = e.last_attempt.result()
pool = health_mon.pool
listeners = pool.listeners
pool.health_monitor = health_mon
load_balancer = pool.load_balancer
# NOTE(review): chunk is truncated here — the store dict and the rest of
# this method continue past the visible lines.
update_hm_tf = self._taskflow_load(
self._health_monitor_flows.get_update_health_monitor_flow(),
store={constants.HEALTH_MON: health_mon,
constants.POOL: pool,
def get_port_ofport(self, port_name):
    """Get the port's assigned ofport, retrying if not yet assigned."""
    try:
        return self._get_port_val(port_name, "ofport")
    except tenacity.RetryError:
        # Retries ran out without an ofport being assigned; log with the
        # traceback and fall back to the sentinel value.
        LOG.exception("Timed out retrieving ofport on port %s.",
                      port_name)
        return INVALID_OFPORT
# NOTE(review): fragment — the `try:` matching the except below (and the
# enclosing function) lies before this chunk.
storage.upload(locustfile)
kube.create_deployment(ms.controller)
# Block until the controller pod reports ready before creating the rest.
kube.wait_until_pod_ready(ms.controller)
if ms.worker:
kube.create_deployment(ms.worker)
kube.create_service(ms.service)
kube.create_ingress(ms.ingress)
if ms.others:
kube.try_creating_custom_objects(ms.others)
except (kube.ApiException, RetryError) as err:
# NOTE(review): tenacity.RetryError has no `.reason` attribute — this
# line would itself raise AttributeError on that branch; verify upstream.
logging.error("Kubernetes operation failed: %s", err.reason)