@staticmethod
def retry(method_to_check, retry_timeout=RETRY_TIMEOUT,
          retry_interval=RETRY_INTERVAL):
    return Retrying(stop_max_delay=retry_timeout * 1000,
                    wait_fixed=retry_interval * 1000).call(method_to_check)
def wrapped_f(*args, **kw):
    if _retry_init:
        rargs, rkw = _retry_init(dargs, dkw)
    else:
        rargs, rkw = dargs, dkw
    return Retrying(*rargs, **rkw).call(_warn_about_exceptions(f), *args, **kw)
def register_exchanges_with_retry():
    def retry_if_io_error(exception):
        return isinstance(exception, socket.error)

    retrying_obj = retrying.Retrying(
        retry_on_exception=retry_if_io_error,
        wait_fixed=cfg.CONF.messaging.connection_retry_wait,
        stop_max_attempt_number=cfg.CONF.messaging.connection_retries
    )
    return retrying_obj.call(register_exchanges)
def _wrapper(*args, **kwargs):
    r = retrying.Retrying(retry_on_exception=_retry_on_exception,
                          wait_func=_backoff_sleep,
                          stop_func=_print_stop)
    return r.call(f, *args, **kwargs)
def db_func_with_retry(db_func, *args, **kwargs):
    """
    This is a generic retry function to support database setup and cleanup.
    """
    # Using this as a decorator would be nice, but decorator arguments are
    # evaluated at import time, and simple uses of a decorator mean the config
    # gets read before it is set up. There is likely a way to use proxies to
    # delay the actual reading of config values, but this is less code.
    retrying_obj = retrying.Retrying(
        retry_on_exception=_retry_if_connection_error,
        wait_exponential_multiplier=cfg.CONF.database.connection_retry_backoff_mul * 1000,
        wait_exponential_max=cfg.CONF.database.connection_retry_backoff_max_s * 1000,
        stop_max_delay=cfg.CONF.database.connection_retry_max_delay_m * 60 * 1000,
    )
    return retrying_obj.call(db_func, *args, **kwargs)
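The comment above captures a real trade-off with the retrying library: the same policy can be written with its @retry decorator, but decorator arguments are evaluated at import time, before cfg.CONF has been populated. A minimal sketch of that decorator form follows; the exception predicate and the numeric values are placeholders for illustration, not taken from the original project.

import retrying

# Decorator form (sketch): the keyword arguments are evaluated when the module
# is imported, which is why the snippet above builds a Retrying object at call
# time instead.
@retrying.retry(
    retry_on_exception=lambda exc: isinstance(exc, ConnectionError),  # placeholder predicate
    wait_exponential_multiplier=1 * 1000,   # placeholder backoff multiplier (ms)
    wait_exponential_max=64 * 1000,         # placeholder backoff cap (ms)
    stop_max_delay=5 * 60 * 1000,           # placeholder overall deadline (ms)
)
def setup_database():
    pass  # body runs under the retry policy fixed at import time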
# Client sets max_retries only
if max_retries is not None and max_retry_time is None:
    stop_max_delay = None
    stop_max_attempt_number = max_retries + 1
    wait_exponential_multiplier = self.RETRY_DEFAULT_EXPONENTIAL_BACKOFF_MS
else:
    stop_max_delay = (max_retry_time or self.RETRY_DEFAULT_MAX_RETRY_TIME_S) * 1000.0
    stop_max_attempt_number = (max_retries or self.RETRY_DEFAULT_MAX_RETRIES) + 1

    # Compute the backoff so that max_retries queries fit within the allowed delay.
    # Solves the following formula (assumes requests are immediate):
    #   max_retry_time = sum(exp_multiplier * 2 ** i) for i from 1 to max_retries + 1
    wait_exponential_multiplier = stop_max_delay / ((2 ** (stop_max_attempt_number + 1)) - 2)

return Retrying(stop_max_attempt_number=stop_max_attempt_number,
                stop_max_delay=stop_max_delay,
                retry_on_exception=_hc_retry_on_exception,
                wait_exponential_multiplier=wait_exponential_multiplier,
                wait_jitter_max=self.RETRY_DEFAULT_JITTER_MS)
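To make the backoff derivation in that comment concrete, here is a quick check with illustrative numbers; max_retries = 4 and max_retry_time = 30 s are assumptions for the example, not values from the original client.

# Illustrative check of the formula above, not part of the original client code.
max_retries = 4          # assumed client setting
max_retry_time = 30      # assumed overall budget in seconds

stop_max_attempt_number = max_retries + 1                                  # 5 attempts
stop_max_delay = max_retry_time * 1000.0                                   # 30000 ms
multiplier = stop_max_delay / ((2 ** (stop_max_attempt_number + 1)) - 2)   # ~483.9 ms

# Under the comment's model, the waits are multiplier * 2**i for i = 1..5
# and sum back to the full delay budget.
assert abs(sum(multiplier * 2 ** i for i in range(1, stop_max_attempt_number + 1))
           - stop_max_delay) < 1e-6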