def test_neutron_bgp_speaker_appears_on_agent(self):
    openstack_utils.neutron_bgp_speaker_appears_on_agent.retry.stop = \
        tenacity.stop_after_attempt(1)
    self.assertEqual(
        openstack_utils.neutron_bgp_speaker_appears_on_agent(
            self.neutronclient, 'FAKE_AGENT_ID'),
        self.bgp_speakers)
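
A function decorated with tenacity exposes its retry controller as the .retry attribute, which is what lets the test above collapse the production retry policy to a single attempt. A minimal sketch of the same trick, using a hypothetical fetch_status helper and fake client:

import tenacity

@tenacity.retry(stop=tenacity.stop_after_attempt(10),
                wait=tenacity.wait_fixed(30))
def fetch_status(client):
    # Hypothetical production helper that polls a slow backend.
    return client.get_status()

def test_fetch_status_fails_fast(fake_client):
    # Collapse the policy so the test makes one attempt with no pauses
    # instead of ten attempts spaced thirty seconds apart.
    fetch_status.retry.stop = tenacity.stop_after_attempt(1)
    fetch_status.retry.wait = tenacity.wait_fixed(0)
    assert fetch_status(fake_client) == 'ACTIVE'

Patching .retry.stop mutates shared state for every later caller; tests that need isolation can use retry_with() instead, as shown further down.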
@retry(
    stop=stop_after_attempt(3),
    wait=wait_fixed(5)
)
def notify(self, message):
    """Send the notification.

    Args:
        message (str): The message to send.
    """
    max_message_size = 4096
    message_chunks = self.chunk_message(message=message,
                                        max_message_size=max_message_size)
    for message_chunk in message_chunks:
        self.bot.send_message(chat_id=self.chat_id, text=message_chunk,
                              parse_mode=self.parse_mode)
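
The decorator above wraps the whole notify call, so a transient Telegram error replays every chunk from the start. A minimal sketch, assuming a hypothetical chunk_message splitter and a bot object with a send_message method, that instead gives each chunk its own retry budget:

import tenacity

def chunk_message(message, max_message_size):
    # Hypothetical splitter: Telegram rejects texts longer than 4096 chars.
    return [message[i:i + max_message_size]
            for i in range(0, len(message), max_message_size)]

@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_fixed(5))
def send_chunk(bot, chat_id, text):
    # Three attempts, five seconds apart, per chunk; chunks that were
    # already delivered are not sent again when a later chunk fails.
    bot.send_message(chat_id=chat_id, text=text)

def notify(bot, chat_id, message):
    for chunk in chunk_message(message, max_message_size=4096):
        send_chunk(bot, chat_id, chunk)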
@tenacity.retry(reraise=True,
                retry=tenacity.retry_if_exception_type(ApiError),
                stop=tenacity.stop_after_attempt(3))
def get(kind, name=None, namespace=None):
    command = kubectl_command_builder('get', resource=kind, name=name,
                                      namespace=namespace, flags=['-o', 'yaml'])
    get_process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=sys.stderr)
    objects = yaml.safe_load(get_process.stdout)
    if get_process.wait() != 0:
        raise ApiError
    return objects
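
Here retry_if_exception_type(ApiError) limits retries to kubectl failures (a non-zero exit is turned into ApiError), and reraise=True surfaces the original ApiError after the third attempt rather than a tenacity.RetryError. A self-contained sketch of the same pattern using subprocess.run; the command layout and ApiError class are assumptions:

import subprocess
import tenacity
import yaml

class ApiError(Exception):
    """Raised when kubectl exits non-zero."""

@tenacity.retry(reraise=True,
                retry=tenacity.retry_if_exception_type(ApiError),
                stop=tenacity.stop_after_attempt(3))
def kubectl_get(kind, name=None, namespace=None):
    # Assumed command layout; a real wrapper would build flags more carefully.
    command = ['kubectl', 'get', kind, '-o', 'yaml']
    if name:
        command.append(name)
    if namespace:
        command.extend(['-n', namespace])
    result = subprocess.run(command, stdout=subprocess.PIPE)
    if result.returncode != 0:
        # Only ApiError triggers a retry; any other exception propagates.
        raise ApiError(f"kubectl exited with {result.returncode}")
    return yaml.safe_load(result.stdout)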
            self._declare_notification_queue_binding(target)
        except pika_drv_exc.ConnectionException as e:
            LOG.warning("Problem during declaring notification queue "
                        "binding. %s", e)
        return True
    elif isinstance(ex, (pika_drv_exc.ConnectionException,
                         pika_drv_exc.MessageRejectedException)):
        LOG.warning("Problem during sending notification. %s", ex)
        return True
    else:
        return False

if retry:
    retrier = tenacity.retry(
        stop=(tenacity.stop_never if retry == -1 else
              tenacity.stop_after_attempt(retry)),
        retry=tenacity.retry_if_exception(on_exception),
        wait=tenacity.wait_fixed(
            self._pika_engine.notification_retry_delay
        )
    )
else:
    retrier = None

msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message,
                                       ctxt)
return msg.send(
    exchange=(
        target.exchange or
        self._pika_engine.default_notification_exchange
    ),
    routing_key=target.topic,
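
Because tenacity.retry(...) simply returns a decorator, the driver can assemble the policy at runtime: stop_never when retry is -1, stop_after_attempt(retry) otherwise, and a retry_if_exception predicate that both decides whether to retry and performs recovery work. A standalone sketch of that conditional-wrapping idea, with hypothetical send_once and _build_retrier names (the driver above instead hands the retrier to msg.send):

import tenacity

def _build_retrier(retry_count, delay, on_exception):
    # Hypothetical helper mirroring the pattern above: retry forever for -1,
    # otherwise give up after retry_count attempts, pausing delay seconds.
    if not retry_count:
        return None
    return tenacity.retry(
        stop=(tenacity.stop_never if retry_count == -1
              else tenacity.stop_after_attempt(retry_count)),
        retry=tenacity.retry_if_exception(on_exception),
        wait=tenacity.wait_fixed(delay),
    )

def send_notification(send_once, retry_count=3, delay=1.0):
    def on_exception(ex):
        # The predicate can attempt recovery before asking for another try;
        # returning False lets the exception propagate unchanged.
        return isinstance(ex, ConnectionError)

    retrier = _build_retrier(retry_count, delay, on_exception)
    # Apply the decorator only when a retry policy was requested.
    sender = retrier(send_once) if retrier else send_once
    return sender()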
@retry(
    stop=stop_after_attempt(5))
def pull_from_url(dep, configs):
    '''
    :param dep: name of a dependency
    :param configs: a dict from dep_urls_py.yaml
    :return: boolean

    Files are downloaded from their URLs into a temporary directory first,
    which keeps the final directory free of partial or temporary files.
    '''
    if dep in configs:
        config = configs[dep]
        dest_dir = '/'.join([LICENSE_DIR, dep])
        cur_temp_dir = tempfile.mkdtemp()
        try:
            if config['license'] == 'skip':
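
Downloading into a scratch directory and only moving the file into place on success pairs well with stop_after_attempt(5): a failed attempt leaves nothing behind, so the next try starts clean. A sketch under that assumption, using a hypothetical download_to helper:

import shutil
import tempfile
import urllib.request
from tenacity import retry, stop_after_attempt, wait_fixed

@retry(reraise=True,
       stop=stop_after_attempt(5),
       wait=wait_fixed(2))
def download_to(url, dest_path):
    # Hypothetical downloader: fetch into a throwaway temp directory and
    # move the file into place only once the download has completed, so the
    # destination never sees a partial file even if an attempt is cut short.
    temp_dir = tempfile.mkdtemp()
    try:
        temp_path = '/'.join([temp_dir, 'download'])
        urllib.request.urlretrieve(url, temp_path)
        shutil.move(temp_path, dest_path)
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)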
if retry_max_attempts is None:
    retry_max_attempts = 10
if retry_max_wait is None:
    retry_max_wait = 60
if retry_multiplier is None:
    retry_multiplier = 0.5

# We split up send_batch and actually_send_batch so that we can use tenacity
# to handle retries for us, while still getting to use the Nursery.start_soon
# interface. This also makes it easier to separate the error handling aspects
# of sending a batch from the work of actually sending one. The general rule
# here is that errors should not escape from this function.
send = actually_send_batch.retry_with(
    wait=tenacity.wait_exponential(multiplier=retry_multiplier,
                                   max=retry_max_wait),
    stop=tenacity.stop_after_attempt(retry_max_attempts),
)

try:
    await send(bq, table, template_suffix, batch, *args, **kwargs)
# We've tried to send this batch to BigQuery, but for one reason or another
# we were unable to do so. We log the error and otherwise drop the batch on
# the floor, because there's not much else we can do here except buffer it
# forever (which is not a great idea).
except trio.TooSlowError:
    logger.error("Timed out sending %d items; Dropping them.", len(batch))
except Exception:
    logger.exception("Error sending %d items; Dropping them.", len(batch))
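
retry_with() is the non-mutating counterpart of patching .retry.stop: it returns a copy of an already-decorated function with the overridden wait and stop policy, which is how the batch sender injects configurable limits without hard-coding them in the decorator. A synchronous sketch of the same idea with hypothetical sender names:

import logging
import tenacity

logger = logging.getLogger(__name__)

@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_fixed(1))
def actually_send_batch(batch):
    # Hypothetical sender; the real one writes the rows to BigQuery.
    logger.info("sending %d items", len(batch))

def send_batch(batch, retry_max_attempts=10, retry_multiplier=0.5,
               retry_max_wait=60):
    # retry_with() returns a copy of the decorated function using the new
    # policy; the defaults baked into the decorator stay untouched.
    send = actually_send_batch.retry_with(
        wait=tenacity.wait_exponential(multiplier=retry_multiplier,
                                       max=retry_max_wait),
        stop=tenacity.stop_after_attempt(retry_max_attempts),
    )
    try:
        send(batch)
    except Exception:
        # Same rule as above: errors are logged and the batch is dropped
        # rather than letting the exception escape to the caller.
        logger.exception("Error sending %d items; Dropping them.", len(batch))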
@tenacity.retry(
    stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def create_load_balancer(self, loadbalancer, flavor=None,
                         availability_zone=None):
    """Creates a load balancer by allocating Amphorae.

    First tries to allocate an existing Amphora in READY state.
    If none are available it will attempt to build one specifically
    for this load balancer.

    :param loadbalancer: The dict of the load balancer to create
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    lb = self._lb_repo.get(db_apis.get_session(),
                           id=loadbalancer[constants.LOADBALANCER_ID])
    if not lb:
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
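
The truncated warning hints at the intent: keep re-fetching the load balancer from the database until it appears, giving up after RETRY_ATTEMPTS attempts. A generic poll-until-present sketch; the NotFound exception, repository object, and wait interval are all assumptions:

import tenacity

RETRY_ATTEMPTS = 10

class NotFound(Exception):
    """Hypothetical 'row not there yet' error used for illustration."""

@tenacity.retry(
    retry=tenacity.retry_if_exception_type(NotFound),
    wait=tenacity.wait_fixed(6),
    stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def get_load_balancer(repo, session, lb_id):
    # Raise to trigger another attempt while the record has not yet been
    # committed by the API layer; give up after RETRY_ATTEMPTS tries.
    lb = repo.get(session, id=lb_id)
    if not lb:
        raise NotFound(lb_id)
    return lb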
@retry(reraise=True, stop=stop_after_attempt(3))
def aliyun_run_ore(build, args):
    build.refresh_meta()
    ore_args = ['ore']
    if args.log_level:
        ore_args.extend(['--log-level', args.log_level])
    if args.force:
        ore_args.extend(['--force'])

    region = "us-west-1"
    if args.region is not None:
        region = args.region[0]

    upload_name = f"{build.build_name}-{build.build_id}"
    if args.name_suffix:
        upload_name = f"{build.build_name}-{args.name_suffix}-{build.build_id}"