image_name = self.image_name = '{prefix}{build_slug}:{ref}'.format(
    prefix=image_prefix,
    build_slug=safe_build_slug,
    ref=ref
).replace('_', '-').lower()

if self.settings['use_registry']:
    image_manifest = await self.registry.get_image_manifest(
        *'/'.join(image_name.split('/')[-2:]).split(':', 1)
    )
    image_found = bool(image_manifest)
else:
    # Check if the image exists locally!
    # Assume we're running in single-node mode or all binder pods are assigned to the same node!
    docker_client = docker.from_env(version='auto')
    try:
        docker_client.images.get(image_name)
    except docker.errors.ImageNotFound:
        # image doesn't exist, so do a build!
        image_found = False
    else:
        image_found = True

# Launch a notebook server if the image already is built
kube = self.settings['kubernetes_client']
if image_found:
    await self.emit({
        'phase': 'built',
        'imageName': image_name,
        'message': 'Found built image, launching...\n'
    })
    with LAUNCHES_INPROGRESS.track_inprogress():
        await self.launch(kube, provider)
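
# --- Standalone sketch (hypothetical helper name): the same local-image check as the
# --- else-branch above, reduced to a plain function. Assumes docker-py is installed and
# --- a Docker daemon is reachable.
import docker

def image_exists_locally(image_name):
    # True if the image is already in the local daemon's cache, False otherwise.
    client = docker.from_env(version='auto')
    try:
        client.images.get(image_name)
    except docker.errors.ImageNotFound:
        return False
    return True
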
# NOTE: the opening of this log call is missing from the snippet; the call name and the
# message text before ' rev:' are assumptions that match the four format arguments.
LOG.error('Assembly %s: failed to clone repo %s,'
          ' rev: %s, %s' %
          (self.assembly.uuid, repo_url, revision, str(e)))
if head_sha:
    logger.log(logging.INFO, 'Finished cloning repo.')
    break
elif i < MAX_GIT_CLONE_RETRY - 1:
    clone_dir = '{}/code'.format(destination)
    res = self._remove_cloned_repo(destination)
    if res != 0:
        LOG.critical('critical: cannot remove dir %s,'
                     ' disk may be full.' % clone_dir)
    time.sleep(3)
try:
    # Best-effort cleanup: the container may already be gone or mid-removal.
    self.docker.remove_container(container=ct.get('Id'))
except (errors.DockerException, errors.APIError):
    pass
return head_sha
def get_runtime_volume_id(self):
    try:
        container_data = self.client.api.inspect_container(
            self.container_name_for_service('conductor')
        )
    except docker_errors.APIError:
        raise ValueError('Conductor container not found.')
    mounts = container_data['Mounts']
    try:
        # Exactly one mount is expected at /usr; the single-element unpack raises
        # ValueError if there are zero or several matches.
        usr_mount, = [mount for mount in mounts if mount['Destination'] == '/usr']
    except ValueError:
        raise ValueError('Runtime volume not found on Conductor')
    return usr_mount['Name']
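
# --- Standalone sketch (hypothetical names): the same 'Mounts' lookup as above,
# --- generalized to any destination path. Assumes docker-py and a running container
# --- to inspect.
import docker

def volume_name_for_mount(container_name, destination='/usr'):
    client = docker.from_env(version='auto')
    try:
        data = client.api.inspect_container(container_name)
    except docker.errors.APIError:
        raise ValueError('Container {} not found.'.format(container_name))
    for mount in data.get('Mounts', []):
        # Named volumes carry a 'Name' key; plain bind mounts do not.
        if mount.get('Destination') == destination and 'Name' in mount:
            return mount['Name']
    raise ValueError('No named volume mounted at {}'.format(destination))
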
async def call_docker_with_retries(func, timeout_handler=None, apierr_handler=None):
    """Call ``func`` and retry on request timeouts; return its result, or the exception."""
    assert callable(func)
    retries = 0
    exc = None
    while True:
        try:
            return func()
        except docker.errors.APIError as e:
            # API errors are not retried: hand them to the optional handler and give up.
            if callable(apierr_handler):
                apierr_handler(e)
            exc = e
            break
        except requests.exceptions.Timeout as e:
            # Retry timeouts a few times before giving up.
            if retries == 3:
                if callable(timeout_handler):
                    timeout_handler()
                exc = e
                break
            retries += 1
            await asyncio.sleep(0.2)
    return exc
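
# --- Usage sketch (not from the original source): how the helper above might be driven.
# --- Assumes docker-py, requests and asyncio are importable and a daemon is reachable;
# --- 'ping_daemon' is a hypothetical name.
import asyncio
import functools

import docker

async def ping_daemon():
    api = docker.APIClient(version='auto', timeout=5)
    result = await call_docker_with_retries(
        functools.partial(api.ping),
        timeout_handler=lambda: print('Docker daemon did not answer after retries'),
        apierr_handler=lambda e: print('Docker API error: %s' % e),
    )
    # On success this is api.ping()'s return value; on failure it is the exception object.
    return not isinstance(result, Exception)

# asyncio.run(ping_daemon())
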
def docker_logs(self, container_id, show_stdout, show_stderr, follow):
    try:
        return self.cli.logs(container=container_id, stdout=show_stdout,
                             stderr=show_stderr, follow=follow).decode('utf-8')
    except docker.errors.APIError as ex:
        if "configured logging reader does not support reading" in str(ex):
            message = "Docker logging driver is not set to be 'json-file' or 'journald'"
            DagdaLogger.get_logger().error(message)
            raise DagdaError(message)
        else:
            message = "Unexpected exception of type {0} occurred: {1!r}" \
                .format(type(ex).__name__, str(ex))
            DagdaLogger.get_logger().error(message)
            raise ex
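
# --- Standalone sketch (hypothetical names): the same log-reading pattern with a plain
# --- low-level client, surfacing the logging-driver error as a RuntimeError.
import docker

def read_container_logs(container_id):
    cli = docker.APIClient(version='auto')
    try:
        return cli.logs(container=container_id, stdout=True, stderr=True).decode('utf-8')
    except docker.errors.APIError as ex:
        if 'configured logging reader does not support reading' in str(ex):
            raise RuntimeError("Docker logging driver is not set to 'json-file' or 'journald'")
        raise
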
def remove(self):
    if self.running():
        self.stop()
    try:
        self.client.remove_container(self.container, v=True, force=True)
    except docker.errors.NotFound:
        # the reaper may already have kicked in to delete the stopped container
        pass
    except docker.errors.APIError:
        # removal already in progress
        pass
    self.container = None
def get_docker_containers(module, container_list):
    from requests.exceptions import ConnectionError
    if len(container_list) == 0:
        # Nothing was requested, so there is nothing to look up.
        return []
    client = docker.from_env()
    try:
        containers = client.containers.list()
        docker_list = [{'container_name': i.attrs['Name'].strip('/')}
                       for i in containers
                       if i.attrs['Name'].strip('/') in container_list]
        return docker_list
    except docker.errors.APIError as e:
        module.fail_json(
            msg='Error listing containers: {}'.format(to_native(e)))
    except ConnectionError as e:
        module.fail_json(
            msg='Error connecting to Docker: {}'.format(to_native(e)))
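
# --- Standalone sketch (no AnsibleModule involved; hypothetical name): the same name
# --- filter with docker-py's high-level API, raising plain exceptions instead of fail_json.
import docker
from requests.exceptions import ConnectionError

def running_containers_named(wanted_names):
    client = docker.from_env()
    wanted = set(wanted_names)
    try:
        return [c.name for c in client.containers.list() if c.name in wanted]
    except docker.errors.APIError as e:
        raise RuntimeError('Error listing containers: {}'.format(e))
    except ConnectionError as e:
        raise RuntimeError('Error connecting to Docker: {}'.format(e))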