# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def backup_blocks(self, backup):
    """Yield the backup's data in chunks downloaded from FTP storage.

    Downloads the backup's remote data file into a local temporary
    directory, then streams it back in chunks of at most
    ``self.max_segment_size`` bytes.  The temporary directory is always
    removed, even if the consumer abandons the generator.

    :param backup: backup object exposing ``data_path`` (remote location)
    :return: generator of non-empty ``bytes`` chunks
    """
    LOG.info("ftp backup_blocks ")
    self.init()
    # should recreate ssh for new process
    tmpdir = self._create_tempdir()
    try:
        data = utils.path_join(tmpdir, "data")
        LOG.info("backup_blocksa datadown=%s" % data)
        self.get_file(backup.data_path, data)
        with open(data, 'rb') as backup_file:
            while True:
                chunk = backup_file.read(self.max_segment_size)
                # BUG FIX: the file is opened in binary mode, so read()
                # returns b'' at EOF.  The old check `chunk == ''`
                # compared bytes to str, never matched, and the loop
                # never terminated under Python 3.
                if not chunk:
                    break
                yield chunk
    finally:
        # always clean up the local download directory
        shutil.rmtree(tmpdir)
def rmtree(self, path):
    """Recursively delete *path* and everything below it on the FTP server.

    Parses the server's LIST output collected via ``ftp.dir``: entries
    whose permission field starts with 'd' are treated as directories
    and removed recursively; every other entry is deleted as a plain
    file.  Finally the (now empty) directory itself is removed.
    """
    LOG.info("ftp rmtree path=%s" % path)
    listing = []
    self.ftp.dir(path, listing.append)
    LOG.info('rm files=%s' % listing)
    for entry in listing:
        fields = entry.split()
        # last whitespace-separated field is the entry name,
        # first field is the permission string (e.g. 'drwxr-xr-x')
        child = utils.path_join(path, fields[-1])
        if fields[0].startswith('d'):
            self.rmtree(child)
        else:
            self.ftp.delete(child)
    self.ftp.rmd(path)
def write_backup(self, rich_queue, backup):
    """
    Stores backup in storage
    :type rich_queue: freezer.streaming.RichQueue
    :type backup: freezer.storage.base.Backup
    """
    # spool the stream into a local scratch directory before uploading
    try:
        tmpdir = tempfile.mkdtemp()
    except Exception:
        LOG.error("Unable to create a tmp directory")
        raise
    try:
        spool = utils.path_join(tmpdir, "data_meta")
        LOG.info("ftp write data_meta %s" % spool)
        backup = backup.copy(storage=self)
        remote_path = backup.data_path
        # make sure the remote parent directory hierarchy exists
        self.create_dirs(remote_path.rsplit('/', 1)[0])
        with open(spool, mode='wb') as spool_file:
            spool_file.writelines(rich_queue.get_messages())
        self.put_file(spool, remote_path)
    finally:
        # drop the local spool dir whether or not the upload succeeded
        shutil.rmtree(tmpdir)
:type timestamp: int
:param level: current incremental level of backup
:type level: int
:return:
"""
self.hostname_backup_name = hostname_backup_name
self.timestamp = timestamp
self.level = level
self.engine = engine
self.storage = storage
self.level_zero_timestamp = level_zero_timestamp
if storage:
self.increments_data_path = utils.path_join(
self.storage.storage_path, "data", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.increments_metadata_path = utils.path_join(
self.storage.storage_path, "metadata", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.data_prefix_path = utils.path_join(
self.increments_data_path,
"{0}_{1}".format(self.level, self.timestamp))
self.engine_metadata_path = utils.path_join(
self.data_prefix_path, "engine_metadata")
self.metadata_path = utils.path_join(
self.increments_metadata_path,
"{0}_{1}".format(self.level, self.timestamp), "metadata")
self.data_path = utils.path_join(self.data_prefix_path, "data")
self.segments_path = utils.path_join(self.data_prefix_path,
"segments")
# create backup_basedir
backup_basedir = "{0}/{1}".format(self.storage_path,
package_name)
self.create_dirs(backup_basedir)
# define backup_data_name
backup_basepath = "{0}/{1}".format(backup_basedir,
split[0])
backup_metadata = "%s/metadata" % backup_basedir
# write backup to backup_basepath
data_backup = utils.path_join(tmpdir, "data_backup")
with open(data_backup, 'wb') as backup_file:
for el in stream:
backup_file.write(el)
self.put_file(data_backup, backup_basepath)
# write data matadata to backup_metadata
metadata = utils.path_join(tmpdir, "metadata")
with open(metadata, 'wb') as backup_meta:
backup_meta.write(json.dumps(headers))
self.put_file(metadata, backup_metadata)
finally:
shutil.rmtree(tmpdir)
return info, image
elif self.storage.type in ['ftp', 'ftps']:
image_file = "{0}/{1}/{2}/{3}".format(self.container, path,
backup, path)
metadata_file = "{0}/{1}/{2}/metadata".format(self.container,
path, backup)
try:
tmpdir = tempfile.mkdtemp()
except Exception:
LOG.error("Unable to create a tmp directory")
raise
try:
data_image = utils.path_join(tmpdir, "data_image")
LOG.info('create image restore ftp storage')
self.storage.get_file(image_file, data_image)
data_meta = utils.path_join(tmpdir, "data_meta")
self.storage.get_file(metadata_file, data_meta)
data = open(data_image, 'rb')
info = json.load(open(data_meta, 'r'))
image = self.client_manager.create_image(
name="restore_{}".format(path),
container_format="bare",
disk_format="raw",
data=data)
return info, image
finally:
shutil.rmtree(tmpdir)
else:
return {}
if storage:
self.increments_data_path = utils.path_join(
self.storage.storage_path, "data", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.increments_metadata_path = utils.path_join(
self.storage.storage_path, "metadata", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.data_prefix_path = utils.path_join(
self.increments_data_path,
"{0}_{1}".format(self.level, self.timestamp))
self.engine_metadata_path = utils.path_join(
self.data_prefix_path, "engine_metadata")
self.metadata_path = utils.path_join(
self.increments_metadata_path,
"{0}_{1}".format(self.level, self.timestamp), "metadata")
self.data_path = utils.path_join(self.data_prefix_path, "data")
self.segments_path = utils.path_join(self.data_prefix_path,
"segments")
:param hostname_backup_name: name (hostname_backup_name) of backup
:type hostname_backup_name: str
:param timestamp: timestamp of backup (when it was executed)
:type timestamp: int
:param level: current incremental level of backup
:type level: int
:return:
"""
self.hostname_backup_name = hostname_backup_name
self.timestamp = timestamp
self.level = level
self.engine = engine
self.storage = storage
self.level_zero_timestamp = level_zero_timestamp
if storage:
self.increments_data_path = utils.path_join(
self.storage.storage_path, "data", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.increments_metadata_path = utils.path_join(
self.storage.storage_path, "metadata", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.data_prefix_path = utils.path_join(
self.increments_data_path,
"{0}_{1}".format(self.level, self.timestamp))
self.engine_metadata_path = utils.path_join(
self.data_prefix_path, "engine_metadata")
self.metadata_path = utils.path_join(
self.increments_metadata_path,
"{0}_{1}".format(self.level, self.timestamp), "metadata")
self.data_path = utils.path_join(self.data_prefix_path, "data")
self.segments_path = utils.path_join(self.data_prefix_path,
"segments")
def rmtree(self, path):
    """Recursively remove *path* and all of its contents over SFTP.

    Directories (as reported by ``self._is_dir``) are descended into and
    removed recursively; regular entries are deleted directly.  The
    emptied directory itself is removed last.
    """
    for name in self.ftp.listdir(path=path):
        child = utils.path_join(path, name)
        if self._is_dir(child):
            self.rmtree(child)
        else:
            self.ftp.remove(child)
    self.ftp.rmdir(path)
self.engine = engine
self.storage = storage
self.level_zero_timestamp = level_zero_timestamp
if storage:
self.increments_data_path = utils.path_join(
self.storage.storage_path, "data", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.increments_metadata_path = utils.path_join(
self.storage.storage_path, "metadata", self.engine.name,
self.hostname_backup_name, self.level_zero_timestamp)
self.data_prefix_path = utils.path_join(
self.increments_data_path,
"{0}_{1}".format(self.level, self.timestamp))
self.engine_metadata_path = utils.path_join(
self.data_prefix_path, "engine_metadata")
self.metadata_path = utils.path_join(
self.increments_metadata_path,
"{0}_{1}".format(self.level, self.timestamp), "metadata")
self.data_path = utils.path_join(self.data_prefix_path, "data")
self.segments_path = utils.path_join(self.data_prefix_path,
"segments")