ldb = LooseObjectDB(os.path.join(rwrepo.git_dir, 'objects'))
for randomize in range(2):
    desc = 'random ' if randomize else ''
    print("Creating %s data ..." % desc, file=sys.stderr)
    st = time()
    size, stream = make_memory_file(self.large_data_size_bytes, randomize)
    elapsed = time() - st
    print("Done (in %f s)" % elapsed, file=sys.stderr)

    # writing - due to the compression it will seem faster than it is
    st = time()
    binsha = ldb.store(IStream('blob', size, stream)).binsha
    elapsed_add = time() - st
    assert ldb.has_object(binsha)
    db_file = ldb.readable_db_object_path(bin_to_hex(binsha))
    fsize_kib = os.path.getsize(db_file) / 1000
    size_kib = size / 1000
    print("Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)"
          % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add), file=sys.stderr)

    # reading all at once
    st = time()
    ostream = ldb.stream(binsha)
    shadata = ostream.read()
    elapsed_readall = time() - st
    stream.seek(0)
    assert shadata == stream.getvalue()
    print("Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)"
          % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall), file=sys.stderr)
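# A minimal sketch of the same store-and-read round trip without the timing
# and benchmark scaffolding. It assumes gitdb's LooseObjectDB/IStream API as
# used above; the objects path below is illustrative only.
from io import BytesIO

from gitdb import IStream
from gitdb.db import LooseObjectDB
from gitdb.util import bin_to_hex

data = b"hello, loose object db"
ldb = LooseObjectDB("/path/to/repo/.git/objects")       # illustrative path

# store(): wraps the raw bytes as a blob and records the resulting binsha
binsha = ldb.store(IStream('blob', len(data), BytesIO(data))).binsha
assert ldb.has_object(binsha)
print(bin_to_hex(binsha))                               # 40-character hex sha

# stream(): returns an OStream whose read() yields the decompressed payload
assert ldb.stream(binsha).read() == data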
def _map_loose_object(self, sha):
    """
    :return: memory map of that file to allow random read access
    :raise BadObject: if object could not be located"""
    db_path = self.db_path(self.object_path(bin_to_hex(sha)))
    try:
        return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
    except OSError as e:
        if e.errno != ENOENT:
            # our special open flags are the likely culprit - drop them for this
            # and all future opens, then try again without them
            self._fd_open_flags = 0
            try:
                return file_contents_ro_filepath(db_path)
            except OSError as new_e:
                raise BadObject(sha) from new_e
            # END retry without special flags
        else:
            raise BadObject(sha) from e
        # END handle error
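# Standalone sketch of the fallback pattern used by _map_loose_object above:
# open with an optional flag (O_NOATIME where available) and, if the kernel
# rejects the open for any reason other than a missing file, retry without it.
# This is an illustration only, not gitdb's actual helper
# (file_contents_ro_filepath is internal to gitdb).
import mmap
import os
from errno import ENOENT

EXTRA_OPEN_FLAGS = getattr(os, "O_NOATIME", 0)          # 0 on non-Linux platforms

def map_file_readonly(path, extra_flags=EXTRA_OPEN_FLAGS):
    last_error = None
    for flags in (os.O_RDONLY | extra_flags, os.O_RDONLY):
        try:
            fd = os.open(path, flags)
        except OSError as e:
            if e.errno == ENOENT:
                raise                                   # missing file: retrying won't help
            last_error = e                              # e.g. EPERM triggered by O_NOATIME
            continue
        try:
            return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
        finally:
            os.close(fd)                                # the mapping keeps its own reference
    raise last_error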
:param config_reader: configuration reader of the repository - used to obtain
    user information. May also be an Actor instance identifying the committer
    directly, or None
:param filepath: full path to the log file
:param oldbinsha: binary sha of the previous commit
:param newbinsha: binary sha of the current commit
:param message: message describing the change to the reference
:param write: If True, the changes will be written right away. Otherwise
    the change will not be written
:return: RefLogEntry object which was appended to the log
:note: As we are append-only, concurrent access is not a problem as we
    do not interfere with readers."""
if len(oldbinsha) != 20 or len(newbinsha) != 20:
    raise ValueError("Shas need to be given in binary format")
# END handle sha type
assure_directory_exists(filepath, is_file=True)
committer = config_reader if isinstance(config_reader, Actor) else Actor.committer(config_reader)
entry = RefLogEntry(
    (bin_to_hex(oldbinsha), bin_to_hex(newbinsha), committer, (int(time.time()), time.altzone), message))
lf = LockFile(filepath)
lf._obtain_lock_or_raise()
fd = open(filepath, 'a')
try:
    fd.write(repr(entry))
finally:
    fd.close()
    lf._release_lock()
# END handle write operation
return entry
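# Usage sketch, assuming the fragment above is (a version of) RefLog.append_entry
# from GitPython's git.refs.log with the usual (config_reader, filepath,
# oldbinsha, newbinsha, message) parameter order; the repository path and the
# reflog chosen here are illustrative only.
import os
from git import Repo, Actor
from git.refs.log import RefLog
from gitdb.util import hex_to_bin

repo = Repo("/path/to/repo")                                 # illustrative path
head_log = os.path.join(repo.git_dir, "logs", "HEAD")

# 20-byte binary shas; assumes the HEAD commit has at least one parent
old = hex_to_bin(repo.head.commit.parents[0].hexsha)
new = hex_to_bin(repo.head.commit.hexsha)

entry = RefLog.append_entry(
    Actor("Jane Doe", "jane@example.com"),                   # committer passed directly
    head_log, old, new, "commit: example reflog entry")
print(entry)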
and corresponding index file. The pack contains all OStream objects contained in object_iter.
:param base_dir: directory which is to contain the files
:return: PackEntity instance initialized with the new pack
**Note:** for more information on the other parameters see the write_pack method"""
pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
pack_write = lambda d: os.write(pack_fd, d)
index_write = lambda d: os.write(index_fd, d)
pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression)
os.close(pack_fd)
os.close(index_fd)
fmt = "pack-%s.%s"
new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack'))
new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx'))
os.rename(pack_path, new_pack_path)
os.rename(index_path, new_index_path)
return cls(new_pack_path)
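# Usage sketch, assuming the snippet above is gitdb's PackEntity.create and
# that the objects come from a LooseObjectDB; paths are illustrative only.
import os

from gitdb.db import LooseObjectDB
from gitdb.pack import PackEntity

odb = LooseObjectDB("/path/to/repo/.git/objects")            # illustrative path
shas = list(odb.sha_iter())                                  # binary shas of all loose objects

# Stream every loose object into a new pack-<sha>.pack / .idx pair in pack_dir.
pack_dir = "/tmp/packs"
os.makedirs(pack_dir, exist_ok=True)
entity = PackEntity.create((odb.stream(sha) for sha in shas), pack_dir,
                           object_count=len(shas))
print(entity.pack().path(), entity.index().path())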
def hexsha(self):
""":return: 40 byte hex version of our 20 byte binary sha"""
return bin_to_hex(self.binsha)
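# Illustration of the binsha/hexsha relationship the accessor above relies on:
# a 20-byte binary SHA-1 versus its 40-character hex form. Assumes the
# gitdb.util helpers used throughout these snippets.
import hashlib

from gitdb.util import bin_to_hex, hex_to_bin

binsha = hashlib.sha1(b"blob 0\0").digest()    # 20 raw bytes
hexsha = bin_to_hex(binsha)                    # 40 hex characters (as bytes)

assert len(binsha) == 20 and len(hexsha) == 40
assert hex_to_bin(hexsha) == binsha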
def short_to_long(odb, hexsha):
    """:return: long hexadecimal sha1 from the given less-than-40 byte hexsha
        or None if no candidate could be found.
    :param hexsha: hexsha with less than 40 bytes"""
    try:
        return bin_to_hex(odb.partial_to_complete_sha_hex(hexsha))
    except BadObject:
        return None
    # END exception handling
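# Usage sketch, assuming this helper is git.repo.fun.short_to_long from
# GitPython and is fed a Repo's object database; the repository path and the
# abbreviated sha are illustrative only.
from git import Repo
from git.repo.fun import short_to_long

repo = Repo("/path/to/repo")                   # illustrative path
full = short_to_long(repo.odb, "0123abc")      # abbreviated hexsha prefix
if full is None:
    print("no object starts with that prefix")
else:
    print(full)                                # full 40-character hex sha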
:param config_reader: configuration reader of the repository - used to obtain
    user information. May be None
:param filepath: full path to the log file
:param oldbinsha: binary sha of the previous commit
:param newbinsha: binary sha of the current commit
:param message: message describing the change to the reference
:param write: If True, the changes will be written right away. Otherwise
    the change will not be written
:return: RefLogEntry object which was appended to the log
:note: As we are append-only, concurrent access is not a problem as we
    do not interfere with readers."""
if len(oldbinsha) != 20 or len(newbinsha) != 20:
    raise ValueError("Shas need to be given in binary format")
# END handle sha type
assure_directory_exists(filepath, is_file=True)
entry = RefLogEntry(
    (bin_to_hex(oldbinsha), bin_to_hex(newbinsha), Actor.committer(config_reader),
     (int(time.time()), time.altzone), message))
lf = LockFile(filepath)
lf._obtain_lock_or_raise()
fd = open(filepath, 'a')
try:
    fd.write(repr(entry))
finally:
    fd.close()
    lf._release_lock()
# END handle write operation
return entry