Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
----------
lif: LifFile
The LifFile to get the ranges from
    read_dims: Dict[str, int]
The list of locked dimensions
Returns
-------
    Dict[Dimension, range]
These ranges can then be used to iterate through the specified YX images
"""
if read_dims is None:
read_dims = {}
data_shape = LifReader._dims_shape(lif=lif)
# If S is in read_dims then use the specified value and the specified dims for
# that scene
if Dimensions.Scene in read_dims:
s_range = range(
read_dims[Dimensions.Scene], read_dims[Dimensions.Scene] + 1
)
s_dict = data_shape[s_range[0]]
else:
s_range = range(*data_shape[0][Dimensions.Scene])
s_dict = data_shape[0]
# Map the dims over to ranges and if the dim is in read_dims make the range
# over the single dim
integrated_dims = {Dimensions.Scene: s_range}
for dim in [Dimensions.Time, Dimensions.Channel, Dimensions.SpatialZ]:
Dimensions.SpatialY,
Dimensions.SpatialX,
],
S: int = 0,
**kwargs,
):
# Run super init to check filepath provided
super().__init__(data, **kwargs)
# Store parameters needed for _daread
self.chunk_by_dims = chunk_by_dims
self.specific_s_index = S
lif = LifFile(filename=self._file)
# _chunk_offsets is a list of ndarrays
# (only way I could deal with inconsistent scene shape)
self._chunk_offsets, self._chunk_lengths = LifReader._compute_offsets(lif=lif)
Returns
-------
True / False
True if it has the right header byte structure False if it does not.
"""
with BufferReader(buffer) as buffer_reader:
header = buffer_reader.read_bytes(n_bytes=8)
            # If the buffer is too short, return False
if len(buffer_reader.endianness) < 2 or len(header) < 8:
return False
# Check for the magic byte
if (
buffer_reader.endianness[0] != LifReader.LIF_MAGIC_BYTE
and header[1] != LifReader.LIF_MAGIC_BYTE
):
return False
# Check for the memory byte, if magic byte and memory byte are present
# return True
if header[6] == LifReader.LIF_MEMORY_BYTE:
return True
return False
def _read_immediate(self) -> np.ndarray:
# Get image dims indicies
lif = LifFile(filename=self._file)
image_dim_indices = LifReader._dims_shape(lif=lif)
# Catch inconsistent scene dimension sizes
if len(image_dim_indices) > 1:
# Choose the provided scene
log.info(
f"File contains variable dimensions per scene, "
f"selected scene: {self.specific_s_index} for data retrieval."
)
data, _ = LifReader._get_array_from_offset(
self._file,
self._chunk_offsets,
self._chunk_lengths,
self.metadata,
{Dimensions.Scene: self.specific_s_index},
)
else:
# If the list is length one that means that all the scenes in the image
# have the same dimensions
# Read all data in the image
data, _ = LifReader._get_array_from_offset(
self._file, self._chunk_offsets, self._chunk_lengths, self.metadata,
)
return data
"""
with BufferReader(buffer) as buffer_reader:
header = buffer_reader.read_bytes(n_bytes=8)
            # If the buffer is too short, return False
if len(buffer_reader.endianness) < 2 or len(header) < 8:
return False
# Check for the magic byte
if (
buffer_reader.endianness[0] != LifReader.LIF_MAGIC_BYTE
and header[1] != LifReader.LIF_MAGIC_BYTE
):
return False
# Check for the memory byte, if magic byte and memory byte are present
# return True
if header[6] == LifReader.LIF_MEMORY_BYTE:
return True
return False
offsets: List[numpy.ndarray]
A List of numpy ndarrays offsets, see _compute_offsets for more details.
read_lengths: numpy.ndarray
A 1D numpy array of read lengths, the index is the scene index
meta: xml.etree.ElementTree.Element
The root element of the metadata etree from the file.
read_dims: Optional[Dict[str, int]]
The dimensions to read from the file as a dictionary of string to integer.
Default: None (Read all data from the image)
Returns
-------
data: np.ndarray
The data read for the dimensions provided.
"""
data, dims = LifReader._read_image(
img=img,
offsets=offsets,
read_lengths=read_lengths,
meta=meta,
read_dims=read_dims,
)
return data
for current_dim_begin_index, curr_dim_index in zip(begin_indicies, i)
)
# Zip the dims with the read indices
this_chunk_read_dims = dict(
zip(blocked_dimension_order, this_chunk_read_indicies)
)
# Remove the dimensions that we want to chunk by from the read dims
for d in chunk_by_dims:
if d in this_chunk_read_dims:
this_chunk_read_dims.pop(d)
# Add delayed array to lazy arrays at index
lazy_arrays[i] = da.from_delayed(
delayed(LifReader._imread)(
img, offsets, read_lengths, lif.xml_root, this_chunk_read_dims
),
shape=sample_chunk_shape,
dtype=sample.dtype,
)
# Convert the numpy array of lazy readers into a dask array and fill the inner
# most empty dimensions with chunks
merged = da.block(lazy_arrays.tolist())
# Because we have set certain dimensions to be chunked and others not
# we will need to transpose back to original dimension ordering
# Example being, if the original dimension ordering was "SZYX" and we want to
# chunk by "S", "Y", and "X" we created an array with dimensions ordering "ZSYX"
transpose_indices = []
transpose_required = False
def _read_delayed(self) -> da.core.Array:
"""
Returns
-------
Constructed dask array where each chunk is a delayed read from the LIF file.
Places dimensions in the native order (i.e. "TZCYX")
"""
dask_array, _ = LifReader._daread(
self._file,
self._chunk_offsets,
self._chunk_lengths,
chunk_by_dims=self.chunk_by_dims,
S=self.specific_s_index,
)
return dask_array
Returns
-------
numpy.ndarray
a stack of images as a numpy.ndarray
List[Tuple[str, int]]
The shape of the data being returned
"""
if read_dims is None:
read_dims = {}
lif = LifFile(im_path)
# Data has already been checked for consistency. The dims are either consistent
        # or S is specified. selected_ranges gets the ranges for the Dimension for the
# range unless the dim is explicitly specified
selected_ranges = LifReader._read_dims_to_ranges(lif, read_dims)
s_index = read_dims[Dimensions.Scene] if Dimensions.Scene in read_dims else 0
lif_img = lif.get_image(img_n=s_index)
x_size = lif_img.dims[0]
y_size = lif_img.dims[1]
pixel_type = LifReader.get_pixel_type(meta, s_index)
# The ranged dims
ranged_dims = [
(dim, len(selected_ranges[dim]))
for dim in [
Dimensions.Scene,
Dimensions.Time,
Dimensions.Channel,
Dimensions.SpatialZ,
]
]
The LifFile object with an open file pointer to the file.
Returns
-------
List[numpy.ndarray]
The list of numpy arrays holds the offsets and it should be accessed as
[S][T,C,Z].
numpy.ndarray
The second numpy array holds the plane read length per Scene.
"""
scene_list = []
scene_img_length_list = []
for s_index, img in enumerate(lif.get_iter_image()):
pixel_type = LifReader.get_pixel_type(lif.xml_root, s_index)
(
x_size,
y_size,
z_size,
t_size,
) = img.dims # in comments in this block these correspond to X, Y, Z, T
c_size = img.channels # C
img_offset, img_block_length = img.offsets
offsets = np.zeros(shape=(t_size, c_size, z_size), dtype=np.uint64)
t_offset = c_size * z_size
z_offset = c_size
seek_distance = c_size * z_size * t_size
if img_block_length == 0:
# In the case of a blank image, we can calculate the length from
# the metadata in the LIF. When this is read by the parser,
# it is set to zero initially.