def pre_processing(observe):
    processed_observe = np.uint8(
        resize(rgb2gray(observe), (84, 84), mode='constant') * 255)
    return processed_observe
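A minimal usage sketch for the snippet above, assuming pre_processing is in scope and using a synthetic 210x160 RGB frame in place of a real emulator observation:

import numpy as np

# Synthetic RGB frame standing in for an Atari observation (hypothetical shape)
frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)

state = pre_processing(frame)    # grayscale, resized to 84x84, rescaled to 0-255
print(state.shape, state.dtype)  # (84, 84) uint8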
def set_superpixel_mask(self):
    """Use Simple Linear Iterative Clustering (SLIC) to get superpixels."""
    # Get superpixel size and number
    spixel_size = self.cd.spixel_size_baseMag * (
        self.cd.MAG / self.cd.slide_info['magnification'])
    n_spixels = int(
        self.tissue_rgb.shape[0] * self.tissue_rgb.shape[1] / spixel_size)
    # Get superpixel mask.
    # Optionally use grayscale instead of RGB -- seems more robust to
    # color variations and sometimes gives better results.
    if self.cd.use_grayscale:
        self.spixel_mask = slic(
            rgb2gray(self.tissue_rgb), n_segments=n_spixels,
            compactness=self.cd.compactness)
    else:
        self.spixel_mask = slic(
            self.tissue_rgb, n_segments=n_spixels,
            compactness=self.cd.compactness)
    # Restrict to tissue mask
    tmask = resize(
        self.tissue_mask, output_shape=self.spixel_mask.shape,
        order=0, preserve_range=True)
    self.spixel_mask[tmask == 0] = 0
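A self-contained sketch of the same approach outside the class, with synthetic stand-ins for the tissue image, tissue mask, and configuration values (the spixel_size and compactness below are assumptions, not values from the snippet's config):

import numpy as np
from skimage.color import rgb2gray
from skimage.segmentation import slic
from skimage.transform import resize

# Synthetic stand-ins for self.tissue_rgb and self.tissue_mask
tissue_rgb = np.random.randint(0, 256, size=(512, 512, 3), dtype=np.uint8)
tissue_mask = np.zeros((256, 256), dtype=np.uint8)
tissue_mask[64:192, 64:192] = 1

# Aim for roughly one superpixel per spixel_size pixels
spixel_size = 64 * 64
n_spixels = int(tissue_rgb.shape[0] * tissue_rgb.shape[1] / spixel_size)

# Grayscale SLIC, as in the use_grayscale branch above
spixel_mask = slic(rgb2gray(tissue_rgb), n_segments=n_spixels, compactness=0.1)

# Upsample the tissue mask to the superpixel grid and zero out non-tissue labels
tmask = resize(tissue_mask, output_shape=spixel_mask.shape,
               order=0, preserve_range=True)
spixel_mask[tmask == 0] = 0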
def process_single_image(self, image_file_path, output_img_size):
    # Read in the image
    I = skimage.io.imread(image_file_path)
    # If the image was in color, convert it to 8-bit grayscale
    if len(I.shape) == 3:
        I = skimage.color.rgb2gray(I)
        I *= 255
        I = I.astype('uint8')
    if len(I.shape) != 3:
        I = I[:, :, numpy.newaxis]
    # Detect face and crop it out
    I_crop, success_flag = self.detect_crop_face(I)
    # print I_crop.dtype, I_crop.min(), I_crop.max()
    # If face was successfully detected, align it in a 96x96 image
    if success_flag:
        I_out = I_crop
        I_out = numpy.uint8(skimage.transform.resize(I_out, (96, 96), preserve_range=True))
        # print I_out.dtype, I_out.min(), I_out.max()
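A small sketch isolating the resize-and-cast step above; preserve_range=True keeps pixel values on the 0-255 scale, so the uint8 cast does not collapse the image. The 120x100 crop size is just an assumption:

import numpy as np
import skimage.transform

# Hypothetical grayscale face crop with values in 0-255
I_crop = np.random.randint(0, 256, size=(120, 100), dtype=np.uint8)

# Without preserve_range, resize would return floats in [0, 1] and the
# uint8 cast would reduce the image to zeros and ones.
I_out = np.uint8(skimage.transform.resize(I_crop, (96, 96), preserve_range=True))
print(I_out.dtype, I_out.min(), I_out.max())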
def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, part_vis_fixed, root_path=None, path=None, idx=None, save=True):
    G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.part_bbox: part_bbox_fixed, self.part_vis: part_vis_fixed})
    ssim_G_x_list = []
    for i in xrange(G.shape[0]):
        G_gray = rgb2gray((G[i, :]).clip(min=0, max=255).astype(np.uint8))
        x_gray = rgb2gray(((x_fixed[i, :] + 1) * 127.5).clip(min=0, max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
    return G
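A standalone sketch of the SSIM bookkeeping used above, with synthetic batches in place of network outputs; structural_similarity is the skimage.metrics name for the ssim call in the snippet:

import numpy as np
from skimage.color import rgb2gray
from skimage.metrics import structural_similarity as ssim

# Synthetic stand-ins: G in 0-255, x_fixed in [-1, 1], both NHWC
G = np.random.uniform(0, 255, size=(4, 64, 64, 3))
x_fixed = np.random.uniform(-1, 1, size=(4, 64, 64, 3))

ssim_G_x_list = []
for i in range(G.shape[0]):
    G_gray = rgb2gray(G[i].clip(min=0, max=255).astype(np.uint8))
    x_gray = rgb2gray(((x_fixed[i] + 1) * 127.5).clip(min=0, max=255).astype(np.uint8))
    ssim_G_x_list.append(
        ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min()))

print(np.mean(ssim_G_x_list))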
def generate(self, x_fixed, x_target_fixed, pose_target_fixed, root_path=None, path=None, idx=None, save=True):
    G = self.sess.run(self.G, {self.x: x_fixed, self.pose_target: pose_target_fixed})
    ssim_G_x_list = []
    # x_0_255 = utils_wgan.unprocess_image(x_target_fixed, 127.5, 127.5)
    for i in xrange(G.shape[0]):
        # G_gray = rgb2gray((G[i,:]/127.5-1).clip(min=-1,max=1))
        # x_target_gray = rgb2gray((x_target_fixed[i,:]).clip(min=-1,max=1))
        G_gray = rgb2gray((G[i, :]).clip(min=0, max=255).astype(np.uint8))
        x_target_gray = rgb2gray(((x_target_fixed[i, :] + 1) * 127.5).clip(min=0, max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max() - x_target_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
    return G
def preprocess(self, observ):
    return resize(rgb2gray(observ), self.screen_size)
def color_splash(image, mask):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    Returns result image.
    """
    # Make a grayscale copy of the image. The grayscale copy still
    # has 3 RGB channels, though.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # Copy color pixels from the original color image where mask is set
    if mask.shape[-1] > 0:
        # We're treating all instances as one, so collapse the mask into one layer
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        splash = gray.astype(np.uint8)
    return splash
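A quick usage sketch for color_splash with a synthetic image and a single hand-drawn instance mask (real masks would come from a detector):

import numpy as np

# Synthetic RGB image and one rectangular instance mask
image = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)
mask = np.zeros((128, 128, 1), dtype=bool)
mask[32:96, 32:96, 0] = True

splash = color_splash(image, mask)   # assumes color_splash and its skimage import are in scope
print(splash.shape, splash.dtype)    # (128, 128, 3) uint8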
def extract_roi(img, labels_to_keep=[1, 2]):
    label_img = segmentation.slic(img, compactness=30, n_segments=6)
    labels = np.unique(label_img)
    print(labels)
    gray = rgb2gray(img)
    for label in labels:
        if label not in labels_to_keep:
            logicalIndex = (label_img == label)
            gray[logicalIndex] = 0
    Display.show_image(gray)
    io.imsave("grayy.png", gray)
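Display.show_image and the hard-coded output filename are specific to that project; the core label-filtering step can be tried on any image like this (the six-segment SLIC and the labels kept are simply the values from the snippet, and the input image is synthetic):

import numpy as np
from skimage import segmentation
from skimage.color import rgb2gray

# Synthetic RGB image standing in for a real photograph
img = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)

label_img = segmentation.slic(img, compactness=30, n_segments=6)
gray = rgb2gray(img)

# Zero out every superpixel whose label is not kept
labels_to_keep = [1, 2]
for label in np.unique(label_img):
    if label not in labels_to_keep:
        gray[label_img == label] = 0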
def transform_screen(self, data):
    return rgb2gray(imresize(data, self.screen))[None, ...]
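scipy.misc.imresize, used above, was removed in SciPy 1.3; a rough skimage-only equivalent might look like the sketch below, assuming self.screen is a (height, width) tuple:

import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize

def transform_screen(data, screen=(84, 84)):
    # Resize to the target screen size, keeping the 0-255 range that
    # imresize produced, then convert to grayscale and add a channel axis.
    resized = resize(data, screen, preserve_range=True).astype(np.uint8)
    return rgb2gray(resized)[None, ...]

frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
print(transform_screen(frame).shape)  # (1, 84, 84)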
def feature_extraction_single_rolled(self, img_file, output_path=None, ppi=500):
    block_size = 16
    if not os.path.exists(img_file):
        return None
    img = io.imread(img_file, as_gray=True)
    if ppi != 500:
        img = cv2.resize(img, (0, 0), fx=500.0 / ppi, fy=500.0 / ppi)
    img = preprocessing.adjust_image_size(img, block_size)
    if len(img.shape) > 2:
        img = rgb2gray(img)
    h, w = img.shape
    start = timeit.default_timer()
    mask = get_maps.get_quality_map_intensity(img)
    stop = timeit.default_timer()
    print('time for cropping : %f' % (stop - start))
    start = timeit.default_timer()
    contrast_img = preprocessing.local_constrast_enhancement(img)
    mnt = self.minu_model.run_whole_image(contrast_img, minu_thr=0.1)
    stop = timeit.default_timer()
    minu_time = stop - start
    print('time for minutiae : %f' % (stop - start))
    name = os.path.basename(img_file)
    show.show_minutiae(img, mnt, block=True)
    return None
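The helper modules above (preprocessing, get_maps, show, and the minutiae model) are specific to that repository, but the image-loading and resolution-normalisation steps can be sketched on their own; the rescaling below uses skimage.transform.rescale in place of cv2.resize, and the 1000 ppi input is hypothetical:

from skimage import io
from skimage.transform import rescale

def load_fingerprint(img_file, ppi=500):
    # Read directly as grayscale floats in [0, 1]; as_gray also converts RGB scans
    img = io.imread(img_file, as_gray=True)
    # Normalise scanner resolution to 500 ppi by isotropic rescaling
    if ppi != 500:
        img = rescale(img, 500.0 / ppi)
    return img

# img = load_fingerprint('rolled_print.png', ppi=1000)  # hypothetical file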