import copy
# cm = matplotlib.cm.gray
# load example image
import pkg_resources
DATA_PATH = pkg_resources.resource_filename('pynufft', 'src/data/')
# PHANTOM_FILE = pkg_resources.resource_filename('pynufft', 'data/phantom_256_256.txt')
import numpy
import matplotlib.pyplot
import scipy.misc
image = scipy.misc.ascent()
image = scipy.misc.imresize(image, (256, 256))
image = image.astype(float) / numpy.max(image[...])
Nd = (256, 256) # image space size
Kd = (512, 512) # k-space size
Jd = (6, 6) # interpolation size
# load k-space points
om = numpy.load(DATA_PATH+'om2D.npz')['arr_0']
from pynufft import NUFFT_cpu, NUFFT_hsa
nfft = NUFFT_cpu() # CPU
nfft.plan(om, Nd, Kd, Jd)
NufftObj = NUFFT_hsa()
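# Note: scipy.misc.imresize (used above) was deprecated in SciPy 1.0 and removed in
# SciPy 1.3, so the example no longer runs on current SciPy. A minimal sketch of an
# equivalent uint8 resize via Pillow, as the SciPy deprecation notice suggests; the
# helper name imresize_pil is ours, not a SciPy or pynufft API.
from PIL import Image
import numpy

def imresize_pil(arr, size):
    """Resize a 2-D uint8 array to size = (rows, cols) with bilinear interpolation."""
    # PIL's resize takes (width, height), i.e. (cols, rows)
    return numpy.array(Image.fromarray(arr).resize((size[1], size[0]), Image.BILINEAR))

# usage, with a stand-in for the demo image (on current SciPy it lives in scipy.datasets.ascent):
demo = (numpy.random.rand(512, 512) * 255).astype(numpy.uint8)
small = imresize_pil(demo, (256, 256))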
for offset_y in six.moves.range(0, 8 + 4, 4):
for offset_x in six.moves.range(0, 8 + 4, 4):
im = img[offset_y:offset_y + self.cropping_size,
offset_x:offset_x + self.cropping_size]
# global contrast normalization
im = im.astype(np.float64)
im -= im.reshape(-1, 3).mean(axis=0)
im /= im.reshape(-1, 3).std(axis=0) + 1e-5
imgs.append(im)
imgs.append(np.fliplr(im))
for offset_y in six.moves.range(0, 4 + 2, 2):
for offset_x in six.moves.range(0, 4 + 2, 2):
im = img[offset_y:offset_y + self.scaling_size,
offset_x:offset_x + self.scaling_size]
im = imresize(im, (self.cropping_size, self.cropping_size),
'nearest')
# global contrast normalization
im = im.astype(np.float64)
im -= im.reshape(-1, 3).mean(axis=0)
im /= im.reshape(-1, 3).std(axis=0) + 1e-5
imgs.append(im)
imgs.append(np.fliplr(im))
imgs = np.asarray(imgs, dtype=np.float32)
return imgs
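# A minimal standalone sketch of the per-image "global contrast normalization" used in
# the crops above (zero mean, roughly unit variance per channel). The function name and
# the epsilon value are our choices for illustration, not part of the original class.
import numpy as np

def global_contrast_normalize(im, eps=1e-5):
    """Zero-center and scale an HxWxC image channel-wise."""
    im = im.astype(np.float64)
    flat = im.reshape(-1, im.shape[-1])
    return (im - flat.mean(axis=0)) / (flat.std(axis=0) + eps)

# usage: patch = global_contrast_normalize(np.random.randint(0, 256, (24, 24, 3)))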
def center_crop(x, crop_h, crop_w=None, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
rate = np.random.uniform(0, 1, size=1)
if rate < 0.5:
x = np.fliplr(x)
# first crop to 178x178 (rows 20:198, all 178 columns), then resize to resize_w x resize_w
return scipy.misc.imresize(x[20:218-20, 0: 178], [resize_w, resize_w])
def expand_mask(bbox, mini_mask, image_shape):
"""Resizes mini masks back to image size. Reverses the change
of minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mini_mask[:, :, i]
y1, x1, y2, x2 = bbox[i][:4]
h = y2 - y1
w = x2 - x1
m = scipy.misc.imresize(m.astype(float), (h, w), interp='bilinear')
mask[y1:y2, x1:x2, i] = np.where(m >= 128, 1, 0)
return mask
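# scipy.misc.imresize is gone from current SciPy; a sketch of the same mask expansion
# using skimage.transform.resize, which keeps float values in [0, 1] so the threshold
# becomes 0.5 instead of 128. Shown for a single mask; the helper name is ours.
import numpy as np
import skimage.transform

def expand_one_mask(mini_mask, bbox, image_shape):
    """Resize one boolean mini-mask back into its bounding box on a full-size canvas."""
    y1, x1, y2, x2 = bbox[:4]
    resized = skimage.transform.resize(mini_mask.astype(float), (y2 - y1, x2 - x1), order=1)
    full = np.zeros(image_shape[:2], dtype=bool)
    full[y1:y2, x1:x2] = resized >= 0.5
    return full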
def deepmind_preprocessor(state):
state = greyscale_preprocessor(state)
#state = np.array(cv2.resize(state, (84, 84)))
resized_screen = scipy.misc.imresize(state, (110,84))
state = resized_screen[18:102, :]
return state
def process_frame(frame):
s = scipy.misc.imresize(frame,[84,84])
s = np.reshape(s,[np.prod(s.shape)])
return s
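# Both frame helpers above rely on the removed scipy.misc.imresize. A sketch of the same
# DeepMind-style preprocessing with OpenCV; note cv2.resize takes the target size as
# (width, height). frame is assumed to be a 2-D grayscale array.
import numpy as np
import cv2

def preprocess_atari_frame(frame):
    resized = cv2.resize(frame, (84, 110), interpolation=cv2.INTER_AREA)  # -> shape (110, 84)
    return resized[18:102, :]  # keep the 84x84 playing area

# usage: preprocess_atari_frame(np.zeros((210, 160), dtype=np.uint8)).shape == (84, 84)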
def preprocess_img(self, img_out):
# Deal with BW images
if len(img_out.shape) == 2:
img_out = np.dstack([img_out] * 3)
# Resize to target size (256, 256)
img_resize = misc.imresize(img_out, size=self.target_size, interp=self.resize_type)
# We need to reorder RGB -> BGR as model was initially trained with Caffe
img_reorder = img_resize[:, :, self.reorder_dim]
# We need to subtract imagenet image mean
img_meansub = (img_reorder - self.imgmean)
# Take a central crop of 227x227.
# The model was trained with random crops of this dimension
# and existing features were extracted with a central crop like this
w_off = int((img_meansub.shape[0] - self.crop_size[0]) / 2.0)
h_off = int((img_meansub.shape[1] - self.crop_size[1]) / 2.0)
img_out = img_meansub[w_off:w_off + self.crop_size[0], h_off:h_off + self.crop_size[1], :]
return img_out
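# A sketch of the same Caffe-style pipeline with the steps spelled out and Pillow doing
# the resize. The BGR mean below is the commonly used ImageNet mean, not necessarily the
# exact self.imgmean of the class above; the sizes mirror the 256 -> 227 comments.
import numpy as np
from PIL import Image

IMAGENET_BGR_MEAN = np.array([104.0, 117.0, 123.0])  # assumed approximate mean (B, G, R)

def caffe_preprocess(img_rgb, resize_to=256, crop_to=227):
    img = np.array(Image.fromarray(img_rgb).resize((resize_to, resize_to), Image.BILINEAR))
    img = img[:, :, ::-1].astype(np.float64)   # RGB -> BGR, as the Caffe model expects
    img -= IMAGENET_BGR_MEAN                   # mean subtraction
    off = (resize_to - crop_to) // 2           # central crop
    return img[off:off + crop_to, off:off + crop_to, :]

# usage: caffe_preprocess(np.zeros((300, 400, 3), dtype=np.uint8)).shape == (227, 227, 3)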
for class_i in child[1:]:
sub_child = os.listdir("/Users/.../video/images" + "/" + video_class + "/" + class_i)
for image_fol in sub_child[1:]:
if (video_class == 'class_4' ):
if(count%4 == 0):
image = imread("/Users/.../video/images" + "/" + video_class + "/" + class_i + "/" + image_fol)
image = imresize(image , (224,224))
x.append(image)
y.append(output)
cv2.imwrite('/Users/.../video/' + video_class + '/' + str(count) + '_' + image_fol,image)
count+=1
else:
if(count%8 == 0):
image = imread("/Users/.../video/images" + "/" + video_class + "/" + class_i + "/" + image_fol)
image = imresize(image , (224,224))
x.append(image)
y.append(output)
cv2.imwrite('/Users/.../video/' + video_class + '/' + str(count) + '_' + image_fol,image)
count+=1
output+=1
x = np.array(x)
y = np.array(y)
print("x",len(x),"y",len(y))
a `dict` with calibration points
"""
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)
objpoints = []
imgpoints = []
images = glob(calib_path)
cal_images = np.zeros((len(images), *cal_image_size), dtype=np.uint8)
successfull_cnt = 0
for idx, fname in enumerate(tqdm(images, desc='Processing image')):
img = scipy.misc.imread(fname)
if img.shape[0] != cal_image_size[0] or img.shape[1] != cal_image_size[1]:
img = scipy.misc.imresize(img, cal_image_size)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)
if ret:
successfull_cnt += 1
objpoints.append(objp)
imgpoints.append(corners)
img = cv2.drawChessboardCorners(img, (cols, rows), corners, ret)
cal_images[idx] = img
print("%s/%s camera calibration images processed." % (successfull_cnt, len(images)))
# cv2.calibrateCamera expects imageSize as (width, height); cal_image_size stores (height, width, channels)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (cal_image_size[1], cal_image_size[0]), None, None)
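# Once calibrateCamera has produced the camera matrix and distortion coefficients, the
# usual next step is to undistort incoming frames. A minimal sketch; mtx and dist are
# the values returned above.
import cv2

def undistort(img, mtx, dist):
    """Remove lens distortion using the calibration results."""
    return cv2.undistort(img, mtx, dist, None, mtx)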
class_name, filename)
# Find the extension of the image
exts = ('jpg', 'png')
for ext in exts:
temp_path = image_path_without_ext + '.' + ext
image_path = ''
if os.path.exists(temp_path):
image_path = temp_path
break
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
scaled = misc.imresize(img, args.prealigned_scale, interp='bilinear')
sz1 = int(scaled.shape[1] / 2)
sz2 = int(args.image_size / 2)
cropped = scaled[(sz1 - sz2):(sz1 + sz2), (sz1 - sz2):(sz1 + sz2), :]
print(image_path)
nrof_prealigned_images += 1
misc.imsave(output_filename, cropped)
else:
print('Unable to align "%s"' % image_path)
print('Total number of images: %d' % nrof_images_total)
print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
print('Number of pre-aligned images: %d' % nrof_prealigned_images)
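# The alignment branch above also depends on the removed misc.imresize / misc.imsave
# pair. A minimal modern sketch of its "scale, take a centered square crop, save" step
# using Pillow and imageio; scale and image_size stand in for args.prealigned_scale and
# args.image_size, and the function name is ours.
import numpy as np
import imageio
from PIL import Image

def scale_and_center_crop(img, scale, image_size, output_filename):
    h, w = img.shape[:2]
    scaled = np.array(Image.fromarray(img).resize((int(w * scale), int(h * scale)), Image.BILINEAR))
    sz1 = int(scaled.shape[1] / 2)
    sz2 = int(image_size / 2)
    cropped = scaled[sz1 - sz2:sz1 + sz2, sz1 - sz2:sz1 + sz2, :]
    imageio.imwrite(output_filename, cropped)
    return cropped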