est_eigenvecs = np.array(est_eigenvecs)[est_inds][::-1]
eigenvals.append(est_eigenvals)
eigenvecs.append(est_eigenvecs)
eigenvals = np.array(eigenvals)
eigenvecs = np.array(eigenvecs)
real_eigenvals, real_eigenvecs = np.linalg.eig(real_hessian)
real_inds = np.argsort(real_eigenvals)
real_eigenvals = np.array(real_eigenvals)[real_inds][::-1]
real_eigenvecs = np.array(real_eigenvecs).T[real_inds][::-1]  # eig returns eigenvectors as columns; transpose so each row is an eigenvector in descending-eigenvalue order
# Plot eigenvalue error
plt.suptitle("Hessian eigendecomposition errors: %d trials" % ntrials)
plt.subplot(1, 2, 1)
plt.title("Eigenvalues")
plt.plot(list(range(nparams)), real_eigenvals, label="True Eigenvals")
plot_eigenval_estimates(eigenvals, label="Estimates")
plt.legend()
# Plot eigenvector L2 norm error
plt.subplot(1, 2, 2)
plt.title("Eigenvector cosine simliarity")
plot_eigenvec_errors(real_eigenvecs, eigenvecs, label="Estimates")
plt.legend()
plt.savefig("full.png")
plt.clf()
return real_hessian
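# A minimal, self-contained sketch of the same two-panel comparison, assuming only
# NumPy/Matplotlib; the random symmetric matrix and its noisy copy below are
# stand-ins for the real Hessian and the estimator used above, and the helpers
# plot_eigenval_estimates / plot_eigenvec_errors are not reproduced here.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
A = rng.normal(size=(10, 10))
true_hessian = (A + A.T) / 2
est_hessian = true_hessian + 0.05 * rng.normal(size=true_hessian.shape)

def sorted_eig(h):
    # eigh returns eigenvectors as columns; reorder both by descending eigenvalue
    vals, vecs = np.linalg.eigh(h)
    order = np.argsort(vals)[::-1]
    return vals[order], vecs[:, order]

true_vals, true_vecs = sorted_eig(true_hessian)
est_vals, est_vecs = sorted_eig(est_hessian)

plt.suptitle("Hessian eigendecomposition errors (sketch)")
plt.subplot(1, 2, 1)
plt.title("Eigenvalues")
plt.plot(true_vals, label="True")
plt.plot(est_vals, "--", label="Estimate")
plt.legend()
plt.subplot(1, 2, 2)
plt.title("Eigenvector cosine similarity")
plt.plot(np.abs(np.sum(true_vecs * est_vecs, axis=0)), label="Estimate")
plt.legend()
plt.savefig("full_sketch.png")
plt.clf()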
####### TRAINING #######
hist = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_data=(x_test, y_test))
# Evaluation
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save(model_name)
# plotting the metrics
fig = plt.figure()
plt.subplot(2,1,1)
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.subplot(2,1,2)
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.tight_layout()
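# Note on the history keys above: older Keras versions record 'acc'/'val_acc',
# newer ones record 'accuracy'/'val_accuracy'. A small helper sketch (not the
# original code) that draws the same two panels whichever key is present:
import matplotlib.pyplot as plt

def plot_history(hist):
    acc_key = 'acc' if 'acc' in hist.history else 'accuracy'
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(hist.history[acc_key])
    plt.plot(hist.history['val_' + acc_key])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='lower right')
    plt.subplot(2, 1, 2)
    plt.plot(hist.history['loss'])
    plt.plot(hist.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper right')
    plt.tight_layout()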
def _plot(self):
for i, plotter in enumerate(self.plots):
plt.subplot(len(self.plots), 1, i + 1)
plotter.plot(fontsize=self.fontsize_, show_legend=self.show_legend_)
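# The _plot method above stacks one subplot row per plotter. A standalone sketch
# of the same pattern with plain callables standing in for self.plots (the
# plotter objects and the fontsize/show_legend attributes are assumptions):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
plots = [lambda: plt.plot(x, np.sin(x)), lambda: plt.plot(x, np.cos(x))]
for i, plotter in enumerate(plots):
    plt.subplot(len(plots), 1, i + 1)  # one row per plotter, single column
    plotter()
plt.show()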
else: # single output
loss = criterion(output, target, target_weight)
acc = accuracy(score_map, target.cpu(), idx)
# generate predictions
preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
if debug:
gt_batch_img = batch_with_heatmap(input, target)
pred_batch_img = batch_with_heatmap(input, score_map)
if not gt_win or not pred_win:
plt.subplot(121)
gt_win = plt.imshow(gt_batch_img)
plt.subplot(122)
pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
acces.update(acc[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
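# The debug branch above creates the two imshow handles once and then reuses
# them via set_data, so the figure is updated in place on every batch instead
# of being rebuilt. A standalone sketch of that pattern with random images
# (batch_with_heatmap and the dataloader variables are not reproduced):
import numpy as np
import matplotlib.pyplot as plt

gt_win, pred_win = None, None
for _ in range(5):
    gt_img = np.random.rand(64, 64)
    pred_img = np.random.rand(64, 64)
    if gt_win is None or pred_win is None:
        plt.subplot(121)
        gt_win = plt.imshow(gt_img)
        plt.subplot(122)
        pred_win = plt.imshow(pred_img)
    else:
        gt_win.set_data(gt_img)
        pred_win.set_data(pred_img)
    plt.pause(.05)
    plt.draw()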
def format_overlap_plots(self):
plt.subplot(131)
plt.ylabel('Overlap with smallest beam')
plt.xlabel('Crystal length (mm)')
plt.subplot(132)
plt.ylabel('Beam waist (um)')
plt.xlabel('Crystal length (mm)')
plt.subplot(133)
plt.ylabel('Beam curvature (m)')
plt.xlabel('Crystal length (mm)')
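# format_overlap_plots only labels axes; it relies on a 1x3 figure that was
# drawn earlier, since plt.subplot(131) re-selects an existing axes with the
# same geometry. A standalone sketch with hypothetical, made-up curves so the
# three labelled panels have something to show:
import numpy as np
import matplotlib.pyplot as plt

lengths_mm = np.linspace(1, 30, 50)  # hypothetical crystal lengths
labels = ['Overlap with smallest beam', 'Beam waist (um)', 'Beam curvature (m)']
curves = [1 / (1 + 0.1 * lengths_mm), 30 + 2 * np.sqrt(lengths_mm), 0.05 * lengths_mm]  # made-up data
for i, (ylab, vals) in enumerate(zip(labels, curves)):
    plt.subplot(1, 3, i + 1)
    plt.plot(lengths_mm, vals)
    plt.ylabel(ylab)
    plt.xlabel('Crystal length (mm)')
plt.tight_layout()
plt.show()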
import matplotlib.pyplot as plt
import pyregion
region = """
image
circle(100, 100, 80)
box(200, 150, 150, 120, 0)
"""
r = pyregion.parse(region)
mask_1or2 = r.get_mask(shape=(300, 300))
myfilter = r.get_filter()
mask_1and2 = (myfilter[0] & myfilter[1]).mask((300, 300))
plt.subplot(121).imshow(mask_1or2, origin="lower", interpolation="nearest")
plt.subplot(122).imshow(mask_1and2, origin="lower", interpolation="nearest")
plt.show()
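# get_mask returns the union (OR) of all regions in the list, while ANDing the
# individual filters gives the intersection. Both results are plain boolean
# NumPy arrays, so further set operations on the masks built above can be done
# directly with NumPy, e.g. the pixels in exactly one of the two regions:
import numpy as np
mask_xor = mask_1or2 & ~mask_1and2
plt.imshow(mask_xor, origin="lower", interpolation="nearest")
plt.show()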
for sidx in range(zdim):
if np.sum(cutbestregions[sidx] > 0):
dum, numlabels = ndi.label(cutbestregions[sidx] * smask)
# cut away stray pixels
maxregion = 0
for lidx in range(numlabels):
component = extract_region(dum, lidx + 1)
npixels = np.sum(component)
if npixels > maxregion:
maxregion = npixels
cutbestregions[sidx] = component
if do_plot:
plt.figure()
plt.subplot(131)
plt.imshow(bestregions[sidx])
plt.subplot(132)
plt.imshow(smask)
plt.subplot(133)
plt.imshow(cutbestregions[sidx])
plt.show()
return cutbestregions
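# The loop above keeps only the largest connected component in each slice;
# extract_region is an external helper, presumably returning a mask of one
# label. A standalone sketch of the same largest-component step using only
# scipy.ndimage and NumPy:
import numpy as np
import scipy.ndimage as ndi

def largest_component(mask):
    # Label connected components, then keep the label with the most pixels
    # (bincount index 0 is the background, so it is skipped).
    labeled, numlabels = ndi.label(mask)
    if numlabels == 0:
        return np.zeros_like(mask, dtype=bool)
    sizes = np.bincount(labeled.ravel())[1:]
    return labeled == (np.argmax(sizes) + 1)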
def plot_data(X_train, y_train, X_test, y_test):
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
colors = get_colors(y)
colors_train = get_colors(y_train)
colors_test = get_colors(y_test)
plt.figure(figsize=(12, 4), dpi=150)
# Plot all data plot
plt.subplot(131)
plt.axis('equal')
plt.scatter(X[:, 0], X[:, 1], c=colors, s=10, edgecolors=colors)
plt.title("Data (100%)")
# training data plot
plt.subplot(132)
plt.axis('equal')
#plt.axis('off')
plt.scatter(X_train[:, 0], X_train[:, 1], c=colors_train, s=10, edgecolors=colors_train)
plt.title("Training Data (80%)")
# testing data plot
plt.subplot(133)
plt.axis('equal')
#plt.axis('off')
for i in range(size):
X_1 = X[0][i]
X_2 = X[1][i]
if y[i] == 1:
class_1[0].append(X_1)
class_1[1].append(X_2)
else:
class_2[0].append(X_1)
class_2[1].append(X_2)
plt.figure(figsize=(8, 6), dpi=80)
plt.title(title)
axes = plt.subplot(111)
type1 = axes.scatter(class_1[0], class_1[1], s=20, c='red')
type2 = axes.scatter(class_2[0], class_2[1], s=20, c='green')
plt.show()
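# The same two-class scatter can be drawn without the explicit loop by
# boolean-indexing the coordinate arrays. Sketch with random stand-in data,
# assuming X has shape (2, n) and y holds the class labels as above:
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
X = rng.normal(size=(2, 200))
y = rng.integers(0, 2, size=200)

plt.figure(figsize=(8, 6), dpi=80)
plt.title("Two-class scatter (sketch)")
axes = plt.subplot(111)
axes.scatter(X[0][y == 1], X[1][y == 1], s=20, c='red')
axes.scatter(X[0][y != 1], X[1][y != 1], s=20, c='green')
plt.show()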