# Develop images
sample_ya = model_a.process(sample_x).numpy()
sample_yb = model_b.process(sample_x).numpy()
if patch_size > 0:
    print('Cropping a {p}x{p} patch from the middle'.format(p=patch_size))
    # Center the crop in the input frame
    xx = (sample_x.shape[2] - patch_size) // 2
    yy = (sample_x.shape[1] - patch_size) // 2
    sample_x = sample_x[:, yy:yy+patch_size, xx:xx+patch_size, :]
    # Developed images are at 2x resolution, so scale the crop coordinates
    sample_y = sample_y[:, 2*yy:2*(yy+patch_size), 2*xx:2*(xx+patch_size), :]
    sample_ya = sample_ya[:, 2*yy:2*(yy+patch_size), 2*xx:2*(xx+patch_size), :]
    sample_yb = sample_yb[:, 2*yy:2*(yy+patch_size), 2*xx:2*(xx+patch_size), :]
# Plot images
fig = imdiff.compare_ab_ref(sample_y, sample_ya, sample_yb, fig=plt.figure(), extras=extras)
if output_dir is not None:
    from tikzplotlib import save as tikz_save
    dcomp = [x for x in fsutil.split(model_b_dirname) if re.match('(ln-.*|[0-9]{3})', x)]
    tikz_save('{}/examples_{}_{}_{}_{}.tex'.format(output_dir, camera, image, model_a_dirname, model_b_dirname),
              figureheight='8cm', figurewidth='8cm', strict=False)
else:
    fig.tight_layout()
    fig.suptitle('{}, A={}, B={}'.format(image, model_a.model_code, model_b.model_code))
    plt.show()
    plt.close(fig)
##############################################################################
# Now we compute cross-validation scores
from sklearn.model_selection import cross_val_score
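# svc_ovo and svc_ova are assumed to be defined earlier; a minimal sketch
# using scikit-learn's multiclass wrappers around a linear SVM:
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.svm import SVC
svc_ovo = OneVsOneClassifier(SVC(kernel='linear'))
svc_ova = OneVsRestClassifier(SVC(kernel='linear'))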
cv_scores_ovo = cross_val_score(svc_ovo, X, y, cv=5, verbose=1)
cv_scores_ova = cross_val_score(svc_ova, X, y, cv=5, verbose=1)
print('OvO:', cv_scores_ovo.mean())
print('OvA:', cv_scores_ova.mean())
##############################################################################
# Plot box plots of the prediction scores
from matplotlib import pyplot as plt
plt.figure(figsize=(4, 3))
plt.boxplot([cv_scores_ova, cv_scores_ovo])
plt.xticks([1, 2], ['One vs All', 'One vs One'])
plt.title('Prediction: accuracy score')
##############################################################################
# Plot a confusion matrix
#
# We fit on the first 10 sessions and plot a confusion matrix on the
# last 2 sessions
from sklearn.metrics import confusion_matrix
svc_ovo.fit(X[session < 10], y[session < 10])
y_pred_ovo = svc_ovo.predict(X[session >= 10])
plt.matshow(confusion_matrix(y[session >= 10], y_pred_ovo))
plt.title('Confusion matrix: One vs One')
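# `gaussian2d` used below is assumed to be an lmfit Model of a 2D Gaussian
# evaluated over the ROI's pixel grid and flattened to match FitROI.flatten();
# a minimal sketch under that assumption:
import numpy as _np
import lmfit as _lmfit

# Pixel coordinates centered on the ROI, matching the xc = yc = 0 initial guesses
Y_grid, X_grid = _np.indices(FitROI.shape)
Y_grid = Y_grid - FitROI.shape[0] // 2
X_grid = X_grid - FitROI.shape[1] // 2

def _gaussian2d_flat(a, xc, yc, s, b):
    # Amplitude a, center (xc, yc), width s, constant background b
    g = a * _np.exp(-((X_grid - xc) ** 2 + (Y_grid - yc) ** 2) / (2 * s ** 2)) + b
    return g.flatten()

gaussian2d = _lmfit.Model(_gaussian2d_flat, independent_vars=[])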
params = _lmfit.Parameters()
params.add("a", value=FitROI.max(), vary=True, min=0)
params.add("xc", value=0, vary=True)
params.add("yc", value=0, vary=True)
params.add("s", value=1, vary=True, min=0)
params.add("b", value=FitROI.min(), vary=True, min=0)
results = gaussian2d.fit(FitROI.flatten(), params)
# Get maximum coordinates and add offsets
xc = results.best_values["xc"]
yc = results.best_values["yc"]
xc += X_ + x_max_
yc += Y_ + y_max_
if display:
    _plt.figure(figsize=(17, 10))
    _plt.subplot(1, 3, 1)
    _plt.imshow(imageA, interpolation="none")
    _plt.subplot(1, 3, 2)
    _plt.imshow(imageB, interpolation="none")
    _plt.subplot(1, 3, 3)
    _plt.imshow(XCorr, interpolation="none")
    _plt.plot(xc, yc, "x")
    _plt.show()
xc -= _np.floor(X / 2)
yc -= _np.floor(Y / 2)
return -yc, -xc
# Evaluate fluid temperature profiles for the three pipe configurations
T_f_single = SingleUTube.get_temperature(z,
                                         T_f_in_single[it],
                                         T_b[it],
                                         m_flow,
                                         cp_f)
T_f_double_par = DoubleUTube_par.get_temperature(z,
                                                 T_f_in_double_par[it],
                                                 T_b[it],
                                                 m_flow,
                                                 cp_f)
T_f_double_ser = DoubleUTube_ser.get_temperature(z,
                                                 T_f_in_double_ser[it],
                                                 T_b[it],
                                                 m_flow,
                                                 cp_f)
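# SingleUTube, DoubleUTube_par and DoubleUTube_ser are assumed to be
# pygfunction pipe models built earlier; a rough sketch (positions, radii,
# conductivities and resistances are placeholders):
# import pygfunction as gt
# SingleUTube = gt.pipes.SingleUTube(pos_single, r_in, r_out, borehole, k_s, k_g, R_fp)
# DoubleUTube_par = gt.pipes.MultipleUTube(pos_double, r_in, r_out, borehole, k_s, k_g, R_fp, 2, config='parallel')
# DoubleUTube_ser = gt.pipes.MultipleUTube(pos_double, r_in, r_out, borehole, k_s, k_g, R_fp, 2, config='series')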
plt.rc('figure')
fig = plt.figure()
ax3 = fig.add_subplot(131)
# Axis labels
ax3.set_xlabel(r'Temperature (degC)')
ax3.set_ylabel(r'Depth from borehole head (m)')
# Plot temperatures
ax3.plot(T_f_single, z, 'b-', lw=1.5, label='Fluid')
ax3.plot(np.array([T_b[it], T_b[it]]), np.array([0., H]), 'k--', lw=1.5,
label='Borehole wall')
ax3.legend()
ax4 = fig.add_subplot(132)
# Axis labels
ax4.set_xlabel(r'Temperature (degC)')
ax4.set_ylabel(r'Depth from borehole head (m)')
# Plot temperatures
ax4.plot(T_f_double_par, z, 'b-', lw=1.5, label='Fluid')
ax4.plot(np.array([T_b[it], T_b[it]]), np.array([0., H]), 'k--', lw=1.5,
         label='Borehole wall')
ax4.legend()
attention scores for a single model's predictions.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
plot_data = predictions_dict["attention_scores"][:prediction_len, :source_len]
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(plot_data, cmap='Blues')
fontdict = {'fontsize': 14}
# Label the ticks with the source (x) and target (y) tokens
ax.set_xticklabels([''] + list(source_words), rotation=90, fontdict=fontdict)
ax.set_yticklabels([''] + list(target_words), fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
print("".join([str(row) for row in source_words]).replace(
"SEQUENCE_END", ""))
print("========")
print("".join([str(row) for row in target_words]).replace(
"SEQUENCE_END", ""))
# print(source_words.join(""), " >>>>>>>>>>" , target_words.join(""))
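# `_get_prediction_length` above is assumed to count tokens up to and
# including the first SEQUENCE_END marker; a minimal sketch:
def _get_prediction_length(predictions_dict):
    # Length of the prediction, cut at the first SEQUENCE_END token
    tokens = list(predictions_dict["predicted_tokens"])
    return next((i + 1 for i, t in enumerate(tokens) if t == "SEQUENCE_END"),
                len(tokens))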
doy = row[1]
year = row[0].year
if snow > 0:
    storm_tot = tots[year - 1900, -1]
    tots[year - 1900, doy:] = storm_tot + snow
    if not is_storm:
        # First snowy day after a break starts a new storm event
        storm_tot = events[year - 1900, -1]
        events[year - 1900, doy:] = storm_tot + 1
        is_storm = True
else:
    is_storm = False
xticks = [1,32,62,93,124,155, 183, 214, 244]
xticklabels = ['1 Sep','1 Oct', '1 Nov', '1 Dec', '1 Jan','1 Feb', '1 Mar', '1 Apr', '1 May']
fig = plt.figure()
ax3 = fig.add_subplot(111)
v = numpy.average(tots, 0)
ax3.plot(numpy.arange(0, 260), v / float(max(v)) * 100., lw=2,
         label="Accumulation %.1f in" % (max(v),))
v = numpy.average(events, 0)
ax3.plot(numpy.arange(0, 260), v / float(max(v)) * 100., lw=2,
         label="Events %.1f" % (max(v),))
ax3.set_xticks(xticks)
ax3.set_xticklabels(xticklabels)
ax3.set_xlim(31,245)
ax3.set_title("Des Moines Total Snowfall\nEach Winter between 1900-2013")
#ax3.plot([32,135], [avgV,avgV], color='k')
ax3.grid(True)
ax3.set_yticks([0,10,25,50,75,100])
#ax3.set_ylim(0,31)
ax3.set_ylabel("Percentage of Average Total")
ax3.legend(loc=2)
# Create vertex coordinates for each grid cell...
# (<0,0> is at the top left of the grid in this system)
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x,y)).T
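# `the_mask` and `segm_heatmap` are assumed to be pre-allocated accumulators,
# e.g. (shapes inferred from the code below):
# the_mask = np.zeros((ny, nx), dtype=int)
# segm_heatmap = np.zeros((128, 128))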
for poly_verts in b['segmentation']:
    path = mplPath.Path(np.array([[x, y] for x, y in zip(poly_verts[0::2], poly_verts[1::2])]))
    grid = path.contains_points(points)
    grid = grid.reshape((ny, nx))
    the_mask += np.array(grid, dtype=int)
segm_heatmap += imresize(the_mask,(128,128))
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111)
ax.imshow(segm_heatmap)
plt.savefig('%s/fn_heatmaps.pdf'%loc_dir,bbox_inches='tight')
plt.close()
f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
f.close()
def plot_log(logfile_path, train_dict, test_dict):
    num = len(train_dict)
    train_num_iters = [train_dict[i]['NumIters'] for i in range(num) if i % 2 != 0]
    train_los = [train_dict[i]['loss_det'] for i in range(num) if i % 2 != 0]
    train_acc = [train_dict[i]['acc'] for i in range(num) if i % 2 != 0]
    train_los_reg = [train_dict[i]['loss_reg'] for i in range(num) if i % 2 != 0]
    # Drop the first record of each series
    train_num_iters = train_num_iters[1:]
    train_los = train_los[1:]
    train_acc = train_acc[1:]
    train_los_reg = train_los_reg[1:]
    fig = plt.figure(1)
    plt.subplot(211)
    plt.plot(train_num_iters, train_los, 'g.-', label='train loss_det')
    plt.plot(train_num_iters, train_los_reg, 'r.-', label='train loss_reg')
    plt.legend(loc='upper center', ncol=2)
    plt.subplot(212)
    plt.plot(train_num_iters, train_acc, 'b.-', label='train acc')
    plt.legend(loc='lower center', ncol=2)
    #plt.show()
    log_basename = os.path.basename(logfile_path)
    #train_filename = os.path.join(output_dir, log_basename + '.png')
    train_filename = logfile_path + '.png'
    fig.savefig(train_filename)
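# Hypothetical usage, assuming the log was parsed with Caffe's
# tools/extra/parse_log.py (its parse_log returns train/test dict lists):
# from parse_log import parse_log
# train_dict, test_dict = parse_log('/path/to/train.log')
# plot_log('/path/to/train.log', train_dict, test_dict)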
# Normalize the histogram and flatten it for comparison
h1_norm = cv2.normalize(h1, None).flatten()
d = cv2.compareHist(h1_norm, hist_norm, method)
results[k] = d
# sort the results
results = sorted([(v, k) for (k, v) in results.items()], reverse = reverse)
# show the query image
fig = plt.figure("Query")
ax = fig.add_subplot(1, 1, 1)
ax.imshow(spots["1_Gray_Spot"])
plt.axis("off")
# initialize the results figure
fig = plt.figure("Results: %s" % (methodName))
fig.suptitle(methodName, fontsize = 20)
# loop over the results
for (i, (v, k)) in enumerate(results):
    # show the result
    ax = fig.add_subplot(1, len(spots), i + 1)
    ax.set_title("%s: %.2f" % (k, v))
    plt.imshow(spots[k])
    plt.axis("off")
# show the OpenCV methods
plt.show()
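# `methodName`/`method` above are assumed to come from a loop over OpenCV's
# histogram comparison methods, e.g.:
# OPENCV_METHODS = (
#     ("Correlation", cv2.HISTCMP_CORREL),
#     ("Chi-Squared", cv2.HISTCMP_CHISQR),
#     ("Intersection", cv2.HISTCMP_INTERSECT),
#     ("Hellinger", cv2.HISTCMP_BHATTACHARYYA))
# for (methodName, method) in OPENCV_METHODS:
#     ...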
f.write("\nNumber of people in images with Background False Positives:\n")
f.write(" - No people: [%d]\n"%no_people)
f.write(" - One person: [%d]\n"%one)
f.write(" - Small group (2-4): [%d]\n"%small_grp)
f.write(" - Large Group (5-8): [%d]\n"%large_grp)
f.write(" - Crowd (>=9): [%d]\n"%crowd)
f.write("\nArea size (in pixels) of Background False Positives:\n")
f.write(" - Small (%d,%d): [%d]\n"%(areaRngs[0][0],areaRngs[0][1],small))
f.write(" - Medium (%d,%d): [%d]\n"%(areaRngs[1][0],areaRngs[1][1],medium))
f.write(" - Large (%d,%d): [%d]\n"%(areaRngs[2][0],areaRngs[2][1],large))
f.write(" - X-Large (%d,%d): [%d]\n"%(areaRngs[3][0],areaRngs[3][1],xlarge))
f.write(" - XX-Large (%d,%d): [%d]\n"%(areaRngs[4][0],areaRngs[4][1],xxlarge))
plt.figure(figsize=(10,10))
plt.imshow(ar_pic,origin='lower')
plt.colorbar()
plt.title('BBox Aspect Ratio',fontsize=20)
plt.xlabel('Width (px)',fontsize=20)
plt.ylabel('Height (px)',fontsize=20)
path = "%s/bckd_false_pos_bbox_aspect_ratio.pdf"%(loc_dir)
plt.savefig(path, bbox_inches='tight')
plt.close()
fig, ax = plt.subplots(figsize=(10,10))
plt.imshow(ar_pic_2,origin='lower')
plt.xticks(range(1, len(ar_bins) + 1), ["%d" % (x) for x in ar_bins], rotation='vertical')
plt.yticks(range(1, len(ar_bins) + 1), ["%d" % (x) for x in ar_bins])
plt.colorbar()
plt.grid()
plt.title('BBox Aspect Ratio',fontsize=20)