# We add the labels for each digit.
txts = []
for i in range(2):
    # Position of each label.
    xtext, ytext = np.median(proj[y == i, :], axis=0)
    txt = ax.text(xtext, ytext, str(i), fontsize=24)
    txt.set_path_effects([
        PathEffects.Stroke(linewidth=5, foreground="w"),
        PathEffects.Normal()])
    txts.append(txt)
# Add axis labels taken from the learned representation.
rep = [r.split('[')[-1] for r in clf.get_representation().split(']') if r != '']
print('rep:', rep)
plt.xlabel(rep[0])
plt.ylabel(rep[1])
plt.savefig('longitudinal_representation.svg', dpi=120)
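# A minimal, self-contained sketch of the context the snippet above assumes
# (`np`, `PathEffects`, `ax`, `proj`, and `y` are not defined in this excerpt;
# the values below are illustrative stand-ins, not the original data):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects

fig, ax = plt.subplots(figsize=(8, 8))
proj = np.random.randn(100, 2)            # stand-in for a 2-D embedding (e.g. t-SNE)
y = np.random.randint(0, 2, size=100)     # one class label per projected point
ax.scatter(proj[:, 0], proj[:, 1], c=y, s=10)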
for ts in test_time_stamps:
    try:
        float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
    except ValueError:
        # Fall back to the second timestamp format.
        float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
plt.legend(fontsize='small', loc='best')
plt.ylabel('MPPS/Core (Norm)')
plt.title('Setup: ' + self.name)
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,        # booleans rather than the deprecated 'off' strings
    top=False,
    labelbottom=False)
plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
if save_path:
    plt.savefig(os.path.join(save_path, self.name + file_name))
    if not self.setup_trend_stats.empty:
        self.setup_trend_stats.round(2).to_csv(
            os.path.join(save_path, self.name + '_trend_stats.csv'))
plt.close('all')
def plot_histogram(lfw_dir):
"""
Function to plot the distribution of cluster sizes in LFW.
"""
filecount_dict = {}
for root, dirs, files in os.walk(lfw_dir):
for dirname in dirs:
n_photos = len(os.listdir(os.path.join(root, dirname)))
filecount_dict[dirname] = n_photos
print("No of unique people: {}".format(len(filecount_dict.keys())))
df = pd.DataFrame(filecount_dict.items(), columns=['Name', 'Count'])
print("Singletons : {}\nTwo :{}\n".format((df['Count'] == 1).sum(),
(df['Count'] == 2).sum()))
plt.hist(df['Count'], bins=max(df['Count']))
plt.title('Cluster Sizes')
plt.xlabel('No of images in folder')
plt.ylabel('No of folders')
plt.show()
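# Hypothetical usage of plot_histogram above; the function relies on `os`,
# `pandas as pd`, and `matplotlib.pyplot as plt` being imported, and expects the
# standard LFW layout of one sub-directory per identity. The path below is an
# illustrative placeholder, not one from the original project.
import os
import pandas as pd
import matplotlib.pyplot as plt

# plot_histogram('/path/to/lfw')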
def draw_prediction(dates: list, awaited_results: list, prediction_results: list):
    plt.figure()
    plt.plot(dates[INPUT_SIZE:], awaited_results, color="black")       # actual prices
    plt.plot(dates[INPUT_SIZE:], prediction_results, color="green")    # prices predicted by the network
    plt.title('current prices / predicted prices by date')
    plt.ylabel('price')
    plt.xlabel('date')
    plt.legend(['current', 'predicted'], loc='best')
    plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
plt.scatter(x_values, y_values, marker='x', color=color)  # alternative: marker='^'
plt.gcf().subplots_adjust(left=0.15)
plt.gcf().subplots_adjust(bottom=0.15) # reserve space for label
# plt.xlabel(attribute,fontsize=15)
# # plt.ylabel("Frequency")
# plt.ylabel("Number",fontsize=15) #
# # plt.title('Histogram of '+attribute)
# # plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# # plt.axis([40, 160, 0, 0.03])
ax.tick_params(labelsize=20)
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel, fontsize=20)
# # marked values
iou_thresholds = [0.5]
for iou_thr in iou_thresholds:
    ax.axvline(x=iou_thr, color='k', linewidth=0.8, linestyle='--')
    # ax.text(area+100, 0.55, '%d $\mathrm{m^2}$'%area, rotation=90, fontsize=20)
    ax.text(iou_thr + text_loc_detX, text_locY, '%.1f ' % iou_thr, rotation=90, fontsize=20)
# plt.grid(True)
plt.savefig(output)
basic.outputlogMessage("Output figures to %s"%os.path.abspath(output))
temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
train_acc.append(temp_acc_train)
temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
test_acc.append(temp_acc_test)
if (i + 1) % 300 == 0:
    print('Loss = ' + str(temp_loss))
###
# Display model performance
###
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.show()
# Plot train and test accuracy
plt.plot(train_acc, 'k-', label='Train Set Accuracy')
plt.plot(test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
Args:
    rank_list: Rank of the correct match for each query (consumed by recall_at_k).
    figsize: Figure width and height in inches.

Returns:
    Nothing, but generates a plot.
"""
plt.subplots(figsize=figsize)
k_vec = range(1, max(rank_list) + 1)
recalls = [recall_at_k(rank_list, k) for k in k_vec]
plt.plot(k_vec, recalls, color="darkorange", lw=2)
plt.xlim([0.0, max(k_vec)])
plt.ylim([0.0, 101])
plt.ylabel("Recall")
plt.xlabel("Top-K")
plt.title("Recall@k curve")
def verify_hdf5(indices, results_dir):
    with h5py.File(workdir + 'ibis.hdf5', 'r') as f:
        images = f['ibis_t1']
        labels = f['qc_label']
        filenames = f['filename']
        for index in indices:
            # Take the middle slice along the first axis for a quick visual check.
            img = images[index, target_size[0] // 2, :, :]
            label = labels[index, ...]
            filename = filenames[index, ...]
            plt.imshow(img, cmap='gray')
            plt.xlabel(str(label))
            plt.ylabel(str(filename[2:-1]))
            plt.savefig(results_dir + 'img-' + str(index), bbox_inches='tight')
            plt.close()
def __plot_curve(self, name, val, ylim, suffix=''):
    x = val['learning_curve']['x']
    y_train = val['learning_curve']['y_train']
    y_cv = val['learning_curve']['y_cv']
    plt.plot(x, y_train, 'o-', color='dodgerblue', label='Training score')
    plt.plot(x, y_cv, 'o-', color='darkorange', label='Cross-validation score')
    plt.title(name)
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    plt.grid(True)
    plt.ylim(ylim)
    plt.legend(loc="lower right")
    fname = self.params.out_dir + '/Learning curve_' + name
    if suffix != '':
        fname += '_' + suffix
    fname += '.png'
    plt.savefig(fname, bbox_inches='tight')
    plt.close()
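# For reference, a hypothetical `val` structure matching what __plot_curve reads
# above; the numbers are illustrative only, not from the original run.
val = {
    'learning_curve': {
        'x': [100, 200, 400, 800],
        'y_train': [0.99, 0.97, 0.95, 0.94],
        'y_cv': [0.70, 0.78, 0.85, 0.88],
    }
}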