# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing `def` is outside this view, so `ax`,
# `fig`, `grid_dict`, `pre_ind`, `tar_ind`, `i`, `suffix`, `baseline`,
# `X_GRID_NUM` and `plt` come from the missing context. Grid indices appear to
# be unpacked into (x, y) via modulo / floor-division by the grid width —
# TODO confirm against the missing loop.
# error line: segment joining the predicted cell to the target cell
ax.plot([grid_dict[pre_ind] % X_GRID_NUM, grid_dict[tar_ind] % X_GRID_NUM],
[grid_dict[pre_ind] // X_GRID_NUM, grid_dict[tar_ind] // X_GRID_NUM], label='error line' if i == 0 else "", color='r', linewidth=0.5)
# prediction point (labelled only on i == 0 so the legend gets one entry)
ax.scatter(grid_dict[pre_ind] % X_GRID_NUM, grid_dict[pre_ind] // X_GRID_NUM, label='prediction' if i == 0 else "", color='b', marker='.')
# target point
ax.scatter(grid_dict[tar_ind] % X_GRID_NUM, grid_dict[tar_ind] // X_GRID_NUM, label='target' if i == 0 else "", color='c', marker='.')
ax.legend()
# Alternative de-duplicating legend, kept for reference:
# handles, labels = plt.gca().get_legend_handles_labels()
# by_label = OrderedDict(zip(labels, handles))
# plt.legend(by_label.values(), by_label.keys())
plt.title("Errors of classification{}".format(suffix), y=1.08)
plt.show()
# save error line fig (file name distinguishes the two network configurations)
if baseline:
fig.savefig('./graph_output/errors_visualization_1_1.png') # classification [200,200,200]
else:
fig.savefig('./graph_output/errors_visualization_1.png') # classification [64,32,16]
# output 2 values (regression)
else:
# NOTE(review): fragment — the matching `if` and the enclosing `def` are
# outside this view; `ax`, `fig`, `time`, `y`, `name`, `names` and `plt`
# come from the missing context.
# Configure the major x/y tick positions.
my_x_ticks = np.arange(-40, 40, 10)
my_y_ticks = np.arange(-30, 30, 10)
ax.set_xticks(my_x_ticks, minor=False)
ax.set_yticks(my_y_ticks, minor=False)
ax.plot(time, y, 'b', label='result', zorder=101)
if len(name) < 18:
ax.set_ylabel(name)
else:
# shorten long variable names so the label fits next to the axis
ax.set_ylabel('...' + name[-15:])
ax.margins(x=0, y=0.05)
# One row of height 1.5in per plotted variable.
fig.set_size_inches(w=8, h=1.5*len(names), forward=True)
plt.tight_layout()
# NOTE(review): fragment — `fig`, `filename` and `plt` come from the
# enclosing scope outside this view.
# Show interactively when no target path is given; otherwise save to disk.
if filename is None:
    plt.show()
else:
    # Create the output directory if needed. A bare filename yields an
    # empty dirname, which os.makedirs would reject, so guard against it.
    out_dir, _ = os.path.split(filename)
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # matplotlib's savefig takes the path positionally (its parameter is
    # named `fname`); passing `filename=` raises TypeError.
    fig.savefig(filename)
plt.close(fig)
# NOTE(review): fragment of an image-grid helper — `images`, `cols`, `title`,
# `scaling` and `plt` come from the enclosing scope outside this view.
n_images = len(images)
# 'auto' picks a roughly square grid.
cols = np.round(np.sqrt(n_images)) if cols=='auto' else cols
rows = np.ceil(n_images/float(cols))
fig = plt.figure()
if type(title)==str: fig.suptitle(title, fontsize=20)
for n, image in enumerate(images):
# NOTE(review): add_subplot's signature is (nrows, ncols, index); passing
# (cols, rows, ...) looks transposed — confirm the intended layout.
a = fig.add_subplot(cols, rows, n + 1)
# Grayscale colormap for single-channel images.
if image.ndim == 2: plt.gray()
a.imshow(image)
a.axis('on')
a.axis('equal')
# Keep the axes frame but hide the tick labels.
a.set_xticklabels([])
a.set_yticklabels([])
# NOTE(review): scales the figure linearly with the image count — grows
# very fast; sqrt(n_images) may be the intent. Confirm.
if scaling: fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
fig.subplots_adjust(wspace=0, hspace=0)
plt.show()
# NOTE(review): fragment of a method (enclosing `def` not visible); relies on
# `self.get_outcome_probs()`, `len(self)` (presumably the qubit count — TODO
# confirm), `qubit_subset`, `get_bitstring_from_index` and `plt` from the
# missing context.
prob_dict = self.get_outcome_probs()
if qubit_subset:
# Restrict the plot to the requested outcome indices.
sub_dict = {}
qubit_num = len(self)
for i in qubit_subset:
# Indices address the 2**qubit_num outcome bitstrings.
if i > (2 ** qubit_num - 1):
raise IndexError("Index {} too large for {} qubits.".format(i, qubit_num))
else:
sub_dict[get_bitstring_from_index(i, qubit_num)] = prob_dict[
get_bitstring_from_index(i, qubit_num)
]
prob_dict = sub_dict
# Bar chart of outcome probabilities, x-ticks keyed by bitstring.
plt.bar(range(len(prob_dict)), prob_dict.values(), align="center", color="#6CAFB7")
plt.xticks(range(len(prob_dict)), prob_dict.keys())
plt.show()
# NOTE(review): fragment — the opening of the Process(...) call and the loop
# spawning workers are outside this view; `processes`, `out_queue`,
# `correlation_full_dict`, `test_frequencies_range` and `plt` come from the
# missing context.
trajectory,
correlation_function_step,
out_queue))
processes.append(p)
p.start()
# Drain one result dict per worker before joining it; each worker posts a
# mapping of index -> partial correlation.
for p in processes:
correlation_full_dict.update(out_queue.get())
p.join()
# NOTE(review): column order of the matrix relies on the dict's key order —
# confirm the keys arrive in the intended (sorted) order.
correlation_vector = np.array([correlation_full_dict[i] for i in correlation_full_dict.keys()]).T
plt.plot(test_frequencies_range,correlation_vector.sum(axis=1).real)
plt.show()
return correlation_vector
# NOTE(review): fragment — `terri`, `data` and `plt` come from the enclosing
# scope outside this view.
labels = terri.keys()
# NOTE(review): called before plt.subplots() below, so this adjusts an
# implicit (possibly different) figure — confirm it has any effect here.
plt.subplots_adjust(bottom = 0.1)
f, ax1 = plt.subplots(1, 1) # ,sharey=True)
plt.scatter(data[0], data[1], marker = 'o', color = '#4B6E9C')
# Annotate every point with its label, offset slightly below the marker.
for label, x, y in zip(labels, data[0], data[1]):
plt.annotate(
label,
xy = (x, y), xytext = (0, -12), fontsize=10,
textcoords = 'offset points', ha = 'center', va = 'bottom')
# ax1.scatter(data[0], data[1])
ax1.set_xlim([0.0000005, 0.0025])
ax1.set_xscale('log')
ax1.set_ylabel('Fraction Edges Cut (5 passes)')
ax1.set_xlabel('Nonzero Density (log)')
plt.show()
# The list of all colormaps: http://matplotlib.org/examples/color/colormaps_reference.html
# NOTE(review): fragment — `z_matrix` and `plt` come from the enclosing scope
# outside this view.
#interp='nearest' # "raw" (non smooth) map
interp = 'bilinear' # "smooth" map
fig = plt.figure()
ax = fig.add_subplot(111)
# origin='lower' puts row 0 of z_matrix at the bottom of the axes.
im = ax.imshow(z_matrix, interpolation=interp, origin='lower')
plt.colorbar(im) # draw the colorbar
# SAVE AND SHOW ###############################################################
plt.savefig("imshow_ax.png")
plt.show()
# NOTE(review): fragment of a quantization-statistics dump (QKeras-style);
# `layer`, `p`, `n`, `plot`, `get_weight_scale`, `K` and `plt` come from the
# enclosing scope outside this view.
if layer.__class__.__name__ == "QActivation":
# Scale factor implied by the quantized activation.
alpha = get_weight_scale(layer.activation, p)
else:
alpha = 1.0
# Print the de-scaled output range for this layer (no newline yet).
print(
"{:30} {: 8.4f} {: 8.4f}".format(n, np.min(p / alpha),
np.max(p / alpha)),
end="")
# Append the scale range only when a non-trivial scale was found.
if alpha != 1.0:
print(" a[{: 8.4f} {:8.4f}]".format(np.min(alpha), np.max(alpha)))
if plot and layer.__class__.__name__ in [
"QConv2D", "QDense", "QActivation"
]:
plt.hist(p.flatten(), bins=25)
plt.title(layer.name + "(output)")
plt.show()
alpha = None
for i, weights in enumerate(layer.get_weights()):
# Apply the layer's quantizer (if any) to get the effective weights.
if hasattr(layer, "get_quantizers") and layer.get_quantizers()[i]:
weights = K.eval(layer.get_quantizers()[i](K.constant(weights)))
if i == 0 and layer.__class__.__name__ in [
"QConv1D", "QConv2D", "QDense"
]:
alpha = get_weight_scale(layer.get_quantizers()[i], weights)
# if alpha is 0, let's remove all weights.
alpha_mask = (alpha == 0.0)
# Zero out entries whose scale is 0; de-scale the rest.
weights = np.where(alpha_mask, weights * alpha, weights / alpha)
if plot:
plt.hist(weights.flatten(), bins=25)
plt.title(layer.name + "(weights)")
plt.show()
# NOTE(review): this print call is truncated by the paste — its remaining
# argument(s) and closing parenthesis are missing from this view.
print(" ({: 8.4f} {: 8.4f})".format(np.min(weights), np.max(weights)),
print "offset sorting done"
print offsets_sorted2
# delay = offsets_sorted[-1]
# print delay
delay2 = offsets_sorted2[-1]
print delay2
x_b, y_b = plot_peaks(peaks_b)
plt.subplot(2, 1, 1)
plt.plot(x_b, y_b, 'kx')
x_s, y_s = plot_peaks(peaks_s)
plt.subplot(2, 1, 2)
plt.plot(x_s, y_s, 'kx')
plt.show()
if delay2[0] > 0:
return (float(delay2[0])/43, 0)
else:
return (0, abs(float(delay2[0])/43))
# NOTE(review): fragment — `trpr`, `p`, `xpac`, `is_coupling`, `i` and `plt`
# come from the enclosing scope outside this view.
# NOTE(review): assert used for runtime validation — stripped under
# `python -O`; consider raising instead.
assert trpr > .95
# build title of the figure (for sanity check)
meth = p.method.replace(' (', '\n(')
title = f"Method={meth}\nAccuracy={np.around(trpr * 100, 2)}%"
# set to nan everywhere it's not significant
xpac[~is_coupling] = np.nan
# Color limits from the significant values only (nan-aware min/max).
vmin, vmax = np.nanmin(xpac), np.nanmax(xpac)
# plot the results
plt.subplot(2, 3, i + 2)
p.comodulogram(xpac, colorbar=False, vmin=vmin, vmax=vmax, title=title)
plt.ylabel(''), plt.xlabel('')
plt.tight_layout()
# NOTE(review): f-string has no placeholders; a plain string would do.
plt.savefig(f"../figures/r3_functional_pac.png", dpi=300, bbox_inches='tight')
plt.show() # show on demand