Loop that sets the random-number seed for the CSD profile,
the electrode configuration, etc.
"""
# True CSD profile
t_csd_x, t_csd_y, true_csd = generate_csd_1D(src_width, nm, srcs=srcs,
                                             start_x=0, end_x=1.,
                                             start_y=0, end_y=1,
                                             res_x=100, res_y=100)
if isinstance(noise, float):
    n_spec = [noise]
else:
    n_spec = noise
RMS_wek = np.zeros(len(n_spec))
LandR = np.zeros((2, len(n_spec)))
for i, noise in enumerate(n_spec):
    plt.close('all')
    noise = np.round(noise, 5)
    print('Reconstruction number: ', i, 'noise level: ', noise)
    # Electrodes
    ele_pos, pots = electrode_config(total_ele, true_csd, t_csd_x, t_csd_y, inpos, lpos)
    ele_y = ele_pos[:, 1]
    gdX = 0.01
    x_lims = [0, 1]  # CSD estimation region
    np.random.seed(srcs)
    # Add zero-mean uniform noise scaled by the peak |potential| and the noise level
    pots += (np.random.rand(total_ele)*np.max(abs(pots)) - np.max(abs(pots))/2)*noise
    k, est_csd, est_pot = do_kcsd(ele_y, pots, h=1., gdx=gdX,
                                  xmin=x_lims[0], xmax=x_lims[1], n_src_init=1e4)
    save_as = nm + '_noise' + str(np.round(noise*100, 1))
    if name == 'lc':
        m_norm = k.m_norm
y_pos = np.arange(len(df) + 1)
no_error_score = -10 * np.log10(1/ref_len)
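# Phred-style accuracy: Q = -10*log10(error rate); with zero observed errors
# the rate is floored at 1/ref_len, giving the ceiling score appended below.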
ax.barh(y_pos, pd.concat([df['remaining_err_rate_q'], pd.Series([no_error_score])]), align='center', color='green', ecolor='black')
ax.set_xlabel('Q(Accuracy)')
ax.set_ylabel('Error Class')
ax.set_ylim((y_pos[0]-0.5, y_pos[-1]+0.5))
ax.set_yticks(y_pos)
ax.set_yticklabels(['total error'] + list(df['klass']))
ax.invert_yaxis() # labels read top-to-bottom
xstart, xend = ax.get_xlim()
ystart, yend = ax.get_ylim()
ax.text(xend - 2.25, ystart - 0.25, '+')
ax.set_title('Q-score after removing error class')
fp = os.path.join(outdir, '{}_remaining_errors.png'.format(prefix))
fig.savefig(fp)
plt.close()
prediction_image = nib.load(prediction_file)
prediction = prediction_image.get_fdata()
rows.append([dice_coefficient(func(truth), func(prediction)) for func in masking_functions])
df = pd.DataFrame.from_records(rows, columns=header, index=subject_ids)
df.to_csv("./prediction/brats_scores.csv")
scores = dict()
for index, score in enumerate(df.columns):
    values = df.values.T[index]
    scores[score] = values[~np.isnan(values)]
plt.boxplot(list(scores.values()), labels=list(scores.keys()))
plt.ylabel("Dice Coefficient")
plt.savefig("validation_scores_boxplot.png")
plt.close()
if os.path.exists("./training.log"):
    training_df = pd.read_csv("./training.log").set_index('epoch')
    plt.plot(training_df['loss'].values, label='training loss')
    plt.plot(training_df['val_loss'].values, label='validation loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.xlim((0, len(training_df.index)))
    plt.legend(loc='upper right')
    plt.savefig('loss_graph.png')
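# For reference only: a minimal Dice-coefficient sketch on boolean masks.
# This is an illustrative assumption; the project's own dice_coefficient and
# masking_functions used above may be implemented differently.
import numpy as np

def dice_coefficient_sketch(truth, prediction):
    """Dice = 2*|A & B| / (|A| + |B|) for boolean masks A and B."""
    truth = np.asarray(truth, dtype=bool)
    prediction = np.asarray(prediction, dtype=bool)
    denom = truth.sum() + prediction.sum()
    if denom == 0:
        return 1.0  # both masks empty: treat as perfect agreement
    return 2.0 * np.logical_and(truth, prediction).sum() / denom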
# Baseline spectrum with continuum
baseline_spec = coeffs_all[:,0]
plt.plot(lambdas, baseline_spec)
contpix_lambda = list(np.loadtxt("pixtest4_lambda.txt",
                                 usecols=(0,), unpack=True))
y = [1]*len(contpix_lambda)
plt.scatter(contpix_lambda, y)
plt.title("Baseline Spectrum with Continuum Pixels")
plt.xlabel(r"Wavelength $\lambda (\AA)$")
plt.ylabel(r"$\theta_0$")
filename = "baseline_spec_with_cont_pix.png"
print "Diagnostic plot: fitted 0th order spectrum, cont pix overlaid."
print "Saved as %s" %filename
plt.savefig(filename)
plt.close()
# Leading coefficients for each label
nlabels = len(pivots)
fig, axarr = plt.subplots(nlabels, sharex=True)
plt.xlabel(r"Wavelength $\lambda (\AA)$")
for i in range(nlabels):
    ax = axarr[i]
    ax.set_ylabel(r"$\theta_%s$" % i)
    ax.set_title("%s" % label_names[i])
    ax.plot(lambdas, coeffs_all[:, i+1])
print "Diagnostic plot: leading coefficients as a function of wavelength."
filename = "leading_coeffs.png"
print "Saved as %s" %filename
fig.savefig(filename)
plt.close(fig)
m.drawcoastlines(color=(0.7, 0.7, 0.7))
m.drawparallels(np.arange(0., 91., 30.), color=(0.5, 0.5, 0.5))
m.drawmeridians(np.arange(0., 361., 60.), color=(0.5, 0.5, 0.5))
if filler is not None:
    m.pcolormesh(x, y, filler.values, vmin=np.min(laplace_range), vmax=np.max(laplace_range),
                 cmap=laplace_colormap)
cs = plot_fn(x, y, da.values, contours, cmap=plot_colormap)
plt.clabel(cs, fmt='%1.0f')
ax.text(0.01, 0.01, title, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
plot_panel(0, verif, 'Observed (%s)' % datetime.strftime(time, '%HZ %e %b %Y'), fill[0])
plot_panel(1, forecast, 'DLWP (%s)' % datetime.strftime(time, '%HZ %e %b %Y'), fill[1])
if file_name is not None:
    plt.savefig(file_name, bbox_inches='tight', dpi=200)
plt.close()
hist.append(corr)
hist.sort()
median_corr = round(np.median(hist), 3)
mean_corr = round(np.mean(hist), 3)
print(title)
print('median corr: {} mean corr: {}'.format(median_corr, mean_corr))
# histogram of correlation
fig = plt.figure(figsize=(5, 5))
plt.hist(hist, bins=100, density=True)
plt.xlabel('median=' + str(median_corr) + ', mean=' + str(mean_corr))
plt.ylabel('Density') #todo freq to density
plt.xlim(-1, 1)
plt.title(title)
plt.savefig(fprefix + ".png", bbox_inches='tight') #todo remove \n from out-name
plt.close(fig)
return hist
if team == away:
    plt.savefig(charts_units_pairings_teammates + 'onice_xg_away_pairings_teammates_lines.png', bbox_inches='tight', pad_inches=0.2)
elif team == home:
    plt.savefig(charts_units_pairings_teammates + 'onice_xg_home_pairings_teammates_lines.png', bbox_inches='tight', pad_inches=0.2)
# exercise a command-line option to show the current figure
if images == 'show':
    plt.show()
###
### CLOSE
###
plt.close(fig)
# status update
print('Plotting ' + team + ' pairings with lines 5v5 on-ice shots.')
# status update
print('Finished plotting the 5v5 on-ice shots for pairings with lines.')
def on_train_end(self, logs={}):
    plt.close()
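# Context sketch (assumption, not from the source): a hook like on_train_end
# above typically lives on a keras.callbacks.Callback subclass and is passed
# to model.fit(); the class and variable names below are hypothetical.
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback

class ClosePlotsOnTrainEnd(Callback):
    def on_train_end(self, logs=None):
        # release any figures left open by per-epoch plotting
        plt.close()

# model.fit(x_train, y_train, callbacks=[ClosePlotsOnTrainEnd()])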
def gen_plot(fpr, tpr):
    """Create a pyplot plot and save to buffer."""
    plt.figure()
    plt.xlabel("FPR", fontsize=14)
    plt.ylabel("TPR", fontsize=14)
    plt.title("ROC Curve", fontsize=14)
    plot = plt.plot(fpr, tpr, linewidth=2)
    buf = io.BytesIO()
    plt.savefig(buf, format='jpeg')
    buf.seek(0)
    plt.close()
    return buf
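# Usage sketch (assumption): feed an ROC curve from scikit-learn into gen_plot
# and write the returned JPEG buffer to disk. y_true and y_scores are toy
# example values, not from the source.
from sklearn.metrics import roc_curve

y_true = [0, 0, 1, 1]
y_scores = [0.1, 0.4, 0.35, 0.8]
fpr, tpr, _ = roc_curve(y_true, y_scores)
jpeg_buf = gen_plot(fpr, tpr)
with open("roc_curve.jpeg", "wb") as fh:
    fh.write(jpeg_buf.getvalue())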
if not (i == 1 and j == 2):
    ax.set_yticklabels(['{:3.1f}%'.format(x * 100) for x in vals])
else:
    ax.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
# Legend
lines = linesPhylDiag + [lineADhoRe] # + [lineTrue]
labelsPhylDiag = ['PhylDiag (' + ','.join(vs) + ')' for vs in variableArg_str[1]]
labels = labelsPhylDiag + ['i-ADHoRe 3.0'] #+ ['Simulation']
titleLegend = '(' + ','.join(variableArg_str[0]) + ')'
assert len(lines) == len(labels)
fig.legend(lines, labels, ncol=4, title=titleLegend, loc='upper center', fontsize=15)
fig.tight_layout()
# Save before the blocking show; once the window is closed, the current figure
# may be destroyed and savefig would write a blank file.
plt.savefig(arguments['outFigureName'], format='svg')
plt.show(block=True)
plt.close()