import fileinput
import numpy as np
import scipy.io as sio
from numpy.fft import fft, fftfreq
from matplotlib.pyplot import (plot, xlabel, ylabel, legend, figure,
                               semilogy, savefig, show)

# Accumulate the DAC setpoint and the two channels' voltage/current samples.
target, voltages_a, currents_a, voltages_b, currents_b = [], [], [], [], []

for line in fileinput.input():
    o, va, ia, vb, ib = [float(x.strip()) for x in line.split(',')]
    target.append(o)
    voltages_a.append(va)
    currents_a.append(ia)
    voltages_b.append(vb)
    currents_b.append(ib)

plot(voltages_a, '.', label='va')
plot(currents_a, '.', label='ia')
plot(voltages_b, '.', label='vb')
plot(currents_b, '.', label='ib')
plot(target, '.', label='dac')
xlabel('time (samples)')
ylabel('amplitude (bits)')
sio.savemat("smu.mat", {"v": voltages_a, "i": currents_a, "setpoint": target})
legend(loc='best')

figure()
# Plot magnitude spectra; fft() returns complex values, so take abs() before the log plot.
semilogy(fftfreq(len(voltages_a), 2e-05), np.abs(fft(voltages_a)), '.')
semilogy(fftfreq(len(voltages_b), 2e-05), np.abs(fft(voltages_b)), '.')
semilogy(fftfreq(len(target), 2e-05), np.abs(fft(target)), '.')
savefig("svmi-fft.png")
show()
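# Added round-trip check (not part of the original script): the lists written
# to smu.mat come back from loadmat as 1 x N double arrays, hence the ravel().
smu = sio.loadmat("smu.mat")
assert np.allclose(smu["v"].ravel(), voltages_a)
assert np.allclose(smu["setpoint"].ravel(), target)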
import scipy.io as sio
import numpy as np
def find1(a, func):
    # MATLAB-style find(): return the 1-based indices of elements for which func() is true.
    matches = [i for (i, val) in enumerate(a) if func(val)]
    return np.array(matches) + 1
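# Illustrative use of find1 (not from the original source): like MATLAB's
# find(x == 3), it returns 1-based positions.
assert list(find1([5, 3, 3, 7], lambda v: v == 3)) == [2, 3]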
action_3d_path = r''
DATA = sio.loadmat(action_3d_path + 'joint_feat_coordinate.mat')
feat = DATA['feat'][0]
if_contain = DATA['if_contain'][0]
labels = DATA['labels'][0]
data = feat
K = 20
train_ind = []
test_ind = []
testActors = [6, 7, 8, 9, 10]
i = 1
true_i = 0
# Cross-subject split over actions a = 1..20 and subjects j = 1..10;
# the per-sample split logic is not included in this excerpt.
for a in range(1, 21):
    for j in range(1, 11):
        pass
def __init__(self, synthtext_folder, target_size=768, viz=False, debug=False):
    super(Synth80k, self).__init__(target_size, viz, debug)
    self.synthtext_folder = synthtext_folder
    # SynthText ground truth: character boxes, image file names, and text strings.
    gt = scio.loadmat(os.path.join(synthtext_folder, 'gt.mat'))
    self.charbox = gt['charBB'][0]
    self.image = gt['imnames'][0]
    self.imgtxt = gt['txt'][0]
import scipy.io

def convert_brainstorm_to_tvb(tvb_data_path, chan_paths):
    """
    Convert a given set of channel files from Brainstorm to TVB format.
    See the usage sketch after this function.
    """
    bst_path = tvb_data_path + 'brainstorm/data/TVB-Subject/'
    for sens_type, sens_path in chan_paths.items():
        # only MEG channels require orientation information
        use_ori = sens_type in ('meg', )
        # read the necessary fields from the MAT file
        mat = scipy.io.loadmat(bst_path + sens_path)
        name = [l[0] for l in mat['Channel']['Name'][0]]
        loc = get_field_array(mat['Channel']['Loc'])
        if use_ori:
            ori = get_field_array(mat['Channel']['Orient'])
        # Brainstorm uses m, TVB uses mm
        loc *= 1e3
        # write out to text format
        out_fname = '%s/sensors/%s-brainstorm-%d.txt'
        out_fname %= tvb_data_path, sens_type, len(name)
        with open(out_fname, 'w') as fd:
            if use_ori:  # MEG: name, position and orientation
                for n, (x, y, z), (ox, oy, oz) in zip(name, loc, ori):
                    line = '\t'.join(['%s'] + ['%f'] * 6) + '\n'
                    line %= n, x, y, z, ox, oy, oz
                    fd.write(line)
            else:  # sEEG, EEG: name and position only (assumed completion of the truncated branch)
                for n, (x, y, z) in zip(name, loc):
                    line = '\t'.join(['%s'] + ['%f'] * 3) + '\n'
                    line %= n, x, y, z
                    fd.write(line)
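# Hypothetical usage sketch (not from the original source); the data path and
# channel-file names are placeholders for whatever Brainstorm exported.
# convert_brainstorm_to_tvb(
#     '/path/to/tvb-data/',
#     {'meg': 'channel_meg.mat', 'eeg': 'channel_eeg.mat'},
# )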
import scipy as sp
import scipy.io  # makes sp.io available

def load_trajectory(filename, system=None):
    data = sp.io.loadmat(filename)
    # Load time as a 1D array
    t = data['time'].squeeze()
    # Optional state/input fields; .get() returns None when a field is absent.
    Q_in = data.get('Q', None)
    p_in = data.get('p', None)
    v_in = data.get('v', None)
    u_in = data.get('u', None)
    rho_in = data.get('rho', None)
    # MATLAB cell arrays of strings come back as object arrays; flatten and strip them.
    Q_index = [str(s[0]).strip() for s in data['Q_index'].ravel()]
    p_index = [str(s[0]).strip() for s in data['p_index'].ravel()]
    v_index = [str(s[0]).strip() for s in data['v_index'].ravel()]
    u_index = [str(s[0]).strip() for s in data['u_index'].ravel()]
    rho_index = [str(s[0]).strip() for s in data['rho_index'].ravel()]
def __init__(self, samples, thetas):
    self.fname = samples
    data = io.loadmat(samples)
    self.data = data['X']
    self.y = data['y']
    # make sure the labels are in range 0 <= y
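    # Assumed completion (the original snippet is cut off here): MATLAB label
    # vectors are typically 1-based column vectors, so flatten them and shift
    # to start at 0.
    self.y = self.y.ravel().astype(int)
    self.y -= self.y.min()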
def _load_label_inst(self, data_id):
    label_file = os.path.join(
        self.data_dir, 'cls', data_id + '.mat')
    inst_file = os.path.join(
        self.data_dir, 'inst', data_id + '.mat')
    label_anno = scipy.io.loadmat(label_file)
    label_img = label_anno['GTcls']['Segmentation'][0][0].astype(np.int32)
    inst_anno = scipy.io.loadmat(inst_file)
    inst_img = inst_anno['GTinst']['Segmentation'][0][0].astype(np.int32)
    # Map background (0) and void/boundary (255) pixels to -1.
    inst_img[inst_img == 0] = -1
    inst_img[inst_img == 255] = -1
    return label_img, inst_img
# Tail of a train/test index-splitting helper (its beginning is not included in this excerpt).
test_indices += test[i]
np.random.shuffle(train_indices)
np.random.shuffle(test_indices)
return train_indices, test_indices
def res4_model_ss():
    model_res4 = ssrn_SS_UP.ResnetBuilder.build_resnet_8((1, img_rows, img_cols, img_channels), nb_classes)
    RMS = RMSprop(lr=0.0003)
    # Train the model using RMSprop
    model_res4.compile(loss='categorical_crossentropy', optimizer=RMS, metrics=['accuracy'])
    return model_res4
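# Hypothetical usage sketch (not from the original source); X_train and Y_train
# stand for preprocessed spatial-spectral patches and one-hot labels.
# model = res4_model_ss()
# history = model.fit(X_train, Y_train, batch_size=batch_size,
#                     epochs=nb_epoch, validation_split=0.1)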
uPavia = sio.loadmat('/home/zilong/SSRN/datasets/UP/PaviaU.mat')
gt_uPavia = sio.loadmat('/home/zilong/SSRN/datasets/UP/PaviaU_gt.mat')
data_IN = uPavia['paviaU']
gt_IN = gt_uPavia['paviaU_gt']
print(data_IN.shape)
#new_gt_IN = set_zeros(gt_IN, [1,4,7,9,13,15,16])
new_gt_IN = gt_IN
batch_size = 16
nb_classes = 9
nb_epoch = 200 #400
img_rows, img_cols = 7, 7 #27, 27
patience = 200
INPUT_DIMENSION_CONV = 103
INPUT_DIMENSION = 103
def ut_generate_ptz_cameras():
    """
    Generate PTZ camera demo: Section 3.1
    """
    import scipy.io as sio
    data = sio.loadmat('../../data/worldcup_dataset_camera_parameter.mat')
    print(data.keys())
    # Camera-center and focal-length statistics, each ordered [mean, std, min, max].
    cc_mean = data['cc_mean']
    cc_std = data['cc_std']
    cc_min = data['cc_min']
    cc_max = data['cc_max']
    cc_statistics = [cc_mean, cc_std, cc_min, cc_max]
    fl_mean = data['fl_mean']
    fl_std = data['fl_std']
    fl_min = data['fl_min']
    fl_max = data['fl_max']
    fl_statistics = [fl_mean, fl_std, fl_min, fl_max]
    roll_statistics = [0, 0.2, -1.0, 1.0]
    pan_range = [-35.0, 35.0]
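    # Illustrative sketch only (an assumption, not the repository's own sampling
    # code): one plausible way to draw n random PTZ parameters from the
    # statistics above is Gaussian sampling for focal length and roll, and
    # uniform sampling for pan.
    # n = 4
    # fl = np.random.normal(fl_mean, fl_std, (n, 1))
    # roll = np.random.normal(roll_statistics[0], roll_statistics[1], (n, 1))
    # pan = np.random.uniform(pan_range[0], pan_range[1], (n, 1))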
import scipy.io as spio

def _check_keys(d):
    '''
    Check whether entries in the dictionary are mat-objects; if so,
    _todict is called to convert them to nested dictionaries.
    '''
    for key in d:
        if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
            d[key] = _todict(d[key])
    return d
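# _todict is referenced above but not shown in this excerpt. A minimal sketch,
# following the common scipy "mat_struct to nested dict" recipe (assumed to
# match the original helper):
def _todict(matobj):
    '''Recursively convert a scipy.io mat_struct into a nested dictionary.'''
    d = {}
    for field in matobj._fieldnames:
        elem = matobj.__dict__[field]
        if isinstance(elem, spio.matlab.mio5_params.mat_struct):
            d[field] = _todict(elem)
        else:
            d[field] = elem
    return d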