def pytorch_add_newobject():
    """add vectors, put result into new memory"""
    import torch
    params0 = torch.from_numpy(create_array())
    params1 = torch.from_numpy(create_array())
    for i in range(args.num_iters):
        with timeit('pytorch_add_newobject'):
            params0 + params1  # result is discarded; only the timed add matters
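
# The benchmark above relies on create_array, timeit and args defined elsewhere
# in its script; the stand-ins below are hypothetical (array size, timer and
# argument defaults are assumptions, not taken from the original):
import argparse
import time
from contextlib import contextmanager

import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('--num_iters', type=int, default=100)
args = parser.parse_args([])

def create_array(size=10 ** 6):
    # plain float32 vector to be wrapped by torch.from_numpy
    return np.ones(size, dtype=np.float32)

@contextmanager
def timeit(tag):
    # wall-clock timer printed once per iteration
    start = time.perf_counter()
    yield
    print(f"{tag}: {(time.perf_counter() - start) * 1e3:.3f} ms")
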
if torch.cuda.is_available():
    # if args.sample_with_collision:
    #     target['coll_batch'] = target['coll_batch'].cuda()
    # if args.sample_with_offroad:
    #     target['off_batch'] = target['off_batch'].cuda()
    if args.sample_with_pos:
        target['pos_batch'] = target['pos_batch'].cuda()
    if args.sample_with_angle:
        target['angle_batch'] = target['angle_batch'].cuda()
    if args.target_speed > 0:
        target['speed_batch'] = target['speed_batch'].cuda()
    if args.target_dist > 0:
        target['dist_batch'] = target['dist_batch'].cuda()
# per-step discount weights gamma**t over the prediction horizon
weight = (args.gamma ** np.arange(args.pred_step)).reshape((1, args.pred_step, 1))
weight = Variable(torch.from_numpy(weight).float().cuda()).repeat(batch_size, 1, 1)
with torch.no_grad():
    output = net(imgs, actions, hidden=hidden, cell=cell, training=False, action_var=action_var)
output['coll_prob'] = F.softmax(output['coll_prob'], -1)
loss = -torch.round(output['coll_prob'][:, :, 0]) * output['value'].view(-1, args.pred_step) + torch.round(output['coll_prob'][:, :, 1]) * 2
# loss = -(output['reward'].view(-1, args.pred_step, 1) * weight).sum(-1).sum(-1) - output['value'][:, -1, 0].contiguous().view(-1) * args.gamma ** args.pred_step
loss = (loss.view(-1, args.pred_step, 1) * weight).sum(-1).sum(-1)
return loss
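
# The weight tensor above applies a per-step discount gamma**t across the
# prediction horizon; a standalone illustration on CPU (gamma, pred_step and
# batch_size values are arbitrary, not taken from the snippet):
import numpy as np
import torch

gamma, pred_step, batch_size = 0.99, 5, 2
weight = (gamma ** np.arange(pred_step)).reshape((1, pred_step, 1))
weight = torch.from_numpy(weight).float().repeat(batch_size, 1, 1)
print(weight.shape)     # torch.Size([2, 5, 1])
print(weight[0, :, 0])  # tensor([1.0000, 0.9900, 0.9801, 0.9703, 0.9606])
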
def load_conv3d(state_dict, name_pt, sess, name_tf, bias=False, bn=True):
    # Transfer convolution params
    conv_name_tf = os.path.join(name_tf, 'conv_3d')
    conv_params = get_conv_params(sess, conv_name_tf, bias=bias)
    if bias:
        conv_weights, kernel_shape, in_channels, out_channels, strides, padding, conv_bias = conv_params
    else:
        conv_weights, kernel_shape, in_channels, out_channels, strides, padding = conv_params
    # TF conv3d kernels are (depth, height, width, in_c, out_c);
    # transpose to the PyTorch layout (out_c, in_c, depth, height, width)
    conv_weights_rs = np.transpose(conv_weights, (4, 3, 0, 1, 2))
    state_dict[name_pt + '.conv3d.weight'] = torch.from_numpy(conv_weights_rs)
    if bias:
        state_dict[name_pt + '.conv3d.bias'] = torch.from_numpy(conv_bias)
    # Transfer batch norm params
    if bn:
        conv_tf_name = os.path.join(name_tf, 'batch_norm')
        moving_mean, moving_var, beta = get_bn_params(sess, conv_tf_name)
        out_planes = conv_weights_rs.shape[0]
        state_dict[name_pt + '.batch3d.weight'] = torch.ones(out_planes)
        state_dict[name_pt + '.batch3d.bias'] = torch.from_numpy(beta)
        state_dict[name_pt + '.batch3d.running_mean'] = torch.from_numpy(moving_mean)
        state_dict[name_pt + '.batch3d.running_var'] = torch.from_numpy(moving_var)
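
# A standalone check of the weight-layout transpose used above: TensorFlow
# conv3d kernels are stored as (depth, height, width, in_c, out_c), PyTorch
# expects (out_c, in_c, depth, height, width). Shapes here are illustrative;
# the real kernels come from the checkpoint via get_conv_params.
import numpy as np
import torch

tf_weights = np.zeros((7, 7, 7, 3, 64), dtype=np.float32)  # TF layout
pt_weights = torch.from_numpy(np.transpose(tf_weights, (4, 3, 0, 1, 2)))
print(pt_weights.shape)  # torch.Size([64, 3, 7, 7, 7])
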
def next(self):
    if self.samples_remaining < self.batch_size:
        raise StopIteration
    # Init
    latent = np.random.randn(self.batch_size, self.n_latent)
    latent = torch.from_numpy(latent)
    # Sampling
    start_id = self.start_id
    end_id = self.start_id + self.batch_size
    batch_ids_ones = self.shuffle_ones[start_id:end_id]
    batch_ids_zeros = self.shuffle_zeros[start_id:end_id]
    input_G = self.img[batch_ids_zeros]
    input_D = self.img[batch_ids_ones]
    # Augmentation
    input_G = self.augment(input_G)
    input_D = self.augment(input_D)
    input_G = np.transpose(input_G, [0, 3, 1, 2])  # NHWC -> NCHW
    input_D = np.transpose(input_D, [0, 3, 1, 2])
    input_G = np.float32(input_G) / 255 * 2 - 1  # scale uint8 [0, 255] to [-1, 1]
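
# The /255*2-1 scaling above maps uint8 pixel values onto [-1, 1], the range a
# tanh-output generator typically expects; a quick standalone check:
import numpy as np

pixels = np.array([0, 127.5, 255])
print(np.float32(pixels) / 255 * 2 - 1)  # [-1.  0.  1.]
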
    Parameters
    ----------
    l2
        A float or np.array representing the per-source regularization
        strengths to use, by default 0

    Returns
    -------
    torch.Tensor
        L2 loss between learned mu and initial mu
    """
    if isinstance(l2, (int, float)):
        D = l2 * torch.eye(self.d)
    else:
        D = torch.diag(torch.from_numpy(l2)).type(torch.float32)
    D = D.to(self.config.device)
    # Note that mu is a matrix and this is the *Frobenius norm*
    return torch.norm(D @ (self.mu - self.mu_init)) ** 2
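
# A standalone illustration of the penalty computed above, with toy values for
# mu, mu_init and the per-source strengths (names and sizes are placeholders,
# not the original class's attributes):
import numpy as np
import torch

d = 3
mu = torch.rand(d, 2)            # toy "learned" parameters
mu_init = torch.zeros(d, 2)      # toy initial parameters
l2 = np.array([0.1, 0.5, 1.0])   # per-source regularization strengths

D = torch.diag(torch.from_numpy(l2)).type(torch.float32)
print(torch.norm(D @ (mu - mu_init)) ** 2)  # squared Frobenius norm
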
for i in range(batch_size):
    # select a random sentence from the list (joint distribution)
    [fs, signal] = scipy.io.wavfile.read(data_folder + wav_lst[snt_id_arr[i]])
    signal = signal.astype(float) / 32768  # normalize 16-bit PCM to [-1, 1)
    # accessing a random chunk
    snt_len = signal.shape[0]
    snt_beg = np.random.randint(snt_len - wlen - 1)  # randint(0, snt_len-2*wlen-1)
    snt_end = snt_beg + wlen
    sig_batch[i, :] = signal[snt_beg:snt_end] * rand_amp_arr[i]
    lab_batch[i] = lab_dict[wav_lst[snt_id_arr[i]]]
inp = torch.from_numpy(sig_batch).float().cuda().contiguous()  # current frame
lab = torch.from_numpy(lab_batch).float().cuda().contiguous()
return inp, lab
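
# The loop above assumes sig_batch, lab_batch, snt_id_arr and rand_amp_arr were
# allocated earlier in the function; a hypothetical setup (sizes and the random
# amplitude range are illustrative, not taken from the original script):
import numpy as np

batch_size, wlen = 128, 3200
wav_lst = ['spk1_utt1.wav', 'spk2_utt7.wav']             # placeholder file list
sig_batch = np.zeros([batch_size, wlen])
lab_batch = np.zeros(batch_size)
snt_id_arr = np.random.randint(len(wav_lst), size=batch_size)
rand_amp_arr = np.random.uniform(0.8, 1.2, batch_size)   # random per-chunk gain
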
def __getitem_dev(self, index):
    series, bounds = self.imgs[index]
    (zs, ze), (ys, ye), (xs, xe) = bounds
    target = load_label(self.targets, series)
    target = target[zs:ze, ys:ye, xs:xe]
    target = torch.from_numpy(target.astype(np.int64))
    image = load_image(self.images, series)
    image = image[0, zs:ze, ys:ye, xs:xe]
    # optionally mask out lung volume from image
    if self.masks is not None:
        mask = load_mask(self.masks, series)
        mask = mask[zs:ze, ys:ye, xs:xe]
        image -= MIN_BOUND
        image = np.multiply(mask, image)
        image += MIN_BOUND
    image = image.reshape((1, ze - zs, ye - ys, xe - xs))
    img = image.astype(np.float32)
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
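
# The subtract/multiply/add sequence above sets every voxel outside the binary
# mask to MIN_BOUND while leaving masked-in voxels untouched; a small standalone
# check (the MIN_BOUND value here is illustrative, not the dataset's constant):
import numpy as np

MIN_BOUND = -1000.0
image = np.array([[-1000.0, -400.0], [200.0, 50.0]])
mask = np.array([[0, 1], [1, 0]])  # 1 = keep voxel, 0 = background
print(np.multiply(mask, image - MIN_BOUND) + MIN_BOUND)
# [[-1000.  -400.]
#  [  200. -1000.]]
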
attribute = matcontent['att'].T
x = feature[trainval_loc]  # train_features
train_label = label[trainval_loc].astype(int)  # train_label
att = attribute[train_label]  # train attributes
x_test = feature[test_unseen_loc]  # test_feature
test_label = label[test_unseen_loc].astype(int)  # test_label
x_test_seen = feature[test_seen_loc]  # test_seen_feature
test_label_seen = label[test_seen_loc].astype(int)  # test_seen_label
test_id = np.unique(test_label)  # test_id
att_pro = attribute[test_id]  # test_attribute

# train set
train_features = torch.from_numpy(x)
print(train_features.shape)
train_label = torch.from_numpy(train_label).unsqueeze(1)
print(train_label.shape)

# attributes
all_attributes = np.array(attribute)
print(all_attributes.shape)
attributes = torch.from_numpy(attribute)

# test set
test_features = torch.from_numpy(x_test)
print(test_features.shape)
test_label = torch.from_numpy(test_label).unsqueeze(1)
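
# A common next step (an assumption, not shown in the snippet) is to pair the
# feature tensors with their labels for batched training; toy tensors stand in
# for train_features / train_label here:
import torch
from torch.utils.data import DataLoader, TensorDataset

train_features = torch.rand(100, 2048)
train_label = torch.randint(0, 10, (100,)).unsqueeze(1)
train_loader = DataLoader(TensorDataset(train_features, train_label),
                          batch_size=32, shuffle=True)
for batch_features, batch_labels in train_loader:
    print(batch_features.shape, batch_labels.shape)  # torch.Size([32, 2048]) torch.Size([32, 1])
    break
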
######################### TEST ###################################
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
gpu_id = args.gpu_id
with torch.cuda.device(gpu_id):
    model = model.cuda() if gpu_id >= 0 else model
    model.eval()
env_return = env.step(reward=False, action=True)
if env_return is not None:
    (states_S, states_G, actions_gt), require_init = env_return
    with torch.cuda.device(gpu_id):
        states_S = torch.from_numpy(states_S).float()
        states_G = torch.from_numpy(states_G).float()
        actions_gt = torch.from_numpy(actions_gt).float()
        if gpu_id >= 0:
            states_S = states_S.cuda()
            states_G = states_G.cuda()
            actions_gt = actions_gt.cuda()
while True:
    actions = model(Variable(states_S), Variable(states_G), require_init)

    ############################ PLOT ##########################################
    actions_np = np.squeeze(np.vstack([np.argmax(action.data.cpu().numpy(), axis=1) for action in actions]))
    actions_gt_np = np.squeeze(actions_gt.cpu().numpy())
    if require_init[-1] and len(action_gt_per_replay[-1]) > 0:
        action_pre_per_replay[-1] = np.ravel(np.hstack(action_pre_per_replay[-1]))
        action_gt_per_replay[-1] = np.ravel(np.hstack(action_gt_per_replay[-1]))
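
# Once a replay's predictions and ground-truth actions are flattened as above,
# a per-replay accuracy comparison is a natural follow-up (an assumption, not
# part of the snippet; the arrays below are toy stand-ins):
import numpy as np

action_pre = np.array([0, 2, 1, 1, 3, 0])
action_gt = np.array([0, 2, 2, 1, 3, 1])
print(f"replay accuracy: {np.mean(action_pre == action_gt):.2f}")  # 0.67
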
    if random.choice([True, False]):
        # graphs are isomorphic: reuse graph1 with a shuffled node order
        nodes = list(graph1.nodes())
        random.shuffle(nodes)
        am2 = nx.to_numpy_matrix(graph1, nodelist=nodes)
        classes[i] = 0
    else:
        # graphs are (probably not) isomorphic: draw an independent random graph
        graph2 = nx.gnm_random_graph(n, m)
        am2 = nx.to_numpy_matrix(graph2)
        classes[i] = 1
    data[i, 0, :, :] = torch.from_numpy(am1)
    data[i, 1, :, :] = torch.from_numpy(am2)
return data, classes
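
# A hypothetical driver for the sampling fragment above, reconstructed under
# stated assumptions: dataset size, n and m are illustrative, graph1/am1 are
# drawn fresh per sample, and nx.to_numpy_array replaces the legacy
# nx.to_numpy_matrix (removed in networkx 3.0).
import random

import networkx as nx
import torch

num_samples, n, m = 8, 10, 20
data = torch.zeros(num_samples, 2, n, n)
classes = torch.zeros(num_samples, dtype=torch.int64)
for i in range(num_samples):
    graph1 = nx.gnm_random_graph(n, m)
    am1 = nx.to_numpy_array(graph1)
    if random.choice([True, False]):
        nodes = list(graph1.nodes())
        random.shuffle(nodes)
        am2 = nx.to_numpy_array(graph1, nodelist=nodes)  # isomorphic copy
        classes[i] = 0
    else:
        am2 = nx.to_numpy_array(nx.gnm_random_graph(n, m))  # independent graph
        classes[i] = 1
    data[i, 0, :, :] = torch.from_numpy(am1)
    data[i, 1, :, :] = torch.from_numpy(am2)
print(data.shape, classes)
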