def __getitem__(self, index):
    # Return a dummy sample: a random 1x28x28 float image tensor and a random class label in [0, 10).
    return (torch.Tensor(np.random.rand(1, 28, 28)), np.random.randint(10))
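# The method above is typically part of a torch.utils.data.Dataset subclass. A minimal usage
# sketch, assuming a hypothetical wrapper class RandomDataset with a matching __len__:
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

class RandomDataset(Dataset):
    def __len__(self):
        return 1000

    def __getitem__(self, index):
        return (torch.Tensor(np.random.rand(1, 28, 28)), np.random.randint(10))

loader = DataLoader(RandomDataset(), batch_size=32, shuffle=True)
images, labels = next(iter(loader))   # images: (32, 1, 28, 28), labels: (32,)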
def __init__(self, input_size, q2c: bool = True, query_dots: bool = True):
    super(BiAttn, self).__init__()
    self.input_size = input_size
    self.q2c = q2c
    self.query_dots = query_dots
    # Learnable projection vectors for the context term, the query term, and their
    # elementwise-product term in the bi-attention similarity function, plus a scalar bias.
    self.w_x = nn.Parameter(torch.Tensor(input_size, 1))
    self.w_y = nn.Parameter(torch.Tensor(input_size, 1))
    self.w_dot = nn.Parameter(torch.Tensor(input_size, 1))
    self.bias = nn.Parameter(torch.Tensor(1))
    self.reset_parameters()
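# reset_parameters() is called above but not defined in this snippet. A minimal sketch, assuming a
# simple uniform initialisation bounded by 1/sqrt(input_size); the actual BiAttn implementation may
# use a different scheme.
import math

def reset_parameters(self):
    bound = 1.0 / math.sqrt(self.input_size)
    for param in (self.w_x, self.w_y, self.w_dot, self.bias):
        nn.init.uniform_(param, -bound, bound)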
def _make_simple_list(cls, data):
    # Classmethod-style helper: recursively convert a tensor, a list of tensors, or a nested
    # list of tensors into plain Python lists (requires `from typing import List`).
    if isinstance(data, torch.Tensor):
        return data.tolist()
    elif isinstance(data, List) and all(
        isinstance(elem, torch.Tensor) for elem in data
    ):
        return [elem.tolist() for elem in data]
    elif (
        isinstance(data, List)
        and all(isinstance(elem, List) for elem in data)
        and all(
            isinstance(elem, torch.Tensor) for elemlist in data for elem in elemlist
        )
    ):
        return [[elem.tolist() for elem in elemlist] for elemlist in data]
    elif isinstance(data, List):
        return data
    elif isinstance(data, tuple):
        return data[0].tolist()
    else:
        raise NotImplementedError()
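# A hedged usage sketch of the helper above, assuming it is exposed as a @classmethod on a
# hypothetical owning class MyConverter and fed integer tensors:
import torch

scores = [torch.tensor([1, 2]), torch.tensor([3, 4])]
print(MyConverter._make_simple_list(scores))                # -> [[1, 2], [3, 4]]
print(MyConverter._make_simple_list(torch.tensor([5, 6])))  # -> [5, 6]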
model = CharacterLevelCNN(args, number_of_classes)
if torch.cuda.is_available():
    model.cuda()

if not bool(args.focal_loss):
    if bool(args.class_weights):
        # Weight each class by (majority count / class count) so rarer classes contribute more to the loss.
        class_counts = dict(Counter(train_labels))
        m = max(class_counts.values())
        for c in class_counts:
            class_counts[c] = m / class_counts[c]
        weights = []
        for k in sorted(class_counts.keys()):
            weights.append(class_counts[k])
        weights = torch.Tensor(weights)
        if torch.cuda.is_available():
            weights = weights.cuda()
        print(f'passing weights to CrossEntropyLoss : {weights}')
        criterion = nn.CrossEntropyLoss(weight=weights)
    else:
        criterion = nn.CrossEntropyLoss()
else:
    if args.alpha is None:
        criterion = FocalLoss(gamma=args.gamma, alpha=None)
    else:
        criterion = FocalLoss(gamma=args.gamma,
                              alpha=[args.alpha] * number_of_classes)

if args.optimizer == 'sgd':
    if args.scheduler == 'clr':
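# FocalLoss above is project-specific and not defined in this snippet. A minimal sketch of a
# multi-class focal loss with the same constructor arguments (gamma, alpha), assuming alpha is
# either None or a per-class weight list; the project's own implementation may differ.
import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0, alpha=None):
        super().__init__()
        self.gamma = gamma
        self.alpha = None if alpha is None else torch.tensor(alpha, dtype=torch.float)

    def forward(self, logits, targets):
        log_probs = F.log_softmax(logits, dim=-1)
        # Log-probability and probability of the true class for each sample.
        target_log_probs = log_probs.gather(1, targets.unsqueeze(1)).squeeze(1)
        target_probs = target_log_probs.exp()
        loss = -((1.0 - target_probs) ** self.gamma) * target_log_probs
        if self.alpha is not None:
            loss = loss * self.alpha.to(logits.device)[targets]
        return loss.mean()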
start_time = time.time()

for episode in range(1, args.episode_num + 1):
    total_reward = 0.

    env_info = env.reset(train_mode=True)[default_brain]
    obs = env_info.vector_observations[0]
    done = False

    # Keep interacting until agent reaches a terminal state.
    while not done:
        step_count += 1

        # Collect experience (s, a, r, s') using some policy
        action = select_action(torch.Tensor(obs).to(device), act_num, qf)
        env_info = env.step(int(action))[default_brain]
        next_obs = env_info.vector_observations[0]
        reward = env_info.rewards[0]
        done = env_info.local_done[0]

        # Add experience to replay buffer
        replay_buffer.add(obs, action, reward, next_obs, done)

        # Start training once the number of stored experiences exceeds the batch size
        if step_count > args.batch_size:
            batch = replay_buffer.sample(args.batch_size)
            train_model(qf, qf_target, qf_optimizer, batch, step_count)

        total_reward += reward
        # Advance to the next observation before selecting the next action.
        obs = next_obs
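# select_action above is defined elsewhere in the project. A minimal epsilon-greedy sketch with the
# same call signature (observation tensor, number of actions, Q-network); the fixed epsilon used
# here is an assumption, not the project's actual exploration schedule.
import random
import torch

def select_action(obs, act_num, qf, epsilon=0.1):
    if random.random() < epsilon:
        return random.randrange(act_num)            # explore: uniformly random action
    with torch.no_grad():
        q_values = qf(obs.unsqueeze(0))             # exploit: greedy action from the Q-network
    return int(q_values.argmax(dim=-1).item())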
if name:
    reuse = cg.get_variable_scope().reuse
    name = _get_name_with_scope(name)

    if reuse:
        # Return the variable if already allocated.
        if name in tensor_by_name:
            glog.debug("Reuse variable {}".format(name))
            return tensor_by_name[name]
    elif name in tensor_by_name:
        name = _append_num(name)

# Allocate the variable.
if not callable(initializer):
    shape = None
    if type(initializer) is th.Tensor:
        t = initializer
        t.requires_grad = True
    else:
        t = Tensor(initializer, requires_grad=trainable)
else:
    t = Tensor(initializer(shape), requires_grad=trainable)

if name:
    cache_tensor(t, name)

t._is_variable = True
glog.debug("Created new variable {}".format(name))

return t
        assert all([isinstance(gt, str) for gt in gt_attrib]) or \
            all([isinstance(gt, (np.integer, int, bool)) for gt in gt_attrib]), \
            "multi-label groundtruth array should contain class names or class-wise binary flags"
        if all([isinstance(gt, str) for gt in gt_attrib]):
            for gt in gt_attrib:
                assert gt in self.class_names, f"label '{gt}' not found in task class names"
                sample_idxs[gt].append(sample_idx)
        else:
            assert len(gt_attrib) == len(self.class_names), \
                "unexpected multi-label one-hot vector shape\n" \
                f"(should be {len(self.class_names)}-element long, was {len(gt_attrib)})"
            for class_name, class_flag in zip(self.class_names, gt_attrib):
                if class_flag:
                    sample_idxs[class_name].append(sample_idx)
    else:
        assert isinstance(gt_attrib, (str, int, np.ndarray, torch.Tensor)) and \
            thelper.utils.is_scalar(gt_attrib), \
            "unexpected classification sample gt type (need scalar, string or int)"
        if isinstance(gt_attrib, str):
            assert gt_attrib in self.class_names, f"label '{gt_attrib}' not found in task class names"
        else:
            if isinstance(gt_attrib, torch.Tensor):
                gt_attrib = gt_attrib.item()
            # dataset must already be using indices, we will forgive this...
            # (this is pretty much always the case for torchvision datasets)
            assert 0 <= gt_attrib < len(self.class_names), "class name given as out-of-range index"
            gt_attrib = self.class_names[gt_attrib]
        sample_idxs[gt_attrib].append(sample_idx)
# remember: when using multi-label mode, the sample indices might be duplicated across class groups
return sample_idxs
device : device
    computation device (cpu/cuda).

Returns
-------
tensor
    coefficients C00, C01, C10, C11.
"""
# Convert the indices tensor to a numpy array.
ind = indices.detach().numpy()
C11 = C00 = -(1 - t[ind]) * t[ind]
C01 = t[ind] * t[ind]
C10 = (1 - t[ind]) * (1 - t[ind])
# Reshape each 1-D coefficient array to shape (N, 1, 1, 1) and move it to the target device.
C00 = torch.Tensor(C00)[None, None, None, :].permute(3, 0, 1, 2).to(device)
C01 = torch.Tensor(C01)[None, None, None, :].permute(3, 0, 1, 2).to(device)
C10 = torch.Tensor(C10)[None, None, None, :].permute(3, 0, 1, 2).to(device)
C11 = torch.Tensor(C11)[None, None, None, :].permute(3, 0, 1, 2).to(device)
return C00, C01, C10, C11
man_mask = get_man_mask(S)
man_loc = get_man_xy_np_coordinate(man_mask)
subgoal_index, subgoal_mask = sample_from_random_subgoal_set(random_subgoals_set)  # random g
subgoal_frame = create_mask_frame(base_img, subgoal_mask)
g = single_channel_frame_to_1_84_84(subgoal_frame)
steps = 0
R = 0
episode_rewards = []
if (t > LEARNING_STARTS and t % LEARNING_FREQ == 0):
    # Sample a batch of controller transitions (state, subgoal, action, reward, next state, done flag).
    states, subgoals, actions, rewards, state_primes, intrinsic_dones = \
        experience_memory.sample_controller(batch_size=batch_size)
    # Stack the subgoal mask onto the state frames along the channel axis for both s and s'.
    x = np.concatenate((states, subgoals), axis=1)
    x = torch.Tensor(x)
    xp = np.concatenate((state_primes, subgoals), axis=1)
    xp = torch.Tensor(xp)
    actions = torch.Tensor(actions).type(dlongtype)
    rewards = torch.Tensor(rewards).type(dtype)
    intrinsic_dones = torch.Tensor(intrinsic_dones).type(dtype)
    if torch.cuda.is_available():
        with torch.cuda.device(0):
            # Move the batch to the GPU and rescale pixel values from [0, 255] to [0, 1].
            x = x.to(device0).type(dtype) / 255
            xp = xp.to(device0).type(dtype) / 255
            actions = actions.to(device0)
            rewards = rewards.to(device0)
            intrinsic_dones = intrinsic_dones.to(device0)
            # if torch.cuda.device_count() > 0:
            #     Qt.to(device0)
            #     Qt_t = Qt_t.to(device0)
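# The block above only prepares the controller batch; the Q-learning update that typically follows
# is not shown in this snippet. A minimal one-step TD update sketch, assuming Qt is the controller
# network and Qt_t its target network (names hinted at by the commented-out lines), plus a
# hypothetical optimizer and an assumed discount factor GAMMA:
import torch
import torch.nn.functional as F

GAMMA = 0.99  # assumed discount factor

q_values = Qt(x).gather(1, actions.long().unsqueeze(1)).squeeze(1)
with torch.no_grad():
    next_q = Qt_t(xp).max(dim=1)[0]
    td_target = rewards + GAMMA * (1.0 - intrinsic_dones) * next_q
loss = F.smooth_l1_loss(q_values, td_target)
optimizer.zero_grad()
loss.backward()
optimizer.step()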