# Common imports assumed by the snippets below (module paths as in ESPnet):
import importlib
import math

import chainer
import numpy as np
import torch
from torch import nn

from espnet.nets.pytorch_backend.nets_utils import make_pad_mask, pad_list
from espnet.utils.io_utils import LoadInputsAndTargets


def _forward(self, xs, ilens, ys=None, olens=None, spembs=None, is_inference=False):
    # forward encoder
    x_masks = self._source_mask(ilens)
    hs, _ = self.encoder(xs, x_masks)  # (B, Tmax, adim)

    # integrate speaker embedding
    if self.spk_embed_dim is not None:
        hs = self._integrate_with_spk_embed(hs, spembs)

    # forward duration predictor and length regulator
    d_masks = make_pad_mask(ilens).to(xs.device)
    if is_inference:
        # at inference time, expand by the predicted durations
        d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, Tmax)
        hs = self.length_regulator(hs, d_outs, ilens)  # (B, Lmax, adim)
    else:
        # at training time, expand by teacher durations (no gradient)
        with torch.no_grad():
            ds = self.duration_calculator(xs, ilens, ys, olens, spembs)  # (B, Tmax)
        d_outs = self.duration_predictor(hs, d_masks)  # (B, Tmax)
        hs = self.length_regulator(hs, ds, ilens)  # (B, Lmax, adim)

    # forward decoder
    if olens is not None:
        # shorten output lengths when frames are grouped by the reduction factor
        if self.reduction_factor > 1:
            olens_in = olens.new([olen // self.reduction_factor for olen in olens])
        else:
            olens_in = olens
        h_masks = self._source_mask(olens_in)
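
# A minimal sketch of what the length regulator above does (not ESPnet's
# actual LengthRegulator): repeat each encoder frame hs[b, t] by its integer
# duration ds[b, t], expanding (B, Tmax, adim) to (B, Lmax, adim).
def length_regulate_sketch(hs, ds):
    # hs: (B, Tmax, adim) encoder outputs; ds: (B, Tmax) integer durations
    expanded = [h.repeat_interleave(d, dim=0) for h, d in zip(hs, ds)]
    # pad to the longest expanded sequence in the batch
    return torch.nn.utils.rnn.pad_sequence(expanded, batch_first=True)
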
def __init__(self, n_head, n_feat, dropout_rate):
    super(MultiHeadedAttention, self).__init__()
    assert n_feat % n_head == 0
    # We assume d_v always equals d_k
    self.d_k = n_feat // n_head
    self.h = n_head
    self.linear_q = nn.Linear(n_feat, n_feat)
    self.linear_k = nn.Linear(n_feat, n_feat)
    self.linear_v = nn.Linear(n_feat, n_feat)
    self.linear_out = nn.Linear(n_feat, n_feat)
    self.attn = None
    self.dropout = nn.Dropout(p=dropout_rate)
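
# A condensed sketch of the scaled dot-product forward pass that pairs with
# the __init__ above (ESPnet's real module splits this into forward_qkv and
# forward_attention, and additionally zeroes fully-masked rows after softmax):
def forward(self, query, key, value, mask=None):
    n_batch = query.size(0)
    # project and reshape to (batch, head, time, d_k)
    q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
    k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
    v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
    if mask is not None:
        # mask: (batch, 1, time2) or (batch, time1, time2); 0 marks padding
        scores = scores.masked_fill(mask.unsqueeze(1) == 0, float("-inf"))
    self.attn = torch.softmax(scores, dim=-1)
    x = torch.matmul(self.dropout(self.attn), v)  # (batch, head, time1, d_k)
    x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
    return self.linear_out(x)  # (batch, time1, n_feat)
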
# Fragment from a test of LoadInputsAndTargets: the first four lines run
# inside a loop that writes each synthetic feature matrix x to a feature
# writer f and records the expected inputs and targets.
uttid = 'uttid{}'.format(i)
f[uttid] = x
desire_xs.append(x)
desire_ys.append(np.array([1, 2, 3, 4]))

# build a batch in data.json style from the generated scp file
batch = []
with open(scp, 'r') as f:
    for line in f:
        uttid, path = line.strip().split()
        batch.append((uttid,
                      {'input': [{'feat': path,
                                  'name': 'input1'}],
                       'output': [{'tokenid': '1 2 3 4',
                                   'name': 'target1'}]}))

# load the batch and check that features and token ids round-trip unchanged
load_inputs_and_targets = LoadInputsAndTargets()
xs, ys = load_inputs_and_targets(batch)
for x, xd in zip(xs, desire_xs):
    np.testing.assert_array_equal(x, xd)
for y, yd in zip(ys, desire_ys):
    np.testing.assert_array_equal(y, yd)
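
# The elided setup above presumably resembles this sketch (an assumption:
# the original writer f and its loop are not shown). kaldiio's WriteHelper
# writes an ark/scp pair; the documented call form f(key, array) is used,
# while the fragment above writes via item assignment.
import kaldiio

def write_dummy_feats(ark, scp, n_utts=2):
    desire_xs, desire_ys = [], []
    with kaldiio.WriteHelper('ark,scp:{},{}'.format(ark, scp)) as f:
        for i in range(n_utts):
            x = np.random.randn(10, 5).astype(np.float32)
            f('uttid{}'.format(i), x)
            desire_xs.append(x)
            desire_ys.append(np.array([1, 2, 3, 4]))
    return desire_xs, desire_ys
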
def convert_batch(batch, backend="pytorch", is_cuda=False, idim=5, odim=5):
ilens = np.array([x[1]['output'][1]['shape'][0] for x in batch])
olens = np.array([x[1]['output'][0]['shape'][0] for x in batch])
xs = [np.random.randint(0, idim, ilen).astype(np.int32) for ilen in ilens]
ys = [np.random.randint(0, odim, olen).astype(np.int32) for olen in olens]
is_pytorch = backend == "pytorch"
if is_pytorch:
xs = pad_list([torch.from_numpy(x).long() for x in xs], idim)
ilens = torch.from_numpy(ilens).long()
ys = pad_list([torch.from_numpy(y).long() for y in ys], -1)
if is_cuda:
xs = xs.cuda()
ilens = ilens.cuda()
ys = ys.cuda()
else:
if is_cuda:
xp = importlib.import_module('cupy')
xs = [chainer.Variable(xp.array(x)) for x in xs]
ys = [chainer.Variable(xp.array(y)) for y in ys]
ilens = xp.array(ilens)
else:
xs = [chainer.Variable(x) for x in xs]
ys = [chainer.Variable(y) for y in ys]
return xs, ilens, ys
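
# Hypothetical call: each batch entry mimics an ESPnet data.json item with
# two 'output' entries (utterance ids and shapes are made up for illustration).
dummy_batch = [
    ('utt0', {'output': [{'shape': [4, 5]}, {'shape': [6, 5]}]}),
    ('utt1', {'output': [{'shape': [3, 5]}, {'shape': [5, 5]}]}),
]
xs, ilens, ys = convert_batch(dummy_batch, backend="pytorch")
# xs: (2, 6) LongTensor padded with idim; ys: (2, 4) LongTensor padded with -1
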
def prepare_inputs(idim, odim, ilens, olens, spk_embed_dim=None,
                   device=torch.device('cpu')):
    xs = [np.random.randint(0, idim, l) for l in ilens]
    ys = [np.random.randn(l, odim) for l in olens]
    ilens = torch.LongTensor(ilens).to(device)
    olens = torch.LongTensor(olens).to(device)
    xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
    ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
    # stop-token targets: 1 from the last valid frame onward, 0 elsewhere
    labels = ys.new_zeros(ys.size(0), ys.size(1))
    for i, l in enumerate(olens):
        labels[i, l - 1:] = 1
    batch = {
        "xs": xs,
        "ilens": ilens,
        "ys": ys,
        "labels": labels,
        "olens": olens,
    }
    if spk_embed_dim is not None:
        batch["spembs"] = torch.FloatTensor(
            np.random.randn(len(ilens), spk_embed_dim)).to(device)
    return batch
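
# Example call (a sketch): a toy TTS batch of two utterances.
batch = prepare_inputs(idim=10, odim=80, ilens=[7, 5], olens=[20, 15])
# batch["xs"]: (2, 7) token ids; batch["ys"]: (2, 20, 80) padded features;
# batch["labels"]: (2, 20) with ones from each utterance's last frame onward
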
def prepare_inputs(mode, num_encs=2, is_cuda=False):
    # one list of input lengths per encoder stream
    ilens_list = [[20, 15] for _ in range(num_encs)]
    olens = [4, 3]
    np.random.seed(1)
    assert len(ilens_list[0]) == len(ilens_list[1]) == len(olens)
    xs_list = [[np.random.randn(ilen, 40).astype(np.float32) for ilen in ilens]
               for ilens in ilens_list]
    ys = [np.random.randint(1, 5, olen).astype(np.int32) for olen in olens]
    ilens_list = [np.array([x.shape[0] for x in xs], dtype=np.int32) for xs in xs_list]
    if mode == "pytorch":
        ilens_list = [torch.from_numpy(ilens).long() for ilens in ilens_list]
        xs_pad_list = [pad_list([torch.from_numpy(x).float() for x in xs], 0)
                       for xs in xs_list]
        ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], -1)
        if is_cuda:
            xs_pad_list = [xs_pad.cuda() for xs_pad in xs_pad_list]
            ilens_list = [ilens.cuda() for ilens in ilens_list]
            ys_pad = ys_pad.cuda()
        return xs_pad_list, ilens_list, ys_pad
    else:
        raise ValueError("Invalid mode")
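
# Example call (a sketch): two encoder streams, batch of two utterances.
xs_pad_list, ilens_list, ys_pad = prepare_inputs("pytorch", num_encs=2)
# xs_pad_list: two (2, 20, 40) float tensors; ys_pad: (2, 4) padded with -1
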
def convert_batch(batch, backend="pytorch", is_cuda=False, idim=40, odim=5, num_inputs=2):
ilens_list = [np.array([x[1]['input'][idx]['shape'][0] for x in batch]) for idx in range(num_inputs)]
olens = np.array([x[1]['output'][0]['shape'][0] for x in batch])
xs_list = [[np.random.randn(ilen, idim).astype(np.float32) for ilen in ilens_list[idx]] for idx in
range(num_inputs)]
ys = [np.random.randint(1, odim, olen).astype(np.int32) for olen in olens]
is_pytorch = backend == "pytorch"
if is_pytorch:
xs_list = [pad_list([torch.from_numpy(x).float() for x in xs_list[idx]], 0) for idx in range(num_inputs)]
ilens_list = [torch.from_numpy(ilens_list[idx]).long() for idx in range(num_inputs)]
ys = pad_list([torch.from_numpy(y).long() for y in ys], -1)
if is_cuda:
xs_list = [xs_list[idx].cuda() for idx in range(num_inputs)]
ilens_list = [ilens_list[idx].cuda() for idx in range(num_inputs)]
ys = ys.cuda()
return xs_list, ilens_list, ys
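
# Hypothetical call: batch entries follow the multi-input data.json layout
# (utterance ids and shapes are made up for illustration).
dummy_batch = [
    ('utt0', {'input': [{'shape': [13, 40]}, {'shape': [11, 40]}],
              'output': [{'shape': [4, 5]}]}),
    ('utt1', {'input': [{'shape': [9, 40]}, {'shape': [8, 40]}],
              'output': [{'shape': [3, 5]}]}),
]
xs_list, ilens_list, ys = convert_batch(dummy_batch, num_inputs=2)
# xs_list: [(2, 13, 40), (2, 11, 40)] float tensors; ys: (2, 4) padded with -1
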