# ClassifierWithState.__init__: the wrapper that the LM objects below are built
# from; it stores the RNNLM predictor together with a loss function and a
# Reporter for logging.
def __init__(self, predictor,
             lossfun=F.cross_entropy,
             label_key=-1):
    if not isinstance(label_key, (int, str)):
        raise TypeError('label_key must be int or str, but is %s' %
                        type(label_key))
    super(ClassifierWithState, self).__init__()
    self.lossfun = lossfun
    self.y = None
    self.loss = None
    self.label_key = label_key
    self.predictor = predictor
    self.reporter = Reporter()
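
# A minimal sketch of how this constructor is used in the decoding code below.
# It assumes `lm_pytorch` is the same ESPnet PyTorch LM module aliased in those
# snippets; the vocabulary size, layer count and unit size are placeholder
# values, not taken from a real config.
n_vocab, n_layers, n_units = 5000, 2, 650           # hypothetical sizes
char_lm = lm_pytorch.ClassifierWithState(
    lm_pytorch.RNNLM(n_vocab, n_layers, n_units))   # predictor wrapped with the default cross-entropy lossfun
char_lm.eval()                                      # decoding-time LMs are switched to eval mode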

def recog(args):
    """Decode with the given args

    :param Namespace args: The program arguments
    """
    set_deterministic_pytorch(args)

    # read training config
    idim, odim, train_args = get_model_conf(args.model, args.model_conf)

    # load trained model parameters
    logging.info('reading model parameters from ' + args.model)
    model = E2E(idim, odim, train_args)
    torch_load(args.model, model)
    model.recog_args = args

    # read rnnlm (character-level LM used during beam search)
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.rnnlm, rnnlm)
        rnnlm.eval()
    else:
        rnnlm = None
    # read word-level rnnlm
    if args.word_rnnlm:
        rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
        word_dict = rnnlm_args.char_list_dict
        char_dict = {x: i for i, x in enumerate(train_args.char_list)}
        word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
            len(word_dict), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.word_rnnlm, word_rnnlm)
        word_rnnlm.eval()

        if rnnlm is not None:
            # combine the word-level LM with the character-level LM
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.MultiLevelLM(word_rnnlm.predictor,
                                           rnnlm.predictor, word_dict, char_dict))
        else:
            # use the word-level LM alone via character-to-word look-ahead
            rnnlm = lm_pytorch.ClassifierWithState(
                extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor,
                                              word_dict, char_dict))

    # gpu
    if args.ngpu == 1:
        gpu_id = range(args.ngpu)
        logging.info('gpu id: ' + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()

    # read json data
    js = load_json(args.recog_json)
    new_js = {}

    if args.batch_size == 0:
        new_js = single_beam_search(model, js, args, train_args, rnnlm)
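
# A minimal, hypothetical sketch of the arguments the recog() snippet above
# actually reads. The paths are placeholders, not real files, and only
# attributes referenced in the code above are listed.
from argparse import Namespace

recog_args = Namespace(
    model='exp/train/results/model.acc.best',   # hypothetical model path
    model_conf=None,
    rnnlm=None, rnnlm_conf=None,                 # optional character-level LM
    word_rnnlm=None, word_rnnlm_conf=None,       # optional word-level LM
    ngpu=0,
    recog_json='dump/test/data.json',            # hypothetical recognition json
    batch_size=0,
)
recog(recog_args)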

def train(args):
    """Train with the given args

    :param Namespace args: The program arguments
    """
    set_deterministic_pytorch(args)

    # check cuda availability
    if not torch.cuda.is_available():
        logging.warning('cuda is not available')

    idim, odim = get_dimensions(args.valid_json)

    # specify model architecture
    model = E2E(idim, odim, args)

    if args.rnnlm is not None:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.rnnlm, rnnlm)
        model.rnnlm = rnnlm

    # write model config
    write_conf(args, idim, odim)

    reporter = model.reporter

    # check the use of multi-gpu
    if args.ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
        logging.info('batch size is automatically increased (%d -> %d)' % (
            args.batch_size, args.batch_size * args.ngpu))
        args.batch_size *= args.ngpu

def trans(args):
    """Decode with the given args

    :param Namespace args: The program arguments
    """
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, MTInterface)
    model.recog_args = args

    # read rnnlm
    if args.rnnlm:
        rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        rnnlm = lm_pytorch.ClassifierWithState(
            lm_pytorch.RNNLM(
                len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
        torch_load(args.rnnlm, rnnlm)
        rnnlm.eval()
    else:
        rnnlm = None

    # gpu
    if args.ngpu == 1:
        gpu_id = list(range(args.ngpu))
        logging.info('gpu id: ' + str(gpu_id))
        model.cuda()
        if rnnlm:
            rnnlm.cuda()

# read json data to get the input and output dimensions
with open(args.valid_json, 'rb') as f:
    valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]['output'][1]['shape'][1])
odim = int(valid_json[utts[0]]['output'][0]['shape'][1])
logging.info('#input dims : ' + str(idim))
logging.info('#output dims: ' + str(odim))

# specify model architecture
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, MTInterface)

if args.rnnlm is not None:
    rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
    rnnlm = lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(
            len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
    torch_load(args.rnnlm, rnnlm)
    model.rnnlm = rnnlm

# write model config
if not os.path.exists(args.outdir):
    os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
    logging.info('writing a model config file to ' + model_conf)
    f.write(json.dumps((idim, odim, vars(args)),
                       indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
    logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
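
# A minimal sketch of reading back the model config written above. The path is
# a placeholder; the file holds an (idim, odim, vars(args)) triple serialized
# as JSON, so it can be unpacked directly.
import json

with open('exp/results/model.json', 'rb') as f:    # hypothetical outdir
    idim, odim, conf = json.loads(f.read().decode('utf_8'))
print(idim, odim, conf['batch_size'])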