# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Fragment: face-landmark model evaluation ---
# NOTE(review): the matching `if` for this `else` lies before this chunk, and
# source indentation appears to have been stripped; code lines are left
# byte-identical below.
else:
# Build the NDG validation dataset from the image root and landmark annotations.
dataset = NDG(args.val, args.v_land)
if dataset.have_landmarks:
log.info('Use alignment for the train data')
# Rescale to 48x48 and swap R/B channels (BGR -> RGB) on tensor conversion.
dataset.transform = t.Compose([Rescale((48, 48)), ToTensor(switch_rb=True)])
else:
# Without landmark annotations evaluation is impossible; abort the process.
exit()
# Fixed evaluation order: no shuffling, pinned memory for faster host->GPU copies.
val_loader = DataLoader(dataset, batch_size=args.val_batch_size, num_workers=4, shuffle=False, pin_memory=True)
model = models_landmarks['landnet']()
# A snapshot to evaluate is mandatory for this path.
assert args.snapshot is not None
if args.compr_config:
# Wrap the model with the NNCF compression algorithm before loading weights,
# so the checkpoint layout matches the compressed model.
config = Config.from_json(args.compr_config)
compression_algo = create_compression_algorithm(model, config)
model = compression_algo.model
log.info('Testing snapshot ' + args.snapshot + ' ...')
model = load_model_state(model, args.snapshot, args.device, eval_state=True)
model.eval()
cudnn.benchmark = True
# Single-device DataParallel: keeps the module interface uniform with training.
model = torch.nn.DataParallel(model, device_ids=[args.device], )
log.info('Face landmarks model:')
log.info(model)
# Report aggregate RMSE, per-landmark RMSE and the failure rate on the val set.
avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
log.info('Avg RMSE error: {}'.format(avg_err))
log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
log.info('Failure rate: {}'.format(failures_rate))
def main(argv):
    """Entry point: parse CLI arguments, assemble the run config, launch workers.

    Args:
        argv: Command-line argument list (excluding the program name),
            forwarded to ``parser.parse_args``.
    """
    parser = get_common_argument_parser()
    arguments = parser.parse_args(args=argv)

    # CLI arguments override values loaded from the JSON config file.
    config = Config.from_json(arguments.config)
    config.update_from_args(arguments, parser)
    if config.dist_url == "env://":
        # Distributed launch: pick up rendezvous parameters from the environment.
        config.update_from_env()

    if config.mode.lower() != 'test':
        # Training run: ensure the log directory exists.
        # exist_ok=True avoids the TOCTOU race of an exists() check
        # followed by a separate makedirs() call.
        os.makedirs(config.log_dir, exist_ok=True)
        config.log_dir = str(config.log_dir)
        configure_paths(config)
        print("Save directory:", config.log_dir)
    else:
        # Test mode: logs are disposable, use a scratch location.
        config.log_dir = "/tmp/"

    config.execution_mode = get_execution_mode(config)
    start_worker(main_worker, config)
# NOTE(review): this is a second, separate `def main` — the chunk appears to be
# a concatenation of several scripts. The function body also looks truncated
# (no start_worker call before the next unrelated fragment begins).
def main(argv):
# Parse CLI args; they override values from the JSON config.
parser = get_argument_parser()
args = parser.parse_args(args=argv)
config = Config.from_json(args.config)
config.update_from_args(args, parser)
# "env://" means distributed rendezvous parameters come from env variables.
if config.dist_url == "env://":
config.update_from_env()
configure_paths(config)
# Archive the source tree next to the logs for reproducibility.
source_root = Path(__file__).absolute().parents[2]  # nncf root
create_code_snapshot(source_root, osp.join(config.log_dir, "snapshot.tar.gz"))
if config.seed is not None:
# Seeding turns on CUDNN deterministic mode, which slows training down.
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
config.execution_mode = get_execution_mode(config)
# --- Fragment: training setup (logger, snapshot folder, model construction,
# optional NNCF compression wrapping and resume). `args`, `dataset`, `log` and
# the models_backbones registry are defined outside this chunk.
log_path = './logs/{:%Y_%m_%d_%H_%M}_{}'.format(datetime.datetime.now(), args.snap_prefix)
writer = SummaryWriter(log_path)
if not osp.exists(args.snap_folder):
os.mkdir(args.snap_folder)
# Backbone in classification mode (feature=False): embeddings + class head.
model = models_backbones[args.model](embedding_size=args.embed_size,
num_classes=dataset.get_num_classes(), feature=False)
# Keep a handle to the dropout setter before the model gets wrapped below.
set_dropout_fn = model.set_dropout_ratio
compression_algo = None
if args.snap_to_resume is not None:
if args.compr_config:
# Wrap with the compression algorithm first so the snapshot layout
# matches the compressed model when weights are loaded.
config = Config.from_json(args.compr_config)
compression_algo = create_compression_algorithm(model, config)
model = compression_algo.model
log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
model = load_model_state(model, args.snap_to_resume, args.devices[0], eval_state=False)
model = torch.nn.DataParallel(model, device_ids=args.devices)
else:
# Fresh training: no snapshot to load, just parallelize across devices.
model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])
model.cuda()
model.train()
cudnn.benchmark = True
if args.to_onnx is not None:
if args.compr_config:
# Export the compressed model to ONNX via the compression controller.
compression_algo.export_model(args.to_onnx)
# --- Fragment: face-recognition evaluation script. The construct this `else`
# belongs to begins before this chunk; indentation has been stripped.
else:
parser.add_argument('-pp', '--plugin_dir', type=str, default=None, help='Path to a plugin folder')
parser.add_argument('-c', '--compr_config', help='Path to a file with compression parameters', required=False)
args = parser.parse_args()
# Two evaluation engines: 'pt' = PyTorch snapshot, otherwise Inference Engine.
if args.engine == 'pt':
assert args.snap is not None, 'To evaluate PyTorch snapshot, please, specify --snap option.'
if args.compr_config:
# NNCF needs patched torch operators before the model is built.
patch_torch_operators()
with torch.cuda.device(args.devices[0]):
data, embeddings_fun = load_test_dataset(args)
# feature=True: backbone outputs embeddings instead of class scores.
model = models_backbones[args.model](embedding_size=args.embed_size, feature=True)
if args.compr_config:
# Wrap with the compression algorithm so the snapshot layout matches.
config = Config.from_json(args.compr_config)
compression_algo = create_compression_algorithm(model, config)
model = compression_algo.model
model = load_model_state(model, args.snap, args.devices[0])
evaluate(args, data, model, embeddings_fun, args.val_batch_size, args.dump_embeddings,
args.roc_fname, args.snap, True, args.show_failed)
# Report sparsity statistics when a sparsity algorithm was applied.
if args.compr_config and "sparsity_level" in compression_algo.statistics():
log.info("Sparsity level: {0:.2f}".format(
compression_algo.statistics()['sparsity_rate_for_sparsified_modules']))
else:
# Inference Engine path: run the converted IR model on CPU.
from utils.ie_tools import load_ie_model
assert args.fr_model is not None, 'To evaluate IE model, please, specify --fr_model option.'
fr_model = load_ie_model(args.fr_model, 'CPU', args.plugin_dir)
lm_model = None
def main(argv):
    """Parse CLI options, assemble the run config and start the worker(s).

    Args:
        argv: Command-line argument list (excluding the program name).
    """
    cli_parser = get_argument_parser()
    cli_args = cli_parser.parse_args(args=argv)

    # CLI options override values loaded from the JSON config.
    config = Config.from_json(cli_args.config)
    config.update_from_args(cli_args, cli_parser)
    configure_paths(config)

    # Archive the repository sources next to the logs for reproducibility.
    nncf_root = Path(__file__).absolute().parents[2]
    create_code_snapshot(nncf_root, osp.join(config.log_dir, "snapshot.tar.gz"))

    config.execution_mode = get_execution_mode(config)

    if config.dataset_dir is not None:
        # One dataset root serves as both the image and annotation location.
        # NOTE(review): attribute names mix `train_ano` and `test_anno` — one of
        # the two looks like a typo; confirm against the config consumers.
        config.train_imgs = config.dataset_dir
        config.train_ano = config.dataset_dir
        config.test_imgs = config.dataset_dir
        config.test_anno = config.dataset_dir

    start_worker(main_worker, config)