# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this fragment had its indentation stripped; the nesting below
# is reconstructed from the statement semantics — confirm against the
# original training script.
exe.run(startup_prog)

if pretrained_model:
    # Only restore variables that actually exist in the checkpoint directory.
    def if_exist(var):
        return os.path.exists(os.path.join(pretrained_model, var.name))

    fluid.io.load_vars(exe, pretrained_model, main_program=train_prog,
                       predicate=if_exist)

if parallel:
    # The loss must be persistable so it can be fetched from the
    # ParallelExecutor after each step.
    loss.persistable = True
    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True
    train_exe = fluid.ParallelExecutor(main_program=train_prog,
                                       use_cuda=use_gpu,
                                       loss_name=loss.name,
                                       build_strategy=build_strategy)

test_reader = reader.test(data_args, val_file_list, batch_size)
test_py_reader.decorate_paddle_reader(test_reader)
def save_model(postfix, main_prog):
    """Persist all persistable variables of *main_prog* under
    ``model_save_dir/postfix``.

    Any existing directory at that path is removed first so every
    checkpoint is a clean snapshot rather than a merge with stale files.
    Relies on module-level ``model_save_dir`` and executor ``exe``.
    """
    model_path = os.path.join(model_save_dir, postfix)
    if os.path.isdir(model_path):
        shutil.rmtree(model_path)
    print('save models to %s' % (model_path))
    fluid.io.save_persistables(exe, model_path, main_program=main_prog)
# Best validation mAP seen so far; used to decide which checkpoint to keep.
best_map = 0.
test_map = None
# NOTE(review): test() is truncated in this fragment — it resets the mAP
# metric and starts the test py_reader, but the evaluation loop that should
# follow is missing here, and the original indentation has been stripped.
def test(epoc_id, best_map):
_, accum_map = map_eval.get_map_var()
map_eval.reset(exe)
every_epoc_map=[] # for CE
test_py_reader.start()
# declare vars
image = fluid.layers.data(name=IMG_NAME, shape=image_shape, dtype='float32')
logits = model.net(input=image, class_dim=class_dim)
# clone program and graph for inference
infer_program = fluid.default_main_program().clone(for_test=True)
# Gradients w.r.t. the input image are needed for adversarial-example
# crafting, so the input must stay in the backward graph.
image.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
BATCH_SIZE = 1
test_reader = paddle.batch(
reader.test(TEST_LIST, DATA_PATH), batch_size=BATCH_SIZE)
# setup run environment
enable_gpu = use_cuda and args.use_gpu
place = fluid.CUDAPlace(0) if enable_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# advbox demo
# Wrap the program so attack methods can query logits, loss and gradients.
# (0, 1) is presumably the valid input-value range and channel_axis=3 the
# image channel dimension — confirm against the advbox PaddleModel API.
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name,
(0, 1),
channel_axis=3)
# Adversarial method: CW
def infer(infer_program, image, logits, place, exe):
    """Run *infer_program* over the test set one sample at a time and print
    the top-1 score, predicted class and filename for each image.

    Args:
        infer_program: cloned inference Program to execute.
        image: input image variable used to build the feeder.
        logits: network output variable whose values are fetched.
        place: device (CPU/GPU) the feeder converts data for.
        exe: Executor bound to *place*.
    """
    print("--------------------inference-------------------")
    test_batch_size = 1
    test_reader = paddle.batch(reader.test(TEST_LIST, DATA_PATH),
                               batch_size=test_batch_size)
    feeder = fluid.DataFeeder(place=place, feed_list=[image])
    fetch_list = [logits.name]
    # NOTE(review): label_res is never used in the visible code — presumably
    # filled and returned in a truncated remainder of this function; confirm.
    label_res = {}
    for batch_id, data in enumerate(test_reader()):
        data_img = data[0][0]
        filename = data[0][1]
        result = exe.run(infer_program,
                         fetch_list=fetch_list,
                         feed=feeder.feed([data_img]))
        result = result[0][0]
        pred_label = np.argmax(result)
        print("Test-{0}-score: {1}, class {2}, name={3}"
              .format(batch_id, result[pred_label], pred_label, filename))
# NOTE(review): the line below is the tail of a call whose opening was lost
# when this fragment was extracted — the enclosing expression is incomplete.
num_classes, image_shape)
# Apply non-maximum suppression to the raw SSD predictions.
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=args.nms_threshold)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
loss = fluid.layers.reduce_sum(loss)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# NOTE(review): indentation reconstructed — the original paste was flattened.
# yapf: disable
if model_dir:
    # Only restore variables that actually exist in the checkpoint directory.
    def if_exist(var):
        return os.path.exists(os.path.join(model_dir, var.name))

    fluid.io.load_vars(exe, model_dir, predicate=if_exist)
# yapf: enable
test_reader = reader.test(data_args, test_list, batch_size)
feeder = fluid.DataFeeder(
    place=place,
    feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
# NOTE(review): get_dt_res() is truncated in this fragment — the per-image
# loop is cut off mid-body and dts_res is never returned here; the original
# indentation has also been stripped.
def get_dt_res(nmsed_out_v, data):
dts_res = []
# First LoD level maps each image in the batch to its span of detections.
lod = nmsed_out_v[0].lod()[0]
nmsed_out_v = np.array(nmsed_out_v[0])
# The final batch may hold fewer samples than batch_size.
real_batch_size = min(batch_size, len(data))
assert (len(lod) == real_batch_size + 1), \
"Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
k = 0
for i in range(real_batch_size):
dt_num_this_img = lod[i + 1] - lod[i]
image_id = int(data[i][4][0])
image_width = int(data[i][4][1])
# Rewrite the default program to reuse variable memory where possible.
fluid.memory_optimize(fluid.default_main_program())
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
def is_parameter(var):
    """Predicate for selecting trainable parameters from a program's vars.

    The original wrapped the check in a redundant ``if`` and therefore
    returned ``None`` (not ``False``) for non-parameters; returning the
    boolean directly is equivalent for every truthiness-based caller
    (e.g. ``filter``) and always yields a bool.
    """
    return isinstance(var, Parameter)
# NOTE(review): indentation reconstructed — the original paste was flattened.
if test_model is not None:
    # Restore only trainable parameters from the test checkpoint.
    params = filter(is_parameter, inference_program.list_vars())
    fluid.io.load_vars(exe, test_model, vars=params)
# reader
# NOTE(review): '//' keeps the per-card batch size an int — under Python 3
# the original '/' produced a float, which is not a valid batch size.
test_reader = paddle.batch(reader.test(seg_num), batch_size=batch_size // 16)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
# test
cnt = 0
pass_id = 0
test_info = [[], [], []]
# NOTE(review): indentation reconstructed; the loop body is truncated in
# this fragment (accumulation into test_info is missing here).
for batch_id, data in enumerate(test_reader()):
    t1 = time.time()
    loss, acc1, acc5 = exe.run(inference_program,
                               fetch_list=fetch_list,
                               feed=feeder.feed(data))
    t2 = time.time()
    period = t2 - t1  # wall-clock seconds spent on this batch
    loss = np.mean(loss)
# NOTE(review): indentation reconstructed — the original paste was flattened.
fluid.io.load_persistables(exe, pretrained_model)
if save_inference:
    # Export a deployable inference model (graph + weights) and stop.
    fluid.io.save_inference_model(
        dirname=model_name,
        feeded_var_names=['image'],
        main_program=test_program,
        target_vars=out,
        executor=exe,
        model_filename='model',
        params_filename='params')
    print("model: ",model_name," is already saved")
    exit(0)
# NOTE(review): indentation reconstructed — the original paste was flattened.
test_batch_size = 1
img_size = image_shape[1]
test_reader = paddle.batch(reader.test(args, img_size),
                           batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image])

TOPK = 1
for batch_id, data in enumerate(test_reader()):
    result = exe.run(test_program,
                     fetch_list=fetch_list,
                     feed=feeder.feed(data))
    result = result[0][0]
    # Indices of the TOPK highest scores, best first.
    pred_label = np.argsort(result)[::-1][:TOPK]
    print("Test-{0}-score: {1}, class {2}"
          .format(batch_id, result[pred_label], pred_label))
    sys.stdout.flush()
# NOTE(review): indentation reconstructed — the original paste was flattened.
if pretrained_model:
    # Only restore variables that actually exist in the checkpoint directory.
    def if_exist(var):
        return os.path.exists(os.path.join(pretrained_model, var.name))

    fluid.io.load_vars(
        exe, pretrained_model, main_program=train_prog, predicate=if_exist)

devicenum = get_gpu_num()
assert (args.train_batch_size % devicenum) == 0
# NOTE(review): '//' keeps the per-device batch size an int under Python 3;
# the assert above guarantees the division is exact, so behavior for valid
# inputs is unchanged.
train_batch_size = args.train_batch_size // devicenum
test_batch_size = args.test_batch_size

train_reader = paddle.batch(reader.train(args), batch_size=train_batch_size, drop_last=True)
test_reader = paddle.batch(reader.test(args), batch_size=test_batch_size, drop_last=False)
test_feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
train_py_reader.decorate_paddle_reader(train_reader)

train_exe = fluid.ParallelExecutor(
    main_program=train_prog,
    use_cuda=args.use_gpu,
    loss_name=train_cost.name)

totalruntime = 0
train_py_reader.start()
iter_no = 0
train_info = [0, 0, 0]
# NOTE(review): the body of this training loop is truncated in the fragment.
while iter_no <= args.total_iter_num:
    t1 = time.time()
    lr, loss, feas, label = train_exe.run(fetch_list=train_fetch_list)
    t2 = time.time()
# NOTE(review): this fragment starts mid-function — the line below is the
# body of an if_exist(var) predicate whose 'def' header was lost when the
# paste was extracted; the original indentation is also gone.
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, main_program=train_prog,
predicate=if_exist)
if parallel:
train_exe = fluid.ParallelExecutor(main_program=train_prog,
use_cuda=use_gpu, loss_name=loss.name)
# Multi-process training data pipeline with a prefetch queue bounded at 24.
train_reader = reader.train(data_args,
train_file_list,
batch_size_per_device,
shuffle=is_shuffle,
use_multiprocessing=True,
num_workers=num_workers,
max_queue=24,
enable_ce=enable_ce)
test_reader = reader.test(data_args, val_file_list, batch_size)
# Attach the Python generators that feed the py_reader queues.
train_py_reader.decorate_paddle_reader(train_reader)
test_py_reader.decorate_paddle_reader(test_reader)
def save_model(postfix, main_prog):
    """Persist all persistable variables of *main_prog* under
    ``model_save_dir/postfix``.

    Any existing directory at that path is removed first so every
    checkpoint is a clean snapshot rather than a merge with stale files.
    Relies on module-level ``model_save_dir`` and executor ``exe``.
    """
    model_path = os.path.join(model_save_dir, postfix)
    if os.path.isdir(model_path):
        shutil.rmtree(model_path)
    print('save models to %s' % (model_path))
    fluid.io.save_persistables(exe, model_path, main_program=main_prog)
# Best validation mAP seen so far; used for checkpoint selection.
best_map = 0.
# NOTE(review): test() is truncated here — the evaluation loop that should
# follow test_py_reader.start() is missing from this fragment, and the
# original indentation has been stripped.
def test(epoc_id, best_map):
_, accum_map = map_eval.get_map_var()
map_eval.reset(exe)
every_epoc_map=[]
test_py_reader.start()