Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the tail of a tf.train.Example feature dict whose
# opening (tf.train.Example(features=tf.train.Features(feature={ ...) lies
# before this chunk. Serializes pose masks, segmentation maps, image shape,
# and sparse indices/values as TFRecord int64/float features.
'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
# NOTE(review): keys say "r6" but the source variables are pose_mask_r7_* —
# looks like a key/variable naming inconsistency; confirm which radius is intended.
'pose_mask_r6_0': dataset_utils.int64_feature(pose_mask_r7_0.astype(np.int64).flatten().tolist()),
'pose_mask_r6_1': dataset_utils.int64_feature(pose_mask_r7_1.astype(np.int64).flatten().tolist()),
'seg_0': dataset_utils.int64_feature(seg_0.astype(np.int64).flatten().tolist()),
'seg_1': dataset_utils.int64_feature(seg_1.astype(np.int64).flatten().tolist()),
'shape': dataset_utils.int64_feature(shape_0),
'indices_r4_0': dataset_utils.int64_feature(np.array(indices_r4_0).astype(np.int64).flatten().tolist()),
# NOTE(review): np.float is deprecated since NumPy 1.20 and removed in 1.24+;
# this line raises AttributeError on modern NumPy — float or np.float64 is needed.
'values_r4_0': dataset_utils.float_feature(np.array(values_r4_0).astype(np.float).flatten().tolist()),
'indices_r4_1': dataset_utils.int64_feature(np.array(indices_r4_1).astype(np.int64).flatten().tolist()),
# NOTE(review): same np.float removal issue as values_r4_0 above.
'values_r4_1': dataset_utils.float_feature(np.array(values_r4_1).astype(np.float).flatten().tolist()),
'pose_subs_0': dataset_utils.float_feature(pose_subs_0),
'pose_subs_1': dataset_utils.float_feature(pose_subs_1),
'part_bbox_0': dataset_utils.int64_feature(np.array(part_bbox_list_0).astype(np.int64).flatten().tolist()),
'part_bbox_1': dataset_utils.int64_feature(np.array(part_bbox_list_1).astype(np.int64).flatten().tolist()),
'part_vis_0': dataset_utils.int64_feature(np.array(visibility_list_0).astype(np.int64).flatten().tolist()),
'part_vis_1': dataset_utils.int64_feature(np.array(visibility_list_1).astype(np.int64).flatten().tolist()),
}))
return example
# NOTE(review): fragment — tail of a KITTI-style conversion routine. Accumulates
# per-object 3D bounding-box fields into parallel lists, then packs everything
# (2D boxes, labels, occlusion/truncation, alpha, 3D boxes, PNG image bytes)
# into a tf.train.Example. The loop header and variable definitions are not
# visible in this chunk.
ddd_bbox_w_list.append(ddd_bbox_w)
ddd_bbox_l_list.append(ddd_bbox_l)
ddd_bbox_x_list.append(ddd_bbox_x)
ddd_bbox_y_list.append(ddd_bbox_y)
ddd_bbox_z_list.append(ddd_bbox_z)
ddd_bbox_ry_list.append(ddd_bbox_ry)
image_format = b'PNG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/object/bbox/xmin': float_feature(bbox_x1_list),
'image/object/bbox/xmax': float_feature(bbox_x2_list),
'image/object/bbox/ymin': float_feature(bbox_y1_list),
'image/object/bbox/ymax': float_feature(bbox_y2_list),
'image/object/bbox/label': int64_feature(label_list),
'image/object/bbox/label_text': bytes_feature(type_list),
'image/object/bbox/occlusion': int64_feature(occl_list),
'image/object/bbox/truncation': float_feature(trun_list),
'image/object/observation/alpha': float_feature(alpha_list),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data),
# 3D box: height/width/length, center (x, y, z), and yaw (ry) — KITTI label fields.
'image/object/3Dbbox/h': float_feature(ddd_bbox_h_list),
'image/object/3Dbbox/w': float_feature(ddd_bbox_w_list),
'image/object/3Dbbox/l': float_feature(ddd_bbox_l_list),
'image/object/3Dbbox/x': float_feature(ddd_bbox_x_list),
'image/object/3Dbbox/y': float_feature(ddd_bbox_y_list),
'image/object/3Dbbox/z': float_feature(ddd_bbox_z_list),
'image/object/3Dbbox/ry': float_feature(ddd_bbox_ry_list)
}))
# NOTE(review): fragment — tail of an oriented-bounding-box (8-coordinate
# quadrilateral) TFRecord converter. Distributes each box's 8 values into the
# x1..x4 / y1..y4 coordinate lists, then builds the Example.
for orgin in oriented_bbox:
    assert len(orgin) == 8
    # NOTE(review): list comprehension used purely for its append side effects;
    # builds and discards a throwaway list each iteration (works, but a plain
    # for-loop would be the idiomatic form).
    [l.append(point) for l, point in zip([x1, x2, x3, x4, y1, y2, y3, y4], orgin)]
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/filename': bytes_feature(filename.encode('utf-8')),
# Axis-aligned bounds plus the four corner points of the rotated box.
'image/object/bbox/xmin': float_feature(xmin),
'image/object/bbox/xmax': float_feature(xmax),
'image/object/bbox/ymin': float_feature(ymin),
'image/object/bbox/ymax': float_feature(ymax),
'image/object/bbox/x1': float_feature(x1),
'image/object/bbox/y1': float_feature(y1),
'image/object/bbox/x2': float_feature(x2),
'image/object/bbox/y2': float_feature(y2),
'image/object/bbox/x3': float_feature(x3),
'image/object/bbox/y3': float_feature(y3),
'image/object/bbox/x4': float_feature(x4),
'image/object/bbox/y4': float_feature(y4),
'image/object/bbox/label': int64_feature(labels),
'image/object/bbox/label_text': bytes_feature(labels_text),
'image/object/bbox/difficult': int64_feature(difficult),
'image/object/bbox/truncated': int64_feature(truncated),
'image/object/bbox/ignored': int64_feature(ignored),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data)}))
return example
# NOTE(review): fragment — tail of a Pascal-VOC-style TFRecord converter.
# Splits each 4-tuple box into ymin/xmin/ymax/xmax lists (note the y-first
# ordering of the input tuples), then builds the Example.
for b in bboxes:
    assert len(b) == 4
    # pylint: disable=expression-not-assigned
    [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
    # pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/object/bbox/xmin': float_feature(xmin),
'image/object/bbox/xmax': float_feature(xmax),
'image/object/bbox/ymin': float_feature(ymin),
'image/object/bbox/ymax': float_feature(ymax),
'image/object/bbox/label': int64_feature(labels),
'image/object/bbox/label_text': bytes_feature(labels_text),
'image/object/bbox/difficult': int64_feature(difficult),
'image/object/bbox/truncated': int64_feature(truncated),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data)}))
return example
# NOTE(review): fragment — interior of a person-pair (re-ID / pose-transfer
# style) tf.train.Example feature dict; neither the dict opening nor its close
# is visible in this chunk.
'id_1': dataset_utils.int64_feature(id_map[id_1]),
'cam_0': dataset_utils.int64_feature(int(cam_0)),
'cam_1': dataset_utils.int64_feature(int(cam_1)),
# NOTE(review): 'jpg' is a str, not bytes — tf.train.BytesList requires bytes
# in Python 3; presumably this needs b'jpg' or dataset_utils handles encoding.
# Verify against dataset_utils.bytes_feature.
'image_format': dataset_utils.bytes_feature('jpg'),
'image_height': dataset_utils.int64_feature(height),
'image_width': dataset_utils.int64_feature(width),
'real_data': dataset_utils.int64_feature(1),
# Attribute labels plus word2vec embeddings of the attributes at several dims.
'attrs_0': dataset_utils.int64_feature(attrs_0),
'attrs_1': dataset_utils.int64_feature(attrs_1),
'attrs_w2v25_0': dataset_utils.float_feature(attrs_w2v25_0),
'attrs_w2v25_1': dataset_utils.float_feature(attrs_w2v25_1),
'attrs_w2v50_0': dataset_utils.float_feature(attrs_w2v50_0),
'attrs_w2v50_1': dataset_utils.float_feature(attrs_w2v50_1),
'attrs_w2v100_0': dataset_utils.float_feature(attrs_w2v100_0),
'attrs_w2v100_1': dataset_utils.float_feature(attrs_w2v100_1),
'attrs_w2v150_0': dataset_utils.float_feature(attrs_w2v150_0),
'attrs_w2v150_1': dataset_utils.float_feature(attrs_w2v150_1),
'pose_peaks_0': dataset_utils.float_feature(pose_peaks_0.flatten().tolist()),
'pose_peaks_1': dataset_utils.float_feature(pose_peaks_1.flatten().tolist()),
'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
# NOTE(review): keys say "r6" but the source variables are pose_mask_r7_* —
# likely a key/variable naming inconsistency; confirm which radius is intended.
'pose_mask_r6_0': dataset_utils.int64_feature(pose_mask_r7_0.astype(np.int64).flatten().tolist()),
'pose_mask_r6_1': dataset_utils.int64_feature(pose_mask_r7_1.astype(np.int64).flatten().tolist()),
'seg_0': dataset_utils.int64_feature(seg_0.astype(np.int64).flatten().tolist()),
'seg_1': dataset_utils.int64_feature(seg_1.astype(np.int64).flatten().tolist()),
'shape': dataset_utils.int64_feature(shape_0),
'indices_r4_0': dataset_utils.int64_feature(np.array(indices_r4_0).astype(np.int64).flatten().tolist()),
# NOTE(review): np.float is deprecated since NumPy 1.20 and removed in 1.24+;
# this raises AttributeError on modern NumPy — float or np.float64 is needed.
'values_r4_0': dataset_utils.float_feature(np.array(values_r4_0).astype(np.float).flatten().tolist()),
# NOTE(review): fragment — interior of a second oriented-bounding-box
# Example feature dict (same schema as the earlier 8-corner converter);
# the tf.train.Example(...) opening is not visible in this chunk.
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/filename': bytes_feature(filename.encode('utf-8')),
# Axis-aligned bounds plus the four corner points of the rotated box.
'image/object/bbox/xmin': float_feature(xmin),
'image/object/bbox/xmax': float_feature(xmax),
'image/object/bbox/ymin': float_feature(ymin),
'image/object/bbox/ymax': float_feature(ymax),
'image/object/bbox/x1': float_feature(x1),
'image/object/bbox/y1': float_feature(y1),
'image/object/bbox/x2': float_feature(x2),
'image/object/bbox/y2': float_feature(y2),
'image/object/bbox/x3': float_feature(x3),
'image/object/bbox/y3': float_feature(y3),
'image/object/bbox/x4': float_feature(x4),
'image/object/bbox/y4': float_feature(y4),
'image/object/bbox/label': int64_feature(labels),
'image/object/bbox/label_text': bytes_feature(labels_text),
'image/object/bbox/difficult': int64_feature(difficult),
'image/object/bbox/truncated': int64_feature(truncated),
'image/object/bbox/ignored': int64_feature(ignored),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data)}))
return example
# NOTE(review): fragment — interior of a person-pair Example feature dict
# (duplicate of the earlier person-pair schema); cut off mid-dict at the end
# of this chunk.
'image_name_0': dataset_utils.bytes_feature(pairs[idx][0]),
'image_name_1': dataset_utils.bytes_feature(pairs[idx][1]),
'image_raw_0': dataset_utils.bytes_feature(image_raw_0),
'image_raw_1': dataset_utils.bytes_feature(image_raw_1),
'label': dataset_utils.int64_feature(labels[idx]),
'id_0': dataset_utils.int64_feature(id_map[id_0]),
'id_1': dataset_utils.int64_feature(id_map[id_1]),
'cam_0': dataset_utils.int64_feature(int(cam_0)),
'cam_1': dataset_utils.int64_feature(int(cam_1)),
# NOTE(review): 'jpg' is a str, not bytes — tf.train.BytesList requires bytes
# in Python 3; presumably this needs b'jpg' or dataset_utils handles encoding.
# Verify against dataset_utils.bytes_feature.
'image_format': dataset_utils.bytes_feature('jpg'),
'image_height': dataset_utils.int64_feature(height),
'image_width': dataset_utils.int64_feature(width),
'real_data': dataset_utils.int64_feature(1),
# Attribute labels plus word2vec embeddings of the attributes at several dims.
'attrs_0': dataset_utils.int64_feature(attrs_0),
'attrs_1': dataset_utils.int64_feature(attrs_1),
'attrs_w2v25_0': dataset_utils.float_feature(attrs_w2v25_0),
'attrs_w2v25_1': dataset_utils.float_feature(attrs_w2v25_1),
'attrs_w2v50_0': dataset_utils.float_feature(attrs_w2v50_0),
'attrs_w2v50_1': dataset_utils.float_feature(attrs_w2v50_1),
'attrs_w2v100_0': dataset_utils.float_feature(attrs_w2v100_0),
'attrs_w2v100_1': dataset_utils.float_feature(attrs_w2v100_1),
'attrs_w2v150_0': dataset_utils.float_feature(attrs_w2v150_0),
'attrs_w2v150_1': dataset_utils.float_feature(attrs_w2v150_1),
'pose_peaks_0': dataset_utils.float_feature(pose_peaks_0.flatten().tolist()),
'pose_peaks_1': dataset_utils.float_feature(pose_peaks_1.flatten().tolist()),
'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
# NOTE(review): keys say "r6" but the source variables are pose_mask_r7_* —
# likely a key/variable naming inconsistency; confirm which radius is intended.
'pose_mask_r6_0': dataset_utils.int64_feature(pose_mask_r7_0.astype(np.int64).flatten().tolist()),
'pose_mask_r6_1': dataset_utils.int64_feature(pose_mask_r7_1.astype(np.int64).flatten().tolist()),
'seg_0': dataset_utils.int64_feature(seg_0.astype(np.int64).flatten().tolist()),