Validate only on pose result, add visual check

convert_to_mixed.py (+32 -22)
@@ -5,6 +5,8 @@ import onnx
 import onnxruntime as ort
 from onnxconverter_common import auto_mixed_precision_model_path
 import argparse
+from rtmo_gpu import RTMO_GPU, draw_skeleton
+import cv2
 
 PROVIDERS=[('TensorrtExecutionProvider', {'trt_fp16_enable':True,}), 'CUDAExecutionProvider', 'CPUExecutionProvider']
 
@@ -30,11 +32,34 @@ def load_and_preprocess_image(image_path, size=(224, 224)):
     image = np.expand_dims(image, axis=0)
     return image
 
-def
-
-
-
-
+def validate_pose(res1, res2):
+    rtmo = RTMO_GPU(args.source_model_path)
+    poses1 = rtmo.postprocess(res1)
+    poses2 = rtmo.postprocess(res2)
+
+    for k1, k2 in zip(poses1, poses2):
+        if not np.allclose(k1, k2, rtol=args.rtol, atol=args.atol):
+            return False
+    return True
+
+def infer_on_image(onnx_model, model_input_size, test_image_path):
+    body = RTMO_GPU(onnx_model=onnx_model,
+                    model_input_size=model_input_size,
+                    is_yolo_nas_pose=False)
+
+    frame = cv2.imread(test_image_path)
+    img_show = frame.copy()
+    keypoints, scores = body(img_show)
+
+    img_show = draw_skeleton(img_show,
+                             keypoints,
+                             scores,
+                             kpt_thr=0.3,
+                             line_width=2)
+    img_show = cv2.resize(img_show, (788, 525))
+    cv2.imshow(f'{args.target_model_path}', img_show)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
 
 def main(args):
     model_input_size = detect_model_input_size(args.source_model_path)
@@ -43,28 +68,13 @@ def main(args):
     auto_mixed_precision_model_path.auto_convert_mixed_precision_model_path(source_model_path=args.source_model_path,
                                                                             input_feed=input_feed,
                                                                             target_model_path=args.target_model_path,
-                                                                            customized_validate_func=
+                                                                            customized_validate_func=validate_pose,
                                                                             rtol=args.rtol, atol=args.atol,
                                                                             provider=PROVIDERS,
                                                                             keep_io_types=True,
                                                                             verbose=True)
 
-
-    converted_result = infer(args.target_model_path, input_feed)
-
-    is_close = np.allclose(original_result[0], converted_result[0], rtol=args.rtol, atol=args.atol)
-    print(f"Validation result: {'Success' if is_close else 'Failure'}")
-
-    print('Compare Joint Coordinates:')
-    original_keypoints, converted_keypoints = original_result[1][0, :, :, :2], converted_result[1][0, :, :, :2]
-    for n_det in range(original_keypoints.shape[0]):
-        print(f'\tDetection {n_det}:')
-        for n_joint in range(original_keypoints.shape[1]):
-            original_x, original_y = original_keypoints[n_det][n_joint]
-            converted_x, converted_y = converted_keypoints[n_det][n_joint]
-            print(f'\t\tJoint {n_joint}:')
-            print(f'\t\t\tX: {original_x:.2f}, {converted_x:.2f}')
-            print(f'\t\t\tY: {original_y:.2f}, {converted_y:.2f}')
+    infer_on_image(args.target_model_path, model_input_size, args.test_image_path)
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Convert an ONNX model to mixed precision format.")
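
For context, onnxconverter_common's auto_convert_mixed_precision_model_path searches for node groups that can stay in FP16 and accepts a candidate only when the supplied customized_validate_func, called with the outputs of the original model and of the candidate mixed-precision model, returns True. A minimal, generic sketch of that callback contract (this is illustrative only, not the RTMO-specific validator added above):

    import numpy as np

    # Generic validator sketch: res1 holds the outputs of the original model,
    # res2 the outputs of the candidate mixed-precision model; returning False
    # makes the converter back out of the current FP16 candidate.
    def example_validate(res1, res2, rtol=1e-2, atol=1e-3):
        for r1, r2 in zip(res1, res2):
            if not np.allclose(r1, r2, rtol=rtol, atol=atol):
                return False
        return True

validate_pose follows the same shape but first decodes both result sets with RTMO_GPU.postprocess, so acceptance is judged on the pose keypoints that matter downstream rather than on the raw output tensors, which is what the commit title means by validating only on the pose result.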
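
Note that validate_pose reads the module-level args set in the __main__ block. If the same check ever needs to run from another entry point, the tolerances and post-processor can be bound explicitly while still presenting the two-argument callback the converter expects; a hypothetical sketch under that assumption, not part of this commit:

    from functools import partial
    import numpy as np
    from rtmo_gpu import RTMO_GPU

    # Hypothetical variant of validate_pose with explicit dependencies
    # instead of the module-level `args`.
    def validate_pose_explicit(rtmo, res1, res2, rtol, atol):
        poses1 = rtmo.postprocess(res1)
        poses2 = rtmo.postprocess(res2)
        return all(np.allclose(k1, k2, rtol=rtol, atol=atol)
                   for k1, k2 in zip(poses1, poses2))

    # Bind everything except (res1, res2) so the converter still sees a
    # two-argument callback:
    # rtmo = RTMO_GPU(args.source_model_path)
    # validate = partial(validate_pose_explicit, rtmo, rtol=args.rtol, atol=args.atol)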