federico committed on
Commit
42180f1
•
1 Parent(s): 20f7ff3

Complete refactoring using Blocks and making a nicer web page

Files changed (3)
  1. app.py +157 -48
  2. utils/img_util.py +32 -11
  3. utils/labels.py +14 -2
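The refactoring replaces the two gr.Interface objects with a single gr.Blocks layout (tabs, rows and columns, explicit event wiring). As an orientation for readers new to the Blocks API, here is a minimal, hedged sketch of the pattern the app.py diff below follows; the callback and component names are illustrative placeholders, not the app's actual ones.

```python
import gradio as gr

def process(image, flag):
    # Placeholder callback standing in for the app's demo_play / function_to_call.
    return image

with gr.Blocks() as demo:
    gr.Markdown("# Page header rendered from a markdown/HTML string")
    with gr.Tab("demo_upload"):
        with gr.Row():
            with gr.Column():                  # left column: inputs and flags
                inp = gr.Image(label="Input Image")
                flag = gr.Checkbox(value=True, label="example flag")
                run = gr.Button("RUN")
            with gr.Column():                  # right column: outputs
                out = gr.Image(label="Output Image")
        # Wire the callback to both the RUN button and image changes,
        # mirroring the button.click / input_img.change calls in the diff.
        run.click(process, inputs=[inp, flag], outputs=out)
        inp.change(process, inputs=[inp, flag], outputs=out)

demo.launch()
```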
app.py CHANGED
@@ -12,7 +12,60 @@ from ai.detection import detect
12
  from laeo_per_frame.interaction_per_frame_uncertainty import LAEO_computation
13
  from utils.hpe import hpe, project_ypr_in2d
14
  from utils.img_util import resize_preserving_ar, percentage_to_pixel, draw_key_points_pose, \
15
- visualize_vector
16
 
17
 
18
  def load_image(camera, ):
@@ -25,20 +78,26 @@ def load_image(camera, ):
25
  return False, None
26
 
27
 
28
- def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False):
29
  # webcam in use
30
 
31
  # gpus = tf.config.list_physical_devices('GPU')
32
 
33
  # img = np.array(frame)
34
- if not rgb:
35
- img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
36
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
37
 
38
  img_resized, new_old_shape = resize_preserving_ar(img, input_shape_od_model)
39
40
  print('inference centernet')
41
- detections, elapsed_time = detect(model, img_resized, min_score_thresh,
42
  new_old_shape) # detection classes boxes scores
43
  # probably to draw on resized
44
  # img_with_detections = draw_detections(img_resized, detections, max_boxes_to_draw, None, None, None)
@@ -61,16 +120,19 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False):
61
  # size=50)
62
 
63
  people_list.append({'yaw' : yaw[0].numpy()[0],
64
- 'yaw_u' : 0,
65
  'pitch' : pitch[0].numpy()[0],
66
- 'pitch_u' : 0,
67
  'roll' : roll[0].numpy()[0],
68
- 'roll_u' : 0,
69
  'center_xy': [tdx, tdy]
70
  })
71
 
 
72
  if show_keypoints:
 
73
  for i in range(len(det)):
 
74
  img = draw_key_points_pose(img, kpt[i], only_face=only_face)
75
 
76
  # call LAEO
@@ -83,15 +145,34 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False):
83
  interaction_matrix = np.zeros((len(people_list), len(people_list)))
84
  # coloured arrow print per person
85
86
  for index, person in enumerate(people_list):
87
  green = round((max(interaction_matrix[index, :])) * 255)
88
  colour = (0, green, 0)
89
  if green < 40:
90
  colour = (255, 0, 0)
91
- vector = project_ypr_in2d(person['yaw'], person['pitch'], person['roll'])
92
- img = visualize_vector(img, person['center_xy'], vector, title="",
93
- color=colour)
94
- return img
95
 
96
 
97
  if __name__=='__main__':
@@ -114,7 +195,7 @@ if __name__=='__main__':
114
 
115
  input_shape_od_model = (512, 512)
116
  # params
117
- min_score_thresh, max_boxes_to_draw, min_distance = .45, 50, 1.5
118
 
119
  print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
120
 
@@ -123,37 +204,65 @@ if __name__=='__main__':
123
  live = True
124
  title = "Head Pose Estimation and LAEO"
125
 
126
- demo_webcam = gr.Interface(
127
- fn=function_to_call,
128
- inputs=[gr.Image(source="webcam"), # with no streaming-> acquire images
129
- gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
130
- gr.Checkbox(value=False, label="rgb", info="Display output on W/B image"),
131
- gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image"),
132
- gr.Checkbox(value=True, label="show_keypoints_only_face",
133
- info="Display only face keypoints on image"),
134
- ],
135
- outputs="image",
136
- live=live,
137
- title=title,
138
- description="This is a demo developed by Federico Figari T. at MaLGa Lab, University of Genoa, Italy. You can choose to have only the Head Pose Estimation or also the LAEO computation (more than 1 person should be in the image). You need to take a picture and the algorithm will calculate the Head Pose and will be showed as an arrow on your face. LAEO, instead is showed colouring the arrow in green.",
139
- )
140
-
141
- demo_upload = gr.Interface(
142
- fn=function_to_call,
143
- inputs=[gr.Image(source="upload", ), # with no streaming-> acquire images
144
- gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
145
- gr.Checkbox(value=False, label="rgb", info="Display output on W/B image"),
146
- gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image"),
147
- gr.Checkbox(value=True, label="show_keypoints_only_face",
148
- info="Display only face keypoints on image"),
149
- ],
150
- outputs=gr.Image(height=238, width=585), # shape=gr.Image(source="upload", ).shape
151
- live=live,
152
- title=title,
153
- description="This is a demo developed by Federico Figari T. at MaLGa Lab, University of Genoa, Italy. You can choose to have only the Head Pose Estimation or also the LAEO computation (more than 1 person should be in the image). You need to upload an image and the algorithm will calculate the Head Pose and will be showed as an arrow on your face. LAEO, instead is showed colouring the arrow in green.",
154
- examples=[["LAEO_demo_data/examples/1.jpg"], ["LAEO_demo_data/examples/20.jpg"]]
155
- )
156
-
157
- demo_tabbed = gr.TabbedInterface([demo_webcam, demo_upload], ["Demo from webcam", "Demo from upload"])
158
-
159
- demo_tabbed.launch()
12
  from laeo_per_frame.interaction_per_frame_uncertainty import LAEO_computation
13
  from utils.hpe import hpe, project_ypr_in2d
14
  from utils.img_util import resize_preserving_ar, percentage_to_pixel, draw_key_points_pose, \
15
+ visualize_vector, draw_axis, draw_axis_3d, draw_cones
16
+
17
+ # <a href="https://malga.unige.it/" target="_blank"><nobr>Lab MaLGa UniGe</nobr></a>
18
+ WEBSITE = """
19
+ <div class="embed_hidden">
20
+ <h1 style='text-align: center'>Head Pose Estimation and LAEO computation </h1>
21
+ <h2 style='text-align: center'>
22
+ <a target="_blank" href="https://github.com/Malga-Vision/LAEO_demo"> <nobr> Code for LAEO </nobr></a>
23
+ <br>
24
+ <a target="_blank" href="https://github.com/Malga-Vision/HHP-Net/tree/master"> <nobr> Code for HPE </nobr></a>
25
+
26
+ </h2>
27
+ <h2 style='text-align: center'>
28
+ <nobr><a href="https://github.com/Malga-Vision" target="_blank"><nobr>MaLGa Vision GitHub</nobr></a> &emsp;</nobr>
29
+ </h2>
30
+
31
+ <h3 style="text-align:center;">
32
+ <a href="https://fede1995.github.io/" target="_blank"><nobr>Federico FT</nobr></a> &emsp;
33
+ </h3>
34
+
35
+ <h2> Description </h2>
36
+ <p>
37
This space illustrates a method for Head Pose Estimation and LAEO detection. The code is based on experiments and research carried out at the University of Genoa (Italy) in the MaLGa Laboratory.
38
+ This demo has been set up by Federico Figari Tomenotti.
39
DISCLAIMER: it does not work properly on smartphones and, occasionally, on the Safari web browser.
40
+ </p>
41
+ <h2> Usage </h2>
42
+ <p>
43
The flags allow the user to choose what to display on the result image, and to change the sensitivity of the person detection algorithm.
44
The Head Pose orientation can be displayed either as a single vector (arrow) or as a triplet of angles (yaw, pitch and roll) projected onto the image plane.
45
The uncertainty result is the mean of the uncertainties computed on the three angles.
46
The Run button is needed to re-run the demo on an image after changing the flag settings.
47
For a detailed explanation of the algorithms, refer to the paper, which will be out soon.
48
+ </p>
49
+
50
+ </div>
51
+ """
52
+
53
+ WEBSITE_citation = """
54
+ <h2 style='text-align: center'>
55
+ Citation
56
+ </h2>
57
+
58
+ If you find this code useful for your research, please use the following BibTeX entry.
59
+
60
+ ```
61
+ @inproceedings{cantarini2022hhp,
62
+ title={HHP-Net: A light Heteroscedastic neural network for Head Pose estimation with uncertainty},
63
+ author={Cantarini, Giorgio and Tomenotti, Federico Figari and Noceti, Nicoletta and Odone, Francesca},
64
+ booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
65
+ pages={3521--3530},
66
+ year={2022}
67
+ }
68
+ ```"""
69
 
70
 
71
  def load_image(camera, ):
 
78
  return False, None
79
 
80
 
81
+ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, Head_Pose_representation='Vector', detection_threshold=0.45):
82
  # webcam in use
83
 
84
  # gpus = tf.config.list_physical_devices('GPU')
85
 
86
  # img = np.array(frame)
87
+
 
 
88
 
89
  img_resized, new_old_shape = resize_preserving_ar(img, input_shape_od_model)
90
 
91
+ if not rgb:
92
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to greyscale
93
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # still greyscale, but with 3 channels so coloured points and lines can be drawn on it
94
+ # img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
95
+ else: # if RGB
96
+ # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
97
+ pass
98
+
99
  print('inference centernet')
100
+ detections, elapsed_time = detect(model, img_resized, detection_threshold,
101
  new_old_shape) # detection classes boxes scores
102
  # probably to draw on resized
103
  # img_with_detections = draw_detections(img_resized, detections, max_boxes_to_draw, None, None, None)
 
120
  # size=50)
121
 
122
  people_list.append({'yaw' : yaw[0].numpy()[0],
123
+ 'yaw_u' : yaw[0].numpy()[1],
124
  'pitch' : pitch[0].numpy()[0],
125
+ 'pitch_u' : pitch[0].numpy()[1],
126
  'roll' : roll[0].numpy()[0],
127
+ 'roll_u' : roll[0].numpy()[1],
128
  'center_xy': [tdx, tdy]
129
  })
130
 
131
+
132
  if show_keypoints:
133
+ # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
134
  for i in range(len(det)):
135
+ # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
136
  img = draw_key_points_pose(img, kpt[i], only_face=only_face)
137
 
138
  # call LAEO
 
145
  interaction_matrix = np.zeros((len(people_list), len(people_list)))
146
  # coloured arrow print per person
147
 
148
+ print(f'Head pose representation: {Head_Pose_representation}')
149
+ def visualise_hpe(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1, openpose=False, title="", color=(255, 0, 0)):
150
+ if str(Head_Pose_representation).lower() == 'vector':
151
+ vector = project_ypr_in2d(yaw, pitch, roll)
152
+ image = visualize_vector(image, [tdx, tdy], vector, title=title, color=color)
153
+ return image
154
+ elif str(Head_Pose_representation).lower() == 'axis':
155
+ image = draw_axis_3d(yaw, pitch, roll, image=image, tdx=tdx, tdy=tdy, size=size)
156
+ return image
157
+ elif str(Head_Pose_representation).lower() == 'cone':
158
+ _, image = draw_cones(yaw, pitch, roll, unc_yaw=yaw_uncertainty, unc_pitch=pitch_uncertainty, unc_roll=roll_uncertainty, image=image, tdx=tdx, tdy=tdy, size=size)
159
+ return image
160
+ else:
161
+ return image
162
+
163
+
164
  for index, person in enumerate(people_list):
165
  green = round((max(interaction_matrix[index, :])) * 255)
166
  colour = (0, green, 0)
167
  if green < 40:
168
  colour = (255, 0, 0)
169
+ img = visualise_hpe(person['yaw'], person['pitch'], person['roll'], image=img, tdx=person['center_xy'][0], tdy=person['center_xy'][1], size=50, yaw_uncertainty=person['yaw_u'], pitch_uncertainty=person['pitch_u'], roll_uncertainty=person['roll_u'], title="", color=colour)
170
+ # vector = project_ypr_in2d(person['yaw'], person['pitch'], person['roll'])
171
+ # img = visualize_vector(img, person['center_xy'], vector, title="",
172
+ # color=colour)
173
+ uncertainty_mean = [(i['yaw_u'] + i['pitch_u'] + i['roll_u']) / 3 for i in people_list]  # mean uncertainty over the three angles
174
+ uncertainty_mean_str = ' '.join(str(round(i, 2)) for i in uncertainty_mean)
175
+ return img, uncertainty_mean_str
176
 
177
 
178
  if __name__=='__main__':
 
195
 
196
  input_shape_od_model = (512, 512)
197
  # params
198
+ min_score_thresh, max_boxes_to_draw, min_distance = .25, 50, 1.5
199
 
200
  print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
201
 
 
204
  live = True
205
  title = "Head Pose Estimation and LAEO"
206
 
207
+ print(os.getcwd())
208
+
209
+ with gr.Blocks() as demo:
210
+ gr.Markdown(WEBSITE)
211
+ with gr.Tab("demo_webcam"):
212
+ with gr.Row():
213
+ with gr.Column():
214
+ input_img = gr.Image(label="Input Image", source="webcam")
215
+ laeo = gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO")
216
+ rgb = gr.Checkbox(value=False, label="rgb", info="Display output on W/B image")
217
+ show_keypoints = gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image")
218
+ show_keypoints_only_face = gr.Checkbox(value=True, label="show_keypoints_only_face",
219
+ info="Display only face keypoints on image")
220
+ Head_Pose_representation = gr.Radio(["Vector", "Axis", "None"], label="Head_Pose_representation",
221
+ info="Which representation to show", value="Vector")
222
+ detection_threshold = gr.Slider(0.01, 1, value=0.45, step=0.01, interactive=True,
223
+ label="detection_threshold", info="Choose in [0, 1]")
224
+
225
+ button = gr.Button(label="RUN", type="default")
226
+ with gr.Column():
227
+ outputs = gr.Image(label="Output Image", shape=(512, 512))
228
+ uncert = gr.Label(label="Uncertainty", value="0.0")
229
+
230
+ input_img.change(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
231
+ Head_Pose_representation, detection_threshold], outputs=[outputs, uncert])
232
+ button.click(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
233
+ Head_Pose_representation, detection_threshold], outputs=[outputs, uncert])
234
+
235
+ with gr.Tab("demo_upload"):
236
+ with gr.Row():
237
+ with gr.Column():
238
+ input_img = gr.Image(label="Input Image", source="upload")
239
+ laeo = gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO")
240
+ rgb = gr.Checkbox(value=False, label="rgb", info="Display output on W/B image")
241
+ show_keypoints = gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image")
242
+ show_keypoints_only_face = gr.Checkbox(value=True, label="show_keypoints_only_face",
243
+ info="Display only face keypoints on image")
244
+ Head_Pose_representation = gr.Radio(["Vector", "Axis", "None"],
245
+ label="Head_Pose_representation",
246
+ info="Which representation to show", value="Vector")
247
+ detection_threshold = gr.Slider(0.01, 1, value=0.45, step=0.01, interactive=True,
248
+ label="detection_threshold", info="Choose in [0, 1]")
249
+
250
+ button = gr.Button(label="RUN", type="default")
251
+ with gr.Column():
252
+ outputs = gr.Image(height=238, width=585, label="Output Image")
253
+ uncert = gr.Label(label="Uncertainty", value="0.0")
254
+ examples_text = gr.Markdown("## Image Examples")
255
+ examples = gr.Examples([["LAEO_demo_data/examples/1.jpg"], ["LAEO_demo_data/examples/300wlp_0.png"],
256
+ ["LAEO_demo_data/examples/AWFL_2.jpg"],
257
+ ["LAEO_demo_data/examples/BIWI_3.png"]], inputs=input_img,) # add all other flags
258
+
259
+ input_img.change(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
260
+ Head_Pose_representation, detection_threshold],
261
+ outputs=[outputs, uncert])
262
+ button.click(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
263
+ Head_Pose_representation, detection_threshold],
264
+ outputs=[outputs, uncert])
265
+
266
+ gr.Markdown(WEBSITE_citation)
267
+
268
+ demo.launch()
utils/img_util.py CHANGED
@@ -3,7 +3,11 @@ import os
3
  import json
4
  import numpy as np
5
  from math import cos, sin, pi
6
- from utils.labels import coco_category_index, rgb_colors, color_pose, color_pose_normalized, pose_id_part, face_category_index, body_parts_openpose, body_parts, face_points, face_points_openpose, pose_id_part_zedcam, face_points_zedcam, body_parts_zedcam
7
  # from src.utils.my_utils import fit_plane_least_square # , retrieve_line_from_two_points
8
 
9
 
@@ -294,6 +298,8 @@ def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
294
  :img (numpy.ndarray): The image with the drawings of lines and key points
295
  """
296
297
  thickness = max (image.shape[0] // 100, image.shape[1] // 100)
298
  if thickness == 0:
299
  thickness = 1
@@ -311,17 +317,17 @@ def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
311
 
312
  for j in range(len(kpt)):
313
  # 0 nose, 1/2 left/right eye, 3/4 left/right ear
314
- color = color_pose["blue"]
315
  if j == face_pts[0]:
316
- color = color_pose["purple"]# naso
317
  if j == face_pts[1]:
318
- color = color_pose["green"]#["light_pink"]#Leye
319
  if j == face_pts[2]:
320
- color = color_pose["dark_pink"]#Reye
321
  if j == face_pts[3]:
322
- color = color_pose["light_orange"]#LEar
323
  if j == face_pts[4]:
324
- color = color_pose["yellow"]# REar
325
  if only_face and j in face_pts:
326
  if openpose:
327
  cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
@@ -652,7 +658,7 @@ def draw_cones(yaw, pitch, roll, unc_yaw, unc_pitch, unc_roll, image=None, tdx=N
652
  list_projection_xy = [sin(yaw), -cos(yaw) * sin(pitch)]
653
  return list_projection_xy, image
654
 
655
- def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1):
656
  """
657
  Draw yaw pitch and roll axis on the image if passed as input and returns the vector containing the projection of the vector on the image plane
658
  Args:
@@ -670,6 +676,11 @@ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_
670
  Returns:
671
  :list_projection_xy (list): list containing the unit vector [x, y, z]
672
  """
673
  pitch = pitch * np.pi / 180
674
  yaw = -(yaw * np.pi / 180)
675
  roll = roll * np.pi / 180
@@ -692,8 +703,18 @@ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_
692
  x3 = size * (sin(yaw)) + tdx
693
  y3 = size * (-cos(yaw) * sin(pitch)) + tdy
694
  z3 = size * (cos(pitch) * cos(yaw)) + tdy
695
  if image is not None:
696
- cv2.line(image, (int(tdx), int(tdy)), (int(x1), int(y1)), (0, 0, 255), 2)
697
- cv2.line(image, (int(tdx), int(tdy)), (int(x2), int(y2)), (0, 255, 0), 2)
698
- cv2.line(image, (int(tdx), int(tdy)), (int(x3), int(y3)), (255, 0, 0), 2)
699
  return image
 
3
  import json
4
  import numpy as np
5
  from math import cos, sin, pi
6
+ from utils.labels import coco_category_index, rgb_colors, color_pose, color_pose_normalized, pose_id_part, \
7
+ face_category_index, body_parts_openpose, body_parts, face_points, face_points_openpose, pose_id_part_zedcam, \
8
+ face_points_zedcam, body_parts_zedcam, color_pose_rgb
9
+
10
+
11
  # from src.utils.my_utils import fit_plane_least_square # , retrieve_line_from_two_points
12
 
13
 
 
298
  :img (numpy.ndarray): The image with the drawings of lines and key points
299
  """
300
 
301
+
302
+
303
  thickness = max (image.shape[0] // 100, image.shape[1] // 100)
304
  if thickness == 0:
305
  thickness = 1
 
317
 
318
  for j in range(len(kpt)):
319
  # 0 nose, 1/2 left/right eye, 3/4 left/right ear
320
+ color = color_pose_rgb["blue"]
321
  if j == face_pts[0]:
322
+ color = color_pose_rgb["purple"]# naso
323
  if j == face_pts[1]:
324
+ color = color_pose_rgb["green"]#["light_pink"]#Leye
325
  if j == face_pts[2]:
326
+ color = color_pose_rgb["dark_pink"]#Reye
327
  if j == face_pts[3]:
328
+ color = color_pose_rgb["light_orange"]#LEar
329
  if j == face_pts[4]:
330
+ color = color_pose_rgb["yellow"]# REar
331
  if only_face and j in face_pts:
332
  if openpose:
333
  cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
 
658
  list_projection_xy = [sin(yaw), -cos(yaw) * sin(pitch)]
659
  return list_projection_xy, image
660
 
661
+ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1, openpose=False):
662
  """
663
  Draw yaw pitch and roll axis on the image if passed as input and returns the vector containing the projection of the vector on the image plane
664
  Args:
 
676
  Returns:
677
  :list_projection_xy (list): list containing the unit vector [x, y, z]
678
  """
679
+ if openpose:
680
+ temp = tdy
681
+ tdy = tdx
682
+ tdx = temp
683
+
684
  pitch = pitch * np.pi / 180
685
  yaw = -(yaw * np.pi / 180)
686
  roll = roll * np.pi / 180
 
703
  x3 = size * (sin(yaw)) + tdx
704
  y3 = size * (-cos(yaw) * sin(pitch)) + tdy
705
  z3 = size * (cos(pitch) * cos(yaw)) + tdy
706
+
707
+ red_rgb = (255, 0, 0)
708
+ green_rgb = (0, 255, 0)
709
+ blue_rgb = (0, 0, 255)
710
+
711
+ red_bgr = (0, 0, 255)
712
+ green_bgr = (0, 255, 0)
713
+ blue_bgr = (255, 0, 0)
714
+
715
+
716
  if image is not None:
717
+ cv2.line(image, (int(tdx), int(tdy)), (int(x1), int(y1)), red_rgb, 3)
718
+ cv2.line(image, (int(tdx), int(tdy)), (int(x2), int(y2)), green_rgb, 3)
719
+ cv2.line(image, (int(tdx), int(tdy)), (int(x3), int(y3)), blue_rgb, 3)
720
  return image
utils/labels.py CHANGED
@@ -174,17 +174,29 @@ rgb_colors = {
174
  90: (186, 85, 211),
175
  }
176
 
177
- color_pose = {
178
  "purple": (255, 0, 100),
179
  "light_pink": (80, 0, 255),
180
  "dark_pink": (220, 0, 255),
181
- "light_orange": (0, 80, 255),
182
  "dark_orange": (255, 220, 0.),
183
  "yellow": (0, 220, 255),
184
  "blue": (255, 0, 0),
185
  "green": (0,255,0),
186
  }
187
 
 
188
  color_pose_normalized = {
189
  "purple": (100/255., 0/255., 255/255.),
190
  "light_pink": (255/255., 0/255., 80/255.),
 
174
  90: (186, 85, 211),
175
  }
176
 
177
+ color_pose = { # BGR
178
  "purple": (255, 0, 100),
179
  "light_pink": (80, 0, 255),
180
  "dark_pink": (220, 0, 255),
181
+ "light_orange": (255, 80, 0),
182
  "dark_orange": (255, 220, 0.),
183
  "yellow": (0, 220, 255),
184
  "blue": (255, 0, 0),
185
  "green": (0,255,0),
186
  }
187
 
188
+ color_pose_rgb = { # RGB
189
+ "purple": (100, 0, 255),
190
+ "light_pink": (255, 0, 80),
191
+ "dark_pink": (255, 0, 220),
192
+ "light_orange": (0, 80, 255),
193
+ "dark_orange": (0, 220, 255.),
194
+ "yellow": (255, 220, 0),
195
+ "blue": (0, 0, 255),
196
+ "green": (0,255,0),
197
+ }
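A note on why a second colour table is added (my reading; the diff itself does not say): OpenCV's drawing calls interpret colour tuples as BGR, while the Gradio pipeline in app.py works with RGB arrays, so each named colour is kept in both orderings. Reversing the channel order converts between the two:

```python
def bgr_to_rgb(colour):
    """Reverse a 3-channel colour tuple, e.g. BGR -> RGB (and vice versa)."""
    b, g, r = colour
    return (r, g, b)

# color_pose["purple"] is (255, 0, 100) in BGR; reversing gives (100, 0, 255),
# which matches color_pose_rgb["purple"] defined above.
print(bgr_to_rgb((255, 0, 100)))  # -> (100, 0, 255)
```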
198
+
199
+
200
  color_pose_normalized = {
201
  "purple": (100/255., 0/255., 255/255.),
202
  "light_pink": (255/255., 0/255., 80/255.),