federico committed
Commit 20f7ff3 • 1 Parent(s): ccd2946

refactored the drawing code for small images, and added one useful flag: print only face keypoints

Files changed (2)
  1. app.py +12 -10
  2. utils/img_util.py +37 -14
app.py CHANGED
@@ -11,7 +11,7 @@ import tensorflow as tf
 from ai.detection import detect
 from laeo_per_frame.interaction_per_frame_uncertainty import LAEO_computation
 from utils.hpe import hpe, project_ypr_in2d
-from utils.img_util import resize_preserving_ar, draw_detections, percentage_to_pixel, draw_key_points_pose, \
+from utils.img_util import resize_preserving_ar, percentage_to_pixel, draw_key_points_pose, \
     visualize_vector
 
 
@@ -25,7 +25,7 @@ def load_image(camera, ):
         return False, None
 
 
-def demo_play(img, laeo=True, rgb=False, show_keypoints=True):
+def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False):
     # webcam in use
 
     # gpus = tf.config.list_physical_devices('GPU')
@@ -41,7 +41,7 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True):
     detections, elapsed_time = detect(model, img_resized, min_score_thresh,
                                       new_old_shape)  # detection classes boxes scores
     # probably to draw on resized
-    img_with_detections = draw_detections(img_resized, detections, max_boxes_to_draw, None, None, None)
+    # img_with_detections = draw_detections(img_resized, detections, max_boxes_to_draw, None, None, None)
     # cv2.imshow("aa", img_with_detections)
 
     det, kpt = percentage_to_pixel(img.shape, detections['detection_boxes'], detections['detection_scores'],
@@ -71,7 +71,7 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True):
 
     if show_keypoints:
         for i in range(len(det)):
-            img = draw_key_points_pose(img, kpt[i])
+            img = draw_key_points_pose(img, kpt[i], only_face=only_face)
 
     # call LAEO
     clip_uncertainty = 0.5
@@ -87,14 +87,13 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True):
             green = round((max(interaction_matrix[index, :])) * 255)
             colour = (0, green, 0)
             if green < 40:
-                colour = (0, 0, 255)
+                colour = (255, 0, 0)
             vector = project_ypr_in2d(person['yaw'], person['pitch'], person['roll'])
             img = visualize_vector(img, person['center_xy'], vector, title="",
                                    color=colour)
     return img
 
 
-
 if __name__=='__main__':
     if not os.path.exists("LAEO_demo_data"):
         gdown.download_folder("https://drive.google.com/drive/folders/1nQ1Cb_tBEhWxy183t-mIcVH7AhAfa6NO?usp=drive_link",
@@ -120,19 +119,20 @@ if __name__=='__main__':
     print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
 
     function_to_call = demo_play
-    outputs = "image"
+    # outputs = gr.Image(shape=(512, 512))
     live = True
     title = "Head Pose Estimation and LAEO"
 
-
     demo_webcam = gr.Interface(
         fn=function_to_call,
         inputs=[gr.Image(source="webcam"),  # with no streaming -> acquire images
                 gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
                 gr.Checkbox(value=False, label="rgb", info="Display output on W/B image"),
                 gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image"),
+                gr.Checkbox(value=True, label="show_keypoints_only_face",
+                            info="Display only face keypoints on image"),
                 ],
-        outputs=outputs,
+        outputs="image",
         live=live,
         title=title,
         description="This is a demo developed by Federico Figari T. at MaLGa Lab, University of Genoa, Italy. You can choose to have only the Head Pose Estimation or also the LAEO computation (more than 1 person should be in the image). You need to take a picture and the algorithm will calculate the Head Pose and will be showed as an arrow on your face. LAEO, instead is showed colouring the arrow in green.",
@@ -144,8 +144,10 @@ if __name__=='__main__':
                 gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
                 gr.Checkbox(value=False, label="rgb", info="Display output on W/B image"),
                 gr.Checkbox(value=True, label="show_keypoints", info="Display keypoints on image"),
+                gr.Checkbox(value=True, label="show_keypoints_only_face",
+                            info="Display only face keypoints on image"),
                 ],
-        outputs=outputs,
+        outputs=gr.Image(height=238, width=585),  # shape=gr.Image(source="upload", ).shape
         live=live,
         title=title,
         description="This is a demo developed by Federico Figari T. at MaLGa Lab, University of Genoa, Italy. You can choose to have only the Head Pose Estimation or also the LAEO computation (more than 1 person should be in the image). You need to upload an image and the algorithm will calculate the Head Pose and will be showed as an arrow on your face. LAEO, instead is showed colouring the arrow in green.",
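
Note on the app.py changes: the new show_keypoints_only_face checkbox is appended to the inputs list, and since Gradio passes inputs to fn positionally, it lands in demo_play's new only_face parameter, which is then forwarded to draw_key_points_pose. The arrow colour for non-LAEO faces also changes from (0, 0, 255) to (255, 0, 0); both encode red, in OpenCV's BGR and in RGB channel order respectively, so this plausibly fixes an arrow that previously rendered blue on the RGB frames Gradio displays. A minimal, self-contained sketch of the input wiring follows; demo_play_stub is a hypothetical stand-in for the real demo_play, and gr.Image(source="webcam") follows the Gradio 3.x API this repo already uses:

import gradio as gr

def demo_play_stub(img, laeo=True, rgb=False, show_keypoints=True, only_face=False):
    # Hypothetical stand-in for demo_play: just echoes the frame back.
    return img

demo = gr.Interface(
    fn=demo_play_stub,
    inputs=[gr.Image(source="webcam"),
            gr.Checkbox(value=True, label="LAEO"),
            gr.Checkbox(value=False, label="rgb"),
            gr.Checkbox(value=True, label="show_keypoints"),
            # The fifth input feeds the new only_face parameter, by position.
            gr.Checkbox(value=True, label="show_keypoints_only_face")],
    outputs="image",
    live=True,
)
# demo.launch()
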
utils/img_util.py CHANGED
@@ -265,15 +265,23 @@ def visualize_vector(image, center, unit_vector, title="", color=(0, 0, 255)):
     Returns:
         :result (numpy.ndarray): The image with the vectors drawn
     """
+
+    thickness = image.shape[0] // 100
+    if thickness == 0 or thickness == 1:
+        thickness = 1
+    # if image.shape[0] > 150 or image.shape[1] > 150:
+    #
+    # else:
+    #     thickness = 1
     unit_vector_draw = [unit_vector[0] * image.shape[0]*0.15, unit_vector[1] * image.shape[0]*0.15]
     point = [center[0] + unit_vector_draw[0], center[1] + unit_vector_draw[1]]
 
-    result = cv2.arrowedLine(image, (int(center[0]), int(center[1])), (int(point[0]), int(point[1])), color, thickness=4, tipLength=0.3)
+    result = cv2.arrowedLine(image, (int(center[0]), int(center[1])), (int(point[0]), int(point[1])), color, thickness=thickness, tipLength=0.3)
 
     return result
 
 
-def draw_key_points_pose(image, kpt, openpose=False):
+def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
     """
     Draw the key points and the lines connecting them; it expects the output of CenterNet (not OpenPose format)
 
@@ -286,6 +294,13 @@ def draw_key_points_pose(image, kpt, openpose=False):
         :img (numpy.ndarray): The image with the drawings of lines and key points
     """
 
+    thickness = max(image.shape[0] // 100, image.shape[1] // 100)
+    if thickness == 0:
+        thickness = 1
+    if thickness == 1:
+        thickness = -1
+
+
     parts = body_parts_openpose if openpose else body_parts
     kpt_score = None
     threshold = 0.4
@@ -307,21 +322,29 @@ def draw_key_points_pose(image, kpt, openpose=False):
             color = color_pose["light_orange"]  # LEar
         if j == face_pts[4]:
             color = color_pose["yellow"]  # REar
-        if openpose:
-            cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, 2)
-        else:
-            cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, 2)
+        if only_face and j in face_pts:
+            if openpose:
+                cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
+            else:
+                cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=thickness)
+        elif not only_face:
+            if openpose:
+                cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
+            else:
+                cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=thickness)
+
         # cv2.putText(img, pose_id_part[i], (int(kpts[j][i, 1] * img.shape[1]), int(kpts[j][i, 0] * img.shape[0])), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1, cv2.LINE_AA)
 
-    for part in parts:
-        if int(kpt[part[0]][1]) != 0 and int(kpt[part[0]][0]) != 0 and int(kpt[part[1]][1]) != 0 and int(
-                kpt[part[1]][0]) != 0:
+    if not only_face:
+        for part in parts:
+            if int(kpt[part[0]][1]) != 0 and int(kpt[part[0]][0]) != 0 and int(kpt[part[1]][1]) != 0 and int(
+                    kpt[part[1]][0]) != 0:
 
-            if openpose:
-                cv2.line(overlay, (int(kpt[part[0]][0]), int(kpt[part[0]][1])), (int(kpt[part[1]][0]), int(kpt[part[1]][1])), (255, 255, 255), 2)
-            else:
-                cv2.line(overlay, (int(kpt[part[0]][1]), int(kpt[part[0]][0])),
-                         (int(kpt[part[1]][1]), int(kpt[part[1]][0])), (255, 255, 255), 2)
+                if openpose:
+                    cv2.line(overlay, (int(kpt[part[0]][0]), int(kpt[part[0]][1])), (int(kpt[part[1]][0]), int(kpt[part[1]][1])), (255, 255, 255), 2)
+                else:
+                    cv2.line(overlay, (int(kpt[part[0]][1]), int(kpt[part[0]][0])),
+                             (int(kpt[part[1]][1]), int(kpt[part[1]][0])), (255, 255, 255), 2)
 
     alpha = 0.4
     image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)
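
Note on the img_util.py changes: both drawing helpers now derive stroke thickness from image size instead of hard-coding it (thickness=4 for the arrow, 2 for the circles), which is what keeps the overlays legible on small images. In draw_key_points_pose a computed thickness of 1 is remapped to -1, i.e. cv2.FILLED, so tiny frames get solid dots rather than hairline circle outlines; the "thickness == 1" test in visualize_vector is redundant but harmless. The two duplicated circle-drawing branches are also equivalent to the single guard "if not only_face or j in face_pts:". A condensed sketch of the heuristic, with adaptive_thickness as our name for it rather than the repo's:

import cv2  # used only for the cv2.FILLED constant

def adaptive_thickness(height: int, width: int, fill_small: bool = False) -> int:
    """Roughly one pixel of stroke per 100 px of the larger image side."""
    t = max(height, width) // 100
    if t <= 1:
        # cv2.FILLED == -1: small keypoints become solid dots, which
        # read better than a 1 px outline on tiny frames.
        return cv2.FILLED if fill_small else 1
    return t

assert adaptive_thickness(120, 90, fill_small=True) == cv2.FILLED  # tiny frame: filled
assert adaptive_thickness(480, 640) == 6                           # 640 // 100
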