federico committed on
Commit e94d136 • 1 Parent(s): 186a6e0

In the image panel, added options for customizing the dimensions of the plotted info

Files changed (2)
  1. app.py +15 -9
  2. utils/img_util.py +38 -23
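
For orientation, the change threads three new drawing-size controls (key-point thickness, connecting-line thickness, and axis/cone size) from the Gradio panel in app.py down to the drawing helpers in utils/img_util.py. A minimal sketch of that wiring pattern follows; the render function is a hypothetical stand-in for demo_play, while the slider ranges and names mirror the diff below.

import gradio as gr

def render(img, thickness_points, thickness_lines, size_plots):
    # Stand-in for demo_play: the real app forwards these values to
    # draw_key_points_pose / visualize_vector / draw_axis_3d.
    return img

with gr.Blocks() as demo:
    input_img = gr.Image(label="Input Image")
    thickness_points = gr.Slider(1, 100, value=1, step=1, label="key point dimension")
    thickness_lines = gr.Slider(0, 20, value=2, step=1, label="arrows thickness")
    size_elements = gr.Slider(10, 100, value=50, step=1, label="size of displayed axis")
    outputs = gr.Image(label="Output Image")
    input_img.change(render,
                     inputs=[input_img, thickness_points, thickness_lines, size_elements],
                     outputs=outputs)

# demo.launch()  # launch locally to try the sliders
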
app.py CHANGED
@@ -78,7 +78,7 @@ def load_image(camera, ):
        return False, None


- def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, Head_Pose_representation='Vector', detection_threshold=0.45):
+ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, Head_Pose_representation='Vector', detection_threshold=0.45, thickness_points:int=None, thickness_lines:int=2, size_plots:int=50):
    # webcam in use

    # gpus = tf.config.list_physical_devices('GPU')
@@ -133,7 +133,7 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, H
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for i in range(len(det)):
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-         img = draw_key_points_pose(img, kpt[i], only_face=only_face)
+         img = draw_key_points_pose(img, kpt[i], only_face=only_face, thickness_points=thickness_points, thickness_lines=thickness_lines)

        # call LAEO
        clip_uncertainty = 0.5
@@ -149,10 +149,10 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, H
    def visualise_hpe(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1, openpose=False, title="", color=(255, 0, 0)):
        if str(Head_Pose_representation).lower() == 'vector':
            vector = project_ypr_in2d(person['yaw'], person['pitch'], person['roll'])
-             image = visualize_vector(image, [tdx, tdy], vector, title=title, color=color)
+             image = visualize_vector(image, [tdx, tdy], vector, title=title, color=color, thickness_lines=thickness_lines)
            return image
        elif str(Head_Pose_representation).lower() == 'axis':
-             image = draw_axis_3d(yaw, pitch, roll, image=image, tdx=tdx, tdy=tdy, size=size)
+             image = draw_axis_3d(yaw, pitch, roll, image=image, tdx=tdx, tdy=tdy, size=size, thickness_lines=thickness_lines)
            return image
        elif str(Head_Pose_representation).lower() == 'cone':
            _, image = draw_cones(yaw, pitch, roll, unc_yaw=yaw_uncertainty, unc_pitch=pitch_uncertainty, unc_roll=roll_uncertainty, image=image, tdx=tdx, tdy=tdy, size=size)
@@ -166,7 +166,7 @@ def demo_play(img, laeo=True, rgb=False, show_keypoints=True, only_face=False, H
        colour = (0, green, 0)
        if green < 40:
            colour = (255, 0, 0)
-         img = visualise_hpe(person['yaw'], person['pitch'], person['roll'], image=img, tdx=person['center_xy'][0], tdy=person['center_xy'][1], size=50, yaw_uncertainty=person['yaw_u'], pitch_uncertainty=person['pitch_u'], roll_uncertainty=person['roll_u'], title="", color=colour)
+         img = visualise_hpe(person['yaw'], person['pitch'], person['roll'], image=img, tdx=person['center_xy'][0], tdy=person['center_xy'][1], size=size_plots, yaw_uncertainty=person['yaw_u'], pitch_uncertainty=person['pitch_u'], roll_uncertainty=person['roll_u'], title="", color=colour)
        # vector = project_ypr_in2d(person['yaw'], person['pitch'], person['roll'])
        # img = visualize_vector(img, person['center_xy'], vector, title="",
        #                        color=colour)
@@ -247,21 +247,27 @@ if __name__=='__main__':
                                     info="Which representation to show", value="Vector")
            detection_threshold = gr.Slider(0.01, 1, value=0.45, step=0.01, interactive=True,
                                     label="detection_threshold", info="Choose in [0, 1]")
-
+             thickness_points = gr.Slider(1,100, value=1, step=1, interactive=True,
+                                     label='key point dimension', info='key point dimension in result')
+             thickness_lines = gr.Slider(0, 20, value=2, step=1, interactive=True,
+                                     label='arrows thickness', info='lines between keepoints dimension')
+             size_elements = gr.Slider(10, 100, value=50, step=1, interactive=True,
+                                     label='size of displayed axis', info='size of displayed axis and cones')
        with gr.Column():
            outputs = gr.Image(height=238, width=585, label="Output Image")
            uncert = gr.Label(label="Uncertainty", value="0.0")
            examples_text =gr.Markdown("## Image Examples")
            examples = gr.Examples([["LAEO_demo_data/examples/1.jpg"], ["LAEO_demo_data/examples/300wlp_0.png"],
                                    ["LAEO_demo_data/examples/AWFL_2.jpg"],
-                                     ["LAEO_demo_data/examples/BIWI_3.png"]], inputs=input_img,) # add all other flags
+                                     ["LAEO_demo_data/examples/BIWI_3.png"]], inputs=[input_img, True, False, True, True, "Vector", 0.45]) # add all other flags

    input_img.change(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
-                                                Head_Pose_representation, detection_threshold],
+                                                Head_Pose_representation, detection_threshold, thickness_points, thickness_lines, size_elements],
                     outputs=[outputs, uncert])
    button.click(function_to_call, inputs=[input_img, laeo, rgb, show_keypoints, show_keypoints_only_face,
-                                            Head_Pose_representation, detection_threshold],
+                                            Head_Pose_representation, detection_threshold, thickness_points, thickness_lines, size_elements],
                 outputs=[outputs, uncert])
+     # TODO create a function only to redraw last result if changed some sliders etc

    gr.Markdown(WEBSITE_citation)

utils/img_util.py CHANGED
@@ -253,7 +253,7 @@ def draw_axis(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50):
    return list_projection_xy


- def visualize_vector(image, center, unit_vector, title="", color=(0, 0, 255)):
+ def visualize_vector(image, center, unit_vector, title="", color=(0, 0, 255), **kwargs):
    """
    Draw the projected vector on the image plane and return the image

@@ -269,10 +269,13 @@ def visualize_vector(image, center, unit_vector, title="", color=(0, 0, 255)):
    Returns:
        :result (numpy.ndarray): The image with the vectors drawn
    """
+     if 'thickness_lines' in kwargs and kwargs['thickness_lines']!=None:
+         thickness = kwargs['thickness_lines']
+     else:
+         thickness = image.shape[0] // 100
+         if thickness==0 or thickness==1:
+             thickness = 1

-     thickness = image.shape[0] // 100
-     if thickness == 0 or thickness == 1:
-         thickness = 1
    # if image.shape[0] > 150 or image.shape[1] > 150:
    #
    # else:
@@ -280,12 +283,12 @@ def visualize_vector(image, center, unit_vector, title="", color=(0, 0, 255)):
    unit_vector_draw = [unit_vector[0] * image.shape[0]*0.15, unit_vector[1] * image.shape[0]*0.15]
    point = [center[0] + unit_vector_draw[0], center[1] + unit_vector_draw[1]]

-     result = cv2.arrowedLine(image, (int(center[0]), int(center[1])), (int(point[0]), int(point[1])), color, thickness=thickness, tipLength=0.3)
+     result = cv2.arrowedLine(image, (int(center[0]), int(center[1])), (int(point[0]), int(point[1])), color, thickness=int(thickness), tipLength=0.3)

    return result


- def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
+ def draw_key_points_pose(image, kpt, openpose=False, only_face=False, **kwargs):
    """
    Draw the key points and the lines connecting them; it expects the output of CenterNet (not OpenPose format)

@@ -298,13 +301,19 @@ def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
        :img (numpy.ndarray): The image with the drawings of lines and key points
    """

-
-
-     thickness = max (image.shape[0] // 100, image.shape[1] // 100)
-     if thickness == 0:
-         thickness = 1
-     if thickness == 1:
-         thickness = -1
+     if 'thickness_points' in kwargs and kwargs['thickness_points'] != None:
+         thickness = kwargs['thickness_points']
+     else:
+         thickness = max (image.shape[0] // 100, image.shape[1] // 100)
+         if thickness == 0:
+             thickness = 1
+         if thickness == 1:
+             thickness = -1
+
+     if 'thickness_lines' in kwargs and kwargs['thickness_lines'] !=None:
+         thickness_lines = kwargs['thickness_lines']
+     else:
+         thickness_lines = 2


    parts = body_parts_openpose if openpose else body_parts
@@ -330,14 +339,14 @@ def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
            color = color_pose_rgb["yellow"]# REar
        if only_face and j in face_pts:
            if openpose:
-                 cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
+                 cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=int(thickness))
            else:
-                 cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=thickness)
+                 cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=int(thickness))
        elif not only_face:
            if openpose:
-                 cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=thickness)
+                 cv2.circle(image, (int(kpt[j][0]), int(kpt[j][1])), 1, color, thickness=int(thickness))
            else:
-                 cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=thickness)
+                 cv2.circle(image, (int(kpt[j][1]), int(kpt[j][0])), 1, color, thickness=int(thickness))

        # cv2.putText(img, pose_id_part[i], (int(kpts[j][i, 1] * img.shape[1]), int(kpts[j][i, 0] * img.shape[0])), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1, cv2.LINE_AA)

@@ -347,10 +356,10 @@ def draw_key_points_pose(image, kpt, openpose=False, only_face=False):
                kpt[part[1]][0]) != 0:

            if openpose:
-                 cv2.line(overlay, (int(kpt[part[0]][0]), int(kpt[part[0]][1])), (int(kpt[part[1]][0]), int(kpt[part[1]][1])), (255, 255, 255), 2)
+                 cv2.line(overlay, (int(kpt[part[0]][0]), int(kpt[part[0]][1])), (int(kpt[part[1]][0]), int(kpt[part[1]][1])), (255, 255, 255), int(thickness_lines))
            else:
                cv2.line(overlay, (int(kpt[part[0]][1]), int(kpt[part[0]][0])),
-                          (int(kpt[part[1]][1]), int(kpt[part[1]][0])), (255, 255, 255), 2)
+                          (int(kpt[part[1]][1]), int(kpt[part[1]][0])), (255, 255, 255), int(thickness_lines))

        alpha = 0.4
        image = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)
@@ -658,7 +667,7 @@ def draw_cones(yaw, pitch, roll, unc_yaw, unc_pitch, unc_roll, image=None, tdx=N
    list_projection_xy = [sin(yaw), -cos(yaw) * sin(pitch)]
    return list_projection_xy, image

- def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1, openpose=False):
+ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_uncertainty=-1, pitch_uncertainty=-1, roll_uncertainty=-1, openpose=False, **kwargs):
    """
    Draw yaw pitch and roll axis on the image if passed as input and returns the vector containing the projection of the vector on the image plane
    Args:
@@ -676,6 +685,12 @@ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_
    Returns:
        :list_projection_xy (list): list containing the unit vector [x, y, z]
    """
+
+     if 'thickness_lines' in kwargs and kwargs['thickness_lines'] != None:
+         thickness = kwargs['thickness_lines']
+     else:
+         thickness= 3
+
    if openpose:
        temp= tdy
        tdy = tdx
@@ -714,7 +729,7 @@ def draw_axis_3d(yaw, pitch, roll, image=None, tdx=None, tdy=None, size=50, yaw_


    if image is not None:
-         cv2.line(image, (int(tdx), int(tdy)), (int(x1), int(y1)), red_rgb, 3)
-         cv2.line(image, (int(tdx), int(tdy)), (int(x2), int(y2)), green_rgb, 3)
-         cv2.line(image, (int(tdx), int(tdy)), (int(x3), int(y3)), blue_rgb, 3)
+         cv2.line(image, (int(tdx), int(tdy)), (int(x1), int(y1)), red_rgb, int(thickness))
+         cv2.line(image, (int(tdx), int(tdy)), (int(x2), int(y2)), green_rgb, int(thickness))
+         cv2.line(image, (int(tdx), int(tdy)), (int(x3), int(y3)), blue_rgb, int(thickness))
    return image
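
Taken together, the img_util helpers now read the new sizing options from **kwargs and fall back to the previous image-size heuristics (or the old hard-coded values) when the options are omitted. A minimal usage sketch, assuming the repository's utils package is importable and that keypoints follow the 17-point CenterNet layout in (y, x) order; the image and keypoints here are dummy data for illustration only.

import numpy as np
from utils.img_util import draw_key_points_pose, visualize_vector, draw_axis_3d

img = np.zeros((480, 640, 3), dtype=np.uint8)      # dummy frame
kpt = np.random.randint(50, 400, size=(17, 3))     # dummy keypoints, (y, x, score) per point

# New in this commit: explicit point and line sizes; omit the kwargs to keep the old behaviour.
img = draw_key_points_pose(img, kpt, only_face=False,
                           thickness_points=4, thickness_lines=2)

# visualize_vector and draw_axis_3d accept the line thickness the same way.
img = visualize_vector(img, center=[320, 240], unit_vector=[0.5, -0.5],
                       color=(0, 255, 0), thickness_lines=2)
img = draw_axis_3d(yaw=0.3, pitch=-0.1, roll=0.0, image=img,
                   tdx=320, tdy=240, size=50, thickness_lines=3)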