federico committed on
Commit
eea32c6
β€’
1 Parent(s): 9d11120

requirements.txt and packages.txt added

Browse files
Files changed (3) hide show
  1. gradio_demo.py +13 -20
  2. packages.txt +1 -0
  3. requirements.txt +3 -0
gradio_demo.py CHANGED
@@ -8,7 +8,6 @@ import cv2
8
  import numpy as np
9
  import tensorflow as tf
10
 
11
-
12
  from ai.detection import detect
13
  from laeo_per_frame.interaction_per_frame_uncertainty import LAEO_computation
14
  from utils.hpe import hpe, project_ypr_in2d
@@ -16,7 +15,6 @@ from utils.img_util import resize_preserving_ar, draw_detections, percentage_to_
16
  visualize_vector
17
 
18
 
19
-
20
  def load_image(camera, ):
21
  # Capture the video frame by frame
22
  try:
@@ -26,12 +24,10 @@ def load_image(camera, ):
26
  logging.Logger('Error reading frame')
27
  return False, None
28
 
 
29
  def demo_play(img, laeo=True, rgb=False):
30
  # webcam in use
31
 
32
-
33
-
34
-
35
  # gpus = tf.config.list_physical_devices('GPU')
36
 
37
  # img = np.array(frame)
@@ -51,7 +47,6 @@ def demo_play(img, laeo=True, rgb=False):
51
  det, kpt = percentage_to_pixel(img.shape, detections['detection_boxes'], detections['detection_scores'],
52
  detections['detection_keypoints'], detections['detection_keypoint_scores'])
53
 
54
-
55
  # center_xy, yaw, pitch, roll = head_pose_estimation(kpt, 'centernet', gaze_model=gaze_model)
56
 
57
  # _________ extract hpe and print to img
@@ -82,11 +77,10 @@ def demo_play(img, laeo=True, rgb=False):
82
  binarize_uncertainty = False
83
  if laeo:
84
  interaction_matrix = LAEO_computation(people_list, clipping_value=clip_uncertainty,
85
- clip=binarize_uncertainty)
86
  else:
87
  interaction_matrix = np.zeros((len(people_list), len(people_list)))
88
  # coloured arrow print per person
89
- # TODO coloured arrow print per person
90
 
91
  for index, person in enumerate(people_list):
92
  green = round((max(interaction_matrix[index, :])) * 255)
@@ -99,19 +93,18 @@ def demo_play(img, laeo=True, rgb=False):
99
  return img
100
 
101
 
102
-
103
  demo = gr.Interface(
104
- fn= demo_play,
105
- inputs = [gr.Image(source="webcam", streaming=True),
106
- gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
107
- gr.Checkbox(value=True, label="rgb", info="Display output on W/B image"),
108
- ],
109
- outputs="image",
110
- live=True
111
- )
112
-
113
- if __name__ == '__main__':
114
- if not os.path.exists("data"):
115
  gdown.download_folder("https://drive.google.com/drive/folders/1nQ1Cb_tBEhWxy183t-mIcVH7AhAfa6NO?usp=drive_link",
116
  use_cookies=False)
117
  gaze_model_path = 'data/head_pose_estimation'
 
8
  import numpy as np
9
  import tensorflow as tf
10
 
 
11
  from ai.detection import detect
12
  from laeo_per_frame.interaction_per_frame_uncertainty import LAEO_computation
13
  from utils.hpe import hpe, project_ypr_in2d
 
15
  visualize_vector
16
 
17
 
 
18
  def load_image(camera, ):
19
  # Capture the video frame by frame
20
  try:
 
24
  logging.Logger('Error reading frame')
25
  return False, None
26
 
27
+
28
  def demo_play(img, laeo=True, rgb=False):
29
  # webcam in use
30
 
 
 
 
31
  # gpus = tf.config.list_physical_devices('GPU')
32
 
33
  # img = np.array(frame)
 
47
  det, kpt = percentage_to_pixel(img.shape, detections['detection_boxes'], detections['detection_scores'],
48
  detections['detection_keypoints'], detections['detection_keypoint_scores'])
49
 
 
50
  # center_xy, yaw, pitch, roll = head_pose_estimation(kpt, 'centernet', gaze_model=gaze_model)
51
 
52
  # _________ extract hpe and print to img
 
77
  binarize_uncertainty = False
78
  if laeo:
79
  interaction_matrix = LAEO_computation(people_list, clipping_value=clip_uncertainty,
80
+ clip=binarize_uncertainty)
81
  else:
82
  interaction_matrix = np.zeros((len(people_list), len(people_list)))
83
  # coloured arrow print per person
 
84
 
85
  for index, person in enumerate(people_list):
86
  green = round((max(interaction_matrix[index, :])) * 255)
 
93
  return img
94
 
95
 
 
96
  demo = gr.Interface(
97
+ fn=demo_play,
98
+ inputs=[gr.Image(source="webcam", streaming=True),
99
+ gr.Checkbox(value=True, label="LAEO", info="Compute and display LAEO"),
100
+ gr.Checkbox(value=True, label="rgb", info="Display output on W/B image"),
101
+ ],
102
+ outputs="image",
103
+ live=True
104
+ )
105
+
106
+ if __name__=='__main__':
107
+ if not os.path.exists("data"):
108
  gdown.download_folder("https://drive.google.com/drive/folders/1nQ1Cb_tBEhWxy183t-mIcVH7AhAfa6NO?usp=drive_link",
109
  use_cookies=False)
110
  gaze_model_path = 'data/head_pose_estimation'
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ python3-opencv
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ numpy
2
+ opencv-python
3
+ tensorflow