ParisNeo committed on
Commit 3389e85
1 Parent(s): 79081e0

enhanced UI

Files changed (2)
  1. .gitignore +0 -0
  2. app.py +103 -19
.gitignore ADDED
File without changes
app.py CHANGED
@@ -29,7 +29,7 @@ if not faces_path.exists():
 
 
  # Build face analyzer while specifying that we want to extract just a single face
- fa = FaceAnalyzer(max_nb_faces=1)
+ fa = FaceAnalyzer(max_nb_faces=3)
 
 
  box_colors=[
@@ -51,13 +51,19 @@ class UI():
  self.is_recording=False
  self.face_name=None
  self.nb_images = 20
+ self.nb_faces = 3
  # Important to set. If higher than this distance, the face is considered unknown
  self.threshold = 4e-1
  self.faces_db_preprocessed_path = Path(__file__).parent/"faces_db_preprocessed"
  self.current_name = None
  self.current_face_files = []
  self.draw_landmarks = True
+ self.webcam_process = False
  self.upgrade_faces()
+ try:
+ DeepFace.represent(np.zeros((100,100,3)), enforce_detection=False)
+ except Exception as ex:
+ pass
 
  with gr.Blocks() as demo:
  gr.Markdown("## FaceAnalyzer face recognition test")
@@ -67,9 +73,12 @@ class UI():
  with gr.Row():
  with gr.Column():
  self.rt_webcam = gr.Image(label="Input Image", source="webcam", streaming=True)
+ self.start_streaming = gr.Button("Start webcam")
+ self.start_streaming.click(self.start_webcam, [], [])
+
  with gr.Column():
  self.rt_rec_img = gr.Image(label="Output Image")
- self.rt_webcam.change(self.recognize, inputs=self.rt_webcam, outputs=self.rt_rec_img, show_progress=False)
+ self.rt_webcam.change(self.process_webcam, inputs=self.rt_webcam, outputs=self.rt_rec_img, show_progress=False)
  with gr.TabItem('Image Recognize'):
  with gr.Blocks():
  with gr.Row():
@@ -77,16 +86,16 @@ class UI():
  self.rt_inp_img = gr.Image(label="Input Image")
  with gr.Column():
  self.rt_rec_img = gr.Image(label="Output Image")
- self.rt_inp_img.change(self.recognize2, inputs=self.rt_inp_img, outputs=self.rt_rec_img, show_progress=True)
+ self.rt_inp_img.change(self.process_image, inputs=self.rt_inp_img, outputs=self.rt_rec_img, show_progress=True)
  with gr.TabItem('Add face from webcam'):
  with gr.Blocks():
  with gr.Row():
  with gr.Column():
  self.img = gr.Image(label="Input Image", source="webcam", streaming=True)
  self.txtFace_name = gr.Textbox(label="face_name")
- self.txtFace_name.change(self.set_face_name, inputs=self.txtFace_name, show_progress=False)
  self.status = gr.Label(label="Status")
- self.img.change(self.record, inputs=self.img, outputs=self.status, show_progress=False)
+ self.txtFace_name.change(self.set_face_name, inputs=self.txtFace_name, outputs=self.status, show_progress=False)
+ self.img.change(self.record_from_webcam, inputs=self.img, outputs=self.status, show_progress=False)
  with gr.Column():
  self.btn_start = gr.Button("Start Recording face")
  self.btn_start.click(self.start_stop)
@@ -97,15 +106,16 @@ class UI():
  self.gallery = gr.Gallery(
  label="Uploaded Images", show_label=False, elem_id="gallery"
  ).style(grid=[2], height="auto")
+ self.btn_clear = gr.Button("Clear")
+
  self.add_file = gr.Files(label="Files",file_types=["image"])
  self.add_file.change(self.add_files, self.add_file, self.gallery)
  self.txtFace_name2 = gr.Textbox(label="face_name")
- self.txtFace_name2.change(self.set_face_name, inputs=self.txtFace_name2, show_progress=False)
- self.status = gr.Label(label="Status")
- self.img.change(self.record, inputs=self.img, outputs=self.status, show_progress=False)
- with gr.Column():
  self.btn_start = gr.Button("Build face embeddings")
- self.btn_start.click(self.start_stop)
+ self.status = gr.Label(label="Status")
+ self.txtFace_name2.change(self.set_face_name, inputs=self.txtFace_name2, outputs=self.status, show_progress=False)
+ self.btn_start.click(self.record_from_files, inputs=self.gallery, outputs=self.status, show_progress=False)
+ self.btn_clear.click(self.clear_galery,[],[])
  with gr.TabItem('Known Faces List'):
  with gr.Blocks():
  with gr.Row():
@@ -131,8 +141,19 @@ class UI():
  self.sld_nb_images.change(self.set_nb_images, self.sld_nb_images)
  self.cb_draw_landmarks = gr.Checkbox(label="Draw landmarks", value=True)
  self.cb_draw_landmarks.change(self.set_draw_landmarks, self.cb_draw_landmarks)
+ self.sld_nb_faces = gr.Slider(1,50,3,label="Maximum number of faces")
+ self.sld_nb_faces.change(self.set_nb_faces, self.sld_nb_faces)
+
 
  demo.queue().launch()
+
+ def clear_galery(self):
+ self.gallery.update(value=[])
+
+ def start_webcam(self):
+ self.webcam_process=not self.webcam_process
+
+
  def add_files(self, files):
  for file in files:
  img = cv2.cvtColor(cv2.imread(file.name), cv2.COLOR_BGR2RGB)
@@ -148,6 +169,10 @@ class UI():
  def set_draw_landmarks(self, value):
  self.draw_landmarks=value
 
+ def set_nb_faces(self,nb_faces):
+ self.nb_faces = nb_faces
+ fa.nb_faces = nb_faces
+
  def cosine_distance(self, u, v):
  """
  Computes the cosine distance between two vectors.
@@ -174,14 +199,17 @@ class UI():
  finger_print = pickle.load(f)
  self.known_faces.append(finger_print)
  self.known_faces_names.append(file.stem)
+
  if hasattr(self, "faces_list"):
  self.faces_list.update([[n] for n in self.known_faces_names])
 
  def set_face_name(self, face_name):
  self.face_name=face_name
+ return f"face name set to {self.face_name}"
 
  def start_stop(self):
  self.is_recording=True
+
 
  def process_db(self, images):
  for i,image in enumerate(images):
@@ -199,7 +227,7 @@ class UI():
  # Get a realigned version of the landmarksx
  vertices = face.get_face_outer_vertices()
  image = face.getFaceBox(image, vertices,margins=(30,30,30,30))
- embedding = DeepFace.represent(image)[0]["embedding"]
+ embedding = DeepFace.represent(image, enforce_detection=False)[0]["embedding"]
  embeddings_cloud.append(embedding)
  cv2.imwrite(str(self.faces_db_preprocessed_path/f"im_{i}.png"), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
  except Exception as ex:
@@ -214,11 +242,12 @@ class UI():
  pickle.dump({"mean":embeddings_cloud_mean, "inv_cov":embeddings_cloud_inv_cov},f)
  print(f"Saved {name}")
 
- def record(self, image):
+ def record_from_webcam(self, image):
  if self.face_name is None:
  self.embeddings_cloud=[]
  self.is_recording=False
  return "Please input a face name"
+
  if self.is_recording and image is not None:
  if self.i < self.nb_images:
  # Process the image to extract faces and draw the masks on the face in the image
@@ -228,7 +257,7 @@ class UI():
  face = fa.faces[0]
  vertices = face.get_face_outer_vertices()
  image = face.getFaceBox(image, vertices, margins=(40,40,40,40))
- embedding = DeepFace.represent(image)[0]["embedding"]
+ embedding = DeepFace.represent(image, enforce_detection=False)[0]["embedding"]
  self.embeddings_cloud.append(embedding)
  self.i+=1
  cv2.imshow('Face Mesh', cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
@@ -255,8 +284,60 @@ class UI():
  return f"Saved {name} embeddings"
  else:
  return "Waiting"
+
+ def record_from_files(self, images, progress=gr.Progress()):
+ if self.face_name is None:
+ self.embeddings_cloud=[]
+ self.is_recording=False
+ return "Please input a face name"
+
+ if images is not None:
+ progress(0, desc="Starting...")
+ for entry in progress.tqdm(images):
+ image = cv2.cvtColor(cv2.imread(entry["name"]), cv2.COLOR_BGR2RGB)
+ if image is None:
+ return None
+ # Process the image to extract faces and draw the masks on the face in the image
+ if image.shape[1]>640:
+ image = cv2.resize(image,(int(640*(image.shape[1]/image.shape[0])),640))
+ fa.image_size=(image.shape[1],image.shape[0],3)
+ # Process the image to extract faces and draw the masks on the face in the image
+ fa.process(image)
+ if fa.nb_faces>0:
+ try:
+ face = fa.faces[0]
+ vertices = face.get_face_outer_vertices()
+ image = face.getFaceBox(image, vertices, margins=(40,40,40,40))
+ embedding = DeepFace.represent(image, enforce_detection=False)[0]["embedding"]
+ self.embeddings_cloud.append(embedding)
+ self.i+=1
+ cv2.imshow('Face Mesh', cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+ except Exception as ex:
+ print(ex)
+ # Now let's find out where the face lives inside the latent space (128 dimensions space)
+
+ embeddings_cloud = np.array(self.embeddings_cloud)
+ embeddings_cloud_mean = embeddings_cloud.mean(axis=0)
+ embeddings_cloud_inv_cov = embeddings_cloud.std(axis=0)
+ # Now we save it.
+ # create a dialog box to ask for the subject name
+ name = self.face_name
+ with open(str(faces_path/f"{name}.pkl"),"wb") as f:
+ pickle.dump({"mean":embeddings_cloud_mean, "inv_cov":embeddings_cloud_inv_cov},f)
+ print(f"Saved {name} embeddings")
+ self.i=0
+ self.embeddings_cloud=[]
+ self.is_recording=False
+ self.upgrade_faces()
+
+ return f"Saved {name} embeddings"
+ else:
+ return "Waiting"
 
- def recognize(self, image):
+ def process_webcam(self, image):
+ if not self.webcam_process:
+ return None
+
  # Process the image to extract faces and draw the masks on the face in the image
  fa.process(image)
 
@@ -266,7 +347,7 @@ class UI():
  face = fa.faces[i]
  vertices = face.get_face_outer_vertices()
  face_image = face.getFaceBox(image, vertices, margins=(40,40,40,40))
- embedding = DeepFace.represent(face_image)[0]["embedding"]
+ embedding = DeepFace.represent(face_image, enforce_detection=False)[0]["embedding"]
  if self.draw_landmarks:
  face.draw_landmarks(image, color=(0,0,0))
  nearest_distance = 1e100
@@ -293,11 +374,14 @@ class UI():
  # Return the resulting frame
  return image
 
- def recognize2(self, image):
+ def process_image(self, image):
  if image is None:
  return None
- image = cv2.resize(image, fa.image_size)
  # Process the image to extract faces and draw the masks on the face in the image
+ if image.shape[1]>640:
+ image = cv2.resize(image,(int(640*(image.shape[1]/image.shape[0])),640))
+ fa.image_size=(image.shape[1],image.shape[0],3)
+
  fa.process(image)
 
  if fa.nb_faces>0:
@@ -306,7 +390,7 @@ class UI():
  face = fa.faces[i]
  vertices = face.get_face_outer_vertices()
  face_image = face.getFaceBox(image, vertices, margins=(40,40,40,40))
- embedding = DeepFace.represent(face_image)[0]["embedding"]
+ embedding = DeepFace.represent(face_image, enforce_detection=False)[0]["embedding"]
  if self.draw_landmarks:
  face.draw_landmarks(image, color=(0,0,0))
  nearest_distance = 1e100
@@ -328,7 +412,7 @@ class UI():
  else:
  face.draw_bounding_box(image, thickness=1,text=f"{self.known_faces_names[nearest]}:{nearest_distance:.3e}")
  except Exception as ex:
- pass
+ image=face_image
 
  # Return the resulting frame
  return image
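
Note (not part of the commit): the recognition tabs decide whether a detected face is known by comparing its DeepFace embedding against the per-person statistics saved as `{"mean": ..., "inv_cov": ...}` pickles, with `self.threshold = 4e-1` as the unknown-face cutoff shown above. A minimal sketch of that matching step, assuming a cosine distance against the stored mean (app.py defines a cosine_distance helper; the exact distance used in the recognition loop is outside this diff, and the helper name match_embedding below is hypothetical):

import pickle
from pathlib import Path
import numpy as np

def cosine_distance(u, v):
    # 1 - cosine similarity; smaller means the two embeddings are more alike
    return 1 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

def match_embedding(embedding, faces_path=Path("faces"), threshold=4e-1):
    # Hypothetical helper: return (name, distance) of the nearest stored face,
    # or (None, distance) when even the nearest face is above the threshold.
    nearest_name, nearest_distance = None, 1e100
    for file in faces_path.glob("*.pkl"):
        with open(file, "rb") as f:
            finger_print = pickle.load(f)  # {"mean": ..., "inv_cov": ...}
        d = cosine_distance(np.array(embedding), finger_print["mean"])
        if d < nearest_distance:
            nearest_name, nearest_distance = file.stem, d
    if nearest_distance > threshold:
        return None, nearest_distance  # treated as an unknown face
    return nearest_name, nearest_distance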