LovnishVerma committed on
Commit 610503e
1 Parent(s): 5ca6adb

Update app.py

Files changed (1)
app.py  +35 -62
app.py CHANGED
@@ -1,45 +1,30 @@
 import streamlit as st
-# from transformers import pipeline
 from PIL import Image
 import face_recognition
 import cv2
 import numpy as np
-import requests
 import os

-st.title("AIMLJan24 - Face Recognition")
-
-# create list of encoding of all images in photos folder
-# Load images for face recognition
-Images = [] # List to store Images
-classnames = [] # List to store classnames
-directory = "photos"
-myList = os.listdir(directory)
-
-st.write("Photographs found in folder : ")
-for cls in myList:
-    if os.path.splitext(cls)[1] in [".jpg", ".jpeg"]:
-        img_path = os.path.join(directory, cls)
-        curImg = cv2.imread(img_path)
-        Images.append(curImg)
-        st.write(os.path.splitext(cls)[0])
-        classnames.append(os.path.splitext(cls)[0])
+def load_images(directory):
+    images = []
+    classnames = []
+    file_list = os.listdir(directory)

-# Load images for face recognition
-encodeListknown = [face_recognition.face_encodings(img)[0] for img in Images]
-
-# camera to take photo of user in question
-file_name = st.camera_input("Take a picture") #st.file_uploader("Upload image ")
-
-if file_name is not None:
-    col1, col2 = st.columns(2)
+    st.write("Photographs found in folder : ")
+    for file in file_list:
+        if os.path.splitext(file)[1] in [".jpg", ".jpeg"]:
+            img_path = os.path.join(directory, file)
+            cur_img = cv2.imread(img_path)
+            images.append(cur_img)
+            st.write(os.path.splitext(file)[0])
+            classnames.append(os.path.splitext(file)[0])

-    test_image = Image.open(file_name)
-    image = np.asarray(test_image)
+    return images, classnames

-    imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
+def recognize_faces(test_image, known_encodings, class_names):
+    imgS = cv2.resize(test_image, (0, 0), None, 0.25, 0.25)
     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
-    facesCurFrame = face_recognition.face_locations(imgS)
+    facesCurFrame = face_recognition.face_locations(imgS)
     encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

     name = "Unknown" # Default name for unknown faces
@@ -48,48 +33,36 @@ if file_name is not None:
     # Checking if faces are detected
     if len(encodesCurFrame) > 0:
         for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
-            # Assuming that encodeListknown is defined and populated in your code
-            matches = face_recognition.compare_faces(encodeListknown, encodeFace)
-            faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
+            matches = face_recognition.compare_faces(known_encodings, encodeFace)
+            faceDis = face_recognition.face_distance(known_encodings, encodeFace)
             matchIndex = np.argmin(faceDis)

             if matches[matchIndex]:
-                name = classnames[matchIndex].upper()
+                name = class_names[matchIndex].upper()
                 match_found = True # Set the flag to True

             y1, x2, y2, x1 = faceLoc
             y1, x2, y2, x1 = (y1 * 4), (x2 * 4), (y2 * 4) ,(x1 * 4)
-            cv2.rectangle(test_image , (x1, y1), (x2, y2), (0, 255, 0), 2)
-            cv2.rectangle(test_image , (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
-            cv2.putText(test_image , name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+            cv2.rectangle(test_image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+            cv2.rectangle(test_image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
+            cv2.putText(test_image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

-        st.image(test_image , use_column_width=True, output_format="PNG")
-    else:
-        st.warning("No faces detected in the image. Face recognition failed.")
+    return test_image

-    # image = Image.open(file_name)
-    # col1.image(image, use_column_width=True)
-
-    # pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
-
-    # st.title("AIMLJan24 First App on Hugging face - Hot Dog? Or Not?")
-
-    # file_name = st.file_uploader("Upload the test image to find is this hot dog ! ")
-
-    # if file_name is not None:
-    # col1, col2 = st.columns(2)
+st.title("AIMLJan24 - Face Recognition")

-    # image = Image.open(file_name)
-    # col1.image(image, use_column_width=True)
-    # predictions = pipeline(image)
+# Load images for face recognition
+directory = "photos"
+Images, classnames = load_images(directory)

-    # col2.header("Probabilities")
-    # for p in predictions:
-    # col2.subheader(f"{ p['label'] }: { round(p['score'] * 100, 1)}%")
+# Load images for face recognition
+encodeListknown = [face_recognition.face_encodings(img)[0] for img in Images]

+# camera to take photo of user in question
+file_name = st.file_uploader("Upload image")

-    # # my first app
-    # import streamlit as st
+if file_name is not None:
+    test_image = np.array(Image.open(file_name))
+    image_with_recognition = recognize_faces(test_image, encodeListknown, classnames)
+    st.image(image_with_recognition, use_column_width=True, output_format="PNG")

-    # x = st.slider('Select a value')
-    # st.write(x, 'squared is', x * x)
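Note (not part of the commit): the refactored encoding step still assumes every reference photo in the photos folder yields at least one face encoding (the `[0]` index raises IndexError otherwise), and `load_images` returns BGR arrays from `cv2.imread` while `face_recognition` expects RGB. A minimal, hypothetical sketch of a more defensive variant; the helper name `encode_known_faces` is an assumption, not from the diff:

import os
import cv2
import face_recognition

def encode_known_faces(directory="photos"):
    # Hypothetical helper: convert BGR (cv2.imread) to RGB before encoding and
    # skip reference photos where no face is detected, instead of raising IndexError.
    encodings, names = [], []
    for file in os.listdir(directory):
        if os.path.splitext(file)[1].lower() in (".jpg", ".jpeg"):
            bgr = cv2.imread(os.path.join(directory, file))
            if bgr is None:
                continue  # unreadable or non-image file
            rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
            found = face_recognition.face_encodings(rgb)
            if found:
                encodings.append(found[0])
                names.append(os.path.splitext(file)[0])
    return encodings, names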