Spaces:
Sleeping
Sleeping
Zhu-FaceOnLive
committed on
Commit
•
c2b0164
1
Parent(s):
81efcf0
Upload 9 files
Browse files- Dockerfile +19 -0
- app.py +217 -0
- gradio/demo.py +114 -0
- gradio/examples/1.jpg +0 -0
- gradio/examples/2.jpg +0 -0
- gradio/examples/3.jpg +0 -0
- gradio/examples/4.jpg +0 -0
- requirements.txt +6 -0
- run.sh +4 -0
Dockerfile
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Ubuntu base image with Python 3 + OpenCV for the FaceOnLive face-compare API.
FROM ubuntu:20.04

# BUG FIX: CONTAINER_TIMEZONE was referenced below but never defined, so the
# tz symlink pointed at /usr/share/zoneinfo/ (a directory). Default it to UTC.
ENV CONTAINER_TIMEZONE=Etc/UTC
# Prevent tzdata/apt from blocking the build with interactive prompts.
ENV DEBIAN_FRONTEND=noninteractive

RUN ln -snf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && echo $CONTAINER_TIMEZONE > /etc/timezone

# Single layer: update + install together so the package index is never stale,
# then drop the apt lists to keep the image small.
RUN apt-get update -y && \
    apt-get install -y python3 python3-pip python3-opencv \
                       libcurl4-openssl-dev libssl-dev && \
    rm -rf /var/lib/apt/lists/*

# -p creates /home/FaceOnLive_v6 and the facewrapper subdir in one step.
RUN mkdir -p /home/FaceOnLive_v6/facewrapper
WORKDIR /home/FaceOnLive_v6

COPY ./facewrapper ./facewrapper
COPY ./facewrapper/libs/libimutils.so /usr/lib
COPY ./gradio ./gradio
COPY ./openvino /usr/lib
COPY ./app.py ./app.py
COPY ./run.sh .
COPY ./requirements.txt ./requirements.txt

RUN pip3 install -r requirements.txt
RUN chmod a+x run.sh

# Flask API listens on 8000 (the Gradio demo uses 7860 internally).
EXPOSE 8000
CMD ["./run.sh"]
app.py
ADDED
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
sys.path.append('.')

from flask import Flask, request, jsonify
from time import gmtime, strftime
import os
import base64
import json
import cv2
import numpy as np

from facewrapper.facewrapper import ttv_version
from facewrapper.facewrapper import ttv_get_hwid
from facewrapper.facewrapper import ttv_init
from facewrapper.facewrapper import ttv_init_offline
from facewrapper.facewrapper import ttv_extract_feature
from facewrapper.facewrapper import ttv_compare_feature

app = Flask(__name__)

app.config['SITE'] = "http://0.0.0.0:8000/"
app.config['DEBUG'] = False

# BUG FIX: LICENSE_KEY may be unset, in which case os.environ.get() returns
# None and the .encode() call below raised AttributeError at import time.
# Fall back to an empty string: online init then simply fails and the
# offline license file is tried instead.
licenseKey = os.environ.get("LICENSE_KEY") or ""
licensePath = "license.txt"
# Model dictionary shipped alongside this file.
modelFolder = os.path.abspath(os.path.dirname(__file__)) + '/facewrapper/dict'

version = ttv_version()
print("version: ", version.decode('utf-8'))

# Try online activation first; on failure print the machine HWID (useful for
# requesting an offline license) and fall back to the offline license file.
ret = ttv_init(modelFolder.encode('utf-8'), licenseKey.encode('utf-8'))
if ret != 0:
    print(f"online init failed: {ret}")

    hwid = ttv_get_hwid()
    print("hwid: ", hwid.decode('utf-8'))

    ret = ttv_init_offline(modelFolder.encode('utf-8'), licensePath.encode('utf-8'))
    if ret != 0:
        print(f"offline init failed: {ret}")
        exit(-1)
    else:
        print("offline init ok")
else:
    print("online init ok")
+
@app.route('/api/compare_face', methods=['POST'])
def compare_face():
    """Compare two faces from multipart uploads ``image1`` and ``image2``.

    Returns a JSON payload with a same/different verdict, the similarity
    score, and the detected face rectangles. All outcomes — including
    errors — are reported with HTTP 200 and a ``result`` message, matching
    the API's existing client contract.
    """
    def error_response(result):
        # Shared shape for every early-exit error payload.
        response = jsonify({"status": "ok", "data": {"result": result}})
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    def extract(image, label):
        # Run feature extraction; returns (rect, feature, error_message).
        faceRect = np.zeros([4], dtype=np.int32)
        feature = np.zeros([2048], dtype=np.uint8)
        featureSize = np.zeros([1], dtype=np.int32)
        ret = ttv_extract_feature(image, image.shape[1], image.shape[0], faceRect, feature, featureSize)
        if ret <= 0:
            if ret == -1:
                return faceRect, feature, "license error!"
            if ret == -2:
                return faceRect, feature, "init error!"
            if ret == 0:
                return faceRect, feature, f"{label}: no face detected!"
            # BUG FIX: the original handled only -1/-2/0; any other failure
            # code left `result` unbound and raised NameError.
            return faceRect, feature, f"{label}: feature extraction failed ({ret})!"
        return faceRect, feature, None

    # Robustness: a missing form field previously raised KeyError (HTTP 400
    # with an HTML body); report it through the JSON error channel instead.
    file1 = request.files.get('image1')
    if file1 is None:
        return error_response("image1: is null!")
    # np.frombuffer replaces the long-deprecated (removed in NumPy >= 1.24)
    # np.fromstring used originally.
    image1 = cv2.imdecode(np.frombuffer(file1.read(), np.uint8), cv2.IMREAD_COLOR)
    if image1 is None:
        return error_response("image1: is null!")

    file2 = request.files.get('image2')
    if file2 is None:
        return error_response("image2: is null!")
    image2 = cv2.imdecode(np.frombuffer(file2.read(), np.uint8), cv2.IMREAD_COLOR)
    if image2 is None:
        return error_response("image2: is null!")

    faceRect1, feature1, err = extract(image1, "image1")
    if err is not None:
        return error_response(err)

    faceRect2, feature2, err = extract(image2, "image2")
    if err is not None:
        return error_response(err)

    similarity = ttv_compare_feature(feature1, feature2)
    # 0.7 is the SDK's decision threshold used by this service.
    result = "same" if similarity > 0.7 else "different"

    response = jsonify(
        {
            "status": "ok",
            "data": {
                "result": result,
                "similarity": float(similarity),
                "face1": {"x1": int(faceRect1[0]), "y1": int(faceRect1[1]), "x2": int(faceRect1[2]), "y2" : int(faceRect1[3])},
                "face2": {"x1": int(faceRect2[0]), "y1": int(faceRect2[1]), "x2": int(faceRect2[2]), "y2" : int(faceRect2[3])},
            }
        })

    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
131 |
+
@app.route('/api/compare_face_base64', methods=['POST'])
def coompare_face_base64():
    # NOTE(review): function name keeps the original "coompare" typo — Flask
    # derives the endpoint name from it, so renaming could break url_for()
    # callers elsewhere. The public route path is spelled correctly.
    """Compare two faces sent as base64 strings in a JSON body.

    Expects ``{"image1": <base64>, "image2": <base64>}``. Returns the same
    payload shape as /api/compare_face; all outcomes use HTTP 200 with a
    ``result`` message.
    """
    def error_response(result):
        # Shared shape for every early-exit error payload.
        response = jsonify({"status": "ok", "data": {"result": result}})
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    def decode_image(b64):
        # Robustness: invalid/missing base64 previously crashed the request
        # with an unhandled binascii.Error; treat it as a null image.
        if not b64:
            return None
        try:
            return cv2.imdecode(np.frombuffer(base64.b64decode(b64), dtype=np.uint8), cv2.IMREAD_COLOR)
        except Exception:
            return None

    def extract(image, label):
        # Run feature extraction; returns (rect, feature, error_message).
        faceRect = np.zeros([4], dtype=np.int32)
        feature = np.zeros([2048], dtype=np.uint8)
        featureSize = np.zeros([1], dtype=np.int32)
        ret = ttv_extract_feature(image, image.shape[1], image.shape[0], faceRect, feature, featureSize)
        if ret <= 0:
            if ret == -1:
                return faceRect, feature, "license error!"
            if ret == -2:
                return faceRect, feature, "init error!"
            if ret == 0:
                return faceRect, feature, f"{label}: no face detected!"
            # BUG FIX: the original handled only -1/-2/0; any other failure
            # code left `result` unbound and raised NameError.
            return faceRect, feature, f"{label}: feature extraction failed ({ret})!"
        return faceRect, feature, None

    # silent=True: a non-JSON body yields None instead of an HTTP 400 abort.
    content = request.get_json(silent=True) or {}

    image1 = decode_image(content.get('image1'))
    if image1 is None:
        return error_response("image1: is null!")

    image2 = decode_image(content.get('image2'))
    if image2 is None:
        return error_response("image2: is null!")

    faceRect1, feature1, err = extract(image1, "image1")
    if err is not None:
        return error_response(err)

    faceRect2, feature2, err = extract(image2, "image2")
    if err is not None:
        return error_response(err)

    similarity = ttv_compare_feature(feature1, feature2)
    # 0.7 is the SDK's decision threshold used by this service.
    result = "same" if similarity > 0.7 else "different"

    response = jsonify(
        {
            "status": "ok",
            "data": {
                "result": result,
                "similarity": float(similarity),
                "face1": {"x1": int(faceRect1[0]), "y1": int(faceRect1[1]), "x2": int(faceRect1[2]), "y2" : int(faceRect1[3])},
                "face2": {"x1": int(faceRect2[0]), "y1": int(faceRect2[1]), "x2": int(faceRect2[2]), "y2" : int(faceRect2[3])},
            }
        })
    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
214 |
+
|
215 |
+
if __name__ == '__main__':
    # Serve the Flask API on all interfaces; the PORT env var overrides 8000.
    server_port = int(os.environ.get("PORT", 8000))
    app.run(host='0.0.0.0', port=server_port)
gradio/demo.py
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import requests
|
3 |
+
import json
|
4 |
+
from PIL import Image
|
5 |
+
|
6 |
+
def _crop_face(img, face):
    """Crop a face rectangle (clamped to the image bounds) from *img* and
    resize it to 150px height, preserving aspect ratio."""
    x1 = max(face.get('x1'), 0)
    y1 = max(face.get('y1'), 0)
    x2 = min(face.get('x2'), img.width - 1)
    y2 = min(face.get('y2'), img.height - 1)

    crop = img.crop((x1, y1, x2, y2))
    resized_w = int((crop.width / float(crop.height)) * 150)
    return crop.resize((resized_w, 150))


def compare_face(frame1, frame2):
    """Send two image files to the local compare_face API and build a
    side-by-side preview of the detected face crops.

    Returns ``[json_response, preview_image_or_None]`` for the Gradio outputs.
    """
    url = "http://127.0.0.1:8000/api/compare_face"
    # BUG FIX: the file handles were previously opened inline and never
    # closed (leaked on every click); `with` closes them deterministically.
    with open(frame1, 'rb') as f1, open(frame2, 'rb') as f2:
        r = requests.post(url=url, files={'image1': f1, 'image2': f2})

    faces = None

    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)

        face1 = None
        face2 = None
        data = r.json().get('data')
        if data.get('face1') is not None:
            face1 = _crop_face(image1, data.get('face1'))
        if data.get('face2') is not None:
            face2 = _crop_face(image2, data.get('face2'))

        if face1 is not None and face2 is not None:
            new_image = Image.new('RGB', (face1.width + face2.width + 10, 150), (80, 80, 80))
            new_image.paste(face1, (0, 0))
            new_image.paste(face2, (face1.width + 10, 0))
            faces = new_image.copy()
        elif face1 is not None and face2 is None:
            new_image = Image.new('RGB', (face1.width + face1.width + 10, 150), (80, 80, 80))
            new_image.paste(face1, (0, 0))
            faces = new_image.copy()
        elif face1 is None and face2 is not None:
            new_image = Image.new('RGB', (face2.width + face2.width + 10, 150), (80, 80, 80))
            new_image.paste(face2, (face2.width + 10, 0))
            faces = new_image.copy()
    except Exception:
        # Best-effort preview: any parse/crop failure (e.g. error payload
        # without face rects) simply yields no image. Narrowed from the
        # original bare `except:` so KeyboardInterrupt/SystemExit propagate.
        pass

    return [r.json(), faces]
|
85 |
+
|
86 |
+
with gr.Blocks() as demo:
    # Header: usage notes and the docker one-liner for self-hosting.
    gr.Markdown(
        """
        # Face Recognition
        Get your own Face Recognition Server by duplicating this space.<br/>
        Or run on your own machine using docker.<br/>
        ```docker run -it -p 7860:7860 --platform=linux/amd64 \
        -e LICENSE_KEY="YOUR_VALUE_HERE" \
        registry.hf.space/faceonlive-face-recognition-sdk:latest ```<br/><br/>
        Contact us at [email protected] for issues and support.<br/>
        """
    )
    # Three columns: left image + button, right image, results (preview + JSON).
    with gr.Row():
        with gr.Column():
            face_input_left = gr.Image(type='filepath', height=480)
            gr.Examples(
                ['gradio/examples/1.jpg', 'gradio/examples/2.jpg'],
                inputs=face_input_left,
            )
            compare_button = gr.Button("Compare Face")
        with gr.Column():
            face_input_right = gr.Image(type='filepath', height=480)
            gr.Examples(
                ['gradio/examples/3.jpg', 'gradio/examples/4.jpg'],
                inputs=face_input_right,
            )
        with gr.Column():
            face_preview = gr.Image(type="pil", height=150)
            result_json = gr.JSON(label='Result')

    compare_button.click(
        compare_face,
        inputs=[face_input_left, face_input_right],
        outputs=[result_json, face_preview],
    )

demo.launch(server_name="0.0.0.0", server_port=7860)
|
gradio/examples/1.jpg
ADDED
gradio/examples/2.jpg
ADDED
gradio/examples/3.jpg
ADDED
gradio/examples/4.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
flask
|
2 |
+
flask-cors
|
3 |
+
gradio
|
4 |
+
opencv-python
|
5 |
+
numpy==1.20.3
|
6 |
+
pillow
|
run.sh
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Start the Flask API in the background, then run the Gradio demo in the
# foreground so the container stays alive as long as the demo does.
#
# BUG FIX: the original used `exec python3 app.py &`; `exec` inside a
# backgrounded command runs in a subshell, so it was a misleading no-op.
python3 app.py &

# Replace the shell with the demo process so it receives container signals.
exec python3 gradio/demo.py
|