vaugheu committed on
Commit
dc13b53
•
1 Parent(s): e4caada

Create app.py

Files changed (1)
  1. app.py +266 -0
app.py ADDED
@@ -0,0 +1,266 @@
+ import streamlit as st
+ import cv2
+ import numpy as np
+ import tempfile
+ from collections import Counter
+ import pandas as pd
+ import pyttsx3
+
+ # import streamlit.components.v1 as components
+
+ # # embed streamlit docs in a streamlit app
+ # components.iframe("https://nafisrayan.github.io/ThreeJS-Hand-Control-Panel/", height=500, width=500)
+
+ p_time = 0
+
+ st.sidebar.title('Settings')
+ model_type = st.sidebar.selectbox(
+     'Choose YOLO Model', ('YOLOv8', 'YOLOv9', 'YOLOv10')
+ )
+
+ st.title(f'{model_type} Predictions')
+ sample_img = cv2.imread('logo2.jpg')
+ FRAME_WINDOW = st.image(sample_img, channels='BGR')
+ cap = None
+
+
+ def speak(audio):
+     engine = pyttsx3.init('sapi5')
+     voices = engine.getProperty('voices')
+
+     engine.setProperty('voice', voices[1].id)
+     engine.say(audio)
+     engine.runAndWait()
+
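+ # NOTE: pyttsx3.init('sapi5') targets the Windows SAPI5 speech driver, and voices[1]
+ # assumes a second installed voice; on Linux/macOS hosts pyttsx3.init() with the
+ # platform's default driver would likely be needed instead.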
+ if st.sidebar.checkbox('Load Model Options'):
+
+     # YOLOv8 Model
+     if model_type == 'YOLOv8':
+         path_model_file = 'yolov8.pt'
+         from ultralytics import YOLO
+         model = YOLO(path_model_file)
+
+     if model_type == 'YOLOv9':
+         path_model_file = 'yolov9c.pt'
+         from ultralytics import YOLO
+         model = YOLO(path_model_file)
+     if model_type == 'YOLOv10':
+         st.caption("Work in Progress... >_<")
+         st.stop()  # no YOLOv10 weights are loaded yet; stop before model.names below
+         # path_model_file = 'yolov10n.pt'
+         # from ultralytics import YOLO
+         # model = YOLO(path_model_file)
+
+     # Load Class names
+     class_labels = model.names
+
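+     # NOTE: 'yolov8.pt' and 'yolov9c.pt' are assumed to be present in the working
+     # directory (or resolvable by ultralytics); model loading fails here otherwise.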
+     # Inference Mode
+     options = st.sidebar.radio(
+         'Options:', ('Webcam', 'Image', 'Video'), index=1)  # removed RTSP for now
+
+     # Confidence
+     confidence = st.sidebar.slider(
+         'Detection Confidence', min_value=0.0, max_value=1.0, value=0.25)
+
+     # Draw thickness
+     draw_thick = st.sidebar.slider(
+         'Draw Thickness:', min_value=1,
+         max_value=20, value=3
+     )
+
+     color_pick_list = [None]*len(class_labels)
+
+
+     # Image
+     if options == 'Image':
+         upload_img_file = st.sidebar.file_uploader(
+             'Upload Image', type=['jpg', 'jpeg', 'png'])
+         if upload_img_file is not None:
+             pred = st.checkbox(f'Predict Using {model_type}')
+             file_bytes = np.asarray(
+                 bytearray(upload_img_file.read()), dtype=np.uint8)
+             img = cv2.imdecode(file_bytes, 1)
+             FRAME_WINDOW.image(img, channels='BGR')
+             # st.caption(model(img)[0][0])
+
+             if pred:
+                 def predict(model, imag, classes=[], conf=confidence):
+                     if classes:
+                         results = model.predict(imag, classes=classes, conf=conf)
+                     else:
+                         results = model.predict(imag, conf=conf)
+
+                     return results
+
+                 def predict_and_detect(model, img, classes=[], conf=confidence, rectangle_thickness=draw_thick, text_scale=draw_thick, text_thickness=draw_thick):
+                     results = predict(model, img, classes, conf=conf)
+
+                     # Initialize a Counter to keep track of class occurrences
+                     class_counts = Counter()
+
+                     for result in results:
+                         for box in result.boxes:
+                             # Update the counter with the class name
+                             class_name = result.names[int(box.cls[0])]
+                             class_counts[class_name] += 1
+
+                             # Draw the bounding box and label with a random color
+                             color = tuple(np.random.randint(0, 255, size=3).tolist())
+                             cv2.rectangle(img, (int(box.xyxy[0][0]), int(box.xyxy[0][1])),
+                                           (int(box.xyxy[0][2]), int(box.xyxy[0][3])), color, rectangle_thickness)
+                             cv2.putText(img, f"{class_name}",
+                                         (int(box.xyxy[0][0]), int(box.xyxy[0][1]) - 10),
+                                         cv2.FONT_HERSHEY_PLAIN, text_scale, color, text_thickness)
+
+                     # Convert the Counter to a DataFrame for easy viewing
+                     df_fq = pd.DataFrame.from_dict(class_counts, orient='index', columns=['Number'])
+                     df_fq.index.name = 'Class'
+
+                     return img, df_fq
+
+                 img, df_fq = predict_and_detect(model, img, classes=[], conf=confidence)
+                 FRAME_WINDOW.image(img, channels='BGR')
+
+                 # Updating Inference results
+                 with st.container():
+                     st.markdown("<h2>Inference Statistics</h2>", unsafe_allow_html=True)
+                     st.markdown("<h3>Detected objects in current Frame</h3>", unsafe_allow_html=True)
+                     st.dataframe(df_fq)
+                     # print("🚀 ~ df_fq:", df_fq)
+
+                 list_of_tuples = [(row.Number, row.Index) for row in df_fq.itertuples()]
+
+                 print("🚀 ~ list_of_tuples:", list_of_tuples)
+
+                 speak(f'This is what I have found {list_of_tuples}')
+
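+     # NOTE: essentially identical copies of predict() and predict_and_detect() are
+     # re-defined in the Video and Webcam branches below; hoisting them to module
+     # level would remove the duplication.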
+     # Video
+     if options == 'Video':
+         upload_video_file = st.sidebar.file_uploader(
+             'Upload Video', type=['mp4', 'avi', 'mkv'])
+         if upload_video_file is not None:
+             pred = st.checkbox(f'Predict Using {model_type}')
+             tfile = tempfile.NamedTemporaryFile(delete=False)
+             tfile.write(upload_video_file.read())
+             cap = cv2.VideoCapture(tfile.name)
+
+             while True:
+                 success, img = cap.read()
+                 if not success:
+                     st.error("Video NOT working\nCheck Video settings!", icon="🚨")
+                     break
+
+                 if pred:
+                     def predict(model, img, classes=[], conf=confidence):
+                         if classes:
+                             results = model.predict(img, classes=classes, conf=conf)
+                         else:
+                             results = model.predict(img, conf=conf)
+                         return results
+
+                     def predict_and_detect(model, img, classes=[], conf=confidence, rectangle_thickness=draw_thick, text_scale=draw_thick, text_thickness=draw_thick):
+                         results = predict(model, img, classes, conf=conf)
+
+                         # Initialize a Counter to keep track of class occurrences
+                         class_counts = Counter()
+
+                         for result in results:
+                             for box in result.boxes:
+                                 # Update the counter with the class name
+                                 class_name = result.names[int(box.cls[0])]
+                                 class_counts[class_name] += 1
+
+                                 # Draw the bounding box and label with a random color
+                                 color = tuple(np.random.randint(0, 255, size=3).tolist())
+                                 cv2.rectangle(img, (int(box.xyxy[0][0]), int(box.xyxy[0][1])),
+                                               (int(box.xyxy[0][2]), int(box.xyxy[0][3])), color, rectangle_thickness)
+                                 cv2.putText(img, f"{class_name}",
+                                             (int(box.xyxy[0][0]), int(box.xyxy[0][1]) - 10),
+                                             cv2.FONT_HERSHEY_PLAIN, text_scale, color, text_thickness)
+
+                         # Convert the Counter to a DataFrame for easy viewing
+                         df_fq = pd.DataFrame.from_dict(class_counts, orient='index', columns=['Number'])
+                         df_fq.index.name = 'Class'
+
+                         return img, df_fq
+
+                     img, df_fq = predict_and_detect(model, img, classes=[], conf=confidence)
+                     FRAME_WINDOW.image(img, channels='BGR')
+
+                     # Updating Inference results
+                     with st.container():
+                         st.markdown("<h2>Inference Statistics</h2>", unsafe_allow_html=True)
+                         st.markdown("<h3>Detected objects in current Frame</h3>", unsafe_allow_html=True)
+                         st.dataframe(df_fq)
+                         # print("🚀 ~ df_fq:", df_fq)
+
+                     list_of_tuples = [(row.Number, row.Index) for row in df_fq.itertuples()]
+
+                     print("🚀 ~ list_of_tuples:", list_of_tuples)
+
+                     # speak(f'This is what I have found {list_of_tuples}')
+
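+     # NOTE: the VideoCapture opened above is never released; calling cap.release()
+     # after the read loop would free the temporary video file handle.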
+     # Webcam
+     if options == 'Webcam':
+         cam_options = st.sidebar.selectbox('Select Webcam Channel', ('0', '1', '2', '3'))
+
+         if cam_options != 'Select Channel':
+             pred = st.checkbox(f'Predict Using {model_type}')
+             cap = cv2.VideoCapture(int(cam_options))
+
+             while True:
+                 success, img = cap.read()
+                 if not success:
+                     st.error("Webcam NOT working\nCheck Webcam settings!", icon="🚨")
+                     break
+
+                 if pred:
+                     def predict(model, img, classes=[], conf=confidence):
+                         if classes:
+                             results = model.predict(img, classes=classes, conf=conf)
+                         else:
+                             results = model.predict(img, conf=conf)
+                         return results
+
+                     def predict_and_detect(model, img, classes=[], conf=confidence, rectangle_thickness=draw_thick, text_scale=draw_thick, text_thickness=draw_thick):
+                         results = predict(model, img, classes, conf=conf)
+
+                         # Initialize a Counter to keep track of class occurrences
+                         class_counts = Counter()
+
+                         for result in results:
+                             for box in result.boxes:
+                                 # Update the counter with the class name
+                                 class_name = result.names[int(box.cls[0])]
+                                 class_counts[class_name] += 1
+
+                                 # Draw the bounding box and label with a random color
+                                 color = tuple(np.random.randint(0, 255, size=3).tolist())
+                                 cv2.rectangle(img, (int(box.xyxy[0][0]), int(box.xyxy[0][1])),
+                                               (int(box.xyxy[0][2]), int(box.xyxy[0][3])), color, rectangle_thickness)
+                                 cv2.putText(img, f"{class_name}",
+                                             (int(box.xyxy[0][0]), int(box.xyxy[0][1]) - 10),
+                                             cv2.FONT_HERSHEY_PLAIN, text_scale, color, text_thickness)
+
+                         # Convert the Counter to a DataFrame for easy viewing
+                         df_fq = pd.DataFrame.from_dict(class_counts, orient='index', columns=['Number'])
+                         df_fq.index.name = 'Class'
+
+                         return img, df_fq
+
+                     img, df_fq = predict_and_detect(model, img, classes=[], conf=confidence)
+                     FRAME_WINDOW.image(img, channels='BGR')
+
+                     # Updating Inference results
+                     with st.container():
+                         st.markdown("<h2>Inference Statistics</h2>", unsafe_allow_html=True)
+                         st.markdown("<h3>Detected objects in current Frame</h3>", unsafe_allow_html=True)
+                         st.dataframe(df_fq)
+                         # print("🚀 ~ df_fq:", df_fq)
+
+                     list_of_tuples = [(row.Number, row.Index) for row in df_fq.itertuples()]
+
+                     print("🚀 ~ list_of_tuples:", list_of_tuples)
+
+                     # speak(f'This is what I have found {list_of_tuples}')
+
+