kasper-boy committed on
Commit
34630ca
1 Parent(s): 4501c46

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +235 -0
app.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Optional, Tuple

import gradio as gr
import numpy as np
import supervision as sv

from inference import get_model
7
+
8
# HTML/Markdown header rendered at the top of the demo via gr.Markdown:
# a centered title plus GitHub / arXiv / Colab badge links for each of the
# three YOLO versions being compared. This is runtime content — keep the
# text exactly as displayed to users.
MARKDOWN = """
<h1 style='text-align: center'>Evolving-YOLO-V8-V9-V10</h1>
Welcome to Evolving-YOLO-V8-V9-V10! This demo showcases the performance of various YOLO models
pre-trained on the COCO dataset.
- **YOLOv8**
<div style="display: flex; align-items: center;">
<a href="https://github.com/ultralytics/ultralytics" style="margin-right: 10px;">
<img src="https://badges.aleen42.com/src/github.svg">
</a>
<a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
<img src="https://colab.research.google.com/assets/colab-badge.svg">
</a>
</div>
- **YOLOv9**
<div style="display: flex; align-items: center;">
<a href="https://github.com/WongKinYiu/yolov9" style="margin-right: 10px;">
<img src="https://badges.aleen42.com/src/github.svg">
</a>
<a href="https://arxiv.org/abs/2402.13616" style="margin-right: 10px;">
<img src="https://img.shields.io/badge/arXiv-2402.13616-b31b1b.svg">
</a>
<a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov9-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
<img src="https://colab.research.google.com/assets/colab-badge.svg">
</a>
</div>
- **YOLOv10**
<div style="display: flex; align-items: center;">
<a href="https://github.com/THU-MIG/yolov10" style="margin-right: 10px;">
<img src="https://badges.aleen42.com/src/github.svg">
</a>
<a href="https://arxiv.org/abs/2405.14458" style="margin-right: 10px;">
<img src="https://img.shields.io/badge/arXiv-2405.14458-b31b1b.svg">
</a>
<a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov10-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
<img src="https://colab.research.google.com/assets/colab-badge.svg">
</a>
</div>
"""
46
+
47
# Example rows for gr.Examples. Each row must supply one value per component
# in the Examples `inputs` list, which is: [input image, YOLOv8 confidence,
# YOLOv9 confidence, YOLOv10 confidence, IoU threshold] — five values.
# BUG FIX: the original rows had only four values (one short), which breaks
# gr.Examples; the rows now carry the three confidence defaults (0.3) and
# the IoU slider default (0.5).
IMAGE_EXAMPLES = [
    ['https://media.roboflow.com/supervision/image-examples/people-walking.png', 0.3, 0.3, 0.3, 0.5],
    ['https://media.roboflow.com/supervision/image-examples/vehicles.png', 0.3, 0.3, 0.3, 0.5],
    ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.3, 0.5],
]
52
+
53
# Load the three COCO-pretrained detectors through Roboflow's `inference`
# package at import time. The "coco/N" ids select different model versions
# hosted under the COCO project — presumably v8/v9/v10 checkpoints
# respectively; verify the ids against the Roboflow model registry.
YOLO_V8_MODEL = get_model(model_id="coco/8")
YOLO_V9_MODEL = get_model(model_id="coco/17")
YOLO_V10_MODEL = get_model(model_id="coco/22")
56
+
57
# Shared supervision annotators reused by every inference call.
# NOTE(review): `sv.Color.black()` is a deprecated classmethod in newer
# supervision releases (replaced by the `sv.Color.BLACK` constant) — confirm
# against the pinned supervision version before upgrading.
LABEL_ANNOTATORS = sv.LabelAnnotator(text_color=sv.Color.black())
BOUNDING_BOX_ANNOTATORS = sv.BoundingBoxAnnotator()
59
+
60
+
61
def detect_and_annotate(
    model,
    input_image: np.ndarray,
    confidence_threshold: float,
    iou_threshold: float,
    class_id_mapping: Optional[dict] = None
) -> np.ndarray:
    """Run one detector on one image and draw boxes plus labels on a copy.

    Args:
        model: An `inference` model object exposing ``.infer(...)``.
        input_image: Image to run detection on; never mutated.
        confidence_threshold: Minimum detection confidence to keep.
        iou_threshold: IoU threshold for non-maximum suppression.
        class_id_mapping: Optional remap applied to every detected class id
            (e.g. to align class ids across model families). ``None`` (the
            default) leaves ids untouched. (Fixed annotation: the parameter
            was annotated plain ``dict`` although its default is ``None``.)

    Returns:
        A copy of ``input_image`` annotated with bounding boxes and
        ``"<class> (<confidence>)"`` labels.
    """
    result = model.infer(
        input_image,
        confidence=confidence_threshold,
        iou_threshold=iou_threshold
    )[0]
    detections = sv.Detections.from_inference(result)

    if class_id_mapping:
        detections.class_id = np.array([
            class_id_mapping[class_id]
            for class_id
            in detections.class_id
        ])

    # Label text pairs each class name with its confidence, e.g. "person (0.87)".
    labels = [
        f"{class_name} ({confidence:.2f})"
        for class_name, confidence
        in zip(detections['class_name'], detections.confidence)
    ]

    # Annotate a copy so the caller's image is never modified in place.
    annotated_image = input_image.copy()
    annotated_image = BOUNDING_BOX_ANNOTATORS.annotate(
        scene=annotated_image, detections=detections)
    annotated_image = LABEL_ANNOTATORS.annotate(
        scene=annotated_image, detections=detections, labels=labels)
    return annotated_image
94
+
95
+
96
def process_image(
    input_image: np.ndarray,
    yolo_v8_confidence_threshold: float,
    yolo_v9_confidence_threshold: float,
    yolo_v10_confidence_threshold: float,
    iou_threshold: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Run all three YOLO models on the same image.

    Args:
        input_image: Image shared by all three detectors.
        yolo_v8_confidence_threshold: Confidence cutoff for YOLOv8.
        yolo_v9_confidence_threshold: Confidence cutoff for YOLOv9.
        yolo_v10_confidence_threshold: Confidence cutoff for YOLOv10.
        iou_threshold: NMS IoU threshold shared by all three models.

    Returns:
        The (YOLOv8, YOLOv9, YOLOv10) annotated images, in that order.
    """
    yolo_v8_annotated_image = detect_and_annotate(
        YOLO_V8_MODEL, input_image, yolo_v8_confidence_threshold, iou_threshold)
    yolo_v9_annotated_image = detect_and_annotate(
        YOLO_V9_MODEL, input_image, yolo_v9_confidence_threshold, iou_threshold)
    # Renamed from `yolo_10_annotated_image` for consistency with v8/v9 names.
    yolo_v10_annotated_image = detect_and_annotate(
        YOLO_V10_MODEL, input_image, yolo_v10_confidence_threshold, iou_threshold)

    return (
        yolo_v8_annotated_image,
        yolo_v9_annotated_image,
        yolo_v10_annotated_image
    )
115
+
116
+
117
# Shared help text for the three per-model confidence sliders. The original
# repeated this paragraph verbatim three times; hoisting it into one constant
# keeps the sliders from drifting out of sync (rendered text is unchanged).
CONFIDENCE_THRESHOLD_INFO = (
    "The confidence threshold for the YOLO model. Lower the threshold to "
    "reduce false negatives, enhancing the model's sensitivity to detect "
    "sought-after objects. Conversely, increase the threshold to minimize false "
    "positives, preventing the model from identifying objects it shouldn't."
)

# One confidence slider per model, all defaulting to 0.3.
yolo_v8_confidence_threshold_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.3,
    step=0.01,
    label="YOLOv8 Confidence Threshold",
    info=CONFIDENCE_THRESHOLD_INFO)

yolo_v9_confidence_threshold_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.3,
    step=0.01,
    label="YOLOv9 Confidence Threshold",
    info=CONFIDENCE_THRESHOLD_INFO)

yolo_v10_confidence_threshold_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.3,
    step=0.01,
    label="YOLOv10 Confidence Threshold",
    info=CONFIDENCE_THRESHOLD_INFO)

# Single NMS IoU threshold shared by all three models.
iou_threshold_component = gr.Slider(
    minimum=0,
    maximum=1.0,
    value=0.5,
    step=0.01,
    label="IoU Threshold",
    info=(
        "The Intersection over Union (IoU) threshold for non-maximum suppression. "
        "Decrease the value to lessen the occurrence of overlapping bounding boxes, "
        "making the detection process stricter. On the other hand, increase the value "
        "to allow more overlapping bounding boxes, accommodating a broader range of "
        "detections."
    ))
169
+
170
+
171
# Gradio UI wiring: header markdown, a collapsed "Configuration" accordion
# holding the four sliders, an input image beside the YOLOv8 output, a second
# row with the YOLOv9/YOLOv10 outputs, then clickable examples and a Submit
# button — both of which invoke `process_image` on the five inputs.
with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)
    with gr.Accordion("Configuration", open=False):
        with gr.Row():
            yolo_v8_confidence_threshold_component.render()
            yolo_v9_confidence_threshold_component.render()
            yolo_v10_confidence_threshold_component.render()
            iou_threshold_component.render()
    with gr.Row():
        # NOTE(review): type='pil' hands process_image a PIL.Image although it
        # is annotated np.ndarray — works only if the supervision annotators
        # accept PIL images in the pinned version; confirm or switch to
        # type='numpy'.
        input_image_component = gr.Image(
            type='pil',
            label='Input'
        )
        yolo_v8_output_image_component = gr.Image(
            type='pil',
            label='YOLOv8'
        )
    with gr.Row():
        yolo_v9_output_image_component = gr.Image(
            type='pil',
            label='YOLOv9'
        )
        yolo_v10_output_image_component = gr.Image(
            type='pil',
            label='YOLOv10'
        )
    submit_button_component = gr.Button(
        value='Submit',
        scale=1,
        variant='primary'
    )
    # Examples run process_image eagerly when clicked.
    gr.Examples(
        fn=process_image,
        examples=IMAGE_EXAMPLES,
        inputs=[
            input_image_component,
            yolo_v8_confidence_threshold_component,
            yolo_v9_confidence_threshold_component,
            yolo_v10_confidence_threshold_component,
            iou_threshold_component
        ],
        outputs=[
            yolo_v8_output_image_component,
            yolo_v9_output_image_component,
            yolo_v10_output_image_component
        ]
    )

    # Same inputs/outputs as the examples, triggered by the Submit button.
    submit_button_component.click(
        fn=process_image,
        inputs=[
            input_image_component,
            yolo_v8_confidence_threshold_component,
            yolo_v9_confidence_threshold_component,
            yolo_v10_confidence_threshold_component,
            iou_threshold_component
        ],
        outputs=[
            yolo_v8_output_image_component,
            yolo_v9_output_image_component,
            yolo_v10_output_image_component
        ]
    )

# max_threads=1 serializes requests — presumably to avoid concurrent GPU /
# model access; verify this is intentional before scaling.
demo.launch(debug=False, show_error=True, max_threads=1)