Spaces: Runtime error
nyonyong committed
Commit 30c8b41
1 Parent(s): 0ed6548
First commit
This view is limited to 50 files because it contains too many changes. See raw diff for the full changes.
- TextDetection/.dockerignore +222 -0
- TextDetection/LICENSE +661 -0
- TextDetection/__pycache__/export.cpython-310.pyc +0 -0
- TextDetection/__pycache__/export.cpython-39.pyc +0 -0
- TextDetection/benchmarks.py +174 -0
- TextDetection/classify/predict.py +226 -0
- TextDetection/classify/train.py +333 -0
- TextDetection/classify/tutorial.ipynb +0 -0
- TextDetection/classify/val.py +170 -0
- TextDetection/detect.py +271 -0
- TextDetection/export.py +863 -0
- TextDetection/hubconf.py +169 -0
- TextDetection/requirements.txt +49 -0
- TextDetection/runs/wordDetection/F1_curve.png +0 -0
- TextDetection/runs/wordDetection/PR_curve.png +0 -0
- TextDetection/runs/wordDetection/P_curve.png +0 -0
- TextDetection/runs/wordDetection/R_curve.png +0 -0
- TextDetection/runs/wordDetection/confusion_matrix.png +0 -0
- TextDetection/runs/wordDetection/hyp.yaml +28 -0
- TextDetection/runs/wordDetection/labels.jpg +0 -0
- TextDetection/runs/wordDetection/labels_correlogram.jpg +0 -0
- TextDetection/runs/wordDetection/opt.yaml +68 -0
- TextDetection/runs/wordDetection/results.csv +101 -0
- TextDetection/runs/wordDetection/results.png +0 -0
- TextDetection/runs/wordDetection/weights/best.pt +3 -0
- TextDetection/runs/wordDetection/weights/last.pt +3 -0
- TextDetection/segment/predict.py +284 -0
- TextDetection/segment/train.py +666 -0
- TextDetection/segment/tutorial.ipynb +595 -0
- TextDetection/segment/val.py +473 -0
- TextDetection/setup.cfg +56 -0
- TextDetection/utils/__init__.py +86 -0
- TextDetection/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/augmentations.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/augmentations.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/autoanchor.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/autoanchor.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/dataloaders.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/dataloaders.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/downloads.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/downloads.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/general.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/general.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/metrics.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/metrics.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/plots.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/plots.cpython-39.pyc +0 -0
- TextDetection/utils/__pycache__/torch_utils.cpython-310.pyc +0 -0
- TextDetection/utils/__pycache__/torch_utils.cpython-39.pyc +0 -0
TextDetection/.dockerignore
ADDED
@@ -0,0 +1,222 @@
# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
.git
.cache
.idea
runs
output
coco
storage.googleapis.com

data/samples/*
**/results*.csv
*.jpg

# Neural Network weights -----------------------------------------------------------------------------------------------
**/*.pt
**/*.pth
**/*.onnx
**/*.engine
**/*.mlmodel
**/*.torchscript
**/*.torchscript.pt
**/*.tflite
**/*.h5
**/*.pb
*_saved_model/
*_web_model/
*_openvino_model/

# Below Copied From .gitignore -----------------------------------------------------------------------------------------


# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
wandb/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv*
venv*/
ENV*/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------

# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon
Icon?

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk


# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html  # Bokeh Plots
.pg  # TensorFlow Frozen Graphs
.avi  # videos

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/
cmake-build-release/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
TextDetection/LICENSE
ADDED
@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.

A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.

The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.

An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license.

The precise terms and conditions for copying, distribution and modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU Affero General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based on the Program.

To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work.

A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.

The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.

The Corresponding Source for a work in source code form is that same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.

When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".

    c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

    a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.

    d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

    e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
TextDetection/__pycache__/export.cpython-310.pyc
ADDED
Binary file (31.2 kB).
TextDetection/__pycache__/export.cpython-39.pyc
ADDED
Binary file (31.1 kB).
TextDetection/benchmarks.py
ADDED
@@ -0,0 +1,174 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 benchmarks on all supported export formats

Format                | `export.py --include` | Model
---                   | ---                   | ---
PyTorch               | -                     | yolov5s.pt
TorchScript           | `torchscript`         | yolov5s.torchscript
ONNX                  | `onnx`                | yolov5s.onnx
OpenVINO              | `openvino`            | yolov5s_openvino_model/
TensorRT              | `engine`              | yolov5s.engine
CoreML                | `coreml`              | yolov5s.mlmodel
TensorFlow SavedModel | `saved_model`         | yolov5s_saved_model/
TensorFlow GraphDef   | `pb`                  | yolov5s.pb
TensorFlow Lite       | `tflite`              | yolov5s.tflite
TensorFlow Edge TPU   | `edgetpu`             | yolov5s_edgetpu.tflite
TensorFlow.js         | `tfjs`                | yolov5s_web_model/

Requirements:
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT

Usage:
    $ python benchmarks.py --weights yolov5s.pt --img 640
"""

import argparse
import platform
import sys
import time
from pathlib import Path

import pandas as pd

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd())  # relative

import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det


def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format
            else:
                w = export.run(weights=weights,
                               imgsz=[imgsz],
                               include=[f],
                               batch_size=batch_size,
                               device=device,
                               half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            if model_type == SegmentationModel:
                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
            else:  # DetectionModel:
                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
            speed = result[2][1]  # times (preprocess, inference, postprocess)
            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
            y.append([name, None, None, None])  # mAP, t_inference
        if pt_only and i == 0:
            break  # break after PyTorch

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']  # note: `map` is the always-truthy builtin, so the full column set is always chosen
    py = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py if map else py.iloc[:, :2]))
    if hard_fail and isinstance(hard_fail, str):
        metrics = py['mAP50-95'].array  # values to compare to floor
        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
    return py


def test(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU); was unpacking 4 values, which fails on the 5-column frame used in run()
        try:
            w = weights if f == '-' else \
                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            y.append([name, True])
        except Exception:
            y.append([name, False])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--test', action='store_true', help='test exports only')
    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    print_args(vars(opt))
    return opt


def main(opt):
    test(**vars(opt)) if opt.test else run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
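A standalone sketch (not part of the commit) of the `--hard-fail` floor check that `run()` performs above: the flag's string value is eval()'d into a minimum mAP50-95, and every format that produced a metric must clear it. The toy DataFrame here stands in for the benchmark table `run()` builds.

import pandas as pd

py = pd.DataFrame({'Format': ['PyTorch', 'ONNX'], 'mAP50-95': [0.33, None]})  # toy benchmark results; None marks a failed export
floor = eval('0.29')  # e.g. passing --hard-fail 0.29 on the command line
assert all(x > floor for x in py['mAP50-95'].array if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'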
TextDetection/classify/predict.py
ADDED
@@ -0,0 +1,226 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python classify/predict.py --weights yolov5s-cls.pt --source 0                               # webcam
                                                                   img.jpg                         # image
                                                                   vid.mp4                         # video
                                                                   screen                          # screenshot
                                                                   path/                           # directory
                                                                   list.txt                        # list of images
                                                                   list.streams                    # list of streams
                                                                   'path/*.jpg'                    # glob
                                                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python classify/predict.py --weights yolov5s-cls.pt              # PyTorch
                                           yolov5s-cls.torchscript     # TorchScript
                                           yolov5s-cls.onnx            # ONNX Runtime or OpenCV DNN with --dnn
                                           yolov5s-cls_openvino_model  # OpenVINO
                                           yolov5s-cls.engine          # TensorRT
                                           yolov5s-cls.mlmodel         # CoreML (macOS-only)
                                           yolov5s-cls_saved_model     # TensorFlow SavedModel
                                           yolov5s-cls.pb              # TensorFlow GraphDef
                                           yolov5s-cls.tflite          # TensorFlow Lite
                                           yolov5s-cls_edgetpu.tflite  # TensorFlow Edge TPU
                                           yolov5s-cls_paddle_model    # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path

import torch
import torch.nn.functional as F

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, print_args, strip_optimizer)
from utils.plots import Annotator
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(224, 224),  # inference size (height, width)
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        nosave=False,  # do not save images/videos
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/predict-cls',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.Tensor(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            results = model(im)

        # Post-process
        with dt[2]:
            pred = F.softmax(results, dim=1)  # probabilities

        # Process predictions
        for i, prob in enumerate(pred):  # per image
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt

            s += '%gx%g ' % im.shape[2:]  # print string
            annotator = Annotator(im0, example=str(names), pil=True)

            # Print results
            top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
            s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "

            # Write results
            text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
            if save_img or view_img:  # Add bbox to image
                annotator.text((32, 32), text, txt_color=(255, 255, 255))
            if save_txt:  # Write to file
                with open(f'{txt_path}.txt', 'a') as f:
                    f.write(text + '\n')

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
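A self-contained sketch of the per-image post-processing this script applies: softmax over the class logits, then the five highest-probability indices, exactly as in the "Post-process" and "Print results" steps above. The random logits are a stand-in for `model(im)` output.

import torch
import torch.nn.functional as F

logits = torch.randn(1, 1000)                           # stand-in for model(im) on one image
prob = F.softmax(logits, dim=1)[0]                      # class probabilities
top5i = prob.argsort(0, descending=True)[:5].tolist()   # top 5 class indices
print([(j, round(prob[j].item(), 2)) for j in top5i])   # (index, probability) pairs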
TextDetection/classify/train.py
ADDED
@@ -0,0 +1,333 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Train a YOLOv5 classifier model on a classification dataset

Usage - Single-GPU training:
    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3

Datasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
YOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
"""

import argparse
import os
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import torch
import torch.distributed as dist
import torch.hub as hub
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.cuda import amp
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from classify import val as validate
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel
from utils.dataloaders import create_classification_dataloader
from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
                           check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
                               smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
GIT_INFO = check_git_info()


def train(opt, device):
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    save_dir, data, bs, epochs, nw, imgsz, pretrained = \
        opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
        opt.imgsz, str(opt.pretrained).lower() == 'true'
    cuda = device.type != 'cpu'

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last, best = wdir / 'last.pt', wdir / 'best.pt'

    # Save run settings
    yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Logger
    logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None

    # Download Dataset
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        data_dir = data if data.is_dir() else (DATASETS_DIR / data)
        if not data_dir.is_dir():
            LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
            t = time.time()
            if str(data) == 'imagenet':
                subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], check=True)  # argument-list form; shell=True removed, since combining it with a list would discard the script path
            else:
                url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
                download(url, dir=data_dir.parent)
            s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
            LOGGER.info(s)

    # Dataloaders
    nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()])  # number of classes
    trainloader = create_classification_dataloader(path=data_dir / 'train',
                                                   imgsz=imgsz,
                                                   batch_size=bs // WORLD_SIZE,
                                                   augment=True,
                                                   cache=opt.cache,
                                                   rank=LOCAL_RANK,
                                                   workers=nw)

    test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val'  # data/test or data/val
    if RANK in {-1, 0}:
        testloader = create_classification_dataloader(path=test_dir,
                                                      imgsz=imgsz,
                                                      batch_size=bs // WORLD_SIZE * 2,
                                                      augment=False,
                                                      cache=opt.cache,
                                                      rank=-1,
                                                      workers=nw)

    # Model
    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
        if Path(opt.model).is_file() or opt.model.endswith('.pt'):
            model = attempt_load(opt.model, device='cpu', fuse=False)
        elif opt.model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
            model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
        else:
            m = hub.list('ultralytics/yolov5')  # + hub.list('pytorch/vision')  # models
            raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
        if isinstance(model, DetectionModel):
            LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
            model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model
        reshape_classifier_output(model, nc)  # update class count
    for m in model.modules():
        if not pretrained and hasattr(m, 'reset_parameters'):
            m.reset_parameters()
        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
            m.p = opt.dropout  # set dropout
    for p in model.parameters():
        p.requires_grad = True  # for training
    model = model.to(device)

    # Info
    if RANK in {-1, 0}:
        model.names = trainloader.dataset.classes  # attach class names
        model.transforms = testloader.dataset.torch_transforms  # attach inference transforms
        model_info(model)
        if opt.verbose:
            LOGGER.info(model)
        images, labels = next(iter(trainloader))
        file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
        logger.log_images(file, name='Train Examples')
        logger.log_graph(model, imgsz)  # log model

    # Optimizer
    optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)

    # Scheduler
    lrf = 0.01  # final lr (fraction of lr0)
    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
    #                                     final_div_factor=1 / 25 / lrf)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Train
    t0 = time.time()
    criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function
    best_fitness = 0.0
    scaler = amp.GradScaler(enabled=cuda)
    val = test_dir.stem  # 'val' or 'test'
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
                f'Using {nw * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
                f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
    for epoch in range(epochs):  # loop over the dataset multiple times
        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness
        model.train()
        if RANK != -1:
            trainloader.sampler.set_epoch(epoch)
        pbar = enumerate(trainloader)
        if RANK in {-1, 0}:
            pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
        for i, (images, labels) in pbar:  # progress bar
            images, labels = images.to(device, non_blocking=True), labels.to(device)

            # Forward
            with amp.autocast(enabled=cuda):  # stability issues when enabled
                loss = criterion(model(images), labels)

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            scaler.unscale_(optimizer)  # unscale gradients
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            if ema:
                ema.update(model)

            if RANK in {-1, 0}:
                # Print
                tloss = (tloss * i + loss.item()) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36

                # Test
                if i == len(pbar) - 1:  # last batch
                    top1, top5, vloss = validate.run(model=ema.ema,
                                                     dataloader=testloader,
                                                     criterion=criterion,
                                                     pbar=pbar)  # test accuracy, loss
                    fitness = top1  # define fitness as top1 accuracy

        # Scheduler
        scheduler.step()

        # Log metrics
        if RANK in {-1, 0}:
            # Best fitness
            if fitness > best_fitness:
                best_fitness = fitness

            # Log
            metrics = {
                'train/loss': tloss,
                f'{val}/loss': vloss,
                'metrics/accuracy_top1': top1,
                'metrics/accuracy_top5': top5,
                'lr/0': optimizer.param_groups[0]['lr']}  # learning rate
            logger.log_metrics(metrics, epoch)

            # Save model
            final_epoch = epoch + 1 == epochs
            if (not opt.nosave) or final_epoch:
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(ema.ema).half(),  # deepcopy(de_parallel(model)).half(),
                    'ema': None,  # deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': None,  # optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fitness:
                    torch.save(ckpt, best)
                del ckpt

    # Train complete
    if RANK in {-1, 0} and final_epoch:
        LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
                    f"\nResults saved to {colorstr('bold', save_dir)}"
                    f'\nPredict:         python classify/predict.py --weights {best} --source im.jpg'
                    f'\nValidate:        python classify/val.py --weights {best} --data {data_dir}'
                    f'\nExport:          python export.py --weights {best} --include onnx'
                    f"\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
                    f'\nVisualize:       https://netron.app\n')

        # Plot examples
        images, labels = (x[:25] for x in next(iter(testloader)))  # first 25 images and labels
        pred = torch.max(ema.ema(images.to(device)), 1)[1]
        file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg')

        # Log results
        meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()}
        logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
        logger.log_model(best, epochs, metadata=meta)


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
    parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
    parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
    parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
    parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
    parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
    parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
    parser.add_argument('--verbose', action='store_true', help='Verbose mode')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
    return parser.parse_known_args()[0] if known else parser.parse_args()


def main(opt):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements(ROOT / 'requirements.txt')

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')

    # Parameters
    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run

    # Train
    train(opt, device)


def run(**kwargs):
    # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
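A standalone sketch of the linear learning-rate schedule built in train() above: the lambda scales lr0 from 1.0 at epoch 0 down to lrf (1% of lr0) at the final epoch. The epoch count and rates below are just the script's defaults plugged in for illustration.

epochs, lrf, lr0 = 10, 0.01, 0.001
lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear decay factor, same lambda as train()
for epoch in (0, 5, 9):
    print(epoch, round(lr0 * lf(epoch), 6))  # effective learning rate per epoch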
TextDetection/classify/tutorial.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
TextDetection/classify/val.py
ADDED
@@ -0,0 +1,170 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 classification model on a classification dataset

Usage:
    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet

Usage - formats:
    $ python classify/val.py --weights yolov5s-cls.pt              # PyTorch
                                       yolov5s-cls.torchscript     # TorchScript
                                       yolov5s-cls.onnx            # ONNX Runtime or OpenCV DNN with --dnn
                                       yolov5s-cls_openvino_model  # OpenVINO
                                       yolov5s-cls.engine          # TensorRT
                                       yolov5s-cls.mlmodel         # CoreML (macOS-only)
                                       yolov5s-cls_saved_model     # TensorFlow SavedModel
                                       yolov5s-cls.pb              # TensorFlow GraphDef
                                       yolov5s-cls.tflite          # TensorFlow Lite
                                       yolov5s-cls_edgetpu.tflite  # TensorFlow Edge TPU
                                       yolov5s-cls_paddle_model    # PaddlePaddle
"""

import argparse
import os
import sys
from pathlib import Path

import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import create_classification_dataloader
from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
                           increment_path, print_args)
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        data=ROOT / '../datasets/mnist',  # dataset dir
        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
        batch_size=128,  # batch size
        imgsz=224,  # inference size (pixels)
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        verbose=False,  # verbose output
        project=ROOT / 'runs/val-cls',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        criterion=None,
        pbar=None,
):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        save_dir.mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Dataloader
        data = Path(data)
        test_dir = data / 'test' if (data / 'test').exists() else data / 'val'  # data/test or data/val
        dataloader = create_classification_dataloader(path=test_dir,
                                                      imgsz=imgsz,
                                                      batch_size=batch_size,
                                                      augment=False,
                                                      rank=-1,
                                                      workers=workers)

    model.eval()
    pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
    n = len(dataloader)  # number of batches
    action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
    desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
    with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
        for images, labels in bar:
            with dt[0]:
                images, labels = images.to(device, non_blocking=True), labels.to(device)

            with dt[1]:
                y = model(images)

            with dt[2]:
                pred.append(y.argsort(1, descending=True)[:, :5])
                targets.append(labels)
                if criterion:
                    loss += criterion(y, labels)

    loss /= n
    pred, targets = torch.cat(pred), torch.cat(targets)
    correct = (targets[:, None] == pred).float()
    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
    top1, top5 = acc.mean(0).tolist()

    if pbar:
        pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
    if verbose:  # all classes
        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
        for i, c in model.names.items():
            acc_i = acc[targets == i]
            top1i, top5i = acc_i.mean(0).tolist()
            LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')

        # Print results
        t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
        shape = (1, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    return top1, top5, loss


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
    parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
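A toy, self-contained version of the accuracy computation run() performs above: `pred` holds each sample's top-5 class indices, `targets` the true labels; top-1 checks column 0 only, top-5 checks any column.

import torch

pred = torch.tensor([[2, 0, 1, 3, 4], [1, 2, 0, 3, 4]])  # toy top-5 predictions for 2 samples
targets = torch.tensor([2, 0])                           # true classes
correct = (targets[:, None] == pred).float()             # 1.0 where a prediction matches the label
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) per sample
top1, top5 = acc.mean(0).tolist()
print(top1, top5)  # 0.5 1.0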
TextDetection/detect.py
ADDED
@@ -0,0 +1,271 @@
1 |
+
# YOLOv5 π by Ultralytics, AGPL-3.0 license
|
2 |
+
"""
|
3 |
+
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
|
4 |
+
|
5 |
+
Usage - sources:
|
6 |
+
$ python detect.py --weights yolov5s.pt --source 0 # webcam
|
7 |
+
img.jpg # image
|
8 |
+
vid.mp4 # video
|
9 |
+
screen # screenshot
|
10 |
+
path/ # directory
|
11 |
+
list.txt # list of images
|
12 |
+
list.streams # list of streams
|
13 |
+
'path/*.jpg' # glob
|
14 |
+
'https://youtu.be/Zgi9g1ksQHc' # YouTube
|
15 |
+
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
|
16 |
+
|
17 |
+
Usage - formats:
|
18 |
+
$ python detect.py --weights yolov5s.pt # PyTorch
|
19 |
+
yolov5s.torchscript # TorchScript
|
20 |
+
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
21 |
+
yolov5s_openvino_model # OpenVINO
|
22 |
+
yolov5s.engine # TensorRT
|
23 |
+
yolov5s.mlmodel # CoreML (macOS-only)
|
24 |
+
yolov5s_saved_model # TensorFlow SavedModel
|
25 |
+
yolov5s.pb # TensorFlow GraphDef
|
26 |
+
yolov5s.tflite # TensorFlow Lite
|
27 |
+
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
|
28 |
+
yolov5s_paddle_model # PaddlePaddle
|
29 |
+
"""
|
30 |
+
|
31 |
+
import argparse
|
32 |
+
import os
|
33 |
+
import platform
|
34 |
+
import sys
|
35 |
+
from pathlib import Path
|
36 |
+
|
37 |
+
import torch
|
38 |
+
|
39 |
+
FILE = Path(__file__).resolve()
|
40 |
+
ROOT = FILE.parents[0] # YOLOv5 root directory
|
41 |
+
if str(ROOT) not in sys.path:
|
42 |
+
sys.path.append(str(ROOT)) # add ROOT to PATH
|
43 |
+
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
44 |
+
|
45 |
+
from models.common import DetectMultiBackend
|
46 |
+
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
|
47 |
+
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
|
48 |
+
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
|
49 |
+
from utils.plots import Annotator, colors, save_one_box
|
50 |
+
from utils.torch_utils import select_device, smart_inference_mode
|
51 |
+
|
52 |
+
|
53 |
+
@smart_inference_mode()
|
54 |
+
def run(
|
55 |
+
weights=ROOT / 'yolov5s.pt', # model path or triton URL
|
56 |
+
source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam)
|
57 |
+
data=ROOT / 'data/coco128.yaml', # dataset.yaml path
|
58 |
+
imgsz=(640, 640), # inference size (height, width)
|
59 |
+
conf_thres=0.25, # confidence threshold
|
60 |
+
iou_thres=0.45, # NMS IOU threshold
|
61 |
+
max_det=1000, # maximum detections per image
|
62 |
+
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
63 |
+
view_img=False, # show results
|
64 |
+
save_txt=True, # save results to *.txt
|
65 |
+
save_conf=True, # save confidences in --save-txt labels
|
66 |
+
save_crop=False, # save cropped prediction boxes
|
67 |
+
nosave=False, # do not save images/videos
|
68 |
+
classes=None, # filter by class: --class 0, or --class 0 2 3
|
69 |
+
agnostic_nms=False, # class-agnostic NMS
|
70 |
+
augment=False, # augmented inference
|
71 |
+
visualize=False, # visualize features
|
72 |
+
update=False, # update all models
|
73 |
+
project=ROOT / 'runs/detect', # save results to project/name
|
74 |
+
name='exp', # save results to project/name
|
75 |
+
exist_ok=False, # existing project/name ok, do not increment
|
76 |
+
line_thickness=3, # bounding box thickness (pixels)
|
77 |
+
hide_labels=False, # hide labels
|
78 |
+
hide_conf=False, # hide confidences
|
79 |
+
half=False, # use FP16 half-precision inference
|
80 |
+
dnn=False, # use OpenCV DNN for ONNX inference
|
81 |
+
vid_stride=1, # video frame-rate stride
|
82 |
+
):
|
83 |
+
source = str(source)
|
84 |
+
save_img = not nosave and not source.endswith('.txt') # save inference images
|
85 |
+
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
|
86 |
+
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
|
87 |
+
webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
|
88 |
+
screenshot = source.lower().startswith('screen')
|
89 |
+
if is_url and is_file:
|
90 |
+
source = check_file(source) # download
|
91 |
+
|
92 |
+
# Directories
|
93 |
+
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
94 |
+
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
95 |
+
|
96 |
+
# Load model
|
97 |
+
device = select_device(device)
|
98 |
+
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
|
99 |
+
stride, names, pt = model.stride, model.names, model.pt
|
100 |
+
imgsz = check_img_size(imgsz, s=stride) # check image size
|
101 |
+
|
102 |
+
# Dataloader
|
103 |
+
bs = 1 # batch_size
|
104 |
+
if webcam:
|
105 |
+
view_img = check_imshow(warn=True)
|
106 |
+
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
|
107 |
+
bs = len(dataset)
|
108 |
+
elif screenshot:
|
109 |
+
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
|
110 |
+
else:
|
111 |
+
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
|
112 |
+
vid_path, vid_writer = [None] * bs, [None] * bs
|
113 |
+
|
114 |
+
# Run inference
|
115 |
+
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
|
116 |
+
seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
|
117 |
+
for path, im, im0s, vid_cap, s in dataset:
|
118 |
+
with dt[0]:
|
119 |
+
im = torch.from_numpy(im).to(model.device)
|
120 |
+
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
|
121 |
+
im /= 255 # 0 - 255 to 0.0 - 1.0
|
122 |
+
if len(im.shape) == 3:
|
123 |
+
im = im[None] # expand for batch dim
|
124 |
+
|
125 |
+
# Inference
|
126 |
+
with dt[1]:
|
127 |
+
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
|
128 |
+
pred = model(im, augment=augment, visualize=visualize)
|
129 |
+
|
130 |
+
# NMS
|
131 |
+
with dt[2]:
|
132 |
+
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
|
133 |
+
|
134 |
+
# Second-stage classifier (optional)
|
135 |
+
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
|
136 |
+
|
137 |
+
# Process predictions
|
138 |
+
for i, det in enumerate(pred): # per image
|
139 |
+
seen += 1
|
140 |
+
if webcam: # batch_size >= 1
|
141 |
+
p, im0, frame = path[i], im0s[i].copy(), dataset.count
|
142 |
+
s += f'{i}: '
|
143 |
+
else:
|
144 |
+
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
|
145 |
+
|
146 |
+
p = Path(p) # to Path
|
147 |
+
save_path = str(save_dir / p.name) # im.jpg
|
148 |
+
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
|
149 |
+
s += '%gx%g ' % im.shape[2:] # print string
|
150 |
+
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
|
151 |
+
imc = im0.copy() if save_crop else im0 # for save_crop
|
152 |
+
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
|
153 |
+
if len(det):
|
154 |
+
# Rescale boxes from img_size to im0 size
|
155 |
+
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
|
156 |
+
|
157 |
+
# Print results
|
158 |
+
for c in det[:, 5].unique():
|
159 |
+
n = (det[:, 5] == c).sum() # detections per class
|
160 |
+
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
|
161 |
+
|
162 |
+
# Write results
|
163 |
+
for *xyxy, conf, cls in reversed(det):
|
164 |
+
if save_txt: # Write to file
|
165 |
+
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
|
166 |
+
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
|
167 |
+
with open(f'{txt_path}.txt', 'a') as f:
|
168 |
+
f.write(('%g ' * len(line)).rstrip() % line + '\n')
|
169 |
+
|
170 |
+
if save_img or save_crop or view_img: # Add bbox to image
|
171 |
+
c = int(cls) # integer class
|
172 |
+
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
|
173 |
+
annotator.box_label(xyxy, label, color=colors(c, True))
|
174 |
+
if save_crop:
|
175 |
+
###changed part###
|
176 |
+
xydata = str(xyxy).replace("[tensor(", "x")
|
177 |
+
xydata = str(xydata).replace("tensor(", "y",1)
|
178 |
+
xydata = str(xydata).replace("tensor(", "w",1)
|
179 |
+
xydata = str(xydata).replace("tensor(", "h",1)
|
180 |
+
xydata = str(xydata).replace("]", "_",1)
|
181 |
+
                        xydata = str(xydata).replace(".), ", "")
                        xydata = str(xydata).replace(".)", "")  # strip tensor-repr artifacts so only the box coordinates remain
                        # str.replace("tensor", "_")
                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{xydata}{p.stem}.jpg', BGR=True)
                        ### changed part ###

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
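A minimal usage sketch (not part of the commit) for the modified crop-saving path above: with save_crop=True, detect.py writes one crop per detected box and, after this change, prefixes each crop filename with the stringified xyxy coordinates. The weights path below is an assumption; substitute your own checkpoint.

# Sketch: drive the modified detector from Python (paths are assumptions).
from detect import run

run(
    weights='runs/wordDetection/weights/best.pt',  # assumed word-detector checkpoint
    source='data/images',                          # file/dir/URL source
    save_crop=True,                                # exercises the changed save_one_box() call above
)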
TextDetection/export.py
ADDED
@@ -0,0 +1,863 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit

Format                      | `export.py --include`         | Model
---                         | ---                           | ---
PyTorch                     | -                             | yolov5s.pt
TorchScript                 | `torchscript`                 | yolov5s.torchscript
ONNX                        | `onnx`                        | yolov5s.onnx
OpenVINO                    | `openvino`                    | yolov5s_openvino_model/
TensorRT                    | `engine`                      | yolov5s.engine
CoreML                      | `coreml`                      | yolov5s.mlmodel
TensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/
TensorFlow GraphDef         | `pb`                          | yolov5s.pb
TensorFlow Lite             | `tflite`                      | yolov5s.tflite
TensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite
TensorFlow.js               | `tfjs`                        | yolov5s_web_model/
PaddlePaddle                | `paddle`                      | yolov5s_paddle_model/

Requirements:
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU

Usage:
    $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...

Inference:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle

TensorFlow.js:
    $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
    $ npm install
    $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
    $ npm start
"""

import argparse
import contextlib
import json
import os
import platform
import re
import subprocess
import sys
import time
import warnings
from pathlib import Path

import pandas as pd
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
if platform.system() != 'Windows':
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.experimental import attempt_load
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
from utils.dataloaders import LoadImages
from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
                           check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
from utils.torch_utils import select_device, smart_inference_mode

MACOS = platform.system() == 'Darwin'  # macOS environment


class iOSModel(torch.nn.Module):

    def __init__(self, model, im):
        super().__init__()
        b, c, h, w = im.shape  # batch, channel, height, width
        self.model = model
        self.nc = model.nc  # number of classes
        if w == h:
            self.normalize = 1. / w
        else:
            self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h])  # broadcast (slower, smaller)
            # np = model(im)[0].shape[1]  # number of points
            # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4)  # explicit (faster, larger)

    def forward(self, x):
        xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
        return cls * conf, xywh * self.normalize  # confidence (3780, 80), coordinates (3780, 4)


def export_formats():
    # YOLOv5 export formats
    x = [
        ['PyTorch', '-', '.pt', True, True],
        ['TorchScript', 'torchscript', '.torchscript', True, True],
        ['ONNX', 'onnx', '.onnx', True, True],
        ['OpenVINO', 'openvino', '_openvino_model', True, False],
        ['TensorRT', 'engine', '.engine', False, True],
        ['CoreML', 'coreml', '.mlmodel', True, False],
        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
        ['TensorFlow GraphDef', 'pb', '.pb', True, True],
        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
        ['TensorFlow.js', 'tfjs', '_web_model', False, False],
        ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ]
    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])


def try_export(inner_func):
    # YOLOv5 export decorator, i.e. @try_export
    inner_args = get_default_args(inner_func)

    def outer_func(*args, **kwargs):
        prefix = inner_args['prefix']
        try:
            with Profile() as dt:
                f, model = inner_func(*args, **kwargs)
            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
            return f, model
        except Exception as e:
            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
            return None, None

    return outer_func


@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
    # YOLOv5 TorchScript model export
    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
    f = file.with_suffix('.torchscript')

    ts = torch.jit.trace(model, im, strict=False)
    d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
    else:
        ts.save(str(f), _extra_files=extra_files)
    return f, None


@try_export
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
    # YOLOv5 ONNX export
    check_requirements('onnx>=1.12.0')
    import onnx

    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
    f = file.with_suffix('.onnx')

    output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
    if dynamic:
        dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}}  # shape(1,3,640,640)
        if isinstance(model, SegmentationModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
            dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'}  # shape(1,32,160,160)
        elif isinstance(model, DetectionModel):
            dynamic['output0'] = {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)

    torch.onnx.export(
        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
        im.cpu() if dynamic else im,
        f,
        verbose=False,
        opset_version=opset,
        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
        input_names=['images'],
        output_names=output_names,
        dynamic_axes=dynamic or None)

    # Checks
    model_onnx = onnx.load(f)  # load onnx model
    onnx.checker.check_model(model_onnx)  # check onnx model

    # Metadata
    d = {'stride': int(max(model.stride)), 'names': model.names}
    for k, v in d.items():
        meta = model_onnx.metadata_props.add()
        meta.key, meta.value = k, str(v)
    onnx.save(model_onnx, f)

    # Simplify
    if simplify:
        try:
            cuda = torch.cuda.is_available()
            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
            import onnxsim

            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
            model_onnx, check = onnxsim.simplify(model_onnx)
            assert check, 'assert check failed'
            onnx.save(model_onnx, f)
        except Exception as e:
            LOGGER.info(f'{prefix} simplifier failure: {e}')
    return f, model_onnx


@try_export
def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
    # YOLOv5 OpenVINO export
    check_requirements('openvino-dev>=2022.3')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
    import openvino.runtime as ov  # noqa
    from openvino.tools import mo  # noqa

    LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...')
    f = str(file).replace(file.suffix, f'_openvino_model{os.sep}')
    f_onnx = file.with_suffix('.onnx')
    f_ov = str(Path(f) / file.with_suffix('.xml').name)
    if int8:
        check_requirements('nncf')
        import nncf
        import numpy as np
        from openvino.runtime import Core

        from utils.dataloaders import create_dataloader
        core = Core()
        onnx_model = core.read_model(f_onnx)  # export

        def prepare_input_tensor(image: np.ndarray):
            input_tensor = image.astype(np.float32)  # uint8 to fp16/32
            input_tensor /= 255.0  # 0 - 255 to 0.0 - 1.0

            if input_tensor.ndim == 3:
                input_tensor = np.expand_dims(input_tensor, 0)
            return input_tensor

        def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4):
            data_yaml = check_yaml(yaml_path)
            data = check_dataset(data_yaml)
            dataloader = create_dataloader(data[task],
                                           imgsz=imgsz,
                                           batch_size=1,
                                           stride=32,
                                           pad=0.5,
                                           single_cls=False,
                                           rect=False,
                                           workers=workers)[0]
            return dataloader

        # noqa: F811

        def transform_fn(data_item):
            """
            Quantization transform function. Extracts and preprocess input data from dataloader item for quantization.
            Parameters:
                data_item: Tuple with data item produced by DataLoader during iteration
            Returns:
                input_tensor: Input data for quantization
            """
            img = data_item[0].numpy()
            input_tensor = prepare_input_tensor(img)
            return input_tensor

        ds = gen_dataloader(data)
        quantization_dataset = nncf.Dataset(ds, transform_fn)
        ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
    else:
        ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half)  # export

    ov.serialize(ov_model, f_ov)  # save
    yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return f, None


@try_export
def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
    # YOLOv5 Paddle export
    check_requirements(('paddlepaddle', 'x2paddle'))
    import x2paddle
    from x2paddle.convert import pytorch2paddle

    LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
    f = str(file).replace('.pt', f'_paddle_model{os.sep}')

    pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im])  # export
    yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
    return f, None


@try_export
def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')):
    # YOLOv5 CoreML export
    check_requirements('coremltools')
    import coremltools as ct

    LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
    f = file.with_suffix('.mlmodel')

    if nms:
        model = iOSModel(model, im)
    ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
    bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
    if bits < 32:
        if MACOS:  # quantization only supported on macOS
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=DeprecationWarning)  # suppress numpy==1.20 float warning
                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
        else:
            print(f'{prefix} quantization only supported on macOS, skipping...')
    ct_model.save(f)
    return f, ct_model


@try_export
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
    # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
    assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
    try:
        import tensorrt as trt
    except Exception:
        if platform.system() == 'Linux':
            check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
        import tensorrt as trt

    if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
        grid = model.model[-1].anchor_grid
        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
        model.model[-1].anchor_grid = grid
    else:  # TensorRT >= 8
        check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
    onnx = file.with_suffix('.onnx')

    LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
    assert onnx.exists(), f'failed to export ONNX file: {onnx}'
    f = file.with_suffix('.engine')  # TensorRT engine file
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE

    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.max_workspace_size = workspace * 1 << 30
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice

    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnx)):
        raise RuntimeError(f'failed to load ONNX file: {onnx}')

    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    for inp in inputs:
        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
    for out in outputs:
        LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')

    if dynamic:
        if im.shape[0] <= 1:
            LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
        profile = builder.create_optimization_profile()
        for inp in inputs:
            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
        config.add_optimization_profile(profile)

    LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
    if builder.platform_has_fast_fp16 and half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    return f, None


@try_export
def export_saved_model(model,
                       im,
                       file,
                       dynamic,
                       tf_nms=False,
                       agnostic_nms=False,
                       topk_per_class=100,
                       topk_all=100,
                       iou_thres=0.45,
                       conf_thres=0.25,
                       keras=False,
                       prefix=colorstr('TensorFlow SavedModel:')):
    # YOLOv5 TensorFlow SavedModel export
    try:
        import tensorflow as tf
    except Exception:
        check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
        import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    from models.tf import TFModel

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = str(file).replace('.pt', '_saved_model')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW

    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    keras_model.trainable = False
    keras_model.summary()
    if keras:
        keras_model.save(f, save_format='tf')
    else:
        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
        m = tf.function(lambda x: keras_model(x))  # full model
        m = m.get_concrete_function(spec)
        frozen_func = convert_variables_to_constants_v2(m)
        tfm = tf.Module()
        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
        tfm.__call__(im)
        tf.saved_model.save(tfm,
                            f,
                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
    return f, keras_model


@try_export
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
    # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
    import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    f = file.with_suffix('.pb')

    m = tf.function(lambda x: keras_model(x))  # full model
    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    frozen_func = convert_variables_to_constants_v2(m)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
    return f, None


@try_export
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
    # YOLOv5 TensorFlow Lite export
    import tensorflow as tf

    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
    batch_size, ch, *imgsz = list(im.shape)  # BCHW
    f = str(file).replace('.pt', '-fp16.tflite')

    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
    converter.target_spec.supported_types = [tf.float16]
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    if int8:
        from models.tf import representative_dataset_gen
        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.target_spec.supported_types = []
        converter.inference_input_type = tf.uint8  # or tf.int8
        converter.inference_output_type = tf.uint8  # or tf.int8
        converter.experimental_new_quantizer = True
        f = str(file).replace('.pt', '-int8.tflite')
    if nms or agnostic_nms:
        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)

    tflite_model = converter.convert()
    open(f, 'wb').write(tflite_model)
    return f, None


@try_export
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
    # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
    cmd = 'edgetpu_compiler --version'
    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
    if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
        for c in (
                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]

    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model

    subprocess.run([
        'edgetpu_compiler',
        '-s',
        '-d',
        '-k',
        '10',
        '--out_dir',
        str(file.parent),
        f_tfl, ], check=True)
    return f, None


@try_export
def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
    # YOLOv5 TensorFlow.js export
    check_requirements('tensorflowjs')
    import tensorflowjs as tfjs

    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
    f = str(file).replace('.pt', '_web_model')  # js dir
    f_pb = file.with_suffix('.pb')  # *.pb path
    f_json = f'{f}/model.json'  # *.json path

    args = [
        'tensorflowjs_converter',
        '--input_format=tf_frozen_model',
        '--quantize_uint8' if int8 else '',
        '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
        str(f_pb),
        str(f), ]
    subprocess.run([arg for arg in args if arg], check=True)

    json = Path(f_json).read_text()
    with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
        subst = re.sub(
            r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}, '
            r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
            r'"Identity_1": {"name": "Identity_1"}, '
            r'"Identity_2": {"name": "Identity_2"}, '
            r'"Identity_3": {"name": "Identity_3"}}}', json)
        j.write(subst)
    return f, None


def add_tflite_metadata(file, metadata, num_outputs):
    # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
    with contextlib.suppress(ImportError):
        # check_requirements('tflite_support')
        from tflite_support import flatbuffers
        from tflite_support import metadata as _metadata
        from tflite_support import metadata_schema_py_generated as _metadata_fb

        tmp_file = Path('/tmp/meta.txt')
        with open(tmp_file, 'w') as meta_f:
            meta_f.write(str(metadata))

        model_meta = _metadata_fb.ModelMetadataT()
        label_file = _metadata_fb.AssociatedFileT()
        label_file.name = tmp_file.name
        model_meta.associatedFiles = [label_file]

        subgraph = _metadata_fb.SubGraphMetadataT()
        subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
        subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
        model_meta.subgraphMetadata = [subgraph]

        b = flatbuffers.Builder(0)
        b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
        metadata_buf = b.Output()

        populator = _metadata.MetadataPopulator.with_model_file(file)
        populator.load_metadata_buffer(metadata_buf)
        populator.load_associated_files([str(tmp_file)])
        populator.populate()
        tmp_file.unlink()


def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')):
    # YOLOv5 CoreML pipeline
    import coremltools as ct
    from PIL import Image

    print(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
    batch_size, ch, h, w = list(im.shape)  # BCHW
    t = time.time()

    # YOLOv5 Output shapes
    spec = model.get_spec()
    out0, out1 = iter(spec.description.output)
    if platform.system() == 'Darwin':
        img = Image.new('RGB', (w, h))  # img(192 width, 320 height)
        # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection
        out = model.predict({'image': img})
        out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
    else:  # linux and windows can not run model.predict(), get sizes from pytorch output y
        s = tuple(y[0].shape)
        out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)

    # Checks
    nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
    na, nc = out0_shape
    # na, nc = out0.type.multiArrayType.shape  # number anchors, classes
    assert len(names) == nc, f'{len(names)} names found for nc={nc}'  # check

    # Define output shapes (missing)
    out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
    out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
    # spec.neuralNetwork.preprocessing[0].featureName = '0'

    # Flexible input shapes
    # from coremltools.models.neural_network import flexible_shape_utils
    # s = []  # shapes
    # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
    # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)
    # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
    # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges
    # r.add_height_range((192, 640))
    # r.add_width_range((192, 640))
    # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)

    # Print
    print(spec.description)

    # Model from spec
    model = ct.models.MLModel(spec)

    # 3. Create NMS protobuf
    nms_spec = ct.proto.Model_pb2.Model()
    nms_spec.specificationVersion = 5
    for i in range(2):
        decoder_output = model._spec.description.output[i].SerializeToString()
        nms_spec.description.input.add()
        nms_spec.description.input[i].ParseFromString(decoder_output)
        nms_spec.description.output.add()
        nms_spec.description.output[i].ParseFromString(decoder_output)

    nms_spec.description.output[0].name = 'confidence'
    nms_spec.description.output[1].name = 'coordinates'

    output_sizes = [nc, 4]
    for i in range(2):
        ma_type = nms_spec.description.output[i].type.multiArrayType
        ma_type.shapeRange.sizeRanges.add()
        ma_type.shapeRange.sizeRanges[0].lowerBound = 0
        ma_type.shapeRange.sizeRanges[0].upperBound = -1
        ma_type.shapeRange.sizeRanges.add()
        ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
        ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
        del ma_type.shape[:]

    nms = nms_spec.nonMaximumSuppression
    nms.confidenceInputFeatureName = out0.name  # 1x507x80
    nms.coordinatesInputFeatureName = out1.name  # 1x507x4
    nms.confidenceOutputFeatureName = 'confidence'
    nms.coordinatesOutputFeatureName = 'coordinates'
    nms.iouThresholdInputFeatureName = 'iouThreshold'
    nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
    nms.iouThreshold = 0.45
    nms.confidenceThreshold = 0.25
    nms.pickTop.perClass = True
    nms.stringClassLabels.vector.extend(names.values())
    nms_model = ct.models.MLModel(nms_spec)

    # 4. Pipeline models together
    pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
                                                           ('iouThreshold', ct.models.datatypes.Double()),
                                                           ('confidenceThreshold', ct.models.datatypes.Double())],
                                           output_features=['confidence', 'coordinates'])
    pipeline.add_model(model)
    pipeline.add_model(nms_model)

    # Correct datatypes
    pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
    pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
    pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())

    # Update metadata
    pipeline.spec.specificationVersion = 5
    pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5'
    pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5'
    pipeline.spec.description.metadata.author = '[email protected]'
    pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE'
    pipeline.spec.description.metadata.userDefined.update({
        'classes': ','.join(names.values()),
        'iou_threshold': str(nms.iouThreshold),
        'confidence_threshold': str(nms.confidenceThreshold)})

    # Save the model
    f = file.with_suffix('.mlmodel')  # filename
    model = ct.models.MLModel(pipeline.spec)
    model.input_description['image'] = 'Input image'
    model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})'
    model.input_description['confidenceThreshold'] = \
        f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})'
    model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
    model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
    model.save(f)  # pipelined
    print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)')


@smart_inference_mode()
def run(
        data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=(640, 640),  # image (height, width)
        batch_size=1,  # batch size
        device='cpu',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        include=('torchscript', 'onnx'),  # include formats
        half=False,  # FP16 half-precision export
        inplace=False,  # set YOLOv5 Detect() inplace=True
        keras=False,  # use Keras
        optimize=False,  # TorchScript: optimize for mobile
        int8=False,  # CoreML/TF INT8 quantization
        dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
        simplify=False,  # ONNX: simplify model
        opset=12,  # ONNX: opset version
        verbose=False,  # TensorRT: verbose log
        workspace=4,  # TensorRT: workspace size (GB)
        nms=False,  # TF: add NMS to model
        agnostic_nms=False,  # TF: add agnostic NMS to model
        topk_per_class=100,  # TF.js NMS: topk per class to keep
        topk_all=100,  # TF.js NMS: topk for all classes to keep
        iou_thres=0.45,  # TF.js NMS: IoU threshold
        conf_thres=0.25,  # TF.js NMS: confidence threshold
):
    t = time.time()
    include = [x.lower() for x in include]  # to lowercase
    fmts = tuple(export_formats()['Argument'][1:])  # --include arguments
    flags = [x in include for x in fmts]
    assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
    jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans
    file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)  # PyTorch weights

    # Load PyTorch model
    device = select_device(device)
    if half:
        assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
        assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
    model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model

    # Checks
    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
    if optimize:
        assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'

    # Input
    gs = int(max(model.stride))  # grid size (max stride)
    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
    im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection

    # Update model
    model.eval()
    for k, m in model.named_modules():
        if isinstance(m, Detect):
            m.inplace = inplace
            m.dynamic = dynamic
            m.export = True

    for _ in range(2):
        y = model(im)  # dry runs
    if half and not coreml:
        im, model = im.half(), model.half()  # to FP16
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
    metadata = {'stride': int(max(model.stride)), 'names': model.names}  # model metadata
    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")

    # Exports
    f = [''] * len(fmts)  # exported filenames
    warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
    if jit:  # TorchScript
        f[0], _ = export_torchscript(model, im, file, optimize)
    if engine:  # TensorRT required before ONNX
        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
    if onnx or xml:  # OpenVINO requires ONNX
        f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
    if xml:  # OpenVINO
        f[3], _ = export_openvino(file, metadata, half, int8, data)
    if coreml:  # CoreML
        f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
        if nms:
            pipeline_coreml(ct_model, im, file, model.names, y)
    if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
        assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
        assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
        f[5], s_model = export_saved_model(model.cpu(),
                                           im,
                                           file,
                                           dynamic,
                                           tf_nms=nms or agnostic_nms or tfjs,
                                           agnostic_nms=agnostic_nms or tfjs,
                                           topk_per_class=topk_per_class,
                                           topk_all=topk_all,
                                           iou_thres=iou_thres,
                                           conf_thres=conf_thres,
                                           keras=keras)
        if pb or tfjs:  # pb prerequisite to tfjs
            f[6], _ = export_pb(s_model, file)
        if tflite or edgetpu:
            f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
            if edgetpu:
                f[8], _ = export_edgetpu(file)
            add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
        if tfjs:
            f[9], _ = export_tfjs(file, int8)
    if paddle:  # PaddlePaddle
        f[10], _ = export_paddle(model, im, file, metadata)

    # Finish
    f = [str(x) for x in f if x]  # filter out '' and None
    if any(f):
        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
        det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)
        dir = Path('segment' if seg else 'classify' if cls else '')
        h = '--half' if half else ''  # --half FP16 inference arg
        s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \
            '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else ''
        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                    f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
                    f"\nDetect:          python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
                    f"\nValidate:        python {dir / 'val.py'} --weights {f[-1]} {h}"
                    f"\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')  {s}"
                    f'\nVisualize:       https://netron.app')
    return f  # return list of exported files/dirs


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
    parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
    parser.add_argument('--keras', action='store_true', help='TF: use Keras')
    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
    parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization')
    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
    parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
    parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
    parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
    parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
    parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
    parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
    parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
    parser.add_argument(
        '--include',
        nargs='+',
        default=['torchscript'],
        help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
    opt = parser.parse_known_args()[0] if known else parser.parse_args()
    print_args(vars(opt))
    return opt


def main(opt):
    for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
        run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
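The exporter can also be driven from Python rather than the CLI; a minimal sketch, assuming a yolov5s.pt checkpoint in the working directory. run() returns the list of exported files, so the result can be passed straight to detect.py or DetectMultiBackend.

# Sketch: programmatic equivalent of `python export.py --weights yolov5s.pt --include torchscript onnx`
from export import run as export_model

exported = export_model(weights='yolov5s.pt',              # assumed local checkpoint
                        include=('torchscript', 'onnx'),   # formats to emit
                        imgsz=(640, 640))
print(exported)  # e.g. ['yolov5s.torchscript', 'yolov5s.onnx']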
TextDetection/hubconf.py
ADDED
@@ -0,0 +1,169 @@
1 |
+
# YOLOv5 π by Ultralytics, AGPL-3.0 license
|
2 |
+
"""
|
3 |
+
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
|
4 |
+
|
5 |
+
Usage:
|
6 |
+
import torch
|
7 |
+
model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model
|
8 |
+
model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch
|
9 |
+
model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model
|
10 |
+
model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo
|
11 |
+
"""
|
12 |
+
|
13 |
+
import torch
|
14 |
+
|
15 |
+
|
16 |
+
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
|
17 |
+
"""Creates or loads a YOLOv5 model
|
18 |
+
|
19 |
+
Arguments:
|
20 |
+
name (str): model name 'yolov5s' or path 'path/to/best.pt'
|
21 |
+
pretrained (bool): load pretrained weights into the model
|
22 |
+
channels (int): number of input channels
|
23 |
+
classes (int): number of model classes
|
24 |
+
autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
|
25 |
+
verbose (bool): print all information to screen
|
26 |
+
device (str, torch.device, None): device to use for model parameters
|
27 |
+
|
28 |
+
Returns:
|
29 |
+
YOLOv5 model
|
30 |
+
"""
|
31 |
+
from pathlib import Path
|
32 |
+
|
33 |
+
from models.common import AutoShape, DetectMultiBackend
|
34 |
+
from models.experimental import attempt_load
|
35 |
+
from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
|
36 |
+
from utils.downloads import attempt_download
|
37 |
+
from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
|
38 |
+
from utils.torch_utils import select_device
|
39 |
+
|
40 |
+
if not verbose:
|
41 |
+
LOGGER.setLevel(logging.WARNING)
|
42 |
+
check_requirements(ROOT / 'requirements.txt', exclude=('opencv-python', 'tensorboard', 'thop'))
|
43 |
+
name = Path(name)
|
44 |
+
path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path
|
45 |
+
try:
|
46 |
+
device = select_device(device)
|
47 |
+
if pretrained and channels == 3 and classes == 80:
|
48 |
+
try:
|
49 |
+
model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model
|
50 |
+
if autoshape:
|
51 |
+
if model.pt and isinstance(model.model, ClassificationModel):
|
52 |
+
LOGGER.warning('WARNING β οΈ YOLOv5 ClassificationModel is not yet AutoShape compatible. '
|
53 |
+
'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
|
54 |
+
elif model.pt and isinstance(model.model, SegmentationModel):
|
55 |
+
LOGGER.warning('WARNING β οΈ YOLOv5 SegmentationModel is not yet AutoShape compatible. '
|
56 |
+
'You will not be able to run inference with this model.')
|
57 |
+
else:
|
58 |
+
model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
|
59 |
+
except Exception:
|
60 |
+
model = attempt_load(path, device=device, fuse=False) # arbitrary model
|
61 |
+
else:
|
62 |
+
cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
|
63 |
+
model = DetectionModel(cfg, channels, classes) # create model
|
64 |
+
if pretrained:
|
65 |
+
ckpt = torch.load(attempt_download(path), map_location=device) # load
|
66 |
+
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
|
67 |
+
csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect
|
68 |
+
model.load_state_dict(csd, strict=False) # load
|
69 |
+
if len(ckpt['model'].names) == classes:
|
70 |
+
model.names = ckpt['model'].names # set class names attribute
|
71 |
+
if not verbose:
|
72 |
+
LOGGER.setLevel(logging.INFO) # reset to default
|
73 |
+
return model.to(device)
|
74 |
+
|
75 |
+
except Exception as e:
|
76 |
+
help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading'
|
        s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
        raise Exception(s) from e


def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
    # YOLOv5 custom or local model
    return _create(path, autoshape=autoshape, verbose=_verbose, device=device)


def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-nano model https://github.com/ultralytics/yolov5
    return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-small model https://github.com/ultralytics/yolov5
    return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-medium model https://github.com/ultralytics/yolov5
    return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-large model https://github.com/ultralytics/yolov5
    return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
    return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)


def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
    # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)


if __name__ == '__main__':
    import argparse
    from pathlib import Path

    import numpy as np
    from PIL import Image

    from utils.general import cv2, print_args

    # Argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='yolov5s', help='model name')
    opt = parser.parse_args()
    print_args(vars(opt))

    # Model
    model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
    # model = custom(path='path/to/model.pt')  # custom

    # Images
    imgs = [
        'data/images/zidane.jpg',  # filename
        Path('data/images/zidane.jpg'),  # Path
        'https://ultralytics.com/images/zidane.jpg',  # URI
        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
        Image.open('data/images/bus.jpg'),  # PIL
        np.zeros((320, 640, 3))]  # numpy

    # Inference
    results = model(imgs, size=320)  # batched inference

    # Results
    results.print()
    results.save()
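These hub entry points also expose the word-detection checkpoint committed later in this diff through custom(). A minimal sketch, assuming the Git LFS weights have been fetched and the working directory is the Space root; 'page.jpg' is a hypothetical input image:

    # Sketch, not part of the commit: load the committed word-detection
    # weights via the custom() entry point defined above.
    import torch

    model = torch.hub.load('TextDetection', 'custom',
                           path='TextDetection/runs/wordDetection/weights/best.pt',
                           source='local')  # local hubconf.py, local checkpoint
    results = model('page.jpg')  # hypothetical handwriting page image
    results.print()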
TextDetection/requirements.txt
ADDED
@@ -0,0 +1,49 @@
# YOLOv5 requirements
# Usage: pip install -r requirements.txt

# Base ------------------------------------------------------------------------
gitpython>=3.1.30
matplotlib>=3.3
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
psutil  # system resources
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
thop>=0.1.1  # FLOPs computation
torch>=1.7.0  # see https://pytorch.org/get-started/locally (recommended)
torchvision>=0.8.1
tqdm>=4.64.0
ultralytics>=8.0.111
# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

# Logging ---------------------------------------------------------------------
# tensorboard>=2.4.1
# clearml>=1.2.0
# comet

# Plotting --------------------------------------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# Export ----------------------------------------------------------------------
# coremltools>=6.0  # CoreML export
# onnx>=1.10.0  # ONNX export
# onnx-simplifier>=0.4.1  # ONNX simplifier
# nvidia-pyindex  # TensorRT export
# nvidia-tensorrt  # TensorRT export
# scikit-learn<=1.1.2  # CoreML quantization
# tensorflow>=2.4.0  # TF exports (-cpu, -aarch64, -macos)
# tensorflowjs>=3.9.0  # TF.js export
# openvino-dev  # OpenVINO export

# Deploy ----------------------------------------------------------------------
setuptools>=65.5.1  # Snyk vulnerability fix
# tritonclient[all]~=2.24.0

# Extras ----------------------------------------------------------------------
# ipython  # interactive notebook
# mss  # screenshots
# albumentations>=1.0.3
# pycocotools>=2.0.6  # COCO mAP
TextDetection/runs/wordDetection/F1_curve.png
ADDED
TextDetection/runs/wordDetection/PR_curve.png
ADDED
TextDetection/runs/wordDetection/P_curve.png
ADDED
TextDetection/runs/wordDetection/R_curve.png
ADDED
TextDetection/runs/wordDetection/confusion_matrix.png
ADDED
TextDetection/runs/wordDetection/hyp.yaml
ADDED
@@ -0,0 +1,28 @@
lr0: 0.01
lrf: 0.1
momentum: 0.937
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 0.05
cls: 0.3
cls_pw: 1.0
obj: 0.7
obj_pw: 1.0
iou_t: 0.2
anchor_t: 4.0
fl_gamma: 0.0
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.9
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
mosaic: 1.0
mixup: 0.1
copy_paste: 0.1
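This file records the optimizer, warmup, loss-gain and augmentation hyperparameters the run was trained with; the train() function shown later in this diff loads such a file with yaml.safe_load. A minimal sketch of reading it back, assuming the file path above:

    # Sketch: read the saved hyperparameters back the same way train() does.
    import yaml

    with open('TextDetection/runs/wordDetection/hyp.yaml', errors='ignore') as f:
        hyp = yaml.safe_load(f)  # plain dict of hyperparameters
    print(hyp['lr0'], hyp['fliplr'], hyp['mosaic'])  # 0.01 0.5 1.0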
TextDetection/runs/wordDetection/labels.jpg
ADDED
TextDetection/runs/wordDetection/labels_correlogram.jpg
ADDED
TextDetection/runs/wordDetection/opt.yaml
ADDED
@@ -0,0 +1,68 @@
weights: yolov5s.pt
cfg: C:\Users\ParkLab\yolov5\models\yolov5s.yaml
data: C:\Users\ParkLab\yolov5\data\HCR.yaml
hyp:
  lr0: 0.01
  lrf: 0.1
  momentum: 0.937
  weight_decay: 0.0005
  warmup_epochs: 3.0
  warmup_momentum: 0.8
  warmup_bias_lr: 0.1
  box: 0.05
  cls: 0.3
  cls_pw: 1.0
  obj: 0.7
  obj_pw: 1.0
  iou_t: 0.2
  anchor_t: 4.0
  fl_gamma: 0.0
  hsv_h: 0.015
  hsv_s: 0.7
  hsv_v: 0.4
  degrees: 0.0
  translate: 0.1
  scale: 0.9
  shear: 0.0
  perspective: 0.0
  flipud: 0.0
  fliplr: 0.5
  mosaic: 1.0
  mixup: 0.1
  copy_paste: 0.1
epochs: 100
batch_size: 32
imgsz: 640
rect: false
resume: false
nosave: false
noval: false
noautoanchor: false
noplots: false
evolve: null
bucket: ''
cache: null
image_weights: false
device: '0'
multi_scale: false
single_cls: false
optimizer: SGD
sync_bn: false
workers: 8
project: runs\train
name: yolo_word_det
exist_ok: false
quad: false
cos_lr: false
label_smoothing: 0.0
patience: 100
freeze:
- 0
save_period: -1
seed: 0
local_rank: -1
entity: null
upload_dataset: false
bbox_interval: -1
artifact_alias: latest
save_dir: runs\train\yolo_word_det5
TextDetection/runs/wordDetection/results.csv
ADDED
@@ -0,0 +1,101 @@
epoch, train/box_loss, train/obj_loss, train/cls_loss, metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss, x/lr0, x/lr1, x/lr2
0, 0.14343, 0.1321, 0, 0.10678, 0.24045, 0.070025, 0.016663, 0.12338, 0.16908, 0, 0.070769, 0.0032479, 0.0032479
1, 0.11814, 0.17616, 0, 0.29756, 0.4878, 0.29039, 0.090169, 0.09548, 0.21864, 0, 0.04071, 0.006522, 0.006522
2, 0.11103, 0.19449, 0, 0.36226, 0.46782, 0.33602, 0.084028, 0.10112, 0.22694, 0, 0.010591, 0.0097361, 0.0097361
3, 0.10865, 0.19592, 0, 0.43876, 0.69566, 0.51856, 0.18541, 0.086634, 0.2838, 0, 0.00973, 0.00973, 0.00973
4, 0.10498, 0.19423, 0, 0.42118, 0.70279, 0.50729, 0.18303, 0.083472, 0.25062, 0, 0.00973, 0.00973, 0.00973
5, 0.10106, 0.1933, 0, 0.75029, 0.74585, 0.7626, 0.36368, 0.0782, 0.20763, 0, 0.00964, 0.00964, 0.00964
6, 0.10067, 0.18856, 0, 0.88406, 0.79332, 0.81448, 0.38753, 0.071593, 0.19628, 0, 0.00955, 0.00955, 0.00955
7, 0.097406, 0.19524, 0, 0.84659, 0.8041, 0.82744, 0.45989, 0.069756, 0.19131, 0, 0.00946, 0.00946, 0.00946
8, 0.096121, 0.19137, 0, 0.87909, 0.80622, 0.83644, 0.48052, 0.067627, 0.18748, 0, 0.00937, 0.00937, 0.00937
9, 0.095513, 0.19479, 0, 0.87642, 0.78766, 0.82577, 0.47358, 0.067359, 0.19059, 0, 0.00928, 0.00928, 0.00928
10, 0.093917, 0.18792, 0, 0.91461, 0.81907, 0.84504, 0.5117, 0.065237, 0.18478, 0, 0.00919, 0.00919, 0.00919
11, 0.093425, 0.19291, 0, 0.90101, 0.80465, 0.83468, 0.474, 0.068537, 0.19139, 0, 0.0091, 0.0091, 0.0091
12, 0.09289, 0.18656, 0, 0.92043, 0.82673, 0.85478, 0.53099, 0.064175, 0.18201, 0, 0.00901, 0.00901, 0.00901
13, 0.090676, 0.18805, 0, 0.92008, 0.82673, 0.85636, 0.51892, 0.064579, 0.18353, 0, 0.00892, 0.00892, 0.00892
14, 0.093122, 0.19202, 0, 0.92253, 0.83725, 0.8624, 0.57188, 0.060162, 0.17608, 0, 0.00883, 0.00883, 0.00883
15, 0.09126, 0.1907, 0, 0.92946, 0.83274, 0.8676, 0.54458, 0.062534, 0.17887, 0, 0.00874, 0.00874, 0.00874
16, 0.090793, 0.18172, 0, 0.93174, 0.83929, 0.86568, 0.58843, 0.059511, 0.17312, 0, 0.00865, 0.00865, 0.00865
17, 0.089946, 0.1857, 0, 0.92914, 0.83106, 0.86364, 0.57621, 0.061277, 0.17707, 0, 0.00856, 0.00856, 0.00856
18, 0.091165, 0.1837, 0, 0.92212, 0.83831, 0.86719, 0.55994, 0.061093, 0.17976, 0, 0.00847, 0.00847, 0.00847
19, 0.088599, 0.18972, 0, 0.93237, 0.84209, 0.87114, 0.62397, 0.057884, 0.16953, 0, 0.00838, 0.00838, 0.00838
20, 0.090061, 0.18562, 0, 0.93445, 0.83685, 0.87553, 0.60871, 0.0585, 0.16933, 0, 0.00829, 0.00829, 0.00829
21, 0.088208, 0.18263, 0, 0.93335, 0.84114, 0.87449, 0.60064, 0.059247, 0.17255, 0, 0.0082, 0.0082, 0.0082
22, 0.0895, 0.18648, 0, 0.93338, 0.84309, 0.87566, 0.61248, 0.057899, 0.16824, 0, 0.00811, 0.00811, 0.00811
23, 0.08849, 0.18464, 0, 0.93093, 0.84751, 0.87541, 0.59865, 0.059204, 0.17046, 0, 0.00802, 0.00802, 0.00802
24, 0.087192, 0.18088, 0, 0.93032, 0.84618, 0.87731, 0.63006, 0.057412, 0.16889, 0, 0.00793, 0.00793, 0.00793
25, 0.088312, 0.1889, 0, 0.93524, 0.84637, 0.88106, 0.63846, 0.056614, 0.16571, 0, 0.00784, 0.00784, 0.00784
26, 0.088479, 0.18287, 0, 0.93518, 0.85074, 0.88419, 0.65769, 0.055262, 0.16252, 0, 0.00775, 0.00775, 0.00775
27, 0.087063, 0.18672, 0, 0.9345, 0.84504, 0.87943, 0.61842, 0.05768, 0.16942, 0, 0.00766, 0.00766, 0.00766
28, 0.086641, 0.18357, 0, 0.9364, 0.84936, 0.88206, 0.64976, 0.05565, 0.16261, 0, 0.00757, 0.00757, 0.00757
29, 0.086989, 0.18444, 0, 0.93528, 0.8431, 0.87314, 0.63409, 0.056122, 0.16428, 0, 0.00748, 0.00748, 0.00748
30, 0.085464, 0.17787, 0, 0.93888, 0.85282, 0.88306, 0.65505, 0.054957, 0.16096, 0, 0.00739, 0.00739, 0.00739
31, 0.086473, 0.18357, 0, 0.93646, 0.84441, 0.88046, 0.63613, 0.05671, 0.16618, 0, 0.0073, 0.0073, 0.0073
32, 0.086182, 0.17859, 0, 0.94104, 0.85224, 0.88425, 0.65941, 0.054651, 0.15962, 0, 0.00721, 0.00721, 0.00721
33, 0.085764, 0.17765, 0, 0.93804, 0.85115, 0.8856, 0.64438, 0.056184, 0.16485, 0, 0.00712, 0.00712, 0.00712
34, 0.08671, 0.17883, 0, 0.93353, 0.85538, 0.88441, 0.64989, 0.055651, 0.16415, 0, 0.00703, 0.00703, 0.00703
35, 0.084131, 0.17841, 0, 0.94215, 0.85281, 0.8878, 0.67065, 0.054301, 0.158, 0, 0.00694, 0.00694, 0.00694
36, 0.085284, 0.17247, 0, 0.94246, 0.85436, 0.88769, 0.67468, 0.054275, 0.15919, 0, 0.00685, 0.00685, 0.00685
37, 0.085696, 0.17306, 0, 0.94052, 0.85661, 0.88989, 0.67156, 0.054833, 0.15942, 0, 0.00676, 0.00676, 0.00676
38, 0.086575, 0.1815, 0, 0.93904, 0.85797, 0.89072, 0.67339, 0.054065, 0.15824, 0, 0.00667, 0.00667, 0.00667
39, 0.084747, 0.17844, 0, 0.94037, 0.85318, 0.88526, 0.66841, 0.054299, 0.15972, 0, 0.00658, 0.00658, 0.00658
40, 0.085733, 0.17424, 0, 0.94515, 0.85003, 0.89003, 0.66812, 0.053869, 0.15828, 0, 0.00649, 0.00649, 0.00649
41, 0.086109, 0.17978, 0, 0.94387, 0.85705, 0.89019, 0.66022, 0.05489, 0.16003, 0, 0.0064, 0.0064, 0.0064
42, 0.084408, 0.17579, 0, 0.93925, 0.84928, 0.88556, 0.66384, 0.054702, 0.16477, 0, 0.00631, 0.00631, 0.00631
43, 0.083645, 0.17836, 0, 0.94362, 0.86298, 0.89463, 0.68944, 0.052935, 0.15475, 0, 0.00622, 0.00622, 0.00622
44, 0.08338, 0.17339, 0, 0.94253, 0.86408, 0.89546, 0.6745, 0.053576, 0.15763, 0, 0.00613, 0.00613, 0.00613
45, 0.084568, 0.17409, 0, 0.92463, 0.84786, 0.87813, 0.66222, 0.054652, 0.16843, 0, 0.00604, 0.00604, 0.00604
46, 0.084783, 0.1667, 0, 0.94382, 0.85541, 0.88957, 0.67704, 0.053853, 0.15688, 0, 0.00595, 0.00595, 0.00595
47, 0.083283, 0.17465, 0, 0.94633, 0.8605, 0.89158, 0.68796, 0.053052, 0.15478, 0, 0.00586, 0.00586, 0.00586
48, 0.084094, 0.17705, 0, 0.94249, 0.86307, 0.89251, 0.69703, 0.052554, 0.15498, 0, 0.00577, 0.00577, 0.00577
49, 0.083676, 0.17547, 0, 0.94565, 0.86156, 0.89494, 0.69568, 0.052399, 0.15305, 0, 0.00568, 0.00568, 0.00568
50, 0.083022, 0.1728, 0, 0.94449, 0.86147, 0.89294, 0.69564, 0.052254, 0.15408, 0, 0.00559, 0.00559, 0.00559
51, 0.081813, 0.1704, 0, 0.94366, 0.86687, 0.89606, 0.69269, 0.052507, 0.15389, 0, 0.0055, 0.0055, 0.0055
52, 0.083914, 0.1764, 0, 0.92865, 0.84273, 0.87447, 0.66104, 0.054271, 0.16671, 0, 0.00541, 0.00541, 0.00541
53, 0.082179, 0.1702, 0, 0.94446, 0.86165, 0.89463, 0.70157, 0.052073, 0.15209, 0, 0.00532, 0.00532, 0.00532
54, 0.081849, 0.17025, 0, 0.9405, 0.84998, 0.89014, 0.68139, 0.053257, 0.15801, 0, 0.00523, 0.00523, 0.00523
55, 0.083674, 0.17681, 0, 0.94171, 0.86368, 0.89523, 0.69899, 0.051968, 0.15354, 0, 0.00514, 0.00514, 0.00514
56, 0.082308, 0.17403, 0, 0.93685, 0.86183, 0.88979, 0.66637, 0.053955, 0.15908, 0, 0.00505, 0.00505, 0.00505
57, 0.082482, 0.17658, 0, 0.94371, 0.86209, 0.89347, 0.67414, 0.052889, 0.15547, 0, 0.00496, 0.00496, 0.00496
58, 0.081728, 0.16741, 0, 0.94564, 0.86112, 0.89675, 0.70912, 0.051474, 0.1514, 0, 0.00487, 0.00487, 0.00487
59, 0.083933, 0.1704, 0, 0.94333, 0.86307, 0.89448, 0.70195, 0.05189, 0.15253, 0, 0.00478, 0.00478, 0.00478
60, 0.082575, 0.17006, 0, 0.94497, 0.86607, 0.89639, 0.70839, 0.051361, 0.15026, 0, 0.00469, 0.00469, 0.00469
61, 0.082538, 0.16962, 0, 0.93968, 0.8643, 0.89563, 0.69941, 0.051753, 0.1522, 0, 0.0046, 0.0046, 0.0046
62, 0.080964, 0.16399, 0, 0.94686, 0.85971, 0.89541, 0.71117, 0.051314, 0.15036, 0, 0.00451, 0.00451, 0.00451
63, 0.081617, 0.17485, 0, 0.9446, 0.85228, 0.89139, 0.68514, 0.052893, 0.15677, 0, 0.00442, 0.00442, 0.00442
64, 0.082902, 0.17403, 0, 0.94468, 0.85752, 0.89338, 0.6903, 0.052549, 0.15499, 0, 0.00433, 0.00433, 0.00433
65, 0.077992, 0.165, 0, 0.94387, 0.8622, 0.89289, 0.69896, 0.05173, 0.15193, 0, 0.00424, 0.00424, 0.00424
66, 0.081977, 0.1678, 0, 0.94592, 0.86432, 0.89541, 0.69135, 0.052076, 0.15315, 0, 0.00415, 0.00415, 0.00415
67, 0.083111, 0.17345, 0, 0.94692, 0.8613, 0.89599, 0.70917, 0.051276, 0.15006, 0, 0.00406, 0.00406, 0.00406
68, 0.081756, 0.17021, 0, 0.94576, 0.86254, 0.89739, 0.71084, 0.051092, 0.15002, 0, 0.00397, 0.00397, 0.00397
69, 0.082199, 0.17096, 0, 0.9463, 0.86643, 0.89746, 0.71133, 0.050999, 0.14945, 0, 0.00388, 0.00388, 0.00388
70, 0.082778, 0.17548, 0, 0.9482, 0.8609, 0.89782, 0.71145, 0.051164, 0.14959, 0, 0.00379, 0.00379, 0.00379
71, 0.080038, 0.16676, 0, 0.94958, 0.86736, 0.90067, 0.70935, 0.0511, 0.15013, 0, 0.0037, 0.0037, 0.0037
72, 0.081567, 0.16679, 0, 0.94337, 0.86651, 0.89831, 0.70053, 0.051656, 0.15132, 0, 0.00361, 0.00361, 0.00361
73, 0.080533, 0.16562, 0, 0.95096, 0.86536, 0.90124, 0.72249, 0.050545, 0.1472, 0, 0.00352, 0.00352, 0.00352
74, 0.081029, 0.16575, 0, 0.94243, 0.85382, 0.89317, 0.6888, 0.052537, 0.15647, 0, 0.00343, 0.00343, 0.00343
75, 0.081182, 0.17176, 0, 0.95107, 0.86266, 0.90062, 0.72069, 0.050538, 0.14724, 0, 0.00334, 0.00334, 0.00334
76, 0.081938, 0.16753, 0, 0.95038, 0.86699, 0.90307, 0.72068, 0.050536, 0.14704, 0, 0.00325, 0.00325, 0.00325
77, 0.081434, 0.16673, 0, 0.95063, 0.86647, 0.90187, 0.72559, 0.050253, 0.14626, 0, 0.00316, 0.00316, 0.00316
78, 0.083186, 0.16854, 0, 0.93933, 0.85865, 0.89181, 0.69442, 0.052013, 0.15513, 0, 0.00307, 0.00307, 0.00307
79, 0.080724, 0.16352, 0, 0.94591, 0.86574, 0.90045, 0.70892, 0.051192, 0.15029, 0, 0.00298, 0.00298, 0.00298
80, 0.082062, 0.16734, 0, 0.94767, 0.86283, 0.899, 0.72162, 0.050446, 0.1479, 0, 0.00289, 0.00289, 0.00289
81, 0.082112, 0.16387, 0, 0.94625, 0.87067, 0.8994, 0.72242, 0.050153, 0.14716, 0, 0.0028, 0.0028, 0.0028
82, 0.081101, 0.16599, 0, 0.94863, 0.86682, 0.90004, 0.7212, 0.050394, 0.14794, 0, 0.00271, 0.00271, 0.00271
83, 0.080418, 0.17192, 0, 0.94982, 0.86881, 0.9021, 0.72215, 0.050213, 0.14673, 0, 0.00262, 0.00262, 0.00262
84, 0.080462, 0.16803, 0, 0.9455, 0.86421, 0.89992, 0.70799, 0.051035, 0.14959, 0, 0.00253, 0.00253, 0.00253
85, 0.081527, 0.17278, 0, 0.94606, 0.86855, 0.89989, 0.72238, 0.050204, 0.14736, 0, 0.00244, 0.00244, 0.00244
86, 0.08107, 0.16729, 0, 0.95121, 0.86766, 0.90192, 0.72732, 0.049951, 0.14585, 0, 0.00235, 0.00235, 0.00235
87, 0.080949, 0.16978, 0, 0.94822, 0.8644, 0.89882, 0.7229, 0.050133, 0.14727, 0, 0.00226, 0.00226, 0.00226
88, 0.082379, 0.17123, 0, 0.94819, 0.86717, 0.90187, 0.71682, 0.050697, 0.14834, 0, 0.00217, 0.00217, 0.00217
89, 0.079732, 0.16071, 0, 0.9487, 0.86813, 0.90238, 0.72628, 0.05002, 0.14618, 0, 0.00208, 0.00208, 0.00208
90, 0.0803, 0.1615, 0, 0.95029, 0.86793, 0.90243, 0.72982, 0.049789, 0.14528, 0, 0.00199, 0.00199, 0.00199
91, 0.080364, 0.16062, 0, 0.94924, 0.86469, 0.90068, 0.72207, 0.050106, 0.14672, 0, 0.0019, 0.0019, 0.0019
92, 0.081735, 0.16763, 0, 0.94764, 0.87037, 0.90141, 0.72725, 0.049953, 0.14589, 0, 0.00181, 0.00181, 0.00181
93, 0.080632, 0.16383, 0, 0.94882, 0.86704, 0.90079, 0.72667, 0.04985, 0.14562, 0, 0.00172, 0.00172, 0.00172
94, 0.08076, 0.15877, 0, 0.94522, 0.87164, 0.90384, 0.72507, 0.050051, 0.14646, 0, 0.00163, 0.00163, 0.00163
95, 0.081695, 0.16942, 0, 0.9482, 0.86917, 0.90346, 0.72735, 0.049871, 0.146, 0, 0.00154, 0.00154, 0.00154
96, 0.080296, 0.16362, 0, 0.94895, 0.86483, 0.90046, 0.72406, 0.050098, 0.14637, 0, 0.00145, 0.00145, 0.00145
97, 0.080617, 0.16571, 0, 0.95251, 0.86722, 0.90244, 0.73023, 0.049788, 0.14505, 0, 0.00136, 0.00136, 0.00136
98, 0.080079, 0.16336, 0, 0.94819, 0.87085, 0.90324, 0.73317, 0.049553, 0.14422, 0, 0.00127, 0.00127, 0.00127
99, 0.079671, 0.16214, 0, 0.95115, 0.86978, 0.90437, 0.73298, 0.049534, 0.14428, 0, 0.00118, 0.00118, 0.00118
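The log has one row per epoch; the run settles around 0.904 mAP@0.5 and 0.733 mAP@0.5:0.95 in its final epochs, peaking at epoch 98. A minimal pandas sketch for locating the best epoch, assuming pandas is available (the comma-separated headers carry padding spaces, hence the strip):

    # Sketch: find the best epoch by mAP@0.5:0.95 in the log above.
    import pandas as pd

    df = pd.read_csv('TextDetection/runs/wordDetection/results.csv')
    df.columns = [c.strip() for c in df.columns]  # headers are space-padded
    best = df.loc[df['metrics/mAP_0.5:0.95'].idxmax()]
    print(int(best['epoch']), float(best['metrics/mAP_0.5:0.95']))  # 98 0.73317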
TextDetection/runs/wordDetection/results.png
ADDED
TextDetection/runs/wordDetection/weights/best.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9039ccbb2733b3a557ec0e28b3fbc53d3a0489812c102cf9bc082ed2b6266868
size 14385013
TextDetection/runs/wordDetection/weights/last.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ef353e8445999f03f471e6667e4559b168213f4b24379b83e007f876754d9d9
size 14385013
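Both checkpoints are committed as Git LFS pointer files (the `version https://git-lfs.github.com/spec/v1` header marks the pointer format), so a plain clone yields stubs of a few hundred bytes; the actual ~14.4 MB weights arrive with `git lfs pull`. A minimal sanity check, not part of the commit:

    # Sketch: confirm the LFS payload was fetched, not the pointer stub.
    from pathlib import Path

    weights = Path('TextDetection/runs/wordDetection/weights/best.pt')
    assert weights.stat().st_size > 1_000_000, 'pointer stub only; run `git lfs pull`'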
TextDetection/segment/predict.py
ADDED
@@ -0,0 +1,284 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.

Usage - sources:
    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam
                                                                  img.jpg                         # image
                                                                  vid.mp4                         # video
                                                                  screen                          # screenshot
                                                                  path/                           # directory
                                                                  list.txt                        # list of images
                                                                  list.streams                    # list of streams
                                                                  'path/*.jpg'                    # glob
                                                                  'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
                                          yolov5s-seg.torchscript        # TorchScript
                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                          yolov5s-seg_openvino_model     # OpenVINO
                                          yolov5s-seg.engine             # TensorRT
                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
                                          yolov5s-seg.pb                 # TensorFlow GraphDef
                                          yolov5s-seg.tflite             # TensorFlow Lite
                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                          yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path

import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                           strip_optimizer)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s-seg.pt',  # model.pt path(s)
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/predict-seg',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
        retina_masks=False,
):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.from_numpy(im).to(model.device)
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
            pred, proto = model(im, augment=augment, visualize=visualize)[:2]

        # NMS
        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                if retina_masks:
                    # scale bbox first the crop masks
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC
                else:
                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size

                # Segments
                if save_txt:
                    segments = [
                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
                        for x in reversed(masks2segments(masks))]

                # Print results
                for c in det[:, 5].unique():
                    n = (det[:, 5] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Mask plotting
                annotator.masks(
                    masks,
                    colors=[colors(x, True) for x in det[:, 5]],
                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
                    255 if retina_masks else im[i])

                # Write results
                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
                    if save_txt:  # Write to file
                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)
                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format
                        with open(f'{txt_path}.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or save_crop or view_img:  # Add bbox to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        annotator.box_label(xyxy, label, color=colors(c, True))
                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
                    if save_crop:
                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                if cv2.waitKey(1) == ord('q'):  # 1 millisecond
                    exit()

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
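run() can also be invoked from Python rather than the CLI, since parse_opt() only forwards its namespace into run(**vars(opt)). A minimal sketch, assuming a segmentation checkpoint such as yolov5s-seg.pt is available on disk:

    # Sketch: call the segmentation predictor programmatically with
    # the same defaults parse_opt() would supply.
    from segment.predict import run

    run(weights='yolov5s-seg.pt', source='data/images', imgsz=(640, 640),
        conf_thres=0.25, iou_thres=0.45)  # writes annotated output to runs/predict-seg/exp*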
TextDetection/segment/train.py
ADDED
@@ -0,0 +1,666 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Train a YOLOv5 segment model on a segment dataset
Models and datasets download automatically from the latest YOLOv5 release.

Usage - Single-GPU training:
    $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)
    $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640  # from scratch

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3

Models:     https://github.com/ultralytics/yolov5/tree/master/models
Datasets:   https://github.com/ultralytics/yolov5/tree/master/data
Tutorial:   https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
"""

import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import segment.val as validate  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.downloads import attempt_download, is_url
from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
                           get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                           labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import plot_evolve, plot_labels
from utils.segment.dataloaders import create_dataloader
from utils.segment.loss import ComputeLoss
from utils.segment.metrics import KEYS, fitness
from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
                               smart_resume, torch_distributed_zero_first)

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
GIT_INFO = check_git_info()


def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
    # callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    opt.hyp = hyp.copy()  # for saving hyps to checkpoints

    # Save run settings
    if not evolve:
        yaml_save(save_dir / 'hyp.yaml', hyp)
        yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        logger = GenericLogger(opt=opt, console_logger=LOGGER)

    # Config
    plots = not evolve and not opt.noplots  # create plots
    overlap = not opt.no_overlap
    cuda = device.type != 'cpu'
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        logger.update_params({'batch_size': batch_size})
        # loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume
    best_fitness, start_epoch = 0.0, 0
    if pretrained:
        if resume:
            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning(
            'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
            'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.'
        )
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(
        train_path,
        imgsz,
        batch_size // WORLD_SIZE,
        gs,
        single_cls,
        hyp=hyp,
        augment=True,
        cache=None if opt.cache == 'val' else opt.cache,
        rect=opt.rect,
        rank=LOCAL_RANK,
        workers=workers,
        image_weights=opt.image_weights,
        quad=opt.quad,
        prefix=colorstr('train: '),
        shuffle=True,
        mask_downsample_ratio=mask_ratio,
        overlap_mask=overlap,
    )
    labels = np.concatenate(dataset.labels, 0)
    mlc = int(labels[:, 0].max())  # max label class
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       mask_downsample_ratio=mask_ratio,
                                       overlap_mask=overlap,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
            model.half().float()  # pre-reduce anchor precision

            if plots:
                plot_labels(labels, names, save_dir)
        # callbacks.run('on_pretrain_routine_end', labels, names)

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nb = len(train_loader)  # number of batches
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper, stop = EarlyStopping(patience=opt.patience), False
    compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
    # callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        # callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%11s' * 8) %
                    ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _, masks) in pbar:  # batch ------------------------------------------------------
            # callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
            if ni - last_opt_step >= accumulate:
                scaler.unscale_(optimizer)  # unscale gradients
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in {-1, 0}:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
                # if callbacks.stop_training:
                #    return

                # Mosaic plots
                if plots:
                    if ni < 3:
                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg')
                    if ni == 10:
                        files = sorted(save_dir.glob('train*.jpg'))
                        logger.log_images(files, 'Mosaics', epoch)
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in {-1, 0}:
            # mAP
            # callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = validate.run(data_dict,
                                                batch_size=batch_size // WORLD_SIZE * 2,
                                                imgsz=imgsz,
                                                half=amp,
                                                model=ema.ema,
                                                single_cls=single_cls,
                                                dataloader=val_loader,
                                                save_dir=save_dir,
                                                plots=False,
                                                callbacks=callbacks,
                                                compute_loss=compute_loss,
                                                mask_downsample_ratio=mask_ratio,
                                                overlap=overlap)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
            # Log val metrics and media
            metrics_dict = dict(zip(KEYS, log_vals))
            logger.log_metrics(metrics_dict, epoch)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if opt.save_period > 0 and epoch % opt.save_period == 0:
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                    logger.log_model(w / f'epoch{epoch}.pt')
                del ckpt
                # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

        # EarlyStopping
        if RANK != -1:  # if DDP training
            broadcast_list = [stop if RANK == 0 else None]
            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
            if RANK != 0:
                stop = broadcast_list[0]
        if stop:
            break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in {-1, 0}:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = validate.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools at iou 0.65
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss,
                        mask_downsample_ratio=mask_ratio,
                        overlap=overlap)  # val best model with plots
                    if is_coco:
                        # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
                        metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
                        logger.log_metrics(metrics_dict, epoch)

        # callbacks.run('on_train_end', last, best, epoch, results)
        # on train end callback using genericLogger
        logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
        if not opt.evolve:
            logger.log_model(best, epoch)
        if plots:
            plot_results_with_masks(file=save_dir / 'results.csv')  # save results.png
            files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
            files = [(save_dir / f) for f in files if (save_dir / f).exists()]  # filter
            LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
            logger.log_images(files, 'Results', epoch + 1)
            logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1)
    torch.cuda.empty_cache()
    return results
|
463 |
+
|
464 |
+
|
465 |
+
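# A minimal sketch (not part of the original file) of reading back a checkpoint
# written by the torch.save(ckpt, last) call in train() above; the 'last.pt' path
# and this helper name are illustrative. The dict keys match the ckpt dict above.
def _load_checkpoint(path='last.pt'):
    import torch
    ckpt = torch.load(path, map_location='cpu')
    print(f"epoch {ckpt['epoch']}, best_fitness {ckpt['best_fitness']}, saved {ckpt['date']}")
    model = ckpt.get('ema') or ckpt['model']  # prefer the EMA weights when present
    return model.float()  # weights were stored via .half(), so cast back before use
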
def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')

    # Instance Segmentation Args
    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
    parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP')

    return parser.parse_known_args()[0] if known else parser.parse_args()

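# A minimal sketch (not part of the original file) of the nargs='?' + const pattern
# used by --resume, --cache and --evolve above: a bare flag takes the const value,
# a flag with an argument takes that argument, and an omitted flag takes the default.
def _demo_optional_const():
    import argparse
    p = argparse.ArgumentParser()
    p.add_argument('--evolve', type=int, nargs='?', const=300)
    assert p.parse_args([]).evolve is None  # flag omitted -> default (None here)
    assert p.parse_args(['--evolve']).evolve == 300  # bare flag -> const
    assert p.parse_args(['--evolve', '50']).evolve == 50  # explicit value wins
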
def main(opt, callbacks=Callbacks()):
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements(ROOT / 'requirements.txt')

    # Resume
    if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train-seg'):  # if default project name, rename to runs/evolve-seg
                opt.project = str(ROOT / 'runs/evolve-seg')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            # download evolve.csv if exists
            subprocess.run([
                'gsutil',
                'cp',
                f'gs://{opt.bucket}/evolve.csv',
                str(evolve_csv), ])

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 12] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')

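# A minimal standalone sketch (not part of the original file) of the mutation step in
# the evolve loop above, simplified to perturb the current hyp values directly rather
# than a parent row loaded from evolve.csv; it assumes hyp and meta share the same keys.
def _mutate(hyp, meta, mp=0.8, s=0.2):
    import numpy as np
    g = np.array([meta[k][0] for k in hyp])  # per-gene gains; a gain of 0 freezes a gene
    v = np.ones(len(g))
    while all(v == 1):  # resample until at least one gene actually changes
        v = (g * (np.random.random(len(g)) < mp) * np.random.randn(len(g)) * np.random.random() * s + 1).clip(0.3, 3.0)
    # apply multiplicative noise, then clamp to each gene's (gain, low, high) limits
    return {k: round(min(max(hyp[k] * v[i], meta[k][1]), meta[k][2]), 5) for i, k in enumerate(hyp)}
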
def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
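The run() helper above gives the same entry point programmatically: keyword arguments are written onto the parsed defaults before main() is called. A minimal usage sketch, assuming the script is executed from the repository root so segment/train.py is importable as segment.train:

    import sys
    sys.path.append('.')  # assumption: working directory is the repository root

    from segment import train

    opt = train.run(data='coco128-seg.yaml', weights='yolov5s-seg.pt', imgsz=320, epochs=3)
    print(opt.save_dir)  # populated by main() before training starts
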
TextDetection/segment/tutorial.ipynb
ADDED
@@ -0,0 +1,595 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "t6MPjfT5NrKQ"
   },
   "source": [
    "<div align=\"center\">\n",
    "\n",
    "  <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
    "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
    "\n",
    "\n",
    "<br>\n",
    "  <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
    "  <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
    "  <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
    "<br>\n",
    "\n",
    "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
    "\n",
    "</div>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7mGmQbAO5pQb"
   },
   "source": [
    "# Setup\n",
    "\n",
    "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wbvMlHd_QwMG",
    "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
     ]
    }
   ],
   "source": [
    "!git clone https://github.com/ultralytics/yolov5  # clone\n",
    "%cd yolov5\n",
    "%pip install -qr requirements.txt comet_ml  # install\n",
    "\n",
    "import torch\n",
    "import utils\n",
    "display = utils.notebook_init()  # checks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4JnkELT0cIJg"
   },
   "source": [
    "# 1. Predict\n",
    "\n",
    "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n",
    "\n",
    "```shell\n",
    "python segment/predict.py --source 0  # webcam\n",
    "                                   img.jpg  # image \n",
    "                                   vid.mp4  # video\n",
    "                                   screen  # screenshot\n",
    "                                   path/  # directory\n",
    "                                   'path/*.jpg'  # glob\n",
    "                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
    "                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "zR9ZbuQCH7FX",
    "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
      "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
      "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
      "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n",
    "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "hkAzDWJ7cWTr"
   },
   "source": [
    "        \n",
    "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/199030123-08c72f8d-6871-4116-8ed3-c373642cf28e.jpg\" width=\"600\">"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "0eq1SMWl6Sfn"
   },
   "source": [
    "# 2. Validate\n",
    "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "WQPtK1QYVaD_",
    "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n",
      "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n",
      "######################################################################## 100.0%\n",
      "######################################################################## 100.0%\n"
     ]
    }
   ],
   "source": [
    "# Download COCO val\n",
    "!bash data/scripts/get_coco.sh --val --segments  # download (780M - 5000 images)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "X58w8JLpMnjH",
    "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "Fusing layers... \n",
      "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
      "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 157/157 [01:54<00:00,  1.37it/s]\n",
      "                   all       5000      36335      0.673      0.517      0.566      0.373      0.672       0.49      0.532      0.319\n",
      "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
      "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Validate YOLOv5s-seg on COCO val\n",
    "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ZY2VXXXu74w5"
   },
   "source": [
    "# 3. Train\n",
    "\n",
    "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
    "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
    "<br><br>\n",
    "\n",
    "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
    "\n",
    "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
    "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
    "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
    "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n",
    "<br><br>\n",
    "\n",
    "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
    "\n",
    "## Train on Custom Data with Roboflow 🌟 NEW\n",
    "\n",
    "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
    "\n",
    "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n",
    "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n",
    "<br>\n",
    "\n",
    "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://robflow-public-assets.s3.amazonaws.com/how-to-train-yolov5-segmentation-annotation.gif\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "i3oKtE4g-aNn"
   },
   "outputs": [],
   "source": [
    "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
    "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n",
    "\n",
    "if logger == 'Comet':\n",
    "  %pip install -q comet_ml\n",
    "  import comet_ml; comet_ml.init()\n",
    "elif logger == 'ClearML':\n",
    "  %pip install -q clearml\n",
    "  import clearml; clearml.browser_login()\n",
    "elif logger == 'TensorBoard':\n",
    "  %load_ext tensorboard\n",
    "  %tensorboard --logdir runs/train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "1NcFxRcFdJ_O",
    "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
      "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
      "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
      "\n",
      "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
      "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
      "\n",
      "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
      "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n",
      "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
      "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
      "\n",
      "                 from  n    params  module                                  arguments\n",
      "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]\n",
      "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]\n",
      "  2                -1  1     18816  models.common.C3                        [64, 64, 1]\n",
      "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]\n",
      "  4                -1  2    115712  models.common.C3                        [128, 128, 2]\n",
      "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]\n",
      "  6                -1  3    625152  models.common.C3                        [256, 256, 3]\n",
      "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]\n",
      "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]\n",
      "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]\n",
      " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]\n",
      " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']\n",
      " 12           [-1, 6]  1         0  models.common.Concat                    [1]\n",
      " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]\n",
      " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]\n",
      " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']\n",
      " 16           [-1, 4]  1         0  models.common.Concat                    [1]\n",
      " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]\n",
      " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]\n",
      " 19          [-1, 14]  1         0  models.common.Concat                    [1]\n",
      " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]\n",
      " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]\n",
      " 22          [-1, 10]  1         0  models.common.Concat                    [1]\n",
      " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]\n",
      " 24      [17, 20, 23]  1    615133  models.yolo.Segment                     [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n",
      "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n",
      "\n",
      "Transferred 367/367 items from yolov5s-seg.pt\n",
      "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
      "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
      "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
      "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
      "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\n",
      "\n",
      "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
      "Plotting labels to runs/train-seg/exp/labels.jpg... \n",
      "Image sizes 640 train, 640 val\n",
      "Using 2 dataloader workers\n",
      "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n",
      "Starting training for 3 epochs...\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
      "        0/2      4.92G     0.0417    0.04646    0.06066    0.02126        192        640: 100% 8/8 [00:08<00:00,  1.10s/it]\n",
      "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.81it/s]\n",
      "                   all        128        929      0.737      0.649      0.715      0.492      0.719      0.617      0.658      0.408\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
      "        1/2      6.29G    0.04157    0.04503    0.05772    0.01777        208        640: 100% 8/8 [00:09<00:00,  1.21s/it]\n",
      "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.87it/s]\n",
      "                   all        128        929      0.756      0.674      0.738      0.506      0.725       0.64       0.68      0.422\n",
      "\n",
      "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
      "        2/2      6.29G     0.0425    0.04793    0.06784    0.01863        161        640: 100% 8/8 [00:03<00:00,  2.02it/s]\n",
      "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.88it/s]\n",
      "                   all        128        929      0.736      0.694      0.747      0.522      0.769      0.622      0.683      0.427\n",
      "\n",
      "3 epochs completed in 0.009 hours.\n",
      "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n",
      "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n",
      "\n",
      "Validating runs/train-seg/exp/weights/best.pt...\n",
      "Fusing layers... \n",
      "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
      "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:06<00:00,  1.59s/it]\n",
      "                   all        128        929      0.738      0.694      0.746      0.522      0.759      0.625      0.682      0.426\n",
      "                person        128        254      0.845      0.756      0.836       0.55      0.861      0.669      0.759      0.407\n",
      "               bicycle        128          6      0.475      0.333      0.549      0.341      0.711      0.333      0.526      0.322\n",
      "                   car        128         46      0.612      0.565      0.539      0.257      0.555      0.435      0.477      0.171\n",
      "            motorcycle        128          5       0.73        0.8      0.752      0.571      0.747        0.8      0.752       0.42\n",
      "              airplane        128          6          1      0.943      0.995      0.732       0.92      0.833      0.839      0.555\n",
      "                   bus        128          7      0.677      0.714      0.722      0.653      0.711      0.714      0.722      0.593\n",
      "                 train        128          3          1      0.951      0.995      0.551          1      0.884      0.995      0.781\n",
      "                 truck        128         12      0.555      0.417      0.457      0.285      0.624      0.417      0.397      0.277\n",
      "                  boat        128          6      0.624        0.5      0.584      0.186          1      0.326      0.412      0.133\n",
      "         traffic light        128         14      0.513      0.302      0.411      0.247      0.435      0.214      0.376      0.251\n",
      "             stop sign        128          2      0.824          1      0.995      0.796      0.906          1      0.995      0.747\n",
      "                 bench        128          9       0.75      0.667      0.763      0.367      0.724      0.585      0.698      0.209\n",
      "                  bird        128         16      0.961          1      0.995      0.686      0.918      0.938       0.91      0.525\n",
      "                   cat        128          4      0.771      0.857      0.945      0.752       0.76        0.8      0.945      0.728\n",
      "                   dog        128          9      0.987      0.778      0.963      0.681          1      0.705       0.89      0.574\n",
      "                 horse        128          2      0.703          1      0.995      0.697      0.759          1      0.995      0.249\n",
      "              elephant        128         17      0.916      0.882       0.93      0.691      0.811      0.765      0.829      0.537\n",
      "                  bear        128          1      0.664          1      0.995      0.995      0.701          1      0.995      0.895\n",
      "                 zebra        128          4      0.864          1      0.995      0.921      0.879          1      0.995      0.804\n",
      "               giraffe        128          9      0.883      0.889       0.94      0.683      0.845      0.778       0.78      0.463\n",
      "              backpack        128          6          1       0.59      0.701      0.372          1      0.474       0.52      0.252\n",
      "              umbrella        128         18      0.654      0.839      0.887       0.52      0.517      0.556      0.427      0.229\n",
      "               handbag        128         19       0.54      0.211      0.408      0.221      0.796      0.206      0.396      0.196\n",
      "                   tie        128          7      0.864      0.857      0.857      0.577      0.925      0.857      0.857      0.534\n",
      "              suitcase        128          4      0.716          1      0.945      0.647      0.767          1      0.945      0.634\n",
      "               frisbee        128          5      0.708        0.8      0.761      0.643      0.737        0.8      0.761      0.501\n",
      "                  skis        128          1      0.691          1      0.995      0.796      0.761          1      0.995      0.199\n",
      "             snowboard        128          7      0.918      0.857      0.904      0.604       0.32      0.286      0.235      0.137\n",
      "           sports ball        128          6      0.902      0.667      0.701      0.466      0.727        0.5      0.497      0.471\n",
      "                  kite        128         10      0.586        0.4      0.511      0.231      0.663      0.394      0.417      0.139\n",
      "          baseball bat        128          4      0.359        0.5      0.401      0.169      0.631        0.5      0.526      0.133\n",
      "        baseball glove        128          7          1      0.519       0.58      0.327      0.687      0.286      0.455      0.328\n",
      "            skateboard        128          5      0.729        0.8      0.862      0.631      0.599        0.6      0.604      0.379\n",
      "         tennis racket        128          7       0.57      0.714      0.645      0.448      0.608      0.714      0.645      0.412\n",
      "                bottle        128         18      0.469      0.393      0.537      0.357      0.661      0.389      0.543      0.349\n",
      "            wine glass        128         16      0.677      0.938      0.866      0.441       0.53      0.625       0.67      0.334\n",
      "                   cup        128         36      0.777      0.722      0.812      0.466      0.725      0.583      0.762      0.467\n",
      "                  fork        128          6      0.948      0.333      0.425       0.27      0.527      0.167       0.18      0.102\n",
      "                 knife        128         16      0.757      0.587      0.669      0.458       0.79        0.5      0.552       0.34\n",
      "                 spoon        128         22       0.74      0.364      0.559      0.269      0.925      0.364      0.513      0.213\n",
      "                  bowl        128         28      0.766      0.714      0.725      0.559      0.803      0.584      0.665      0.353\n",
      "                banana        128          1      0.408          1      0.995      0.398      0.539          1      0.995      0.497\n",
      "              sandwich        128          2          1          0      0.695      0.536          1          0      0.498      0.448\n",
      "                orange        128          4      0.467          1      0.995      0.693      0.518          1      0.995      0.663\n",
      "              broccoli        128         11      0.462      0.455      0.383      0.259      0.548      0.455      0.384      0.256\n",
      "                carrot        128         24      0.631      0.875       0.77      0.533      0.757      0.909      0.853      0.499\n",
      "               hot dog        128          2      0.555          1      0.995      0.995      0.578          1      0.995      0.796\n",
      "                 pizza        128          5       0.89        0.8      0.962      0.796          1      0.778      0.962      0.766\n",
      "                 donut        128         14      0.695          1      0.893      0.772      0.704          1      0.893      0.696\n",
      "                  cake        128          4      0.826          1      0.995       0.92      0.862          1      0.995      0.846\n",
      "                 chair        128         35       0.53      0.571      0.613      0.336       0.67        0.6      0.538      0.271\n",
      "                 couch        128          6      0.972      0.667      0.833      0.627          1       0.62      0.696      0.394\n",
      "          potted plant        128         14        0.7      0.857      0.883      0.552      0.836      0.857      0.883      0.473\n",
      "                   bed        128          3      0.979      0.667       0.83      0.366          1          0       0.83      0.373\n",
      "          dining table        128         13      0.775      0.308      0.505      0.364      0.644      0.231       0.25     0.0804\n",
      "                toilet        128          2      0.836          1      0.995      0.846      0.887          1      0.995      0.797\n",
      "                    tv        128          2        0.6          1      0.995      0.846      0.655          1      0.995      0.896\n",
      "                laptop        128          3      0.822      0.333      0.445      0.307          1          0      0.392       0.12\n",
      "                 mouse        128          2          1          0          0          0          1          0          0          0\n",
      "                remote        128          8      0.745        0.5       0.62      0.459      0.821        0.5      0.624      0.449\n",
      "            cell phone        128          8      0.686      0.375      0.502      0.272      0.488       0.25       0.28      0.132\n",
      "             microwave        128          3      0.831          1      0.995      0.722      0.867          1      0.995      0.592\n",
      "                  oven        128          5      0.439        0.4      0.435      0.294      0.823        0.6      0.645      0.418\n",
      "                  sink        128          6      0.677        0.5      0.565      0.448      0.722        0.5       0.46      0.362\n",
      "          refrigerator        128          5      0.533        0.8      0.783      0.524      0.558        0.8      0.783      0.527\n",
      "                  book        128         29      0.732      0.379      0.423      0.196       0.69      0.207       0.38      0.131\n",
      "                 clock        128          9      0.889      0.778      0.917      0.677      0.908      0.778      0.875      0.604\n",
      "                  vase        128          2      0.375          1      0.995      0.995      0.455          1      0.995      0.796\n",
      "              scissors        128          1          1          0     0.0166    0.00166          1          0          0          0\n",
      "            teddy bear        128         21      0.813      0.829      0.841      0.457      0.826      0.678      0.786      0.422\n",
      "            toothbrush        128          5      0.806          1      0.995      0.733      0.991          1      0.995      0.628\n",
      "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# Train YOLOv5s on COCO128 for 3 epochs\n",
    "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "15glLzbQx5u0"
   },
   "source": [
    "# 4. Visualize"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nWOsI5wJR1o3"
   },
   "source": [
    "## Comet Logging and Visualization 🌟 NEW\n",
    "\n",
    "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
    "\n",
    "Getting started is easy:\n",
    "```shell\n",
    "pip install comet_ml  # 1. install\n",
    "export COMET_API_KEY=<Your API Key>  # 2. paste API key\n",
    "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\n",
    "```\n",
    "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
    "\n",
    "<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
    "<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Lay2WsTjNJzP"
   },
   "source": [
    "## ClearML Logging and Automation 🌟 NEW\n",
    "\n",
    "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
    "\n",
    "- `pip install clearml`\n",
    "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
    "\n",
    "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
    "\n",
    "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
    "\n",
    "<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
    "<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-WPvRbS5Swl6"
   },
   "source": [
    "## Local Logging\n",
    "\n",
    "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
    "\n",
    "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
    "\n",
    "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Zelyeqbyt3GD"
   },
   "source": [
    "# Environments\n",
    "\n",
    "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
    "\n",
    "- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
    "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
    "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
    "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "6Qu7Iesl0p54"
   },
   "source": [
    "# Status\n",
    "\n",
    "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
    "\n",
    "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IEijrePND_2I"
   },
   "source": [
    "# Appendix\n",
    "\n",
    "Additional content below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "GMusP4OAxFu6"
   },
   "outputs": [],
   "source": [
    "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
    "import torch\n",
    "\n",
    "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg')  # yolov5n - yolov5x6 or custom\n",
    "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
    "results = model(im)  # inference\n",
    "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "name": "YOLOv5 Segmentation Tutorial",
   "provenance": [],
   "toc_visible": true
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
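The notebook's Comet section above sets the API key with a shell export before training; the same setup can be done from Python. A minimal sketch (the key value is a placeholder, and setting it via os.environ rather than export is an assumption about equivalent behavior):

    import os

    os.environ['COMET_API_KEY'] = '<Your API Key>'  # placeholder, mirroring the notebook's export line

    import comet_ml  # import before launching training so runs are auto-logged
    comet_ml.init()  # same call as the notebook's logger-selection cell
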
TextDetection/segment/val.py
ADDED
@@ -0,0 +1,473 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import json
import os
import subprocess
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import torch.nn.functional as F

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')

def save_one_json(predn, jdict, path, class_map, pred_masks):
|
69 |
+
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
|
70 |
+
from pycocotools.mask import encode
|
71 |
+
|
72 |
+
def single_encode(x):
|
73 |
+
rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
|
74 |
+
rle['counts'] = rle['counts'].decode('utf-8')
|
75 |
+
return rle
|
76 |
+
|
77 |
+
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
|
78 |
+
box = xyxy2xywh(predn[:, :4]) # xywh
|
79 |
+
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
|
80 |
+
pred_masks = np.transpose(pred_masks, (2, 0, 1))
|
81 |
+
with ThreadPool(NUM_THREADS) as pool:
|
82 |
+
rles = pool.map(single_encode, pred_masks)
|
83 |
+
for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
|
84 |
+
jdict.append({
|
85 |
+
'image_id': image_id,
|
86 |
+
'category_id': class_map[int(p[5])],
|
87 |
+
'bbox': [round(x, 3) for x in b],
|
88 |
+
'score': round(p[4], 5),
|
89 |
+
'segmentation': rles[i]})
|
90 |
+
|
91 |
+
|
92 |
+
def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
|
93 |
+
"""
|
94 |
+
Return correct prediction matrix
|
95 |
+
Arguments:
|
96 |
+
detections (array[N, 6]), x1, y1, x2, y2, conf, class
|
97 |
+
labels (array[M, 5]), class, x1, y1, x2, y2
|
98 |
+
Returns:
|
99 |
+
correct (array[N, 10]), for 10 IoU levels
|
100 |
+
"""
|
101 |
+
if masks:
|
102 |
+
if overlap:
|
103 |
+
nl = len(labels)
|
104 |
+
index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
|
105 |
+
gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640)
|
106 |
+
gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
|
107 |
+
if gt_masks.shape[1:] != pred_masks.shape[1:]:
|
108 |
+
gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
|
109 |
+
gt_masks = gt_masks.gt_(0.5)
|
110 |
+
iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
|
111 |
+
else: # boxes
|
112 |
+
iou = box_iou(labels[:, 1:], detections[:, :4])
|
113 |
+
|
114 |
+
correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
|
115 |
+
correct_class = labels[:, 0:1] == detections[:, 5]
|
116 |
+
for i in range(len(iouv)):
|
117 |
+
x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match
|
118 |
+
if x[0].shape[0]:
|
119 |
+
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou]
|
120 |
+
if x[0].shape[0] > 1:
|
121 |
+
matches = matches[matches[:, 2].argsort()[::-1]]
|
122 |
+
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
|
123 |
+
# matches = matches[matches[:, 2].argsort()[::-1]]
|
124 |
+
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
|
125 |
+
correct[matches[:, 1].astype(int), i] = True
|
126 |
+
return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
|
127 |
+
|
128 |
+
|
129 |
+
@smart_inference_mode()
|
130 |
+
def run(
|
131 |
+
data,
|
132 |
+
weights=None, # model.pt path(s)
|
133 |
+
batch_size=32, # batch size
|
134 |
+
imgsz=640, # inference size (pixels)
|
135 |
+
conf_thres=0.001, # confidence threshold
|
136 |
+
iou_thres=0.6, # NMS IoU threshold
|
137 |
+
max_det=300, # maximum detections per image
|
138 |
+
task='val', # train, val, test, speed or study
|
139 |
+
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
140 |
+
workers=8, # max dataloader workers (per RANK in DDP mode)
|
141 |
+
single_cls=False, # treat as single-class dataset
|
142 |
+
augment=False, # augmented inference
|
143 |
+
verbose=False, # verbose output
|
144 |
+
save_txt=False, # save results to *.txt
|
145 |
+
save_hybrid=False, # save label+prediction hybrid results to *.txt
|
146 |
+
save_conf=False, # save confidences in --save-txt labels
|
147 |
+
save_json=False, # save a COCO-JSON results file
|
148 |
+
project=ROOT / 'runs/val-seg', # save to project/name
|
149 |
+
name='exp', # save to project/name
|
150 |
+
exist_ok=False, # existing project/name ok, do not increment
|
151 |
+
half=True, # use FP16 half-precision inference
|
152 |
+
dnn=False, # use OpenCV DNN for ONNX inference
|
153 |
+
model=None,
|
154 |
+
dataloader=None,
|
155 |
+
save_dir=Path(''),
|
156 |
+
plots=True,
|
157 |
+
overlap=False,
|
158 |
+
mask_downsample_ratio=1,
|
159 |
+
compute_loss=None,
|
160 |
+
callbacks=Callbacks(),
|
161 |
+
):
|
162 |
+
if save_json:
|
163 |
+
check_requirements('pycocotools>=2.0.6')
|
164 |
+
process = process_mask_native # more accurate
|
165 |
+
else:
|
166 |
+
process = process_mask # faster
|
167 |
+
|
168 |
+
# Initialize/load model and set device
|
169 |
+
training = model is not None
|
170 |
+
if training: # called by train.py
|
171 |
+
device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
|
172 |
+
half &= device.type != 'cpu' # half precision only supported on CUDA
|
173 |
+
model.half() if half else model.float()
|
174 |
+
nm = de_parallel(model).model[-1].nm # number of masks
|
175 |
+
else: # called directly
|
176 |
+
device = select_device(device, batch_size=batch_size)
|
177 |
+
|
178 |
+
# Directories
|
179 |
+
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
180 |
+
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
181 |
+
|
182 |
+
# Load model
|
183 |
+
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
|
184 |
+
stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
|
185 |
+
imgsz = check_img_size(imgsz, s=stride) # check image size
|
186 |
+
half = model.fp16 # FP16 supported on limited backends with CUDA
|
187 |
+
nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks
|
188 |
+
if engine:
|
189 |
+
batch_size = model.batch_size
|
190 |
+
else:
|
191 |
+
device = model.device
|
192 |
+
if not (pt or jit):
|
193 |
+
batch_size = 1 # export.py models default to batch-size 1
|
194 |
+
LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
|
195 |
+
|
196 |
+
# Data
|
197 |
+
data = check_dataset(data) # check
|
198 |
+
|
199 |
+
# Configure
|
200 |
+
model.eval()
|
201 |
+
cuda = device.type != 'cpu'
|
202 |
+
is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
|
203 |
+
nc = 1 if single_cls else int(data['nc']) # number of classes
|
204 |
+
iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for [email protected]:0.95
|
205 |
+
niou = iouv.numel()
|
206 |
+
|
207 |
+
# Dataloader
|
208 |
+
if not training:
|
209 |
+
if pt and not single_cls: # check --weights are trained on --data
|
210 |
+
ncm = model.model.nc
|
211 |
+
assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
|
212 |
+
f'classes). Pass correct combination of --weights and --data that are trained together.'
|
213 |
+
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup
|
214 |
+
pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks
|
215 |
+
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
|
216 |
+
dataloader = create_dataloader(data[task],
|
217 |
+
imgsz,
|
218 |
+
batch_size,
|
219 |
+
stride,
|
220 |
+
single_cls,
|
221 |
+
pad=pad,
|
222 |
+
rect=rect,
|
223 |
+
workers=workers,
|
224 |
+
prefix=colorstr(f'{task}: '),
|
225 |
+
overlap_mask=overlap,
|
226 |
+
mask_downsample_ratio=mask_downsample_ratio)[0]
|
227 |
+
|
228 |
+
seen = 0
|
229 |
+
confusion_matrix = ConfusionMatrix(nc=nc)
|
230 |
+
names = model.names if hasattr(model, 'names') else model.module.names # get class names
|
231 |
+
if isinstance(names, (list, tuple)): # old format
|
232 |
+
names = dict(enumerate(names))
|
233 |
+
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
|
234 |
+
s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
|
235 |
+
'mAP50', 'mAP50-95)')
|
236 |
+
dt = Profile(), Profile(), Profile()
|
237 |
+
metrics = Metrics()
|
238 |
+
loss = torch.zeros(4, device=device)
|
239 |
+
jdict, stats = [], []
|
240 |
+
# callbacks.run('on_val_start')
|
241 |
+
pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar
|
242 |
+
for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
|
243 |
+
# callbacks.run('on_val_batch_start')
|
244 |
+
with dt[0]:
|
245 |
+
if cuda:
|
246 |
+
im = im.to(device, non_blocking=True)
|
247 |
+
targets = targets.to(device)
|
248 |
+
masks = masks.to(device)
|
249 |
+
masks = masks.float()
|
250 |
+
im = im.half() if half else im.float() # uint8 to fp16/32
|
251 |
+
im /= 255 # 0 - 255 to 0.0 - 1.0
|
252 |
+
nb, _, height, width = im.shape # batch size, channels, height, width
|
253 |
+
|
254 |
+
# Inference
|
255 |
+
with dt[1]:
|
256 |
+
preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)
|
257 |
+
|
258 |
+
# Loss
|
259 |
+
if compute_loss:
|
260 |
+
loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls
|
261 |
+
|
262 |
+
# NMS
|
263 |
+
targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
|
264 |
+
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
|
265 |
+
with dt[2]:
|
266 |
+
preds = non_max_suppression(preds,
|
267 |
+
conf_thres,
|
268 |
+
iou_thres,
|
269 |
+
labels=lb,
|
270 |
+
multi_label=True,
|
271 |
+
agnostic=single_cls,
|
272 |
+
max_det=max_det,
|
273 |
+
nm=nm)
|
274 |
+
|
275 |
+
# Metrics
|
276 |
+
plot_masks = [] # masks for plotting
|
277 |
+
for si, (pred, proto) in enumerate(zip(preds, protos)):
|
278 |
+
labels = targets[targets[:, 0] == si, 1:]
|
279 |
+
nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
|
280 |
+
path, shape = Path(paths[si]), shapes[si][0]
|
281 |
+
correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
|
282 |
+
correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
|
283 |
+
seen += 1
|
284 |
+
|
285 |
+
if npr == 0:
|
286 |
+
if nl:
|
287 |
+
stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
|
288 |
+
if plots:
|
289 |
+
confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
|
290 |
+
continue
|
291 |
+
|
292 |
+
# Masks
|
293 |
+
midx = [si] if overlap else targets[:, 0] == si
|
294 |
+
gt_masks = masks[midx]
|
295 |
+
pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])
|
296 |
+
|
297 |
+
# Predictions
|
298 |
+
if single_cls:
|
299 |
+
pred[:, 5] = 0
|
300 |
+
predn = pred.clone()
|
301 |
+
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
|
302 |
+
|
303 |
+
# Evaluate
|
304 |
+
if nl:
|
305 |
+
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
|
306 |
+
scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
|
307 |
+
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
|
308 |
+
correct_bboxes = process_batch(predn, labelsn, iouv)
|
309 |
+
correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
|
310 |
+
if plots:
|
311 |
+
confusion_matrix.process_batch(predn, labelsn)
|
312 |
+
stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls)
|
313 |
+
|
314 |
+
pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
|
315 |
+
if plots and batch_i < 3:
|
316 |
+
plot_masks.append(pred_masks[:15]) # filter top 15 to plot
|
317 |
+
|
318 |
+
# Save/log
|
319 |
+
if save_txt:
|
320 |
+
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
|
321 |
+
if save_json:
|
322 |
+
pred_masks = scale_image(im[si].shape[1:],
|
323 |
+
pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
|
324 |
+
save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary
|
325 |
+
# callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
|
326 |
+
|
327 |
+
# Plot images
|
328 |
+
if plots and batch_i < 3:
|
329 |
+
if len(plot_masks):
|
330 |
+
plot_masks = torch.cat(plot_masks, dim=0)
|
331 |
+
plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
|
332 |
+
plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
|
333 |
+
save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
|
334 |
+
|
335 |
+
# callbacks.run('on_val_batch_end')
|
336 |
+
|
337 |
+
# Compute metrics
|
338 |
+
stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
|
339 |
+
if len(stats) and stats[0].any():
|
340 |
+
results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
|
341 |
+
metrics.update(results)
|
342 |
+
nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class
|
343 |
+
|
344 |
+
# Print results
|
345 |
+
pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format
|
346 |
+
LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
|
347 |
+
if nt.sum() == 0:
|
348 |
+
LOGGER.warning(f'WARNING β οΈ no labels found in {task} set, can not compute metrics without labels')
|
349 |
+
|
350 |
+
# Print results per class
|
351 |
+
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
|
352 |
+
for i, c in enumerate(metrics.ap_class_index):
|
353 |
+
LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
|
354 |
+
|
355 |
+
# Print speeds
|
356 |
+
t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
|
357 |
+
if not training:
|
358 |
+
shape = (batch_size, 3, imgsz, imgsz)
|
359 |
+
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
|
360 |
+
|
361 |
+
# Plots
|
362 |
+
if plots:
|
363 |
+
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
|
364 |
+
# callbacks.run('on_val_end')
|
365 |
+
|
366 |
+
mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
|
367 |
+
|
368 |
+
# Save JSON
|
369 |
+
if save_json and len(jdict):
|
370 |
+
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
|
371 |
+
anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations
|
372 |
+
pred_json = str(save_dir / f'{w}_predictions.json') # predictions
|
373 |
+
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
|
374 |
+
with open(pred_json, 'w') as f:
|
375 |
+
json.dump(jdict, f)
|
376 |
+
|
377 |
+
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
|
378 |
+
from pycocotools.coco import COCO
|
379 |
+
from pycocotools.cocoeval import COCOeval
|
380 |
+
|
381 |
+
anno = COCO(anno_json) # init annotations api
|
382 |
+
pred = anno.loadRes(pred_json) # init predictions api
|
383 |
+
results = []
|
384 |
+
for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
|
385 |
+
if is_coco:
|
386 |
+
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate
|
387 |
+
eval.evaluate()
|
388 |
+
eval.accumulate()
|
389 |
+
eval.summarize()
|
390 |
+
results.extend(eval.stats[:2]) # update results ([email protected]:0.95, [email protected])
|
391 |
+
map_bbox, map50_bbox, map_mask, map50_mask = results
|
392 |
+
except Exception as e:
|
393 |
+
LOGGER.info(f'pycocotools unable to run: {e}')
|
394 |
+
|
395 |
+
# Return results
|
396 |
+
model.float() # for training
|
397 |
+
if not training:
|
398 |
+
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
399 |
+
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
|
400 |
+
final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
|
401 |
+
return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t
|
402 |
+
|
403 |
+
|
404 |
+
def parse_opt():
|
405 |
+
parser = argparse.ArgumentParser()
|
406 |
+
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
|
407 |
+
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
|
408 |
+
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
|
409 |
+
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
|
410 |
+
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
|
411 |
+
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
|
412 |
+
parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
|
413 |
+
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
|
414 |
+
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
415 |
+
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
416 |
+
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
|
417 |
+
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
418 |
+
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
|
419 |
+
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
420 |
+
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
|
421 |
+
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
|
422 |
+
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
|
423 |
+
parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
|
424 |
+
parser.add_argument('--name', default='exp', help='save to project/name')
|
425 |
+
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
426 |
+
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
|
427 |
+
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
|
428 |
+
opt = parser.parse_args()
|
429 |
+
opt.data = check_yaml(opt.data) # check YAML
|
430 |
+
# opt.save_json |= opt.data.endswith('coco.yaml')
|
431 |
+
opt.save_txt |= opt.save_hybrid
|
432 |
+
print_args(vars(opt))
|
433 |
+
return opt
|
434 |
+
|
435 |
+
|
436 |
+
def main(opt):
|
437 |
+
check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
|
438 |
+
|
439 |
+
if opt.task in ('train', 'val', 'test'): # run normally
|
440 |
+
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
|
441 |
+
LOGGER.warning(f'WARNING β οΈ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
|
442 |
+
if opt.save_hybrid:
|
443 |
+
LOGGER.warning('WARNING β οΈ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
|
444 |
+
run(**vars(opt))
|
445 |
+
|
446 |
+
else:
|
447 |
+
weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
|
448 |
+
opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results
|
449 |
+
if opt.task == 'speed': # speed benchmarks
|
450 |
+
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
|
451 |
+
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
|
452 |
+
for opt.weights in weights:
|
453 |
+
run(**vars(opt), plots=False)
|
454 |
+
|
455 |
+
elif opt.task == 'study': # speed vs mAP benchmarks
|
456 |
+
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
|
457 |
+
for opt.weights in weights:
|
458 |
+
f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to
|
459 |
+
x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
|
460 |
+
for opt.imgsz in x: # img-size
|
461 |
+
LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
|
462 |
+
r, _, t = run(**vars(opt), plots=False)
|
463 |
+
y.append(r + t) # results and times
|
464 |
+
np.savetxt(f, y, fmt='%10.4g') # save
|
465 |
+
subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
|
466 |
+
plot_val_study(x=x) # plot
|
467 |
+
else:
|
468 |
+
raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
|
469 |
+
|
470 |
+
|
471 |
+
if __name__ == '__main__':
|
472 |
+
opt = parse_opt()
|
473 |
+
main(opt)
|
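As the module docstring indicates, the file works both as a CLI and as an importable API. A minimal sketch, not part of the commit, of calling run() directly with values mirroring the parse_opt() defaults:

```python
# a minimal sketch: programmatic validation via the run() defined above,
# assuming it is importable as segment.val from the YOLOv5 root
from segment.val import run

results, maps, times = run(
    data='data/coco128-seg.yaml',  # dataset YAML (parse_opt default)
    weights='yolov5s-seg.pt',      # segmentation checkpoint
    imgsz=640,
    task='val',
)
# results holds the 8 box/mask P, R, mAP50, mAP50-95 values followed by the loss terms
```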
TextDetection/setup.cfg
ADDED
@@ -0,0 +1,56 @@
+# Project-wide configuration file, can be used for package metadata and other tool configurations
+# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
+# Local usage: pip install pre-commit, pre-commit run --all-files
+
+[metadata]
+license_files = LICENSE
+description_file = README.md
+
+[tool:pytest]
+norecursedirs =
+    .git
+    dist
+    build
+addopts =
+    --doctest-modules
+    --durations=25
+    --color=yes
+
+[flake8]
+max-line-length = 120
+exclude = .tox,*.egg,build,temp
+select = E,W,F
+doctests = True
+verbose = 2
+# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
+format = pylint
+# see: https://www.flake8rules.com/
+ignore = E731,F405,E402,W504,E501
+# E731: Do not assign a lambda expression, use a def
+# F405: name may be undefined, or defined from star imports: module
+# E402: module level import not at top of file
+# W504: line break after binary operator
+# E501: line too long
+# removed:
+# F401: module imported but unused
+# E231: missing whitespace after ',', ';', or ':'
+# E127: continuation line over-indented for visual indent
+# F403: 'from module import *' used; unable to detect undefined names
+
+
+[isort]
+# https://pycqa.github.io/isort/docs/configuration/options.html
+line_length = 120
+# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
+multi_line_output = 0
+
+[yapf]
+based_on_style = pep8
+spaces_before_comment = 2
+COLUMN_LIMIT = 120
+COALESCE_BRACKETS = True
+SPACES_AROUND_POWER_OPERATOR = True
+SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
+SPLIT_BEFORE_CLOSING_BRACKET = False
+SPLIT_BEFORE_FIRST_ARGUMENT = False
+# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
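The header comment names the two consumers of this file. A short sketch, not part of the commit, of driving them locally (assumes flake8 and pytest are installed; both read setup.cfg from the working directory):

```python
# a minimal sketch: run the linter and tests configured above from TextDetection/
import subprocess

subprocess.run(['flake8', '.'], cwd='TextDetection')  # picks up the [flake8] section
subprocess.run(['pytest'], cwd='TextDetection')       # picks up [tool:pytest] addopts
```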
TextDetection/utils/__init__.py
ADDED
@@ -0,0 +1,86 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+utils/initialization
+"""
+
+import contextlib
+import platform
+import threading
+
+
+def emojis(str=''):
+    # Return platform-dependent emoji-safe version of string
+    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
+
+
+class TryExcept(contextlib.ContextDecorator):
+    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+    def __init__(self, msg=''):
+        self.msg = msg
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, value, traceback):
+        if value:
+            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
+        return True
+
+
+def threaded(func):
+    # Multi-threads a target function and returns thread. Usage: @threaded decorator
+    def wrapper(*args, **kwargs):
+        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+        thread.start()
+        return thread
+
+    return wrapper
+
+
+def join_threads(verbose=False):
+    # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
+    main_thread = threading.current_thread()
+    for t in threading.enumerate():
+        if t is not main_thread:
+            if verbose:
+                print(f'Joining thread {t.name}')
+            t.join()
+
+
+def notebook_init(verbose=True):
+    # Check system software and hardware
+    print('Checking setup...')
+
+    import os
+    import shutil
+
+    from ultralytics.yolo.utils.checks import check_requirements
+
+    from utils.general import check_font, is_colab
+    from utils.torch_utils import select_device  # imports
+
+    check_font()
+
+    import psutil
+
+    if check_requirements('wandb', install=False):
+        os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
+    if is_colab():
+        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory
+
+    # System info
+    display = None
+    if verbose:
+        gb = 1 << 30  # bytes to GiB (1024 ** 3)
+        ram = psutil.virtual_memory().total
+        total, used, free = shutil.disk_usage('/')
+        with contextlib.suppress(Exception):  # clear display if ipython is installed
+            from IPython import display
+            display.clear_output()
+        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
+    else:
+        s = ''
+
+    select_device(newline=False)
+    print(emojis(f'Setup complete ✅ {s}'))
+    return display
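The comments on TryExcept and threaded name their intended usage. A minimal sketch of both, not part of the commit, assuming the working directory is TextDetection/ so that `utils` resolves to the package above:

```python
# a minimal sketch exercising the helpers defined in utils/__init__.py
from utils import TryExcept, join_threads, threaded


@TryExcept('download failed')  # prints 'download failed: <error>' instead of raising
def flaky():
    raise OSError('no network')


@threaded  # runs in a daemon thread and returns the Thread object
def background_job(n):
    print(f'working on {n}')


flaky()            # exception is caught and printed by TryExcept.__exit__
background_job(3)  # returns immediately; work happens on the daemon thread
join_threads()     # block until all daemon threads finish
```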
TextDetection/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.79 kB)

TextDetection/utils/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (2.73 kB)

TextDetection/utils/__pycache__/augmentations.cpython-310.pyc
ADDED
Binary file (13.7 kB)

TextDetection/utils/__pycache__/augmentations.cpython-39.pyc
ADDED
Binary file (13.7 kB)

TextDetection/utils/__pycache__/autoanchor.cpython-310.pyc
ADDED
Binary file (6.53 kB)

TextDetection/utils/__pycache__/autoanchor.cpython-39.pyc
ADDED
Binary file (6.49 kB)

TextDetection/utils/__pycache__/dataloaders.cpython-310.pyc
ADDED
Binary file (43.4 kB)

TextDetection/utils/__pycache__/dataloaders.cpython-39.pyc
ADDED
Binary file (43.4 kB)

TextDetection/utils/__pycache__/downloads.cpython-310.pyc
ADDED
Binary file (4.3 kB)

TextDetection/utils/__pycache__/downloads.cpython-39.pyc
ADDED
Binary file (4.24 kB)

TextDetection/utils/__pycache__/general.cpython-310.pyc
ADDED
Binary file (37.7 kB)

TextDetection/utils/__pycache__/general.cpython-39.pyc
ADDED
Binary file (37.6 kB)

TextDetection/utils/__pycache__/metrics.cpython-310.pyc
ADDED
Binary file (11.4 kB)

TextDetection/utils/__pycache__/metrics.cpython-39.pyc
ADDED
Binary file (11.3 kB)

TextDetection/utils/__pycache__/plots.cpython-310.pyc
ADDED
Binary file (21.5 kB)

TextDetection/utils/__pycache__/plots.cpython-39.pyc
ADDED
Binary file (21.5 kB)

TextDetection/utils/__pycache__/torch_utils.cpython-310.pyc
ADDED
Binary file (16.8 kB)

TextDetection/utils/__pycache__/torch_utils.cpython-39.pyc
ADDED
Binary file (16.8 kB)