nachiiiket committed
Commit 348a19a · 1 Parent(s): 04a596a
Upload 14 files
Files changed:
- .gitattributes +1 -0
- .gitignore +263 -0
- AutoFis YOLOv7.ipynb +1 -0
- LICENSE.md +674 -0
- README.md +279 -3
- detect.py +196 -0
- export.py +205 -0
- hubconf.py +97 -0
- requirements.txt +39 -0
- test.py +353 -0
- train.py +705 -0
- train_aux.py +699 -0
- yolov7.pt +3 -0
- yolov7_training.pt +3 -0
- yolov7_training.pt.1 +3 -0
.gitattributes
CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+yolov7_training.pt.1 filter=lfs diff=lfs merge=lfs -text
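(For context: filter lines like the one added above are what Git LFS writes into .gitattributes. The new entry could have been produced with a command along these lines — a sketch of the usual workflow, not necessarily how this commit was made:

    git lfs track "yolov7_training.pt.1"    # appends the filter=lfs line to .gitattributes
    git add .gitattributes yolov7_training.pt.1

From then on, Git stores the .pt.1 file as an LFS pointer rather than as a full blob in the repository history.)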
.gitignore
ADDED
@@ -0,0 +1,263 @@
+# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
+*.jpg
+*.jpeg
+*.png
+*.bmp
+*.tif
+*.tiff
+*.heic
+*.JPG
+*.JPEG
+*.PNG
+*.BMP
+*.TIF
+*.TIFF
+*.HEIC
+*.mp4
+*.mov
+*.MOV
+*.avi
+*.data
+*.json
+*.cfg
+!setup.cfg
+!cfg/yolov3*.cfg
+
+storage.googleapis.com
+runs/*
+data/*
+data/images/*
+!data/*.yaml
+!data/hyps
+!data/scripts
+!data/images
+!data/images/zidane.jpg
+!data/images/bus.jpg
+!data/*.sh
+
+results*.csv
+
+# Datasets -------------------------------------------------------------------------------------------------------------
+coco/
+coco128/
+VOC/
+
+coco2017labels-segments.zip
+test2017.zip
+train2017.zip
+val2017.zip
+
+# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
+*.m~
+*.mat
+!targets*.mat
+
+# Neural Network weights -----------------------------------------------------------------------------------------------
+*.weights
+*.pt
+*.pb
+*.onnx
+*.engine
+*.mlmodel
+*.torchscript
+*.tflite
+*.h5
+*_saved_model/
+*_web_model/
+*_openvino_model/
+darknet53.conv.74
+yolov3-tiny.conv.15
+*.ptl
+*.trt
+
+# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+/wandb/
+.installed.cfg
+*.egg
+
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# dotenv
+.env
+
+# virtualenv
+.venv*
+venv*/
+ENV*/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+
+# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+Icon?
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+
+# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea/*
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/dictionaries
+.html  # Bokeh Plots
+.pg  # TensorFlow Frozen Graphs
+.avi  # videos
+
+# Sensitive or high-churn files:
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+
+# Gradle:
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# CMake
+cmake-build-debug/
+cmake-build-release/
+
+# Mongo Explorer plugin:
+.idea/**/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
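(Note: the ignore list above excludes *.pt, yet this same commit adds yolov7.pt and yolov7_training.pt, so those files were presumably staged explicitly, e.g. with `git add -f`. Which rule would otherwise match a given path can be checked with:

    git check-ignore -v yolov7.pt    # prints the .gitignore file, line number, and pattern that matches
)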
AutoFis YOLOv7.ipynb
ADDED
@@ -0,0 +1 @@
1 | + (single-line notebook JSON — a Google Colab notebook with GPU runtime; its code cells are reproduced below, with the lengthy captured outputs condensed into bracketed notes)

# Cell 1 — mount Google Drive
from google.colab import drive
drive.mount('/content/drive')

# Cell 2 — check the GPU [output: one Tesla T4, driver 525.85.12, CUDA 12.0]
!nvidia-smi

# Cell 3 — clone the SkalskiP YOLOv7 fork, switch to its PyTorch/NumPy compatibility branch, install dependencies
!git clone https://github.com/SkalskiP/yolov7.git
%cd yolov7
!git checkout fix/problems_associated_with_the_latest_versions_of_pytorch_and_numpy
!pip install -r requirements.txt

# Cell 4 — download the dataset from Roboflow (workspace daniel-5cnur, project fish-pytorch,
# version 11, YOLOv7 format; 5962 files extracted)
!pip install roboflow

from roboflow import Roboflow
rf = Roboflow(api_key="13lan6RXdL1vpsbFUM8L")
project = rf.workspace("daniel-5cnur").project("fish-pytorch")
dataset = project.version(11).download("yolov7")

# Cell 5 — fetch the COCO-pretrained training checkpoint (72 MB)
%cd /content/yolov7
!wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7_training.pt

# Cell 6 — training [output: nc overridden from 80 to 31 classes; 2003 train / 760 val images;
# 415-layer, 37.4M-parameter model, 105.6 GFLOPS; the logged run was interrupted by a
# KeyboardInterrupt during epoch 0/74 validation]
%cd /content/yolov7
!python train.py --batch 16 --epochs 75 --data {dataset.location}/data.yaml --weights 'yolov7_training.pt' #--device 1

# Cell 7 — run inference on the test images
!python detect.py --weights runs/train/exp/weights/best.pt --conf 0.1 --source {dataset.location}/test/images

# Cell 8 — display inference on ALL test images
import glob
from IPython.display import Image, display

i = 0
limit = 10000  # max images to print
for imageName in glob.glob('/content/yolov7/runs/detect/exp/*.jpg'):  # assuming JPG
    if i < limit:
        display(Image(filename=imageName))
        print("\n")
    i = i + 1

# Cell 9 — run inference on a single image with weights stored on Drive
# (paths quoted here because they contain spaces, which would break the shell call)
%cd "/content/drive/MyDrive/Final Year Project/yolov7"
!python detect.py --weights "/content/drive/MyDrive/Final Year Project/yolov7/runs/train/exp/weights/best.pt" --conf 0.1 --source /content/download.jpg

# Cell 10 — show the annotated image inline (the original passed the glob result list straight
# to display(), which prints the list rather than the image; fixed to display each file)
import glob
from IPython.display import Image, display

for name in glob.glob('/content/yolov7/runs/detect/exp2/download.jpg'):
    display(Image(filename=name))

# Cell 11 — show the same annotated image with OpenCV
import cv2
from google.colab.patches import cv2_imshow

path = r'/content/yolov7/runs/detect/exp2/download.jpg'
image = cv2.imread(path)
cv2_imshow(image)

# Cell 12 — zip the whole working directory
!zip -r /content/file.zip /content/yolov7

# Cell 13 — download the archive [the logged run raised FileNotFoundError: Cannot find file: /content/file.zip]
from google.colab import files
files.download("/content/file.zip")

# Cell 14 — zip weights and results to download locally
!zip -r export.zip runs/detect
!zip -r export.zip runs/train/exp/weights/best.pt
!zip export.zip runs/train/exp/*

# Cell 15 — download the export archive
files.download("export.zip")
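(The committed yolov7.pt / yolov7_training.pt checkpoints can be exercised the same way the notebook does — a minimal sketch, where the image path is an assumption:

    # run the committed checkpoint on an arbitrary image (path is hypothetical)
    python detect.py --weights yolov7.pt --conf 0.1 --source path/to/image.jpg

detect.py writes annotated copies under runs/detect/exp*/, which is what the notebook's display cells read back.)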
LICENSE.md
ADDED
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
|
170 |
+
not control copyright. Those thus making or running the covered works
|
171 |
+
for you must do so exclusively on your behalf, under your direction
|
172 |
+
and control, on terms that prohibit them from making any copies of
|
173 |
+
your copyrighted material outside their relationship with you.
|
174 |
+
|
175 |
+
Conveying under any other circumstances is permitted solely under
|
176 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
177 |
+
makes it unnecessary.
|
178 |
+
|
179 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
180 |
+
|
181 |
+
No covered work shall be deemed part of an effective technological
|
182 |
+
measure under any applicable law fulfilling obligations under article
|
183 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
184 |
+
similar laws prohibiting or restricting circumvention of such
|
185 |
+
measures.
|
186 |
+
|
187 |
+
When you convey a covered work, you waive any legal power to forbid
|
188 |
+
circumvention of technological measures to the extent such circumvention
|
189 |
+
is effected by exercising rights under this License with respect to
|
190 |
+
the covered work, and you disclaim any intention to limit operation or
|
191 |
+
modification of the work as a means of enforcing, against the work's
|
192 |
+
users, your or third parties' legal rights to forbid circumvention of
|
193 |
+
technological measures.
|
194 |
+
|
195 |
+
4. Conveying Verbatim Copies.
|
196 |
+
|
197 |
+
You may convey verbatim copies of the Program's source code as you
|
198 |
+
receive it, in any medium, provided that you conspicuously and
|
199 |
+
appropriately publish on each copy an appropriate copyright notice;
|
200 |
+
keep intact all notices stating that this License and any
|
201 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
202 |
+
keep intact all notices of the absence of any warranty; and give all
|
203 |
+
recipients a copy of this License along with the Program.
|
204 |
+
|
205 |
+
You may charge any price or no price for each copy that you convey,
|
206 |
+
and you may offer support or warranty protection for a fee.
|
207 |
+
|
208 |
+
5. Conveying Modified Source Versions.
|
209 |
+
|
210 |
+
You may convey a work based on the Program, or the modifications to
|
211 |
+
produce it from the Program, in the form of source code under the
|
212 |
+
terms of section 4, provided that you also meet all of these conditions:
|
213 |
+
|
214 |
+
a) The work must carry prominent notices stating that you modified
|
215 |
+
it, and giving a relevant date.
|
216 |
+
|
217 |
+
b) The work must carry prominent notices stating that it is
|
218 |
+
released under this License and any conditions added under section
|
219 |
+
7. This requirement modifies the requirement in section 4 to
|
220 |
+
"keep intact all notices".
|
221 |
+
|
222 |
+
c) You must license the entire work, as a whole, under this
|
223 |
+
License to anyone who comes into possession of a copy. This
|
224 |
+
License will therefore apply, along with any applicable section 7
|
225 |
+
additional terms, to the whole of the work, and all its parts,
|
226 |
+
regardless of how they are packaged. This License gives no
|
227 |
+
permission to license the work in any other way, but it does not
|
228 |
+
invalidate such permission if you have separately received it.
|
229 |
+
|
230 |
+
d) If the work has interactive user interfaces, each must display
|
231 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
232 |
+
interfaces that do not display Appropriate Legal Notices, your
|
233 |
+
work need not make them do so.
|
234 |
+
|
235 |
+
A compilation of a covered work with other separate and independent
|
236 |
+
works, which are not by their nature extensions of the covered work,
|
237 |
+
and which are not combined with it such as to form a larger program,
|
238 |
+
in or on a volume of a storage or distribution medium, is called an
|
239 |
+
"aggregate" if the compilation and its resulting copyright are not
|
240 |
+
used to limit the access or legal rights of the compilation's users
|
241 |
+
beyond what the individual works permit. Inclusion of a covered work
|
242 |
+
in an aggregate does not cause this License to apply to the other
|
243 |
+
parts of the aggregate.
|
244 |
+
|
245 |
+
6. Conveying Non-Source Forms.
|
246 |
+
|
247 |
+
You may convey a covered work in object code form under the terms
|
248 |
+
of sections 4 and 5, provided that you also convey the
|
249 |
+
machine-readable Corresponding Source under the terms of this License,
|
250 |
+
in one of these ways:
|
251 |
+
|
252 |
+
a) Convey the object code in, or embodied in, a physical product
|
253 |
+
(including a physical distribution medium), accompanied by the
|
254 |
+
Corresponding Source fixed on a durable physical medium
|
255 |
+
customarily used for software interchange.
|
256 |
+
|
257 |
+
b) Convey the object code in, or embodied in, a physical product
|
258 |
+
(including a physical distribution medium), accompanied by a
|
259 |
+
written offer, valid for at least three years and valid for as
|
260 |
+
long as you offer spare parts or customer support for that product
|
261 |
+
model, to give anyone who possesses the object code either (1) a
|
262 |
+
copy of the Corresponding Source for all the software in the
|
263 |
+
product that is covered by this License, on a durable physical
|
264 |
+
medium customarily used for software interchange, for a price no
|
265 |
+
more than your reasonable cost of physically performing this
|
266 |
+
conveying of source, or (2) access to copy the
|
267 |
+
Corresponding Source from a network server at no charge.
|
268 |
+
|
269 |
+
c) Convey individual copies of the object code with a copy of the
|
270 |
+
written offer to provide the Corresponding Source. This
|
271 |
+
alternative is allowed only occasionally and noncommercially, and
|
272 |
+
only if you received the object code with such an offer, in accord
|
273 |
+
with subsection 6b.
|
274 |
+
|
275 |
+
d) Convey the object code by offering access from a designated
|
276 |
+
place (gratis or for a charge), and offer equivalent access to the
|
277 |
+
Corresponding Source in the same way through the same place at no
|
278 |
+
further charge. You need not require recipients to copy the
|
279 |
+
Corresponding Source along with the object code. If the place to
|
280 |
+
copy the object code is a network server, the Corresponding Source
|
281 |
+
may be on a different server (operated by you or a third party)
|
282 |
+
that supports equivalent copying facilities, provided you maintain
|
283 |
+
clear directions next to the object code saying where to find the
|
284 |
+
Corresponding Source. Regardless of what server hosts the
|
285 |
+
Corresponding Source, you remain obligated to ensure that it is
|
286 |
+
available for as long as needed to satisfy these requirements.
|
287 |
+
|
288 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
289 |
+
you inform other peers where the object code and Corresponding
|
290 |
+
Source of the work are being offered to the general public at no
|
291 |
+
charge under subsection 6d.
|
292 |
+
|
293 |
+
A separable portion of the object code, whose source code is excluded
|
294 |
+
from the Corresponding Source as a System Library, need not be
|
295 |
+
included in conveying the object code work.
|
296 |
+
|
297 |
+
A "User Product" is either (1) a "consumer product", which means any
|
298 |
+
tangible personal property which is normally used for personal, family,
|
299 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
300 |
+
into a dwelling. In determining whether a product is a consumer product,
|
301 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
302 |
+
product received by a particular user, "normally used" refers to a
|
303 |
+
typical or common use of that class of product, regardless of the status
|
304 |
+
of the particular user or of the way in which the particular user
|
305 |
+
actually uses, or expects or is expected to use, the product. A product
|
306 |
+
is a consumer product regardless of whether the product has substantial
|
307 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
308 |
+
the only significant mode of use of the product.
|
309 |
+
|
310 |
+
"Installation Information" for a User Product means any methods,
|
311 |
+
procedures, authorization keys, or other information required to install
|
312 |
+
and execute modified versions of a covered work in that User Product from
|
313 |
+
a modified version of its Corresponding Source. The information must
|
314 |
+
suffice to ensure that the continued functioning of the modified object
|
315 |
+
code is in no case prevented or interfered with solely because
|
316 |
+
modification has been made.
|
317 |
+
|
318 |
+
If you convey an object code work under this section in, or with, or
|
319 |
+
specifically for use in, a User Product, and the conveying occurs as
|
320 |
+
part of a transaction in which the right of possession and use of the
|
321 |
+
User Product is transferred to the recipient in perpetuity or for a
|
322 |
+
fixed term (regardless of how the transaction is characterized), the
|
323 |
+
Corresponding Source conveyed under this section must be accompanied
|
324 |
+
by the Installation Information. But this requirement does not apply
|
325 |
+
if neither you nor any third party retains the ability to install
|
326 |
+
modified object code on the User Product (for example, the work has
|
327 |
+
been installed in ROM).
|
328 |
+
|
329 |
+
The requirement to provide Installation Information does not include a
|
330 |
+
requirement to continue to provide support service, warranty, or updates
|
331 |
+
for a work that has been modified or installed by the recipient, or for
|
332 |
+
the User Product in which it has been modified or installed. Access to a
|
333 |
+
network may be denied when the modification itself materially and
|
334 |
+
adversely affects the operation of the network or violates the rules and
|
335 |
+
protocols for communication across the network.
|
336 |
+
|
337 |
+
Corresponding Source conveyed, and Installation Information provided,
|
338 |
+
in accord with this section must be in a format that is publicly
|
339 |
+
documented (and with an implementation available to the public in
|
340 |
+
source code form), and must require no special password or key for
|
341 |
+
unpacking, reading or copying.
|
342 |
+
|
343 |
+
7. Additional Terms.
|
344 |
+
|
345 |
+
"Additional permissions" are terms that supplement the terms of this
|
346 |
+
License by making exceptions from one or more of its conditions.
|
347 |
+
Additional permissions that are applicable to the entire Program shall
|
348 |
+
be treated as though they were included in this License, to the extent
|
349 |
+
that they are valid under applicable law. If additional permissions
|
350 |
+
apply only to part of the Program, that part may be used separately
|
351 |
+
under those permissions, but the entire Program remains governed by
|
352 |
+
this License without regard to the additional permissions.
|
353 |
+
|
354 |
+
When you convey a copy of a covered work, you may at your option
|
355 |
+
remove any additional permissions from that copy, or from any part of
|
356 |
+
it. (Additional permissions may be written to require their own
|
357 |
+
removal in certain cases when you modify the work.) You may place
|
358 |
+
additional permissions on material, added by you to a covered work,
|
359 |
+
for which you have or can give appropriate copyright permission.
|
360 |
+
|
361 |
+
Notwithstanding any other provision of this License, for material you
|
362 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
363 |
+
that material) supplement the terms of this License with terms:
|
364 |
+
|
365 |
+
a) Disclaiming warranty or limiting liability differently from the
|
366 |
+
terms of sections 15 and 16 of this License; or
|
367 |
+
|
368 |
+
b) Requiring preservation of specified reasonable legal notices or
|
369 |
+
author attributions in that material or in the Appropriate Legal
|
370 |
+
Notices displayed by works containing it; or
|
371 |
+
|
372 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
373 |
+
requiring that modified versions of such material be marked in
|
374 |
+
reasonable ways as different from the original version; or
|
375 |
+
|
376 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
377 |
+
authors of the material; or
|
378 |
+
|
379 |
+
e) Declining to grant rights under trademark law for use of some
|
380 |
+
trade names, trademarks, or service marks; or
|
381 |
+
|
382 |
+
f) Requiring indemnification of licensors and authors of that
|
383 |
+
material by anyone who conveys the material (or modified versions of
|
384 |
+
it) with contractual assumptions of liability to the recipient, for
|
385 |
+
any liability that these contractual assumptions directly impose on
|
386 |
+
those licensors and authors.
|
387 |
+
|
388 |
+
All other non-permissive additional terms are considered "further
|
389 |
+
restrictions" within the meaning of section 10. If the Program as you
|
390 |
+
received it, or any part of it, contains a notice stating that it is
|
391 |
+
governed by this License along with a term that is a further
|
392 |
+
restriction, you may remove that term. If a license document contains
|
393 |
+
a further restriction but permits relicensing or conveying under this
|
394 |
+
License, you may add to a covered work material governed by the terms
|
395 |
+
of that license document, provided that the further restriction does
|
396 |
+
not survive such relicensing or conveying.
|
397 |
+
|
398 |
+
If you add terms to a covered work in accord with this section, you
|
399 |
+
must place, in the relevant source files, a statement of the
|
400 |
+
additional terms that apply to those files, or a notice indicating
|
401 |
+
where to find the applicable terms.
|
402 |
+
|
403 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
404 |
+
form of a separately written license, or stated as exceptions;
|
405 |
+
the above requirements apply either way.
|
406 |
+
|
407 |
+
8. Termination.
|
408 |
+
|
409 |
+
You may not propagate or modify a covered work except as expressly
|
410 |
+
provided under this License. Any attempt otherwise to propagate or
|
411 |
+
modify it is void, and will automatically terminate your rights under
|
412 |
+
this License (including any patent licenses granted under the third
|
413 |
+
paragraph of section 11).
|
414 |
+
|
415 |
+
However, if you cease all violation of this License, then your
|
416 |
+
license from a particular copyright holder is reinstated (a)
|
417 |
+
provisionally, unless and until the copyright holder explicitly and
|
418 |
+
finally terminates your license, and (b) permanently, if the copyright
|
419 |
+
holder fails to notify you of the violation by some reasonable means
|
420 |
+
prior to 60 days after the cessation.
|
421 |
+
|
422 |
+
Moreover, your license from a particular copyright holder is
|
423 |
+
reinstated permanently if the copyright holder notifies you of the
|
424 |
+
violation by some reasonable means, this is the first time you have
|
425 |
+
received notice of violation of this License (for any work) from that
|
426 |
+
copyright holder, and you cure the violation prior to 30 days after
|
427 |
+
your receipt of the notice.
|
428 |
+
|
429 |
+
Termination of your rights under this section does not terminate the
|
430 |
+
licenses of parties who have received copies or rights from you under
|
431 |
+
this License. If your rights have been terminated and not permanently
|
432 |
+
reinstated, you do not qualify to receive new licenses for the same
|
433 |
+
material under section 10.
|
434 |
+
|
435 |
+
9. Acceptance Not Required for Having Copies.
|
436 |
+
|
437 |
+
You are not required to accept this License in order to receive or
|
438 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
439 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
440 |
+
to receive a copy likewise does not require acceptance. However,
|
441 |
+
nothing other than this License grants you permission to propagate or
|
442 |
+
modify any covered work. These actions infringe copyright if you do
|
443 |
+
not accept this License. Therefore, by modifying or propagating a
|
444 |
+
covered work, you indicate your acceptance of this License to do so.
|
445 |
+
|
446 |
+
10. Automatic Licensing of Downstream Recipients.
|
447 |
+
|
448 |
+
Each time you convey a covered work, the recipient automatically
|
449 |
+
receives a license from the original licensors, to run, modify and
|
450 |
+
propagate that work, subject to this License. You are not responsible
|
451 |
+
for enforcing compliance by third parties with this License.
|
452 |
+
|
453 |
+
An "entity transaction" is a transaction transferring control of an
|
454 |
+
organization, or substantially all assets of one, or subdividing an
|
455 |
+
organization, or merging organizations. If propagation of a covered
|
456 |
+
work results from an entity transaction, each party to that
|
457 |
+
transaction who receives a copy of the work also receives whatever
|
458 |
+
licenses to the work the party's predecessor in interest had or could
|
459 |
+
give under the previous paragraph, plus a right to possession of the
|
460 |
+
Corresponding Source of the work from the predecessor in interest, if
|
461 |
+
the predecessor has it or can get it with reasonable efforts.
|
462 |
+
|
463 |
+
You may not impose any further restrictions on the exercise of the
|
464 |
+
rights granted or affirmed under this License. For example, you may
|
465 |
+
not impose a license fee, royalty, or other charge for exercise of
|
466 |
+
rights granted under this License, and you may not initiate litigation
|
467 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
468 |
+
any patent claim is infringed by making, using, selling, offering for
|
469 |
+
sale, or importing the Program or any portion of it.
|
470 |
+
|
471 |
+
11. Patents.
|
472 |
+
|
473 |
+
A "contributor" is a copyright holder who authorizes use under this
|
474 |
+
License of the Program or a work on which the Program is based. The
|
475 |
+
work thus licensed is called the contributor's "contributor version".
|
476 |
+
|
477 |
+
A contributor's "essential patent claims" are all patent claims
|
478 |
+
owned or controlled by the contributor, whether already acquired or
|
479 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
480 |
+
by this License, of making, using, or selling its contributor version,
|
481 |
+
but do not include claims that would be infringed only as a
|
482 |
+
consequence of further modification of the contributor version. For
|
483 |
+
purposes of this definition, "control" includes the right to grant
|
484 |
+
patent sublicenses in a manner consistent with the requirements of
|
485 |
+
this License.
|
486 |
+
|
487 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
488 |
+
patent license under the contributor's essential patent claims, to
|
489 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
490 |
+
propagate the contents of its contributor version.
|
491 |
+
|
492 |
+
In the following three paragraphs, a "patent license" is any express
|
493 |
+
agreement or commitment, however denominated, not to enforce a patent
|
494 |
+
(such as an express permission to practice a patent or covenant not to
|
495 |
+
sue for patent infringement). To "grant" such a patent license to a
|
496 |
+
party means to make such an agreement or commitment not to enforce a
|
497 |
+
patent against the party.
|
498 |
+
|
499 |
+
If you convey a covered work, knowingly relying on a patent license,
|
500 |
+
and the Corresponding Source of the work is not available for anyone
|
501 |
+
to copy, free of charge and under the terms of this License, through a
|
502 |
+
publicly available network server or other readily accessible means,
|
503 |
+
then you must either (1) cause the Corresponding Source to be so
|
504 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
505 |
+
patent license for this particular work, or (3) arrange, in a manner
|
506 |
+
consistent with the requirements of this License, to extend the patent
|
507 |
+
license to downstream recipients. "Knowingly relying" means you have
|
508 |
+
actual knowledge that, but for the patent license, your conveying the
|
509 |
+
covered work in a country, or your recipient's use of the covered work
|
510 |
+
in a country, would infringe one or more identifiable patents in that
|
511 |
+
country that you have reason to believe are valid.
|
512 |
+
|
513 |
+
If, pursuant to or in connection with a single transaction or
|
514 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
515 |
+
covered work, and grant a patent license to some of the parties
|
516 |
+
receiving the covered work authorizing them to use, propagate, modify
|
517 |
+
or convey a specific copy of the covered work, then the patent license
|
518 |
+
you grant is automatically extended to all recipients of the covered
|
519 |
+
work and works based on it.
|
520 |
+
|
521 |
+
A patent license is "discriminatory" if it does not include within
|
522 |
+
the scope of its coverage, prohibits the exercise of, or is
|
523 |
+
conditioned on the non-exercise of one or more of the rights that are
|
524 |
+
specifically granted under this License. You may not convey a covered
|
525 |
+
work if you are a party to an arrangement with a third party that is
|
526 |
+
in the business of distributing software, under which you make payment
|
527 |
+
to the third party based on the extent of your activity of conveying
|
528 |
+
the work, and under which the third party grants, to any of the
|
529 |
+
parties who would receive the covered work from you, a discriminatory
|
530 |
+
patent license (a) in connection with copies of the covered work
|
531 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
532 |
+
for and in connection with specific products or compilations that
|
533 |
+
contain the covered work, unless you entered into that arrangement,
|
534 |
+
or that patent license was granted, prior to 28 March 2007.
|
535 |
+
|
536 |
+
Nothing in this License shall be construed as excluding or limiting
|
537 |
+
any implied license or other defenses to infringement that may
|
538 |
+
otherwise be available to you under applicable patent law.
|
539 |
+
|
540 |
+
12. No Surrender of Others' Freedom.
|
541 |
+
|
542 |
+
If conditions are imposed on you (whether by court order, agreement or
|
543 |
+
otherwise) that contradict the conditions of this License, they do not
|
544 |
+
excuse you from the conditions of this License. If you cannot convey a
|
545 |
+
covered work so as to satisfy simultaneously your obligations under this
|
546 |
+
License and any other pertinent obligations, then as a consequence you may
|
547 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
548 |
+
to collect a royalty for further conveying from those to whom you convey
|
549 |
+
the Program, the only way you could satisfy both those terms and this
|
550 |
+
License would be to refrain entirely from conveying the Program.
|
551 |
+
|
552 |
+
13. Use with the GNU Affero General Public License.
|
553 |
+
|
554 |
+
Notwithstanding any other provision of this License, you have
|
555 |
+
permission to link or combine any covered work with a work licensed
|
556 |
+
under version 3 of the GNU Affero General Public License into a single
|
557 |
+
combined work, and to convey the resulting work. The terms of this
|
558 |
+
License will continue to apply to the part which is the covered work,
|
559 |
+
but the special requirements of the GNU Affero General Public License,
|
560 |
+
section 13, concerning interaction through a network will apply to the
|
561 |
+
combination as such.
|
562 |
+
|
563 |
+
14. Revised Versions of this License.
|
564 |
+
|
565 |
+
The Free Software Foundation may publish revised and/or new versions of
|
566 |
+
the GNU General Public License from time to time. Such new versions will
|
567 |
+
be similar in spirit to the present version, but may differ in detail to
|
568 |
+
address new problems or concerns.
|
569 |
+
|
570 |
+
Each version is given a distinguishing version number. If the
|
571 |
+
Program specifies that a certain numbered version of the GNU General
|
572 |
+
Public License "or any later version" applies to it, you have the
|
573 |
+
option of following the terms and conditions either of that numbered
|
574 |
+
version or of any later version published by the Free Software
|
575 |
+
Foundation. If the Program does not specify a version number of the
|
576 |
+
GNU General Public License, you may choose any version ever published
|
577 |
+
by the Free Software Foundation.
|
578 |
+
|
579 |
+
If the Program specifies that a proxy can decide which future
|
580 |
+
versions of the GNU General Public License can be used, that proxy's
|
581 |
+
public statement of acceptance of a version permanently authorizes you
|
582 |
+
to choose that version for the Program.
|
583 |
+
|
584 |
+
Later license versions may give you additional or different
|
585 |
+
permissions. However, no additional obligations are imposed on any
|
586 |
+
author or copyright holder as a result of your choosing to follow a
|
587 |
+
later version.
|
588 |
+
|
589 |
+
15. Disclaimer of Warranty.
|
590 |
+
|
591 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
592 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
593 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
594 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
595 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
596 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
597 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
598 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
599 |
+
|
600 |
+
16. Limitation of Liability.
|
601 |
+
|
602 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
603 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
604 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
605 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
606 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
607 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
608 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
609 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
610 |
+
SUCH DAMAGES.
|
611 |
+
|
612 |
+
17. Interpretation of Sections 15 and 16.
|
613 |
+
|
614 |
+
If the disclaimer of warranty and limitation of liability provided
|
615 |
+
above cannot be given local legal effect according to their terms,
|
616 |
+
reviewing courts shall apply local law that most closely approximates
|
617 |
+
an absolute waiver of all civil liability in connection with the
|
618 |
+
Program, unless a warranty or assumption of liability accompanies a
|
619 |
+
copy of the Program in return for a fee.
|
620 |
+
|
621 |
+
END OF TERMS AND CONDITIONS
|
622 |
+
|
623 |
+
How to Apply These Terms to Your New Programs
|
624 |
+
|
625 |
+
If you develop a new program, and you want it to be of the greatest
|
626 |
+
possible use to the public, the best way to achieve this is to make it
|
627 |
+
free software which everyone can redistribute and change under these terms.
|
628 |
+
|
629 |
+
To do so, attach the following notices to the program. It is safest
|
630 |
+
to attach them to the start of each source file to most effectively
|
631 |
+
state the exclusion of warranty; and each file should have at least
|
632 |
+
the "copyright" line and a pointer to where the full notice is found.
|
633 |
+
|
634 |
+
<one line to give the program's name and a brief idea of what it does.>
|
635 |
+
Copyright (C) <year> <name of author>
|
636 |
+
|
637 |
+
This program is free software: you can redistribute it and/or modify
|
638 |
+
it under the terms of the GNU General Public License as published by
|
639 |
+
the Free Software Foundation, either version 3 of the License, or
|
640 |
+
(at your option) any later version.
|
641 |
+
|
642 |
+
This program is distributed in the hope that it will be useful,
|
643 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
644 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
645 |
+
GNU General Public License for more details.
|
646 |
+
|
647 |
+
You should have received a copy of the GNU General Public License
|
648 |
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
649 |
+
|
650 |
+
Also add information on how to contact you by electronic and paper mail.
|
651 |
+
|
652 |
+
If the program does terminal interaction, make it output a short
|
653 |
+
notice like this when it starts in an interactive mode:
|
654 |
+
|
655 |
+
<program> Copyright (C) <year> <name of author>
|
656 |
+
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
657 |
+
This is free software, and you are welcome to redistribute it
|
658 |
+
under certain conditions; type `show c' for details.
|
659 |
+
|
660 |
+
The hypothetical commands `show w' and `show c' should show the appropriate
|
661 |
+
parts of the General Public License. Of course, your program's commands
|
662 |
+
might be different; for a GUI interface, you would use an "about box".
|
663 |
+
|
664 |
+
You should also get your employer (if you work as a programmer) or school,
|
665 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
666 |
+
For more information on this, and how to apply and follow the GNU GPL, see
|
667 |
+
<https://www.gnu.org/licenses/>.
|
668 |
+
|
669 |
+
The GNU General Public License does not permit incorporating your program
|
670 |
+
into proprietary programs. If your program is a subroutine library, you
|
671 |
+
may consider it more useful to permit linking proprietary applications with
|
672 |
+
the library. If this is what you want to do, use the GNU Lesser General
|
673 |
+
Public License instead of this License. But first, please read
|
674 |
+
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
README.md
CHANGED
@@ -1,3 +1,279 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
1 |
+
# Official YOLOv7
|
2 |
+
|
3 |
+
Implementation of paper - [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696)
|
4 |
+
|
5 |
+
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yolov7-trainable-bag-of-freebies-sets-new/real-time-object-detection-on-coco)](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=yolov7-trainable-bag-of-freebies-sets-new)
|
6 |
+
[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/yolov7)
|
7 |
+
<a href="https://colab.research.google.com/gist/AlexeyAB/b769f5795e65fdab80086f6cb7940dae/yolov7detection.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
|
8 |
+
[![arxiv.org](http://img.shields.io/badge/cs.CV-arXiv%3A2207.02696-B31B1B.svg)](https://arxiv.org/abs/2207.02696)
|
9 |
+
|
10 |
+
<div align="center">
|
11 |
+
<a href="./">
|
12 |
+
<img src="./figure/performance.png" width="79%"/>
|
13 |
+
</a>
|
14 |
+
</div>
|
15 |
+
|
16 |
+
## Web Demo
|
17 |
+
|
18 |
+
- Integrated into [Hugging Face Spaces 🤗](https://huggingface.co/spaces/akhaliq/yolov7) using Gradio. Try out the Web Demo [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/yolov7)
|
19 |
+
|
20 |
+
## Performance
|
21 |
+
|
22 |
+
MS COCO
|
23 |
+
|
24 |
+
| Model | Test Size | AP<sup>test</sup> | AP<sub>50</sub><sup>test</sup> | AP<sub>75</sub><sup>test</sup> | batch 1 fps | batch 32 average time |
|
25 |
+
| :-- | :-: | :-: | :-: | :-: | :-: | :-: |
|
26 |
+
| [**YOLOv7**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) | 640 | **51.4%** | **69.7%** | **55.9%** | 161 *fps* | 2.8 *ms* |
|
27 |
+
| [**YOLOv7-X**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) | 640 | **53.1%** | **71.2%** | **57.8%** | 114 *fps* | 4.3 *ms* |
|
28 |
+
| | | | | | | |
|
29 |
+
| [**YOLOv7-W6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) | 1280 | **54.9%** | **72.6%** | **60.1%** | 84 *fps* | 7.6 *ms* |
|
30 |
+
| [**YOLOv7-E6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) | 1280 | **56.0%** | **73.5%** | **61.2%** | 56 *fps* | 12.3 *ms* |
|
31 |
+
| [**YOLOv7-D6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) | 1280 | **56.6%** | **74.0%** | **61.8%** | 44 *fps* | 15.0 *ms* |
|
32 |
+
| [**YOLOv7-E6E**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt) | 1280 | **56.8%** | **74.4%** | **62.1%** | 36 *fps* | 18.7 *ms* |
|
33 |
+
|
34 |
+
## Installation
|
35 |
+
|
36 |
+
Docker environment (recommended)
|
37 |
+
<details><summary> <b>Expand</b> </summary>
|
38 |
+
|
39 |
+
``` shell
|
40 |
+
# create the docker container; you can increase the shared memory size if you have more available
|
41 |
+
nvidia-docker run --name yolov7 -it -v your_coco_path/:/coco/ -v your_code_path/:/yolov7 --shm-size=64g nvcr.io/nvidia/pytorch:21.08-py3
|
42 |
+
|
43 |
+
# apt install required packages
|
44 |
+
apt update
|
45 |
+
apt install -y zip htop screen libgl1-mesa-glx
|
46 |
+
|
47 |
+
# pip install required packages
|
48 |
+
pip install seaborn thop
|
49 |
+
|
50 |
+
# go to code folder
|
51 |
+
cd /yolov7
|
52 |
+
```
|
53 |
+
|
54 |
+
</details>
|
55 |
+
|
56 |
+
## Testing
|
57 |
+
|
58 |
+
[`yolov7.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) [`yolov7x.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) [`yolov7-w6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) [`yolov7-e6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) [`yolov7-d6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) [`yolov7-e6e.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt)
|
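
For example, the base checkpoint evaluated below can be fetched directly (a minimal sketch; the release URL is the one linked above):

``` shell
# download the checkpoint used by the test command below
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
```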
59 |
+
|
60 |
+
``` shell
|
61 |
+
python test.py --data data/coco.yaml --img 640 --batch 32 --conf 0.001 --iou 0.65 --device 0 --weights yolov7.pt --name yolov7_640_val
|
62 |
+
```
|
63 |
+
|
64 |
+
You should get results like the following:
|
65 |
+
|
66 |
+
```
|
67 |
+
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.51206
|
68 |
+
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.69730
|
69 |
+
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.55521
|
70 |
+
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.35247
|
71 |
+
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.55937
|
72 |
+
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.66693
|
73 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.38453
|
74 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.63765
|
75 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.68772
|
76 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.53766
|
77 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.73549
|
78 |
+
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.83868
|
79 |
+
```
|
80 |
+
|
81 |
+
To measure accuracy, download the [COCO annotations for pycocotools](http://images.cocodataset.org/annotations/annotations_trainval2017.zip) and place the file at `./coco/annotations/instances_val2017.json`.
|
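
A minimal sketch of that step, assuming the dataset root is `./coco/`:

``` shell
# download the COCO 2017 annotations and unpack them under ./coco/
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
unzip annotations_trainval2017.zip -d ./coco/  # yields ./coco/annotations/instances_val2017.json
```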
82 |
+
|
83 |
+
## Training
|
84 |
+
|
85 |
+
Data preparation
|
86 |
+
|
87 |
+
``` shell
|
88 |
+
bash scripts/get_coco.sh
|
89 |
+
```
|
90 |
+
|
91 |
+
* Download MS COCO dataset images ([train](http://images.cocodataset.org/zips/train2017.zip), [val](http://images.cocodataset.org/zips/val2017.zip), [test](http://images.cocodataset.org/zips/test2017.zip)) and [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip). If you have previously used a different version of YOLO, we strongly recommend that you delete the `train2017.cache` and `val2017.cache` files and redownload the [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip).
|
92 |
+
|
93 |
+
Single GPU training
|
94 |
+
|
95 |
+
``` shell
|
96 |
+
# train p5 models
|
97 |
+
python train.py --workers 8 --device 0 --batch-size 32 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml
|
98 |
+
|
99 |
+
# train p6 models
|
100 |
+
python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml
|
101 |
+
```
|
102 |
+
|
103 |
+
Multiple GPU training
|
104 |
+
|
105 |
+
``` shell
|
106 |
+
# train p5 models
|
107 |
+
python -m torch.distributed.launch --nproc_per_node 4 --master_port 9527 train.py --workers 8 --device 0,1,2,3 --sync-bn --batch-size 128 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml
|
108 |
+
|
109 |
+
# train p6 models
|
110 |
+
python -m torch.distributed.launch --nproc_per_node 8 --master_port 9527 train_aux.py --workers 8 --device 0,1,2,3,4,5,6,7 --sync-bn --batch-size 128 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml
|
111 |
+
```
|
112 |
+
|
113 |
+
## Transfer learning
|
114 |
+
|
115 |
+
[`yolov7_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7_training.pt) [`yolov7x_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x_training.pt) [`yolov7-w6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6_training.pt) [`yolov7-e6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6_training.pt) [`yolov7-d6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6_training.pt) [`yolov7-e6e_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e_training.pt)
|
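
As with testing, download the matching `_training` checkpoint before fine-tuning (a sketch; URLs as linked above):

``` shell
# fetch the transfer-learning weights used by the p5 fine-tuning command below
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7_training.pt
```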
116 |
+
|
117 |
+
Single GPU fine-tuning for a custom dataset
|
118 |
+
|
119 |
+
``` shell
|
120 |
+
# finetune p5 models
|
121 |
+
python train.py --workers 8 --device 0 --batch-size 32 --data data/custom.yaml --img 640 640 --cfg cfg/training/yolov7-custom.yaml --weights 'yolov7_training.pt' --name yolov7-custom --hyp data/hyp.scratch.custom.yaml
|
122 |
+
|
123 |
+
# finetune p6 models
|
124 |
+
python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/custom.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6-custom.yaml --weights 'yolov7-w6_training.pt' --name yolov7-w6-custom --hyp data/hyp.scratch.custom.yaml
|
125 |
+
```
|
126 |
+
|
127 |
+
## Re-parameterization
|
128 |
+
|
129 |
+
See [reparameterization.ipynb](tools/reparameterization.ipynb).
|
130 |
+
|
131 |
+
## Inference
|
132 |
+
|
133 |
+
On video:
|
134 |
+
``` shell
|
135 |
+
python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source yourvideo.mp4
|
136 |
+
```
|
137 |
+
|
138 |
+
On image:
|
139 |
+
``` shell
|
140 |
+
python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inference/images/horses.jpg
|
141 |
+
```
|
142 |
+
|
143 |
+
<div align="center">
|
144 |
+
<a href="./">
|
145 |
+
<img src="./figure/horses_prediction.jpg" width="59%"/>
|
146 |
+
</a>
|
147 |
+
</div>
|
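
detect.py also treats a numeric `--source` as a camera index (per the script's `--source` help, "0 for webcam"), so live inference is a sketch away:

``` shell
# run live inference on the default webcam (camera index 0)
python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source 0
```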
148 |
+
|
149 |
+
|
150 |
+
## Export
|
151 |
+
|
152 |
+
**PyTorch to CoreML (and inference on macOS/iOS)** <a href="https://colab.research.google.com/github/WongKinYiu/yolov7/blob/main/tools/YOLOv7CoreML.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
|
153 |
+
|
154 |
+
**PyTorch to ONNX with NMS (and inference)** <a href="https://colab.research.google.com/github/WongKinYiu/yolov7/blob/main/tools/YOLOv7onnx.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
|
155 |
+
```shell
|
156 |
+
python export.py --weights yolov7-tiny.pt --grid --end2end --simplify \
|
157 |
+
--topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640
|
158 |
+
```
|
159 |
+
|
160 |
+
**PyTorch to TensorRT with NMS (and inference)** <a href="https://colab.research.google.com/github/WongKinYiu/yolov7/blob/main/tools/YOLOv7trt.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
|
161 |
+
|
162 |
+
```shell
|
163 |
+
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt
|
164 |
+
python export.py --weights ./yolov7-tiny.pt --grid --end2end --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640
|
165 |
+
git clone https://github.com/Linaom1214/tensorrt-python.git
|
166 |
+
python ./tensorrt-python/export.py -o yolov7-tiny.onnx -e yolov7-tiny-nms.trt -p fp16
|
167 |
+
```
|
168 |
+
|
169 |
+
**PyTorch to TensorRT (another way)** <a href="https://colab.research.google.com/gist/AlexeyAB/fcb47ae544cf284eb24d8ad8e880d45c/yolov7trtlinaom.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <details><summary> <b>Expand</b> </summary>
|
170 |
+
|
171 |
+
|
172 |
+
```shell
|
173 |
+
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt
|
174 |
+
python export.py --weights yolov7-tiny.pt --grid --include-nms
|
175 |
+
git clone https://github.com/Linaom1214/tensorrt-python.git
|
176 |
+
python ./tensorrt-python/export.py -o yolov7-tiny.onnx -e yolov7-tiny-nms.trt -p fp16
|
177 |
+
|
178 |
+
# Or use trtexec to convert ONNX to TensorRT engine
|
179 |
+
/usr/src/tensorrt/bin/trtexec --onnx=yolov7-tiny.onnx --saveEngine=yolov7-tiny-nms.trt --fp16
|
180 |
+
```
|
181 |
+
|
182 |
+
</details>
|
183 |
+
|
184 |
+
Tested with: Python 3.7.13, PyTorch 1.12.0+cu113
|
185 |
+
|
186 |
+
## Pose estimation
|
187 |
+
|
188 |
+
[`code`](https://github.com/WongKinYiu/yolov7/tree/pose) [`yolov7-w6-pose.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6-pose.pt)
|
189 |
+
|
190 |
+
See [keypoint.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/keypoint.ipynb).
|
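
To run the notebook locally, fetch the pose checkpoint first (a sketch; the release URL is the one linked above):

``` shell
# download the pose-estimation weights referenced by keypoint.ipynb
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6-pose.pt
```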
191 |
+
|
192 |
+
<div align="center">
|
193 |
+
<a href="./">
|
194 |
+
<img src="./figure/pose.png" width="39%"/>
|
195 |
+
</a>
|
196 |
+
</div>
|
197 |
+
|
198 |
+
|
199 |
+
## Instance segmentation
|
200 |
+
|
201 |
+
[`code`](https://github.com/WongKinYiu/yolov7/tree/mask) [`yolov7-mask.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-mask.pt)
|
202 |
+
|
203 |
+
See [instance.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/instance.ipynb).
|
204 |
+
|
205 |
+
<div align="center">
|
206 |
+
<a href="./">
|
207 |
+
<img src="./figure/mask.png" width="59%"/>
|
208 |
+
</a>
|
209 |
+
</div>
|
210 |
+
|
211 |
+
## Instance segmentation (YOLOv7-seg)
|
212 |
+
|
213 |
+
[`code`](https://github.com/WongKinYiu/yolov7/tree/u7/seg) [`yolov7-seg.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-seg.pt)
|
214 |
+
|
215 |
+
YOLOv7 for instance segmentation (YOLOR + YOLOv5 + YOLACT)
|
216 |
+
|
217 |
+
| Model | Test Size | AP<sup>box</sup> | AP<sub>50</sub><sup>box</sup> | AP<sub>75</sub><sup>box</sup> | AP<sup>mask</sup> | AP<sub>50</sub><sup>mask</sup> | AP<sub>75</sub><sup>mask</sup> |
|
218 |
+
| :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
|
219 |
+
| **YOLOv7-seg** | 640 | **51.4%** | **69.4%** | **55.8%** | **41.5%** | **65.5%** | **43.7%** |
|
220 |
+
|
221 |
+
## Anchor-free detection head
|
222 |
+
|
223 |
+
[`code`](https://github.com/WongKinYiu/yolov7/tree/u6) [`yolov7-u6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-u6.pt)
|
224 |
+
|
225 |
+
YOLOv7 with decoupled TAL head (YOLOR + YOLOv5 + YOLOv6)
|
226 |
+
|
227 |
+
| Model | Test Size | AP<sup>val</sup> | AP<sub>50</sub><sup>val</sup> | AP<sub>75</sub><sup>val</sup> |
|
228 |
+
| :-- | :-: | :-: | :-: | :-: |
|
229 |
+
| **YOLOv7-u6** | 640 | **52.6%** | **69.7%** | **57.3%** |
|
230 |
+
|
231 |
+
|
232 |
+
## Citation
|
233 |
+
|
234 |
+
```
|
235 |
+
@article{wang2022yolov7,
|
236 |
+
title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
|
237 |
+
author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
|
238 |
+
journal={arXiv preprint arXiv:2207.02696},
|
239 |
+
year={2022}
|
240 |
+
}
|
241 |
+
```
|
242 |
+
|
243 |
+
|
244 |
+
## Teaser
|
245 |
+
|
246 |
+
YOLOv7-semantic & YOLOv7-panoptic & YOLOv7-caption
|
247 |
+
|
248 |
+
<div align="center">
|
249 |
+
<a href="./">
|
250 |
+
<img src="./figure/tennis.jpg" width="24%"/>
|
251 |
+
</a>
|
252 |
+
<a href="./">
|
253 |
+
<img src="./figure/tennis_semantic.jpg" width="24%"/>
|
254 |
+
</a>
|
255 |
+
<a href="./">
|
256 |
+
<img src="./figure/tennis_panoptic.png" width="24%"/>
|
257 |
+
</a>
|
258 |
+
<a href="./">
|
259 |
+
<img src="./figure/tennis_caption.png" width="24%"/>
|
260 |
+
</a>
|
261 |
+
</div>
|
262 |
+
|
263 |
+
|
264 |
+
## Acknowledgements
|
265 |
+
|
266 |
+
<details><summary> <b>Expand</b> </summary>
|
267 |
+
|
268 |
+
* [https://github.com/AlexeyAB/darknet](https://github.com/AlexeyAB/darknet)
|
269 |
+
* [https://github.com/WongKinYiu/yolor](https://github.com/WongKinYiu/yolor)
|
270 |
+
* [https://github.com/WongKinYiu/PyTorch_YOLOv4](https://github.com/WongKinYiu/PyTorch_YOLOv4)
|
271 |
+
* [https://github.com/WongKinYiu/ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4)
|
272 |
+
* [https://github.com/Megvii-BaseDetection/YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)
|
273 |
+
* [https://github.com/ultralytics/yolov3](https://github.com/ultralytics/yolov3)
|
274 |
+
* [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5)
|
275 |
+
* [https://github.com/DingXiaoH/RepVGG](https://github.com/DingXiaoH/RepVGG)
|
276 |
+
* [https://github.com/JUGGHM/OREPA_CVPR2022](https://github.com/JUGGHM/OREPA_CVPR2022)
|
277 |
+
* [https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose](https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose)
|
278 |
+
|
279 |
+
</details>
|
detect.py
ADDED
@@ -0,0 +1,196 @@
|
|
1 |
+
import argparse
|
2 |
+
import time
|
3 |
+
from pathlib import Path
|
4 |
+
|
5 |
+
import cv2
|
6 |
+
import torch
|
7 |
+
import torch.backends.cudnn as cudnn
|
8 |
+
from numpy import random
|
9 |
+
|
10 |
+
from models.experimental import attempt_load
|
11 |
+
from utils.datasets import LoadStreams, LoadImages
|
12 |
+
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
|
13 |
+
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
|
14 |
+
from utils.plots import plot_one_box
|
15 |
+
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
|
16 |
+
|
17 |
+
|
18 |
+
def detect(save_img=False):
|
19 |
+
source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
|
20 |
+
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
|
21 |
+
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
|
22 |
+
('rtsp://', 'rtmp://', 'http://', 'https://'))
|
23 |
+
|
24 |
+
# Directories
|
25 |
+
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
|
26 |
+
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
27 |
+
|
28 |
+
# Initialize
|
29 |
+
set_logging()
|
30 |
+
device = select_device(opt.device)
|
31 |
+
half = device.type != 'cpu' # half precision only supported on CUDA
|
32 |
+
|
33 |
+
# Load model
|
34 |
+
model = attempt_load(weights, map_location=device) # load FP32 model
|
35 |
+
stride = int(model.stride.max()) # model stride
|
36 |
+
imgsz = check_img_size(imgsz, s=stride) # check img_size
|
37 |
+
|
38 |
+
if trace:
|
39 |
+
model = TracedModel(model, device, opt.img_size)
|
40 |
+
|
41 |
+
if half:
|
42 |
+
model.half() # to FP16
|
43 |
+
|
44 |
+
# Second-stage classifier
|
45 |
+
classify = False
|
46 |
+
if classify:
|
47 |
+
modelc = load_classifier(name='resnet101', n=2) # initialize
|
48 |
+
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']); modelc.to(device).eval()  # load_state_dict() returns a keys namedtuple, not the model, so call .to()/.eval() on modelc itself
|
49 |
+
|
50 |
+
# Set Dataloader
|
51 |
+
vid_path, vid_writer = None, None
|
52 |
+
if webcam:
|
53 |
+
view_img = check_imshow()
|
54 |
+
cudnn.benchmark = True # set True to speed up constant image size inference
|
55 |
+
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
|
56 |
+
else:
|
57 |
+
dataset = LoadImages(source, img_size=imgsz, stride=stride)
|
58 |
+
|
59 |
+
# Get names and colors
|
60 |
+
names = model.module.names if hasattr(model, 'module') else model.names
|
61 |
+
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
|
62 |
+
|
63 |
+
# Run inference
|
64 |
+
if device.type != 'cpu':
|
65 |
+
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
|
66 |
+
old_img_w = old_img_h = imgsz
|
67 |
+
old_img_b = 1
|
68 |
+
|
69 |
+
t0 = time.time()
|
70 |
+
for path, img, im0s, vid_cap in dataset:
|
71 |
+
img = torch.from_numpy(img).to(device)
|
72 |
+
img = img.half() if half else img.float() # uint8 to fp16/32
|
73 |
+
img /= 255.0 # 0 - 255 to 0.0 - 1.0
|
74 |
+
if img.ndimension() == 3:
|
75 |
+
img = img.unsqueeze(0)
|
76 |
+
|
77 |
+
# Warmup
|
78 |
+
if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
|
79 |
+
old_img_b = img.shape[0]
|
80 |
+
old_img_h = img.shape[2]
|
81 |
+
old_img_w = img.shape[3]
|
82 |
+
for i in range(3):
|
83 |
+
model(img, augment=opt.augment)[0]
|
84 |
+
|
85 |
+
# Inference
|
86 |
+
t1 = time_synchronized()
|
87 |
+
with torch.no_grad(): # Calculating gradients would cause a GPU memory leak
|
88 |
+
pred = model(img, augment=opt.augment)[0]
|
89 |
+
t2 = time_synchronized()
|
90 |
+
|
91 |
+
# Apply NMS
|
92 |
+
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
|
93 |
+
t3 = time_synchronized()
|
94 |
+
|
95 |
+
# Apply Classifier
|
96 |
+
if classify:
|
97 |
+
pred = apply_classifier(pred, modelc, img, im0s)
|
98 |
+
|
99 |
+
# Process detections
|
100 |
+
for i, det in enumerate(pred): # detections per image
|
101 |
+
if webcam: # batch_size >= 1
|
102 |
+
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
|
103 |
+
else:
|
104 |
+
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
|
105 |
+
|
106 |
+
p = Path(p) # to Path
|
107 |
+
save_path = str(save_dir / p.name) # img.jpg
|
108 |
+
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
|
109 |
+
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
|
110 |
+
if len(det):
|
111 |
+
# Rescale boxes from img_size to im0 size
|
112 |
+
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
|
113 |
+
|
114 |
+
# Print results
|
115 |
+
for c in det[:, -1].unique():
|
116 |
+
n = (det[:, -1] == c).sum() # detections per class
|
117 |
+
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
|
118 |
+
|
119 |
+
# Write results
|
120 |
+
for *xyxy, conf, cls in reversed(det):
|
121 |
+
if save_txt: # Write to file
|
122 |
+
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
|
123 |
+
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
|
124 |
+
with open(txt_path + '.txt', 'a') as f:
|
125 |
+
f.write(('%g ' * len(line)).rstrip() % line + '\n')
|
126 |
+
|
127 |
+
if save_img or view_img: # Add bbox to image
|
128 |
+
label = f'{names[int(cls)]} {conf:.2f}'
|
129 |
+
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
|
130 |
+
|
131 |
+
# Print time (inference + NMS)
|
132 |
+
print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
|
133 |
+
|
134 |
+
# Stream results
|
135 |
+
if view_img:
|
136 |
+
cv2.imshow(str(p), im0)
|
137 |
+
cv2.waitKey(1) # 1 millisecond
|
138 |
+
|
139 |
+
# Save results (image with detections)
|
140 |
+
if save_img:
|
141 |
+
if dataset.mode == 'image':
|
142 |
+
cv2.imwrite(save_path, im0)
|
143 |
+
print(f" The image with the result is saved in: {save_path}")
|
144 |
+
else: # 'video' or 'stream'
|
145 |
+
if vid_path != save_path: # new video
|
146 |
+
vid_path = save_path
|
147 |
+
if isinstance(vid_writer, cv2.VideoWriter):
|
148 |
+
vid_writer.release() # release previous video writer
|
149 |
+
if vid_cap: # video
|
150 |
+
fps = vid_cap.get(cv2.CAP_PROP_FPS)
|
151 |
+
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
152 |
+
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
153 |
+
else: # stream
|
154 |
+
fps, w, h = 30, im0.shape[1], im0.shape[0]
|
155 |
+
save_path += '.mp4'
|
156 |
+
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
|
157 |
+
vid_writer.write(im0)
|
158 |
+
|
159 |
+
if save_txt or save_img:
|
160 |
+
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
161 |
+
#print(f"Results saved to {save_dir}{s}")
|
162 |
+
|
163 |
+
print(f'Done. ({time.time() - t0:.3f}s)')
|
164 |
+
|
165 |
+
|
166 |
+
if __name__ == '__main__':
|
167 |
+
parser = argparse.ArgumentParser()
|
168 |
+
parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
|
169 |
+
parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
|
170 |
+
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
|
171 |
+
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
|
172 |
+
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
|
173 |
+
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
174 |
+
parser.add_argument('--view-img', action='store_true', help='display results')
|
175 |
+
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
176 |
+
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
|
177 |
+
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
|
178 |
+
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
|
179 |
+
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
|
180 |
+
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
181 |
+
parser.add_argument('--update', action='store_true', help='update all models')
|
182 |
+
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
|
183 |
+
parser.add_argument('--name', default='exp', help='save results to project/name')
|
184 |
+
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
185 |
+
parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
|
186 |
+
opt = parser.parse_args()
|
187 |
+
print(opt)
|
188 |
+
#check_requirements(exclude=('pycocotools', 'thop'))
|
189 |
+
|
190 |
+
with torch.no_grad():
|
191 |
+
if opt.update: # update all models (to fix SourceChangeWarning)
|
192 |
+
for opt.weights in ['yolov7.pt']:
|
193 |
+
detect()
|
194 |
+
strip_optimizer(opt.weights)
|
195 |
+
else:
|
196 |
+
detect()
|
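For quick reference, here is a minimal sketch of driving detect() from Python instead of the shell. detect() reads a module-level opt, so the sketch builds one with argparse.Namespace, mirroring the parser defaults above; the weights and source values are placeholders to swap for your own.

    import argparse
    import torch
    import detect  # the detect.py added in this commit

    # Mirror the argparse defaults above; detect() reads the module-level `opt`.
    detect.opt = argparse.Namespace(
        weights='yolov7.pt', source='inference/images', img_size=640,
        conf_thres=0.25, iou_thres=0.45, device='', view_img=False,
        save_txt=False, save_conf=False, nosave=False, classes=None,
        agnostic_nms=False, augment=False, update=False,
        project='runs/detect', name='exp', exist_ok=False, no_trace=True)

    with torch.no_grad():
        detect.detect()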
export.py
ADDED
@@ -0,0 +1,205 @@
import argparse
import sys
import time
import warnings

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile

import models
from models.experimental import attempt_load, End2End
from utils.activations import Hardswish, SiLU
from utils.general import set_logging, check_img_size
from utils.torch_utils import select_device
from utils.add_nms import RegisterNMS

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolor-csp-c.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
    parser.add_argument('--dynamic-batch', action='store_true', help='dynamic batch onnx for tensorrt and onnx-runtime')
    parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
    parser.add_argument('--end2end', action='store_true', help='export end2end onnx')
    parser.add_argument('--max-wh', type=int, default=None, help='None for tensorrt nms, int value for onnx-runtime nms')
    parser.add_argument('--topk-all', type=int, default=100, help='topk objects for every image')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='iou threshold for NMS')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='conf threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--simplify', action='store_true', help='simplify onnx model')
    parser.add_argument('--include-nms', action='store_true', help='register NMS plugin into the exported onnx')
    parser.add_argument('--fp16', action='store_true', help='CoreML FP16 half-precision export')
    parser.add_argument('--int8', action='store_true', help='CoreML INT8 quantization')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    opt.dynamic = opt.dynamic and not opt.end2end
    opt.dynamic = False if opt.dynamic_batch else opt.dynamic
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    device = select_device(opt.device)
    model = attempt_load(opt.weights, map_location=device)  # load FP32 model
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        # elif isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
    model.model[-1].export = not opt.grid  # set Detect() layer grid export
    y = model(img)  # dry run
    if opt.include_nms:
        model.model[-1].include_nms = True
        y = None

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img, strict=False)
        ts.save(f)
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        bits, mode = (8, 'kmeans_lut') if opt.int8 else (16, 'linear') if opt.fp16 else (32, None)
        if bits < 32:
            if sys.platform.lower() == 'darwin':  # quantization only supported on macOS
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
                    ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
            else:
                print('quantization only supported on macOS, skipping...')

        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        ct_model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # TorchScript-Lite export
    try:
        print('\nStarting TorchScript-Lite export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.ptl')  # filename
        tsl = torch.jit.trace(model, img, strict=False)
        tsl = optimize_for_mobile(tsl)
        tsl._save_for_lite_interpreter(f)
        print('TorchScript-Lite export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript-Lite export failure: %s' % e)

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        model.eval()
        output_names = ['classes', 'boxes'] if y is None else ['output']
        dynamic_axes = None
        if opt.dynamic:
            dynamic_axes = {'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
                            'output': {0: 'batch', 2: 'y', 3: 'x'}}
        if opt.dynamic_batch:
            opt.batch_size = 'batch'
            dynamic_axes = {
                'images': {
                    0: 'batch',
                }, }
            if opt.end2end and opt.max_wh is None:
                output_axes = {
                    'num_dets': {0: 'batch'},
                    'det_boxes': {0: 'batch'},
                    'det_scores': {0: 'batch'},
                    'det_classes': {0: 'batch'},
                }
            else:
                output_axes = {
                    'output': {0: 'batch'},
                }
            dynamic_axes.update(output_axes)
        if opt.grid:
            if opt.end2end:
                # parenthesized: '%' binds tighter than the conditional expression
                print('\nStarting export end2end onnx model for %s...' % ('TensorRT' if opt.max_wh is None else 'onnxruntime'))
                model = End2End(model, opt.topk_all, opt.iou_thres, opt.conf_thres, opt.max_wh, device, len(labels))
                if opt.end2end and opt.max_wh is None:
                    output_names = ['num_dets', 'det_boxes', 'det_scores', 'det_classes']
                    shapes = [opt.batch_size, 1, opt.batch_size, opt.topk_all, 4,
                              opt.batch_size, opt.topk_all, opt.batch_size, opt.topk_all]
                else:
                    output_names = ['output']
            else:
                model.model[-1].concat = True

        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model

        if opt.end2end and opt.max_wh is None:
            for i in onnx_model.graph.output:
                for j in i.type.tensor_type.shape.dim:
                    j.dim_param = str(shapes.pop(0))

        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model

        # # Metadata
        # d = {'stride': int(max(model.stride))}
        # for k, v in d.items():
        #     meta = onnx_model.metadata_props.add()
        #     meta.key, meta.value = k, str(v)
        # onnx.save(onnx_model, f)

        if opt.simplify:
            try:
                import onnxsim

                print('\nStarting to simplify ONNX...')
                onnx_model, check = onnxsim.simplify(onnx_model)
                assert check, 'assert check failed'
            except Exception as e:
                print(f'Simplifier failure: {e}')

        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        onnx.save(onnx_model, f)
        print('ONNX export success, saved as %s' % f)

        if opt.include_nms:
            print('Registering NMS plugin for ONNX...')
            mo = RegisterNMS(f)
            mo.register_nms()
            mo.save(f)

    except Exception as e:
        print('ONNX export failure: %s' % e)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
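As a sanity check after export, a minimal sketch of running the resulting ONNX file with onnxruntime (not a dependency of this repo). It assumes a static 1x3x640x640 export and the default 'images' input name passed to torch.onnx.export above; 'yolov7.onnx' is a placeholder path.

    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession('yolov7.onnx', providers=['CPUExecutionProvider'])
    x = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy 0-1 normalized image
    outputs = session.run(None, {'images': x})  # 'images' matches input_names above
    print([o.shape for o in outputs])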
hubconf.py
ADDED
@@ -0,0 +1,97 @@
"""PyTorch Hub models

Usage:
    import torch
    model = torch.hub.load('repo', 'model')
"""

from pathlib import Path

import torch

from models.yolo import Model
from utils.general import check_requirements, set_logging
from utils.google_utils import attempt_download
from utils.torch_utils import select_device

dependencies = ['torch', 'yaml']
check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop'))
set_logging()


def create(name, pretrained, channels, classes, autoshape):
    """Creates a specified model

    Arguments:
        name (str): name of model, i.e. 'yolov7'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes

    Returns:
        pytorch model
    """
    try:
        cfg = list((Path(__file__).parent / 'cfg').rglob(f'{name}.yaml'))[0]  # model.yaml path
        model = Model(cfg, channels, classes)
        if pretrained:
            fname = f'{name}.pt'  # checkpoint filename
            attempt_download(fname)  # download if not found locally
            ckpt = torch.load(fname, map_location=torch.device('cpu'))  # load
            msd = model.state_dict()  # model state_dict
            csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
            csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
            model.load_state_dict(csd, strict=False)  # load
            if len(ckpt['model'].names) == classes:
                model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
        device = select_device('0' if torch.cuda.is_available() else 'cpu')  # default to GPU if available
        return model.to(device)

    except Exception as e:
        s = 'Cache may be out of date, try force_reload=True.'
        raise Exception(s) from e


def custom(path_or_model='path/to/model.pt', autoshape=True):
    """custom model

    Arguments (3 options):
        path_or_model (str): 'path/to/model.pt'
        path_or_model (dict): torch.load('path/to/model.pt')
        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']

    Returns:
        pytorch model
    """
    model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
    if isinstance(model, dict):
        model = model['ema' if model.get('ema') else 'model']  # load model

    hub_model = Model(model.yaml).to(next(model.parameters()).device)  # create
    hub_model.load_state_dict(model.float().state_dict())  # load state_dict
    hub_model.names = model.names  # class names
    if autoshape:
        hub_model = hub_model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
    device = select_device('0' if torch.cuda.is_available() else 'cpu')  # default to GPU if available
    return hub_model.to(device)


def yolov7(pretrained=True, channels=3, classes=80, autoshape=True):
    return create('yolov7', pretrained, channels, classes, autoshape)


if __name__ == '__main__':
    model = custom(path_or_model='yolov7.pt')  # custom example
    # model = create(name='yolov7', pretrained=True, channels=3, classes=80, autoshape=True)  # pretrained example

    # Verify inference
    import numpy as np
    from PIL import Image

    imgs = [np.zeros((640, 480, 3))]

    results = model(imgs)  # batched inference
    results.print()
    results.save()
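For completeness, a minimal sketch of consuming these entry points through torch.hub from a local checkout ('.' is the directory containing hubconf.py); the image path is a hypothetical placeholder.

    import torch

    # 'custom' is the entry point defined above; source='local' skips GitHub.
    model = torch.hub.load('.', 'custom', path_or_model='yolov7.pt', source='local')
    results = model('inference/images/example.jpg')  # autoshaped: path/PIL/ndarray inputs
    results.print()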
requirements.txt
ADDED
@@ -0,0 +1,39 @@
# Usage: pip install -r requirements.txt

# Base ----------------------------------------
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
torch>=1.7.0,!=1.12.0
torchvision>=0.8.1,!=0.13.0
tqdm>=4.41.0
protobuf<4.21.3

# Logging -------------------------------------
tensorboard>=2.4.1
# wandb

# Plotting ------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# Export --------------------------------------
# coremltools>=4.1  # CoreML export
# onnx>=1.9.0  # ONNX export
# onnx-simplifier>=0.3.6  # ONNX simplifier
# scikit-learn==0.19.2  # CoreML quantization
# tensorflow>=2.4.1  # TFLite export
# tensorflowjs>=3.9.0  # TF.js export
# openvino-dev  # OpenVINO export

# Extras --------------------------------------
ipython  # interactive notebook
psutil  # system utilization
thop  # FLOPs computation
# albumentations>=1.0.3
# pycocotools>=2.0  # COCO mAP
# roboflow
test.py
ADDED
@@ -0,0 +1,353 @@
import argparse
import json
import os
from pathlib import Path
from threading import Thread

import numpy as np
import torch
import yaml
from tqdm import tqdm

from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
    box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized, TracedModel


def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''),  # for saving images
         save_txt=False,  # for auto-labelling
         save_hybrid=False,  # for hybrid auto-labelling
         save_conf=False,  # save auto-label confidences
         plots=True,
         wandb_logger=None,
         compute_loss=None,
         half_precision=True,
         trace=False,
         is_coco=False,
         v5_metric=False):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        gs = max(int(model.stride.max()), 32)  # grid size (max stride)
        imgsz = check_img_size(imgsz, s=gs)  # check img_size

        if trace:
            model = TracedModel(model, device, imgsz)

    # Half
    half = device.type != 'cpu' and half_precision  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    if isinstance(data, str):
        is_coco = data.endswith('coco.yaml')
        with open(data) as f:
            data = yaml.load(f, Loader=yaml.SafeLoader)
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs = 0
    if wandb_logger and wandb_logger.wandb:
        log_imgs = min(wandb_logger.log_imgs, 100)
    # Dataloader
    if not training:
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        task = opt.task if opt.task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
                                       prefix=colorstr(f'{task}: '))[0]

    if v5_metric:
        print("Testing with YOLOv5 AP metric...")

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out], targets)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging - Media Panel Plots
            if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0:  # Check for test operation
                if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
                    box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                                 "class_id": int(cls),
                                 "box_caption": "%s %.3f" % (names[cls], conf),
                                 "scores": {"class_score": conf},
                                 "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                    boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                    wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
            wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, v5_metric=v5_metric, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12i' * 2 + '%12.3g' * 4  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb_logger and wandb_logger.wandb:
            val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
            wandb_logger.log({"Validation": val_batches})
    if wandb_images:
        wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = './coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help="don't trace model")
    parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
    opt = parser.parse_args()
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data)  # check file
    print(opt)
    #check_requirements()

    if opt.task in ('train', 'val', 'test'):  # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt | opt.save_hybrid,
             save_hybrid=opt.save_hybrid,
             save_conf=opt.save_conf,
             trace=not opt.no_trace,
             v5_metric=opt.v5_metric
             )

    elif opt.task == 'speed':  # speed benchmarks
        for w in opt.weights:
            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, v5_metric=opt.v5_metric)

    elif opt.task == 'study':  # run over a range of settings and save/plot
        # python test.py --task study --data coco.yaml --iou 0.65 --weights yolov7.pt
        x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
        for w in opt.weights:
            f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
            y = []  # y axis
            for i in x:  # img-size
                print(f'\nRunning {f} point {i}...')
                r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False, v5_metric=opt.v5_metric)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_study_txt(x=x)  # plot
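Besides the CLI entry point, test() is designed to be called in-process with a live model and dataloader, which is how train.py below evaluates after each epoch. A rough sketch of that call, where trained_model and val_loader are hypothetical stand-ins for objects you already hold:

    from pathlib import Path
    import test as t

    # trained_model: an already-loaded nn.Module on its target device (hypothetical)
    # val_loader: a loader built with utils.datasets.create_dataloader (hypothetical)
    results, maps, times = t.test('data/coco.yaml',
                                  batch_size=32,
                                  imgsz=640,
                                  model=trained_model,
                                  dataloader=val_loader,
                                  save_dir=Path('runs/test/exp'),
                                  plots=False)
    mp, mr, map50, map_avg = results[:4]  # precision, recall, mAP@.5, mAP@.5:.95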
train.py
ADDED
@@ -0,0 +1,705 @@
1 |
+
import argparse
|
2 |
+
import logging
|
3 |
+
import math
|
4 |
+
import os
|
5 |
+
import random
|
6 |
+
import time
|
7 |
+
from copy import deepcopy
|
8 |
+
from pathlib import Path
|
9 |
+
from threading import Thread
|
10 |
+
|
11 |
+
import numpy as np
|
12 |
+
import torch.distributed as dist
|
13 |
+
import torch.nn as nn
|
14 |
+
import torch.nn.functional as F
|
15 |
+
import torch.optim as optim
|
16 |
+
import torch.optim.lr_scheduler as lr_scheduler
|
17 |
+
import torch.utils.data
|
18 |
+
import yaml
|
19 |
+
from torch.cuda import amp
|
20 |
+
from torch.nn.parallel import DistributedDataParallel as DDP
|
21 |
+
from torch.utils.tensorboard import SummaryWriter
|
22 |
+
from tqdm import tqdm
|
23 |
+
|
24 |
+
import test # import test.py to get mAP after each epoch
|
25 |
+
from models.experimental import attempt_load
|
26 |
+
from models.yolo import Model
|
27 |
+
from utils.autoanchor import check_anchors
|
28 |
+
from utils.datasets import create_dataloader
|
29 |
+
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
|
30 |
+
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
|
31 |
+
check_requirements, print_mutation, set_logging, one_cycle, colorstr
|
32 |
+
from utils.google_utils import attempt_download
|
33 |
+
from utils.loss import ComputeLoss, ComputeLossOTA
|
34 |
+
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
|
35 |
+
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
|
36 |
+
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
|
37 |
+
|
38 |
+
logger = logging.getLogger(__name__)
|
39 |
+
|
40 |
+
|
41 |
+
def train(hyp, opt, device, tb_writer=None):
|
42 |
+
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
|
43 |
+
save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \
|
44 |
+
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze
|
45 |
+
|
46 |
+
# Directories
|
47 |
+
wdir = save_dir / 'weights'
|
48 |
+
wdir.mkdir(parents=True, exist_ok=True) # make dir
|
49 |
+
last = wdir / 'last.pt'
|
50 |
+
best = wdir / 'best.pt'
|
51 |
+
results_file = save_dir / 'results.txt'
|
52 |
+
|
53 |
+
# Save run settings
|
54 |
+
with open(save_dir / 'hyp.yaml', 'w') as f:
|
55 |
+
yaml.dump(hyp, f, sort_keys=False)
|
56 |
+
with open(save_dir / 'opt.yaml', 'w') as f:
|
57 |
+
yaml.dump(vars(opt), f, sort_keys=False)
|
58 |
+
|
59 |
+
# Configure
|
60 |
+
plots = not opt.evolve # create plots
|
61 |
+
cuda = device.type != 'cpu'
|
62 |
+
init_seeds(2 + rank)
|
63 |
+
with open(opt.data) as f:
|
64 |
+
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
|
65 |
+
is_coco = opt.data.endswith('coco.yaml')
|
66 |
+
|
67 |
+
# Logging- Doing this before checking the dataset. Might update data_dict
|
68 |
+
loggers = {'wandb': None} # loggers dict
|
69 |
+
if rank in [-1, 0]:
|
70 |
+
opt.hyp = hyp # add hyperparameters
|
71 |
+
run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
|
72 |
+
wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
|
73 |
+
loggers['wandb'] = wandb_logger.wandb
|
74 |
+
data_dict = wandb_logger.data_dict
|
75 |
+
if wandb_logger.wandb:
|
76 |
+
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
|
77 |
+
|
78 |
+
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
|
79 |
+
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
|
80 |
+
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
|
81 |
+
|
82 |
+
# Model
|
83 |
+
pretrained = weights.endswith('.pt')
|
84 |
+
if pretrained:
|
85 |
+
with torch_distributed_zero_first(rank):
|
86 |
+
attempt_download(weights) # download if not found locally
|
87 |
+
ckpt = torch.load(weights, map_location=device) # load checkpoint
|
88 |
+
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
|
89 |
+
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
|
90 |
+
state_dict = ckpt['model'].float().state_dict() # to FP32
|
91 |
+
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
|
92 |
+
model.load_state_dict(state_dict, strict=False) # load
|
93 |
+
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
|
94 |
+
else:
|
95 |
+
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
|
96 |
+
with torch_distributed_zero_first(rank):
|
97 |
+
check_dataset(data_dict) # check
|
98 |
+
train_path = data_dict['train']
|
99 |
+
test_path = data_dict['val']
|
100 |
+
|
101 |
+
# Freeze
|
102 |
+
freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial)
|
103 |
+
for k, v in model.named_parameters():
|
104 |
+
v.requires_grad = True # train all layers
|
105 |
+
if any(x in k for x in freeze):
|
106 |
+
print('freezing %s' % k)
|
107 |
+
v.requires_grad = False
|
108 |
+
|
109 |
+
# Optimizer
|
110 |
+
nbs = 64 # nominal batch size
|
111 |
+
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
|
112 |
+
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
|
113 |
+
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
|
114 |
+
|
115 |
+
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
|
116 |
+
for k, v in model.named_modules():
|
117 |
+
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
|
118 |
+
pg2.append(v.bias) # biases
|
119 |
+
if isinstance(v, nn.BatchNorm2d):
|
120 |
+
pg0.append(v.weight) # no decay
|
121 |
+
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
|
122 |
+
pg1.append(v.weight) # apply decay
|
123 |
+
if hasattr(v, 'im'):
|
124 |
+
if hasattr(v.im, 'implicit'):
|
125 |
+
pg0.append(v.im.implicit)
|
126 |
+
else:
|
127 |
+
for iv in v.im:
|
128 |
+
pg0.append(iv.implicit)
|
129 |
+
if hasattr(v, 'imc'):
|
130 |
+
if hasattr(v.imc, 'implicit'):
|
131 |
+
pg0.append(v.imc.implicit)
|
132 |
+
else:
|
133 |
+
for iv in v.imc:
|
134 |
+
pg0.append(iv.implicit)
|
135 |
+
if hasattr(v, 'imb'):
|
136 |
+
if hasattr(v.imb, 'implicit'):
|
137 |
+
pg0.append(v.imb.implicit)
|
138 |
+
else:
|
139 |
+
for iv in v.imb:
|
140 |
+
pg0.append(iv.implicit)
|
141 |
+
if hasattr(v, 'imo'):
|
142 |
+
if hasattr(v.imo, 'implicit'):
|
143 |
+
pg0.append(v.imo.implicit)
|
144 |
+
else:
|
145 |
+
for iv in v.imo:
|
146 |
+
pg0.append(iv.implicit)
|
147 |
+
if hasattr(v, 'ia'):
|
148 |
+
if hasattr(v.ia, 'implicit'):
|
149 |
+
pg0.append(v.ia.implicit)
|
150 |
+
else:
|
151 |
+
for iv in v.ia:
|
152 |
+
pg0.append(iv.implicit)
|
153 |
+
if hasattr(v, 'attn'):
|
154 |
+
if hasattr(v.attn, 'logit_scale'):
|
155 |
+
pg0.append(v.attn.logit_scale)
|
156 |
+
if hasattr(v.attn, 'q_bias'):
|
157 |
+
pg0.append(v.attn.q_bias)
|
158 |
+
if hasattr(v.attn, 'v_bias'):
|
159 |
+
pg0.append(v.attn.v_bias)
|
160 |
+
if hasattr(v.attn, 'relative_position_bias_table'):
|
161 |
+
pg0.append(v.attn.relative_position_bias_table)
|
162 |
+
if hasattr(v, 'rbr_dense'):
|
163 |
+
if hasattr(v.rbr_dense, 'weight_rbr_origin'):
|
164 |
+
pg0.append(v.rbr_dense.weight_rbr_origin)
|
165 |
+
if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
|
166 |
+
pg0.append(v.rbr_dense.weight_rbr_avg_conv)
|
167 |
+
if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
|
168 |
+
pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
|
169 |
+
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
|
170 |
+
pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
|
171 |
+
if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
|
172 |
+
pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
|
173 |
+
if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
|
174 |
+
pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
|
175 |
+
if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
|
176 |
+
pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
|
177 |
+
if hasattr(v.rbr_dense, 'vector'):
|
178 |
+
pg0.append(v.rbr_dense.vector)
|
179 |
+
|
180 |
+
if opt.adam:
|
181 |
+
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
|
182 |
+
else:
|
183 |
+
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
|
184 |
+
|
185 |
+
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
|
186 |
+
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
|
187 |
+
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
|
188 |
+
del pg0, pg1, pg2
|
189 |
+
|
190 |
+
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
|
191 |
+
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
|
192 |
+
if opt.linear_lr:
|
193 |
+
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
|
194 |
+
else:
|
195 |
+
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
|
196 |
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
|
197 |
+
# plot_lr_scheduler(optimizer, scheduler, epochs)
|
198 |
+
|
199 |
+
# EMA
|
200 |
+
ema = ModelEMA(model) if rank in [-1, 0] else None
|
201 |
+
|
202 |
+
# Resume
|
203 |
+
start_epoch, best_fitness = 0, 0.0
|
204 |
+
if pretrained:
|
205 |
+
# Optimizer
|
206 |
+
if ckpt['optimizer'] is not None:
|
207 |
+
optimizer.load_state_dict(ckpt['optimizer'])
|
208 |
+
best_fitness = ckpt['best_fitness']
|
209 |
+
|
210 |
+
# EMA
|
211 |
+
if ema and ckpt.get('ema'):
|
212 |
+
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
|
213 |
+
ema.updates = ckpt['updates']
|
214 |
+
|
215 |
+
# Results
|
216 |
+
if ckpt.get('training_results') is not None:
|
217 |
+
results_file.write_text(ckpt['training_results']) # write results.txt
|
218 |
+
|
219 |
+
# Epochs
|
220 |
+
start_epoch = ckpt['epoch'] + 1
|
221 |
+
if opt.resume:
|
222 |
+
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
|
223 |
+
if epochs < start_epoch:
|
224 |
+
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
|
225 |
+
(weights, ckpt['epoch'], epochs))
|
226 |
+
epochs += ckpt['epoch'] # finetune additional epochs
|
227 |
+
|
228 |
+
del ckpt, state_dict
|
229 |
+
|
230 |
+
# Image sizes
|
231 |
+
gs = max(int(model.stride.max()), 32) # grid size (max stride)
|
232 |
+
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
|
233 |
+
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
|
234 |
+
|
235 |
+
# DP mode
|
236 |
+
if cuda and rank == -1 and torch.cuda.device_count() > 1:
|
237 |
+
model = torch.nn.DataParallel(model)
|
238 |
+
|
239 |
+
# SyncBatchNorm
|
240 |
+
if opt.sync_bn and cuda and rank != -1:
|
241 |
+
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
|
242 |
+
logger.info('Using SyncBatchNorm()')
|
243 |
+
|
244 |
+
# Trainloader
|
245 |
+
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
|
246 |
+
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
|
247 |
+
world_size=opt.world_size, workers=opt.workers,
|
248 |
+
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
|
249 |
+
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
|
250 |
+
nb = len(dataloader) # number of batches
|
251 |
+
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
|
252 |
+
|
253 |
+
# Process 0
|
254 |
+
if rank in [-1, 0]:
|
255 |
+
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
|
256 |
+
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
|
257 |
+
world_size=opt.world_size, workers=opt.workers,
|
258 |
+
pad=0.5, prefix=colorstr('val: '))[0]
|
259 |
+
|
260 |
+
if not opt.resume:
|
261 |
+
labels = np.concatenate(dataset.labels, 0)
|
262 |
+
c = torch.tensor(labels[:, 0]) # classes
|
263 |
+
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
|
264 |
+
# model._initialize_biases(cf.to(device))
|
265 |
+
if plots:
|
266 |
+
#plot_labels(labels, names, save_dir, loggers)
|
267 |
+
if tb_writer:
|
268 |
+
tb_writer.add_histogram('classes', c, 0)
|
269 |
+
|
270 |
+
# Anchors
|
271 |
+
if not opt.noautoanchor:
|
272 |
+
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
|
273 |
+
model.half().float() # pre-reduce anchor precision
|
274 |
+
|
    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

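As a concrete example of the gain scaling above: for a single-class model (nc=1) trained at imgsz=1280 with nl=3 detection layers, the effective gains work out as follows (plain arithmetic; the starting values box=0.05, cls=0.3, obj=0.7 are assumed from hyp.scratch.p5.yaml):

nc, nl, imgsz = 1, 3, 1280
box, cls, obj = 0.05, 0.3, 0.7       # assumed hyp.scratch.p5.yaml defaults
box *= 3. / nl                       # 0.05    (unchanged for nl=3)
cls *= nc / 80. * 3. / nl            # 0.00375 (down-weighted for 1 class)
obj *= (imgsz / 640) ** 2 * 3. / nl  # 2.8     (4x for the 2x image size)
print(box, cls, obj)
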
    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss_ota = ComputeLossOTA(model)  # init loss class
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    torch.save(model, wdir / 'init.pt')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

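The scaler created above drives PyTorch's mixed-precision recipe; stripped of everything YOLO-specific it is the standard autocast/scale/step/update cycle (a generic sketch, not code from this repo):

import torch
from torch.cuda import amp

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.nn.Linear(10, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = amp.GradScaler(enabled=device == 'cuda')

x, y = torch.randn(4, 10, device=device), torch.randn(4, 1, device=device)
with amp.autocast(enabled=device == 'cuda'):
    loss = torch.nn.functional.mse_loss(model(x), y)  # forward runs in fp16 where safe
scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
scaler.step(optimizer)         # unscales gradients, then calls optimizer.step()
scaler.update()                # adapts the scale factor for the next iteration
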
        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

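The image-weights path resamples the dataset each epoch so images containing poorly-performing classes are drawn more often. A hedged, self-contained sketch of the weighting idea (labels_to_image_weights is assumed to sum per-class weights over each image's labels):

import random
import numpy as np

nc = 3
maps = np.array([0.9, 0.2, 0.6])            # per-class mAP from the last evaluation
class_weights = (1 - maps) ** 2 / nc        # weak classes get large weights
image_classes = [[0], [1, 1], [2], [0, 1]]  # class ids present in each image
image_weights = [sum(class_weights[c] for c in ids) for ids in image_classes]
indices = random.choices(range(4), weights=image_weights, k=4)  # one epoch's sample
print(image_weights, indices)               # image 1 (class 1 twice) dominates
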
        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

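Concretely, with nw = 1000 warmup iterations, warmup_bias_lr = 0.1 and a target learning rate of 0.01, the bias group decays linearly while the other groups ramp up; np.interp makes the schedule explicit (the numbers below assume those defaults):

import numpy as np

nw, xi = 1000, [0, 1000]
for ni in (0, 500, 1000):
    bias_lr = np.interp(ni, xi, [0.1, 0.01])   # bias group falls 0.1 -> 0.01
    other_lr = np.interp(ni, xi, [0.0, 0.01])  # weight groups rise 0.0 -> 0.01
    print(ni, round(float(bias_lr), 4), round(float(other_lr), 4))
# 0: 0.1 / 0.0    500: 0.055 / 0.005    1000: 0.01 / 0.01
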
            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                if 'loss_ota' not in hyp or hyp['loss_ota'] == 1:
                    loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs)  # loss scaled by batch_size
                else:
                    loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

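The multi-scale step picks a random size in [0.5x, 1.5x] of the nominal resolution, snapped to a stride multiple, and resizes the whole batch. A standalone sketch of the same resize (the int() casts are added here for recent Python versions, where random.randrange rejects floats):

import math
import random
import torch
import torch.nn.functional as F

imgsz, gs = 640, 32
imgs = torch.rand(2, 3, 640, 640)  # dummy batch
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs
sf = sz / max(imgs.shape[2:])
if sf != 1:
    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]
    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
print(imgs.shape)  # spatial dims are now a random multiple of 32 in [320, 960]
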
            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

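The accumulate counter computed during warmup emulates the nominal batch size nbs = 64: gradients from several small batches are summed before one optimizer step, so the effective batch approaches 64 regardless of GPU memory. A quick check of the arithmetic:

nbs = 64  # nominal batch size
for total_batch_size in (8, 16, 64):
    accumulate = max(round(nbs / total_batch_size), 1)
    print(total_batch_size, accumulate, total_batch_size * accumulate)
# 8 -> accumulate 8 (effective 64), 16 -> 4 (64), 64 -> 1 (64)
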
            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 10:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

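The scheduler stepped here is a LambdaLR built earlier from one_cycle(1, hyp['lrf'], epochs), i.e. a cosine ramp of the lr multiplier from 1 down to lrf over the run. A hedged sketch of that lambda and how LambdaLR consumes it (the formula mirrors utils.general.one_cycle):

import math
import torch

def one_cycle(y1=1.0, y2=0.1, steps=100):
    # cosine interpolation from y1 to y2 over `steps` epochs
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
lf = one_cycle(1, 0.1, steps=300)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
print(lf(0), lf(150), lf(300))  # multiplier on lr0: 1.0 -> 0.55 -> 0.1
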
        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                wandb_logger.current_epoch = epoch + 1
                results, maps, times = test.test(data_dict,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 wandb_logger=wandb_logger,
                                                 compute_loss=compute_loss,
                                                 is_coco=is_coco,
                                                 v5_metric=opt.v5_metric)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb_logger.wandb:
                    wandb_logger.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            wandb_logger.end_epoch(best_result=best_fitness == fi)

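fitness() reduces the seven-value results tuple to one scalar for model selection; in the YOLO family it is a weighted sum dominated by mAP@.5:.95. The weights [0.0, 0.0, 0.1, 0.9] over [P, R, mAP@.5, mAP@.5:.95] below are stated as an assumption mirroring utils.metrics.fitness:

import numpy as np

def fitness(x):
    # x: (n, 7+) rows of [P, R, mAP@.5, mAP@.5:.95, ...losses]
    w = [0.0, 0.0, 0.1, 0.9]  # assumed weights, as in utils.metrics.fitness
    return (x[:, :4] * w).sum(1)

results = np.array([[0.7, 0.6, 0.65, 0.45, 0.03, 0.02, 0.01]])
print(fitness(results))  # 0.1*0.65 + 0.9*0.45 = 0.47
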
            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                ckpt = {'epoch': epoch,
                        'best_fitness': best_fitness,
                        'training_results': results_file.read_text(),
                        'model': deepcopy(model.module if is_parallel(model) else model).half(),
                        'ema': deepcopy(ema.ema).half(),
                        'updates': ema.updates,
                        'optimizer': optimizer.state_dict(),
                        'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if (best_fitness == fi) and (epoch >= 200):
                    torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch))
                if epoch == 0:
                    torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
                elif ((epoch + 1) % 25) == 0:
                    torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
                elif epoch >= (epochs - 5):
                    torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
                if wandb_logger.wandb:
                    if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                        wandb_logger.log_model(
                            last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})
        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for m in (last, best) if best.exists() else (last,):  # speed, mAP tests ((last,) keeps this iterable)
                results, _, _ = test.test(opt.data,
                                          batch_size=batch_size * 2,
                                          imgsz=imgsz_test,
                                          conf_thres=0.001,
                                          iou_thres=0.7,
                                          model=attempt_load(m, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=True,
                                          plots=False,
                                          is_coco=is_coco,
                                          v5_metric=opt.v5_metric)

        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
        if wandb_logger.wandb and not opt.evolve:  # Log the stripped model
            wandb_logger.wandb.log_artifact(str(final), type='model',
                                            name='run_' + wandb_logger.wandb_run.id + '_model',
                                            aliases=['last', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results


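strip_optimizer slims last.pt/best.pt for distribution. A hedged sketch of what such stripping typically does in this codebase (drop optimizer and training-only state, prefer the EMA weights, keep fp16 parameters; treat the details as an approximation of utils.general.strip_optimizer rather than a copy of it):

import torch

def strip_optimizer_sketch(path):
    ckpt = torch.load(path, map_location='cpu')
    ckpt['model'] = (ckpt.get('ema') or ckpt['model']).half()  # prefer EMA weights
    for k in ('optimizer', 'training_results', 'ema', 'updates', 'wandb_id'):
        ckpt[k] = None          # drop training-only state
    ckpt['epoch'] = -1
    for p in ckpt['model'].parameters():
        p.requires_grad = False
    torch.save(ckpt, path)      # the file shrinks to roughly half its size
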
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--entity', default=None, help='W&B entity')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--linear-lr', action='store_true', help='linear LR')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
    parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone of yolov7=50, first3=0 1 2')
    parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
    opt = parser.parse_args()

    # Set DDP variables
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    #if opt.global_rank in [-1, 0]:
    #    check_git_status()
    #    check_requirements()

    # Resume
    wandb_run = check_wandb_resume(opt)
    if opt.resume and not wandb_run:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run

    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size

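Under a distributed launcher each process receives its rank through environment variables, and --batch-size is the global batch, split evenly across processes; the launch command in the comment below is the conventional one for scripts of this shape, stated as an assumption:

# e.g. python -m torch.distributed.launch --nproc_per_node 4 train.py --batch-size 128 ...
world_size, total_batch_size = 4, 128
assert total_batch_size % world_size == 0, 'global batch must divide evenly'
per_gpu = total_batch_size // world_size
print(per_gpu)  # 32 images per process per step
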
    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps

    # Train
    logger.info(opt)
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            prefix = colorstr('tensorboard: ')
            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
                'mixup': (1, 0.0, 1.0),  # image mixup (probability)
                'copy_paste': (1, 0.0, 1.0),  # segment copy-paste (probability)
                'paste_in': (1, 0.0, 1.0)}  # segment paste-in (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
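The mutation rule above multiplies each gain-enabled hyperparameter by a factor clipped to [0.3, 3.0], so no single generation can move a value by more than about 3x in either direction. A deterministic illustration of that clipping (numpy's generator API is used here instead of the legacy np.random calls):

import numpy as np

rng = np.random.default_rng(0)
g, mp, s, ng = np.ones(26), 0.8, 0.2, 26  # all gains 1, as for most keys above
v = (g * (rng.random(ng) < mp) * rng.standard_normal(ng) * rng.random() * s + 1).clip(0.3, 3.0)
print(v.min(), v.max())  # every factor stays within [0.3, 3.0]
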
train_aux.py
ADDED
@@ -0,0 +1,699 @@
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread

import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data  # also binds the top-level `torch` name used throughout this file
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import test  # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss, ComputeLossAuxOTA
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume

logger = logging.getLogger(__name__)

def train(hyp, opt, device, tb_writer=None):
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    is_coco = opt.data.endswith('coco.yaml')

    # Logging - do this before checking the dataset; it might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

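Pretrained weights are transferred through intersect_dicts, which keeps only tensors whose name and shape match the freshly built model, so a changed class count silently drops the old detection head. A hedged sketch of that filter (mirroring utils.torch_utils.intersect_dicts, taken here as an assumption):

import torch

def intersect_dicts(da, db, exclude=()):
    # keep tensors whose key and shape match the target dict and are not excluded
    return {k: v for k, v in da.items()
            if k in db and v.shape == db[k].shape and not any(x in k for x in exclude)}

src = {'backbone.w': torch.zeros(8), 'head.w': torch.zeros(80)}
dst = {'backbone.w': torch.zeros(8), 'head.w': torch.zeros(2)}
print(list(intersect_dicts(src, dst)))  # ['backbone.w'] -- the mismatched head is dropped
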
    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay
        if hasattr(v, 'im'):
            if hasattr(v.im, 'implicit'):
                pg0.append(v.im.implicit)
            else:
                for iv in v.im:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imc'):
            if hasattr(v.imc, 'implicit'):
                pg0.append(v.imc.implicit)
            else:
                for iv in v.imc:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imb'):
            if hasattr(v.imb, 'implicit'):
                pg0.append(v.imb.implicit)
            else:
                for iv in v.imb:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imo'):
            if hasattr(v.imo, 'implicit'):
                pg0.append(v.imo.implicit)
            else:
                for iv in v.imo:
                    pg0.append(iv.implicit)
        if hasattr(v, 'ia'):
            if hasattr(v.ia, 'implicit'):
                pg0.append(v.ia.implicit)
            else:
                for iv in v.ia:
                    pg0.append(iv.implicit)
        if hasattr(v, 'attn'):
            if hasattr(v.attn, 'logit_scale'):
                pg0.append(v.attn.logit_scale)
            if hasattr(v.attn, 'q_bias'):
                pg0.append(v.attn.q_bias)
            if hasattr(v.attn, 'v_bias'):
                pg0.append(v.attn.v_bias)
            if hasattr(v.attn, 'relative_position_bias_table'):
                pg0.append(v.attn.relative_position_bias_table)
        if hasattr(v, 'rbr_dense'):
            if hasattr(v.rbr_dense, 'weight_rbr_origin'):
                pg0.append(v.rbr_dense.weight_rbr_origin)
            if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
                pg0.append(v.rbr_dense.weight_rbr_avg_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
                pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
            if hasattr(v.rbr_dense, 'vector'):
                pg0.append(v.rbr_dense.vector)

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

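The three groups differ only in weight decay: pg1 (conv/linear weights) gets the scaled decay, while pg0 (BatchNorm weights plus YOLOv7's implicit/RepConv/attention parameters) and pg2 (biases) get none. The same pattern in miniature:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
decay, no_decay, biases = [], [], []
for m in model.modules():
    if hasattr(m, 'bias') and isinstance(m.bias, nn.Parameter):
        biases.append(m.bias)
    if isinstance(m, nn.BatchNorm2d):
        no_decay.append(m.weight)  # BN scale: never decayed
    elif hasattr(m, 'weight') and isinstance(m.weight, nn.Parameter):
        decay.append(m.weight)     # conv weights: decayed

optimizer = torch.optim.SGD(no_decay, lr=0.01, momentum=0.937, nesterov=True)
optimizer.add_param_group({'params': decay, 'weight_decay': 5e-4})
optimizer.add_param_group({'params': biases})
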
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

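ModelEMA keeps a shadow copy of the weights updated as an exponential moving average; evaluation and checkpoints use the smoothed copy. A hedged sketch of the update rule (the warmup-ramped decay mirrors utils.torch_utils.ModelEMA, taken here as an assumption):

import math
from copy import deepcopy
import torch

class TinyEMA:
    def __init__(self, model, decay=0.9999):
        self.ema = deepcopy(model).eval()  # shadow copy, never trained directly
        self.updates = 0
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # ramp in early steps
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        self.updates += 1
        d = self.decay(self.updates)
        with torch.no_grad():
            msd = model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v.mul_(d).add_(msd[k].detach(), alpha=1 - d)  # v = d*v + (1-d)*w
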
202 |
+
# Resume
|
203 |
+
start_epoch, best_fitness = 0, 0.0
|
204 |
+
if pretrained:
|
205 |
+
# Optimizer
|
206 |
+
if ckpt['optimizer'] is not None:
|
207 |
+
optimizer.load_state_dict(ckpt['optimizer'])
|
208 |
+
best_fitness = ckpt['best_fitness']
|
209 |
+
|
210 |
+
# EMA
|
211 |
+
if ema and ckpt.get('ema'):
|
212 |
+
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
|
213 |
+
ema.updates = ckpt['updates']
|
214 |
+
|
215 |
+
# Results
|
216 |
+
if ckpt.get('training_results') is not None:
|
217 |
+
results_file.write_text(ckpt['training_results']) # write results.txt
|
218 |
+
|
219 |
+
# Epochs
|
220 |
+
start_epoch = ckpt['epoch'] + 1
|
221 |
+
if opt.resume:
|
222 |
+
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
|
223 |
+
if epochs < start_epoch:
|
224 |
+
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
|
225 |
+
(weights, ckpt['epoch'], epochs))
|
226 |
+
epochs += ckpt['epoch'] # finetune additional epochs
|
227 |
+
|
228 |
+
del ckpt, state_dict
|
229 |
+
|
230 |
+
# Image sizes
|
231 |
+
gs = max(int(model.stride.max()), 32) # grid size (max stride)
|
232 |
+
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
|
233 |
+
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
|
234 |
+
|
235 |
+
# DP mode
|
236 |
+
if cuda and rank == -1 and torch.cuda.device_count() > 1:
|
237 |
+
model = torch.nn.DataParallel(model)
|
238 |
+
|
239 |
+
# SyncBatchNorm
|
240 |
+
if opt.sync_bn and cuda and rank != -1:
|
241 |
+
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
|
242 |
+
logger.info('Using SyncBatchNorm()')
|
243 |
+
|
244 |
+
# Trainloader
|
245 |
+
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
|
246 |
+
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
|
247 |
+
world_size=opt.world_size, workers=opt.workers,
|
248 |
+
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
|
249 |
+
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
|
250 |
+
nb = len(dataloader) # number of batches
|
251 |
+
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
|
252 |
+
|
253 |
+
# Process 0
|
254 |
+
if rank in [-1, 0]:
|
255 |
+
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
|
256 |
+
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
|
257 |
+
world_size=opt.world_size, workers=opt.workers,
|
258 |
+
pad=0.5, prefix=colorstr('val: '))[0]
|
259 |
+
|
260 |
+
if not opt.resume:
|
261 |
+
labels = np.concatenate(dataset.labels, 0)
|
262 |
+
c = torch.tensor(labels[:, 0]) # classes
|
263 |
+
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
|
264 |
+
# model._initialize_biases(cf.to(device))
|
265 |
+
if plots:
|
266 |
+
#plot_labels(labels, names, save_dir, loggers)
|
267 |
+
if tb_writer:
|
268 |
+
tb_writer.add_histogram('classes', c, 0)
|
269 |
+
|
270 |
+
# Anchors
|
271 |
+
if not opt.noautoanchor:
|
272 |
+
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
|
273 |
+
model.half().float() # pre-reduce anchor precision
|
274 |
+
|
275 |
+
# DDP mode
|
276 |
+
if cuda and rank != -1:
|
277 |
+
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
|
278 |
+
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
|
279 |
+
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
|
280 |
+
|
281 |
+
# Model parameters
|
282 |
+
hyp['box'] *= 3. / nl # scale to layers
|
283 |
+
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
|
284 |
+
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
|
285 |
+
hyp['label_smoothing'] = opt.label_smoothing
|
286 |
+
model.nc = nc # attach number of classes to model
|
287 |
+
model.hyp = hyp # attach hyperparameters to model
|
288 |
+
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
|
289 |
+
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
|
290 |
+
model.names = names
|
291 |
+
|
292 |
+
# Start training
|
293 |
+
t0 = time.time()
|
294 |
+
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
|
295 |
+
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
|
296 |
+
maps = np.zeros(nc) # mAP per class
|
297 |
+
results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls)
|
298 |
+
scheduler.last_epoch = start_epoch - 1 # do not move
|
299 |
+
scaler = amp.GradScaler(enabled=cuda)
|
300 |
+
compute_loss_ota = ComputeLossAuxOTA(model) # init loss class
|
301 |
+
compute_loss = ComputeLoss(model) # init loss class
|
302 |
+
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
|
303 |
+
f'Using {dataloader.num_workers} dataloader workers\n'
|
304 |
+
f'Logging results to {save_dir}\n'
|
305 |
+
f'Starting training for {epochs} epochs...')
|
306 |
+
torch.save(model, wdir / 'init.pt')
|
307 |
+
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
|
308 |
+
model.train()
|
309 |
+
|
310 |
+
# Update image weights (optional)
|
311 |
+
if opt.image_weights:
|
312 |
+
# Generate indices
|
313 |
+
if rank in [-1, 0]:
|
314 |
+
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
|
315 |
+
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
|
316 |
+
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
|
317 |
+
# Broadcast if DDP
|
318 |
+
if rank != -1:
|
319 |
+
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
|
320 |
+
dist.broadcast(indices, 0)
|
321 |
+
if rank != 0:
|
322 |
+
dataset.indices = indices.cpu().numpy()
|
323 |
+
|
324 |
+
# Update mosaic border
|
325 |
+
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
|
326 |
+
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
|
327 |
+
|
328 |
+
mloss = torch.zeros(4, device=device) # mean losses
|
329 |
+
if rank != -1:
|
330 |
+
dataloader.sampler.set_epoch(epoch)
|
331 |
+
pbar = enumerate(dataloader)
|
332 |
+
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
|
333 |
+
if rank in [-1, 0]:
|
334 |
+
pbar = tqdm(pbar, total=nb) # progress bar
|
335 |
+
optimizer.zero_grad()
|
336 |
+
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
|
337 |
+
ni = i + nb * epoch # number integrated batches (since train start)
|
338 |
+
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
|
339 |
+
|
340 |
+
# Warmup
|
341 |
+
if ni <= nw:
|
342 |
+
xi = [0, nw] # x interp
|
343 |
+
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
|
344 |
+
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
|
345 |
+
for j, x in enumerate(optimizer.param_groups):
|
346 |
+
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
|
347 |
+
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
|
348 |
+
if 'momentum' in x:
|
349 |
+
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
|
350 |
+
|
351 |
+
# Multi-scale
|
352 |
+
if opt.multi_scale:
|
353 |
+
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
|
354 |
+
sf = sz / max(imgs.shape[2:]) # scale factor
|
355 |
+
if sf != 1:
|
356 |
+
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
|
357 |
+
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
|
358 |
+
|
359 |
+
# Forward
|
360 |
+
with amp.autocast(enabled=cuda):
|
361 |
+
pred = model(imgs) # forward
|
362 |
+
loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs) # loss scaled by batch_size
|
363 |
+
if rank != -1:
|
364 |
+
loss *= opt.world_size # gradient averaged between devices in DDP mode
|
365 |
+
if opt.quad:
|
366 |
+
loss *= 4.
|
367 |
+
|
368 |
+
# Backward
|
369 |
+
scaler.scale(loss).backward()
|
370 |
+
|
371 |
+
# Optimize
|
372 |
+
if ni % accumulate == 0:
|
373 |
+
scaler.step(optimizer) # optimizer.step
|
374 |
+
scaler.update()
|
375 |
+
optimizer.zero_grad()
|
376 |
+
if ema:
|
377 |
+
ema.update(model)
|
378 |
+
|
379 |
+
# Print
|
380 |
+
if rank in [-1, 0]:
|
381 |
+
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
|
382 |
+
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
|
383 |
+
s = ('%10s' * 2 + '%10.4g' * 6) % (
|
384 |
+
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
|
385 |
+
pbar.set_description(s)
|
386 |
+
|
387 |
+
# Plot
|
388 |
+
if plots and ni < 10:
|
389 |
+
f = save_dir / f'train_batch{ni}.jpg' # filename
|
390 |
+
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
|
391 |
+
# if tb_writer:
|
392 |
+
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
|
393 |
+
# tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph
|
394 |
+
elif plots and ni == 10 and wandb_logger.wandb:
|
395 |
+
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
|
396 |
+
save_dir.glob('train*.jpg') if x.exists()]})
|
397 |
+
|
398 |
+
# end batch ------------------------------------------------------------------------------------------------
|
399 |
+
# end epoch ----------------------------------------------------------------------------------------------------
|
400 |
+
|
401 |
+
# Scheduler
|
402 |
+
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
|
403 |
+
scheduler.step()
|
404 |
+
|
405 |
+
# DDP process 0 or single-GPU
|
406 |
+
if rank in [-1, 0]:
|
407 |
+
# mAP
|
408 |
+
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
|
409 |
+
final_epoch = epoch + 1 == epochs
|
410 |
+
if not opt.notest or final_epoch: # Calculate mAP
|
411 |
+
wandb_logger.current_epoch = epoch + 1
|
412 |
+
results, maps, times = test.test(data_dict,
|
413 |
+
batch_size=batch_size * 2,
|
414 |
+
imgsz=imgsz_test,
|
415 |
+
model=ema.ema,
|
416 |
+
single_cls=opt.single_cls,
|
417 |
+
dataloader=testloader,
|
418 |
+
save_dir=save_dir,
|
419 |
+
verbose=nc < 50 and final_epoch,
|
420 |
+
plots=plots and final_epoch,
|
421 |
+
wandb_logger=wandb_logger,
|
422 |
+
compute_loss=compute_loss,
|
423 |
+
is_coco=is_coco,
|
424 |
+
v5_metric=opt.v5_metric)
|
425 |
+
|
426 |
+
# Write
|
427 |
+
with open(results_file, 'a') as f:
|
428 |
+
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
|
429 |
+
if len(opt.name) and opt.bucket:
|
430 |
+
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
|
431 |
+
|
432 |
+
# Log
|
433 |
+
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
|
434 |
+
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
|
435 |
+
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
|
436 |
+
'x/lr0', 'x/lr1', 'x/lr2'] # params
|
437 |
+
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
|
438 |
+
if tb_writer:
|
439 |
+
tb_writer.add_scalar(tag, x, epoch) # tensorboard
|
440 |
+
if wandb_logger.wandb:
|
441 |
+
wandb_logger.log({tag: x}) # W&B
|
442 |
+
|
443 |
+
# Update best mAP
|
444 |
+
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, [email protected], [email protected]]
|
445 |
+
if fi > best_fitness:
|
446 |
+
best_fitness = fi
|
447 |
+
wandb_logger.end_epoch(best_result=best_fitness == fi)
|
448 |
+
|
449 |
+
# Save model
|
450 |
+
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
|
451 |
+
ckpt = {'epoch': epoch,
|
452 |
+
'best_fitness': best_fitness,
|
453 |
+
'training_results': results_file.read_text(),
|
454 |
+
'model': deepcopy(model.module if is_parallel(model) else model).half(),
|
455 |
+
'ema': deepcopy(ema.ema).half(),
|
456 |
+
'updates': ema.updates,
|
457 |
+
'optimizer': optimizer.state_dict(),
|
458 |
+
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
|
459 |
+
|
460 |
+
# Save last, best and delete
|
461 |
+
torch.save(ckpt, last)
|
462 |
+
if best_fitness == fi:
|
463 |
+
torch.save(ckpt, best)
|
464 |
+
if (best_fitness == fi) and (epoch >= 200):
|
465 |
+
torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch))
|
466 |
+
if epoch == 0:
|
467 |
+
torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
|
468 |
+
elif ((epoch+1) % 25) == 0:
|
469 |
+
torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
|
470 |
+
elif epoch >= (epochs-5):
|
471 |
+
torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
|
472 |
+
if wandb_logger.wandb:
|
473 |
+
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
|
474 |
+
wandb_logger.log_model(
|
475 |
+
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
|
476 |
+
del ckpt
|
477 |
+
|
478 |
+
# end epoch ----------------------------------------------------------------------------------------------------
|
479 |
+
# end training
|
480 |
+
if rank in [-1, 0]:
|
481 |
+
# Plots
|
482 |
+
if plots:
|
483 |
+
plot_results(save_dir=save_dir) # save as results.png
|
484 |
+
if wandb_logger.wandb:
|
485 |
+
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
|
486 |
+
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
|
487 |
+
if (save_dir / f).exists()]})
|
488 |
+
# Test best.pt
|
489 |
+
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
|
490 |
+
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
|
491 |
+
for m in (last, best) if best.exists() else (last): # speed, mAP tests
|
492 |
+
results, _, _ = test.test(opt.data,
|
493 |
+
batch_size=batch_size * 2,
|
494 |
+
imgsz=imgsz_test,
|
495 |
+
conf_thres=0.001,
|
496 |
+
iou_thres=0.7,
|
497 |
+
model=attempt_load(m, device).half(),
|
498 |
+
single_cls=opt.single_cls,
|
499 |
+
dataloader=testloader,
|
500 |
+
save_dir=save_dir,
|
501 |
+
save_json=True,
|
502 |
+
plots=False,
|
503 |
+
is_coco=is_coco,
|
504 |
+
v5_metric=opt.v5_metric)
|
505 |
+
|
506 |
+
# Strip optimizers
|
507 |
+
final = best if best.exists() else last # final model
|
508 |
+
for f in last, best:
|
509 |
+
if f.exists():
|
510 |
+
strip_optimizer(f) # strip optimizers
|
511 |
+
if opt.bucket:
|
512 |
+
os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
|
513 |
+
if wandb_logger.wandb and not opt.evolve: # Log the stripped model
|
514 |
+
wandb_logger.wandb.log_artifact(str(final), type='model',
|
515 |
+
name='run_' + wandb_logger.wandb_run.id + '_model',
|
516 |
+
aliases=['last', 'best', 'stripped'])
|
517 |
+
wandb_logger.finish_run()
|
518 |
+
else:
|
519 |
+
dist.destroy_process_group()
|
520 |
+
torch.cuda.empty_cache()
|
521 |
+
return results
|
522 |
+
|
523 |
+
|
524 |
+
if __name__ == '__main__':
|
525 |
+
parser = argparse.ArgumentParser()
|
526 |
+
parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path')
|
527 |
+
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
|
528 |
+
parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
|
529 |
+
parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path')
|
530 |
+
parser.add_argument('--epochs', type=int, default=300)
|
531 |
+
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
|
532 |
+
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
|
533 |
+
parser.add_argument('--rect', action='store_true', help='rectangular training')
|
534 |
+
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
|
535 |
+
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
536 |
+
parser.add_argument('--notest', action='store_true', help='only test final epoch')
|
537 |
+
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
|
538 |
+
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
|
539 |
+
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
|
540 |
+
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
|
541 |
+
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
|
542 |
+
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
543 |
+
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
|
544 |
+
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
|
545 |
+
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
|
546 |
+
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
|
547 |
+
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
|
548 |
+
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
|
549 |
+
parser.add_argument('--project', default='runs/train', help='save to project/name')
|
550 |
+
parser.add_argument('--entity', default=None, help='W&B entity')
|
551 |
+
parser.add_argument('--name', default='exp', help='save to project/name')
|
552 |
+
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
553 |
+
parser.add_argument('--quad', action='store_true', help='quad dataloader')
|
554 |
+
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
|
555 |
+
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
|
556 |
+
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
|
557 |
+
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
|
558 |
+
parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
|
559 |
+
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
|
560 |
+
parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
|
561 |
+
opt = parser.parse_args()
|
562 |
+
|
563 |
+
# Set DDP variables
|
564 |
+
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
|
565 |
+
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
|
566 |
+
set_logging(opt.global_rank)
|
567 |
+
#if opt.global_rank in [-1, 0]:
|
568 |
+
# check_git_status()
|
569 |
+
# check_requirements()
|
570 |
+
|
571 |
+
+    # Resume
+    wandb_run = check_wandb_resume(opt)
+    if opt.resume and not wandb_run:  # resume an interrupted run
+        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
+        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
+        apriori = opt.global_rank, opt.local_rank
+        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
+            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
+        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
+        logger.info('Resuming training from %s' % ckpt)
+    else:
+        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
+        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
+        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
+        opt.name = 'evolve' if opt.evolve else opt.name
+        opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run
+
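Note: resuming round-trips the original argparse namespace through the opt.yaml that the run directory keeps next to its weights; everything is restored from YAML, and then the checkpoint path, resume flag and DDP ranks ('apriori') are reinstated on top of the loaded options. A minimal sketch of that round trip, assuming an existing run at runs/train/exp (path illustrative):

    import argparse, yaml
    from pathlib import Path

    ckpt = 'runs/train/exp/weights/last.pt'            # checkpoint inside <run>/weights/
    with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
        opt = argparse.Namespace(**yaml.safe_load(f))  # restore the saved CLI options
    opt.weights, opt.resume = ckpt, True               # reinstate what must survive the replacement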
+    # DDP mode
+    opt.total_batch_size = opt.batch_size
+    device = select_device(opt.device, batch_size=opt.batch_size)
+    if opt.local_rank != -1:
+        assert torch.cuda.device_count() > opt.local_rank
+        torch.cuda.set_device(opt.local_rank)
+        device = torch.device('cuda', opt.local_rank)
+        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
+        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
+        opt.batch_size = opt.total_batch_size // opt.world_size
+
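Note: under DDP, --batch-size means the total batch across all processes, which is why it must divide evenly by the device count. A quick worked example, assuming 4 GPUs:

    total_batch_size = 64                         # value passed via --batch-size
    world_size = 4                                # one process per GPU
    assert total_batch_size % world_size == 0
    batch_size = total_batch_size // world_size   # 16 images per GPU per step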
+    # Hyperparameters
+    with open(opt.hyp) as f:
+        hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps
+
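Note: the file behind --hyp is a flat YAML mapping whose keys line up with the meta table further down, so yaml.load simply returns a dict. A sketch of the kind of dict this produces, with keys in the style of the repo's hyp files (values illustrative):

    hyp = {'lr0': 0.01,            # initial learning rate
           'lrf': 0.1,             # final OneCycleLR fraction
           'momentum': 0.937,      # SGD momentum
           'weight_decay': 0.0005,
           'mosaic': 1.0,          # mosaic augmentation probability
           'mixup': 0.15}          # mixup augmentation probability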
+    # Train
+    logger.info(opt)
+    if not opt.evolve:
+        tb_writer = None  # init loggers
+        if opt.global_rank in [-1, 0]:
+            prefix = colorstr('tensorboard: ')
+            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
+            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
+        train(hyp, opt, device, tb_writer)
+
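Note: only the single-process or rank-0 worker creates a SummaryWriter, so a DDP run writes one TensorBoard log instead of one per GPU. A minimal sketch of the same guard (rank convention as above):

    from torch.utils.tensorboard import SummaryWriter

    tb_writer = None
    if global_rank in [-1, 0]:                        # -1 = single process, 0 = DDP master
        tb_writer = SummaryWriter('runs/train/exp')   # view with: tensorboard --logdir runs/train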
+    # Evolve hyperparameters (optional)
+    else:
+        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
+                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
+                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
+                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
+                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
+                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
+                'box': (1, 0.02, 0.2),  # box loss gain
+                'cls': (1, 0.2, 4.0),  # cls loss gain
+                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
+                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
+                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
+                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
+                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
+                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (EfficientDet default gamma=1.5)
+                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
+                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
+                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
+                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
+                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
+                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
+                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
+                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)
+
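Note: each meta value reads (gain, lower_limit, upper_limit): a gain of 0 freezes that hyperparameter during evolution, while larger gains let it mutate more aggressively inside [lower, upper]. Under this table, for example, fl_gamma and fliplr never change, and anchors (gain 2) mutates twice as strongly as the gain-1 keys:

    gain, lo, hi = meta['anchors']   # (2, 2.0, 10.0)
    # gain scales the mutation step below; lo/hi clamp the mutated value afterwards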
+        with open(opt.hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
+
+        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
+        opt.notest, opt.nosave = True, True  # only test/save final epoch
+        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
+        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
+        if opt.bucket:
+            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists
+
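Note: each finished generation appends one row to evolve.txt: the result metrics first (seven values: P, R, mAP@.5, mAP@.5:.95 and the three validation losses), then the hyperparameter values, which is why the mutation step below indexes hyperparameters from column 7 onward (x[i + 7]). fitness() (imported from the repo's utils) collapses the metric columns into one scalar so rows can be ranked, roughly:

    import numpy as np

    x = np.loadtxt('evolve.txt', ndmin=2)   # one row per finished generation
    best5 = x[np.argsort(-fitness(x))][:5]  # top 5 rows by scalar fitness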
+        for _ in range(300):  # generations to evolve
+            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
+                # Select parent(s)
+                parent = 'single'  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt('evolve.txt', ndmin=2)
+                n = min(5, len(x))  # number of previous results to consider
+                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
+                w = fitness(x) - fitness(x).min()  # weights
+                if parent == 'single' or len(x) == 1:
+                    # x = x[random.randint(0, n - 1)]  # random selection
+                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+                elif parent == 'weighted':
+                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
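Note: both selection modes weight the top-n parents by their fitness margin over the worst of them; 'single' draws one parent with probability proportional to that margin, while 'weighted' blends all n into a single averaged parent. A small worked example of the 'weighted' combination:

    import numpy as np

    w = np.array([3.0, 1.0, 0.0])               # fitness margins of the top 3 rows
    x = np.array([[0.010], [0.014], [0.020]])   # one hyperparameter column, illustrative
    blended = (x * w.reshape(3, 1)).sum(0) / w.sum()   # -> 0.011, leaning toward the fittest row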
+                # Mutate
+                mp, s = 0.8, 0.2  # mutation probability, sigma
+                npr = np.random
+                npr.seed(int(time.time()))
+                g = np.array([x[0] for x in meta.values()])  # gains 0-1
+                ng = len(meta)
+                v = np.ones(ng)
+                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
+                    hyp[k] = float(x[i + 7] * v[i])  # mutate
+
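Note: per key, the mutation factor is v = clip(gain * B * N * U * s + 1, 0.3, 3.0), where B is a Bernoulli draw (mutate with probability mp = 0.8), N is standard normal, U is one uniform draw shared across keys, and s = 0.2 is the sigma, so every hyperparameter is rescaled by between 0.3x and 3.0x of its parent value. A standalone sketch of one mutation step:

    import numpy as np

    npr = np.random
    g = np.array([1.0, 0.3, 2.0])   # per-key gains from meta
    mp, s = 0.8, 0.2                # mutation probability, sigma
    v = (g * (npr.random(3) < mp) * npr.randn(3) * npr.random() * s + 1).clip(0.3, 3.0)
    mutated = np.array([0.01, 0.937, 4.0]) * v   # apply factors to the parent's values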
+            # Constrain to limits
+            for k, v in meta.items():
+                hyp[k] = max(hyp[k], v[1])  # lower limit
+                hyp[k] = min(hyp[k], v[2])  # upper limit
+                hyp[k] = round(hyp[k], 5)  # significant digits
+
+            # Train mutation
+            results = train(hyp.copy(), opt, device)
+
+            # Write mutation results
+            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
+
+        # Plot results
+        plot_evolution(yaml_file)
+        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
+              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
yolov7.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5128f8885b88f51704ea728a97d55218cdf3e18973badaf22d2c0da7d9e0b094
+size 75586793
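Note: this and the two entries below are Git LFS pointer files, not the weights themselves; each records only the LFS spec version, the SHA-256 of the real blob, and its size in bytes (~75 MB here), with the binary fetched by git-lfs at checkout. Note also that yolov7_training.pt.1 carries the same oid and size as yolov7_training.pt, so it is a byte-identical duplicate download. A minimal sketch of reading such a pointer:

    # Parse a Git LFS pointer file ('key value' per line) into a dict
    def read_lfs_pointer(path):
        with open(path) as f:
            return dict(line.strip().split(' ', 1) for line in f if line.strip())

    meta = read_lfs_pointer('yolov7.pt')
    assert meta['version'].startswith('https://git-lfs.github.com/spec/')
    nbytes = int(meta['size'])   # 75586793 for this commit's yolov7.pt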
yolov7_training.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54c0651ef2d866584f9dcdcbd2a60c9f8fb0ee9e444af3dfd67b8c1e023f75f4
+size 75628875
yolov7_training.pt.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54c0651ef2d866584f9dcdcbd2a60c9f8fb0ee9e444af3dfd67b8c1e023f75f4
+size 75628875