Spaces: Runtime error
Commit 85db903 · 1 Parent(s): 670680d
First model version
Note: this view is limited to 50 files because the commit contains too many changes; the raw diff has the full change set.
- .gitattributes +1 -0
- .gitignore +5 -0
- app.py +47 -0
- images/image_1.jpg +0 -0
- images/image_2.jpg +0 -0
- images/image_3.jpg +0 -0
- metadata/dataset_utils/dataset_downloader.py +21 -0
- metadata/predictor_yolo_detector/__pycache__/detector_test.cpython-37.pyc +0 -0
- metadata/predictor_yolo_detector/__pycache__/detector_test.cpython-38.pyc +0 -0
- metadata/predictor_yolo_detector/best.pt +3 -0
- metadata/predictor_yolo_detector/detector_test.py +176 -0
- metadata/predictor_yolo_detector/inference/images/inputImage.jpg +0 -0
- metadata/predictor_yolo_detector/models/__init__.py +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-36.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-37.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-38.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/common.cpython-36.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/common.cpython-37.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/common.cpython-38.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-36.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-37.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-38.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-36.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-37.pyc +0 -0
- metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-38.pyc +0 -0
- metadata/predictor_yolo_detector/models/common.py +189 -0
- metadata/predictor_yolo_detector/models/custom_yolov5s.yaml +48 -0
- metadata/predictor_yolo_detector/models/experimental.py +152 -0
- metadata/predictor_yolo_detector/models/export.py +94 -0
- metadata/predictor_yolo_detector/models/hub/yolov3-spp.yaml +51 -0
- metadata/predictor_yolo_detector/models/hub/yolov5-fpn.yaml +42 -0
- metadata/predictor_yolo_detector/models/hub/yolov5-panet.yaml +48 -0
- metadata/predictor_yolo_detector/models/yolo.py +283 -0
- metadata/predictor_yolo_detector/models/yolov5l.yaml +48 -0
- metadata/predictor_yolo_detector/models/yolov5m.yaml +48 -0
- metadata/predictor_yolo_detector/models/yolov5s.yaml +48 -0
- metadata/predictor_yolo_detector/models/yolov5x.yaml +48 -0
- metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/events.out.tfevents.1604565595.828c870bfd5d.342.0 +3 -0
- metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/hyp.yaml +27 -0
- metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/opt.yaml +31 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/events.out.tfevents.1604565658.828c870bfd5d.369.0 +3 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/hyp.yaml +27 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/labels.png +0 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/labels_correlogram.png +0 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/opt.yaml +31 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/precision-recall_curve.png +0 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/results.png +0 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/results.txt +100 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/test_batch0_gt.jpg +0 -0
- metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/test_batch0_pred.jpg +0 -0
.gitattributes
CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+**.pt filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,5 @@
.ipynb_checkpoints
.vscode
flagged/
resolute/
.idea/
app.py
ADDED
@@ -0,0 +1,47 @@
from charset_normalizer import detect
import numpy as np
import gradio as gr
import torch
import torch.nn as nn
import cv2
import os
from numpy import random
from metadata.utils.utils import decodeImage
from metadata.predictor_yolo_detector.detector_test import Detector
from PIL import Image

class ClientApp:
    def __init__(self):
        self.filename = "inputImage.jpg"
        # modelPath = 'research/ssd_mobilenet_v1_coco_2017_11_17'
        self.objectDetection = Detector(self.filename)


clApp = ClientApp()

def predict_image(input_img):
    img = Image.fromarray(input_img)
    img.save("./metadata/predictor_yolo_detector/inference/images/" + clApp.filename)
    resultant_img = clApp.objectDetection.detect_action()
    return resultant_img

demo = gr.Blocks()

with demo:
    gr.Markdown(
        """
        <h1 align = "center"> Warehouse Apparel Detection </h1>
        """)

    detect = gr.Interface(predict_image, 'image', 'image', examples=[
        os.path.join(os.path.dirname(__file__), "images/image_1.jpg"),
        os.path.join(os.path.dirname(__file__), "images/image_2.jpg"),
        os.path.join(os.path.dirname(__file__), "images/image_3.jpg")
    ])

demo.launch()
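For reference, predict_image above just round-trips the Gradio frame through the detector's input folder. A minimal sketch of the same flow outside Gradio, assuming the repo layout from this commit and a local images/image_1.jpg:

from PIL import Image
from metadata.predictor_yolo_detector.detector_test import Detector

img = Image.open("images/image_1.jpg")
# Detector.detect() scans this folder, so the frame is staged there first
img.save("./metadata/predictor_yolo_detector/inference/images/inputImage.jpg")
annotated = Detector("inputImage.jpg").detect_action()  # numpy image with boxes drawn
print(annotated.shape)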
images/image_1.jpg
ADDED
images/image_2.jpg
ADDED
images/image_3.jpg
ADDED
metadata/dataset_utils/dataset_downloader.py
ADDED
@@ -0,0 +1,21 @@
import gdown
from zipfile import ZipFile

# Original Link :- https://drive.google.com/file/d/14QoqoZQLYnUmZgYblmFZ2u2eHo9yv2aA/view?usp=sharing
url = 'https://drive.google.com/uc?id=14QoqoZQLYnUmZgYblmFZ2u2eHo9yv2aA'
output = 'Fire_smoke.zip'

gdown.download(url, output, quiet=False)

# specifying the zip file name
file_name = output

# opening the zip file in READ mode
with ZipFile(file_name, 'r') as zip:
    # printing all the contents of the zip file
    zip.printdir()

    # extracting all the files
    print('Extracting all the files now...')
    zip.extractall()
    print('Done!')
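One note on the script above: extractall() unpacks into the current working directory. A sketch of pinning the destination instead (the 'data/' target is hypothetical; ZipFile.extractall in the standard library accepts a path argument):

from zipfile import ZipFile

with ZipFile('Fire_smoke.zip', 'r') as zf:
    zf.extractall(path='data/')  # unpack into data/ instead of the cwd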
metadata/predictor_yolo_detector/__pycache__/detector_test.cpython-37.pyc
ADDED
Binary file (5.53 kB)
metadata/predictor_yolo_detector/__pycache__/detector_test.cpython-38.pyc
ADDED
Binary file (5.05 kB)
metadata/predictor_yolo_detector/best.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26c75a28c481bd9a22759e8b2a2a4a9be08bee37a864aed6cd442a1b3e199b0c
size 14785730
metadata/predictor_yolo_detector/detector_test.py
ADDED
@@ -0,0 +1,176 @@
import os
import shutil
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from PIL import Image

from metadata.utils.utils import encodeImageIntoBase64

import sys
sys.path.insert(0, 'metadata/predictor_yolo_detector')

from metadata.predictor_yolo_detector.models.experimental import attempt_load
from metadata.predictor_yolo_detector.utils.datasets import LoadStreams, LoadImages
from metadata.predictor_yolo_detector.utils.general import (
    check_img_size, non_max_suppression, apply_classifier, scale_coords,
    xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from metadata.predictor_yolo_detector.utils.torch_utils import select_device, load_classifier, \
    time_synchronized


class Detector():
    def __init__(self, filename):
        self.weights = "./metadata/predictor_yolo_detector/best.pt"
        self.conf = float(0.5)
        self.source = "./metadata/predictor_yolo_detector/inference/images/"
        self.img_size = int(416)
        self.save_dir = "./metadata/predictor_yolo_detector/inference/output"
        self.view_img = False
        self.save_txt = False
        self.device = 'cpu'
        self.augment = True
        self.agnostic_nms = True
        self.conf_thres = float(0.5)
        self.iou_thres = float(0.45)
        self.classes = 0
        self.save_conf = True
        self.update = True
        self.filename = filename

    def detect(self, save_img=False):
        out, source, weights, view_img, save_txt, imgsz = \
            self.save_dir, self.source, self.weights, self.view_img, self.save_txt, self.img_size
        webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

        # Initialize
        set_logging()
        device = select_device(self.device)
        if os.path.exists(out):  # output dir
            shutil.rmtree(out)  # delete dir
        os.makedirs(out)  # make new dir
        half = device.type != 'cpu'  # half precision only supported on CUDA

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
        if half:
            model.half()  # to FP16

        # Second-stage classifier
        classify = False
        if classify:
            modelc = load_classifier(name='resnet101', n=2)  # initialize
            modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
            modelc.to(device).eval()

        # Set Dataloader
        vid_path, vid_writer = None, None
        if webcam:
            view_img = True
            cudnn.benchmark = True  # set True to speed up constant image size inference
            dataset = LoadStreams(source, img_size=imgsz)
        else:
            save_img = True
            dataset = LoadImages(source, img_size=imgsz)

        # Get names and colors
        names = model.module.names if hasattr(model, 'module') else model.names
        colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

        # Run inference
        t0 = time.time()
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
        for path, img, im0s, vid_cap in dataset:
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)

            # Inference
            t1 = time_synchronized()
            pred = model(img, augment=self.augment)[0]

            # Apply NMS
            pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes,
                                       agnostic=self.agnostic_nms)
            t2 = time_synchronized()

            # Apply Classifier
            if classify:
                pred = apply_classifier(pred, modelc, img, im0s)

            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if webcam:  # batch_size >= 1
                    p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                else:
                    p, s, im0 = path, '', im0s

                save_path = str(Path(out) / Path(p).name)
                txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
                s += '%gx%g ' % img.shape[2:]  # print string
                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                    # Print results
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += '%g %ss, ' % (n, names[int(c)])  # add to string

                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        if save_txt:  # Write to file
                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                            line = (cls, conf, *xywh) if self.save_conf else (cls, *xywh)  # label format
                            with open(txt_path + '.txt', 'a') as f:
                                f.write(('%g ' * len(line) + '\n') % line)

                        if save_img or view_img:  # Add bbox to image
                            label = '%s %.2f' % (names[int(cls)], conf)
                            plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

                # Print time (inference + NMS)
                # print('%sDone. (%.3fs)' % (s, t2 - t1))
                # detections = "Total No. of Cardboards:" + str(len(det))
                # cv2.putText(img = im0, text = detections, org = (round(im0.shape[0]*0.08), round(im0.shape[1]*0.08)),fontFace = cv2.FONT_HERSHEY_DUPLEX, fontScale = 1.0,color = (0, 0, 255),thickness = 3)
                im0 = cv2.cvtColor(im0, cv2.COLOR_RGB2BGR)
                return im0
        # if save_img:
        #     if dataset.mode == 'images':

        #         #im = im0[:, :, ::-1]
        #         im = Image.fromarray(im0)

        #         im.save("output.jpg")
        #         # cv2.imwrite(save_path, im0)
        #     else:
        #         print("Video Processing Needed")

        # if save_txt or save_img:
        #     print('Results saved to %s' % Path(out))

        # print('Done. (%.3fs)' % (time.time() - t0))

        # return "Done"

    def detect_action(self):
        with torch.no_grad():
            img = self.detect()
            return img
            # bgr_image = cv2.imread("output.jpg")
            # im_rgb = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
            # cv2.imwrite('color_img.jpg', im_rgb)
            # opencodedbase64 = encodeImageIntoBase64("color_img.jpg")
            # result = {"image": opencodedbase64.decode('utf-8')}
            # return result
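Two behaviors of Detector.detect() worth noting: it wipes and recreates inference/output on every call, and it returns from inside the per-image loop, so only the first image found in self.source is processed per call. A minimal driver sketch ('annotated.jpg' is a hypothetical output path):

import cv2
from metadata.predictor_yolo_detector.detector_test import Detector

det = Detector("inputImage.jpg")  # filename is stored, but detect() scans self.source
im0 = det.detect_action()         # numpy image with boxes; conf_thres=0.5, iou_thres=0.45
cv2.imwrite("annotated.jpg", im0) # note detect() swaps channel order before returning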
metadata/predictor_yolo_detector/inference/images/inputImage.jpg
ADDED
metadata/predictor_yolo_detector/models/__init__.py
ADDED
File without changes
metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-36.pyc
ADDED
Binary file (117 Bytes)
metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (204 Bytes)
metadata/predictor_yolo_detector/models/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (198 Bytes)
metadata/predictor_yolo_detector/models/__pycache__/common.cpython-36.pyc
ADDED
Binary file (8.92 kB)
metadata/predictor_yolo_detector/models/__pycache__/common.cpython-37.pyc
ADDED
Binary file (9.06 kB)
metadata/predictor_yolo_detector/models/__pycache__/common.cpython-38.pyc
ADDED
Binary file (8.92 kB)
metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-36.pyc
ADDED
Binary file (6.76 kB)
metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-37.pyc
ADDED
Binary file (6.91 kB)
metadata/predictor_yolo_detector/models/__pycache__/experimental.cpython-38.pyc
ADDED
Binary file (6.78 kB)
metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-36.pyc
ADDED
Binary file (9.85 kB)
metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-37.pyc
ADDED
Binary file (9.83 kB)
metadata/predictor_yolo_detector/models/__pycache__/yolo.cpython-38.pyc
ADDED
Binary file (9.79 kB)
metadata/predictor_yolo_detector/models/common.py
ADDED
@@ -0,0 +1,189 @@
# This file contains modules common to various models

import math

import numpy as np
import torch
import torch.nn as nn

from metadata.predictor_yolo_detector.utils.datasets import letterbox
from metadata.predictor_yolo_detector.utils.general import non_max_suppression, make_divisible, \
    scale_coords


def autopad(k, p=None):  # kernel, padding
    # Pad to 'same'
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p


def DWConv(c1, c2, k=1, s=1, act=True):
    # Depthwise convolution
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


class Conv(nn.Module):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.Hardswish() if act else nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        return self.act(self.conv(x))


class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))


class Concat(nn.Module):
    # Concatenate a list of tensors along dimension
    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)


class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model

    def forward(self, x, size=640, augment=False, profile=False):
        # supports inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   opencv: x = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL: x = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy: x = np.zeros((720,1280,3))  # HWC
        #   torch: x = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple: x = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        p = next(self.model.parameters())  # for device and type
        if isinstance(x, torch.Tensor):  # torch
            return self.model(x.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process
        if not isinstance(x, list):
            x = [x]
        shape0, shape1 = [], []  # image and inference shapes
        batch = range(len(x))  # batch size
        for i in batch:
            x[i] = np.array(x[i])  # to numpy
            x[i] = x[i][:, :, :3] if x[i].ndim == 3 else np.tile(x[i][:, :, None], 3)  # enforce 3ch input
            s = x[i].shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(x[i], new_shape=shape1, auto=False)[0] for i in batch]  # pad
        x = np.stack(x, 0) if batch[-1] else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32

        # Inference
        x = self.model(x, augment, profile)  # forward
        x = non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process
        for i in batch:
            if x[i] is not None:
                x[i][:, :4] = scale_coords(shape1, x[i][:, :4], shape0[i])
        return x


class Flatten(nn.Module):
    # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
    @staticmethod
    def forward(x):
        return x.view(x.size(0), -1)


class Classify(nn.Module):
    # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  # to x(b,c2,1,1)
        self.flat = Flatten()

    def forward(self, x):
        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list
        return self.flat(self.conv(z))  # flatten to x(b,c2)
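As a quick sanity check on the Focus slicing above, which moves spatial detail into channels before the first convolution, a short shape sketch:

import torch

x = torch.randn(1, 3, 416, 416)
y = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
               x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
print(y.shape)  # torch.Size([1, 12, 208, 208]) -- 4x channels, half resolution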
metadata/predictor_yolo_detector/models/custom_yolov5s.yaml
ADDED
@@ -0,0 +1,48 @@
# parameters
nc: 2  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple

# anchors
anchors:
  - [ 10,13, 16,30, 33,23 ]  # P3/8
  - [ 30,61, 62,45, 59,119 ]  # P4/16
  - [ 116,90, 156,198, 373,326 ]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
    [ -1, 3, BottleneckCSP, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
    [ -1, 9, BottleneckCSP, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
    [ -1, 9, BottleneckCSP, [ 512 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
  ]

# YOLOv5 head
head:
  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13

    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)

    [ -1, 1, Conv, [ 256, 3, 2 ] ],
    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)

    [ -1, 1, Conv, [ 512, 3, 2 ] ],
    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)

    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
  ]
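The two multiples are what make this the 's' variant: parse_model in models/yolo.py (further down in this commit) scales repeat counts by depth_multiple and channel widths by width_multiple, rounding widths up to a multiple of 8. A worked sketch of those two lines:

import math

gd, gw = 0.33, 0.50                # depth_multiple, width_multiple from this yaml
n = max(round(9 * gd), 1)          # a 9-repeat BottleneckCSP stage -> 3 repeats
c2 = math.ceil(1024 * gw / 8) * 8  # make_divisible(1024 * gw, 8) -> 512 channels
print(n, c2)  # 3 512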
metadata/predictor_yolo_detector/models/experimental.py
ADDED
@@ -0,0 +1,152 @@
# This file contains experimental modules

import numpy as np
import torch
import torch.nn as nn

from metadata.predictor_yolo_detector.models.common import Conv, DWConv
from metadata.predictor_yolo_detector.utils.google_utils import attempt_download


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class C3(nn.Module):
    # Cross Convolution CSP
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class GhostConv(nn.Module):
    # Ghost Convolution https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(GhostConv, self).__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act)
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat([y, self.cv2(y)], 1)


class GhostBottleneck(nn.Module):
    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k, s):
        super(GhostBottleneck, self).__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
                                  DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
                                  GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
        self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
                                      Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.cat(y, 1)  # nms ensemble
        y = torch.stack(y).mean(0)  # mean ensemble
        return y, None  # inference, train output


def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble
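attempt_load at the bottom is the entry point detector_test.py uses; handed a list of paths it returns the mean-averaging Ensemble above instead of a single model. A sketch (the second weights file is hypothetical):

from metadata.predictor_yolo_detector.models.experimental import attempt_load

single = attempt_load('best.pt', map_location='cpu')             # one Model, fused and in eval mode
ens = attempt_load(['best.pt', 'best2.pt'], map_location='cpu')  # Ensemble; 'best2.pt' is hypothetical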
metadata/predictor_yolo_detector/models/export.py
ADDED
@@ -0,0 +1,94 @@
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats

Usage:
    $ export PYTHONPATH="$PWD" && python models/export.py --weights ./yolov5s.pt --img 640 --batch 1
"""

import argparse
import sys
import time

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn

from metadata.predictor_yolo_detector.models import common
from metadata.predictor_yolo_detector.models.experimental import attempt_load
from metadata.predictor_yolo_detector.utils.activations import Hardswish
from metadata.predictor_yolo_detector.utils.general import set_logging, check_img_size

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size)  # image size(1,3,320,192) iDetection

    # Update model
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, common.Conv) and isinstance(m.act, nn.Hardswish):
            m.act = Hardswish()  # assign activation
        # if isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img)
        ts.save(f)
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'])

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        # convert model from torchscript and apply pixel scaling as per detect.py
        model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
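After a successful ONNX export, the graph can be exercised without PyTorch. A sketch, assuming onnxruntime is installed and using the input name 'images' from the export call above:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('yolov5s.onnx')
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # matches --img 640 --batch 1
outputs = sess.run(None, {'images': dummy})           # raw predictions; NMS still needed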
metadata/predictor_yolo_detector/models/hub/yolov3-spp.yaml
ADDED
@@ -0,0 +1,51 @@
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [ 10,13, 16,30, 33,23 ]  # P3/8
  - [ 30,61, 62,45, 59,119 ]  # P4/16
  - [ 116,90, 156,198, 373,326 ]  # P5/32

# darknet53 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, Conv, [ 32, 3, 1 ] ],  # 0
    [ -1, 1, Conv, [ 64, 3, 2 ] ],  # 1-P1/2
    [ -1, 1, Bottleneck, [ 64 ] ],
    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 3-P2/4
    [ -1, 2, Bottleneck, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 5-P3/8
    [ -1, 8, Bottleneck, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 7-P4/16
    [ -1, 8, Bottleneck, [ 512 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 9-P5/32
    [ -1, 4, Bottleneck, [ 1024 ] ],  # 10
  ]

# YOLOv3-SPP head
head:
  [ [ -1, 1, Bottleneck, [ 1024, False ] ],
    [ -1, 1, SPP, [ 512, [ 5, 9, 13 ] ] ],
    [ -1, 1, Conv, [ 1024, 3, 1 ] ],
    [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, Conv, [ 1024, 3, 1 ] ],  # 15 (P5/32-large)

    [ -2, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 8 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 1, Bottleneck, [ 512, False ] ],
    [ -1, 1, Bottleneck, [ 512, False ] ],
    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, Conv, [ 512, 3, 1 ] ],  # 22 (P4/16-medium)

    [ -2, 1, Conv, [ 128, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 1, Bottleneck, [ 256, False ] ],
    [ -1, 2, Bottleneck, [ 256, False ] ],  # 27 (P3/8-small)

    [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
  ]
metadata/predictor_yolo_detector/models/hub/yolov5-fpn.yaml
ADDED
@@ -0,0 +1,42 @@
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [ 10,13, 16,30, 33,23 ]  # P3/8
  - [ 30,61, 62,45, 59,119 ]  # P4/16
  - [ 116,90, 156,198, 373,326 ]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
    [ -1, 3, Bottleneck, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
    [ -1, 9, BottleneckCSP, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
    [ -1, 9, BottleneckCSP, [ 512 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
    [ -1, 6, BottleneckCSP, [ 1024 ] ],  # 9
  ]

# YOLOv5 FPN head
head:
  [ [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 10 (P5/32-large)

    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 14 (P4/16-medium)

    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 18 (P3/8-small)

    [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
  ]
metadata/predictor_yolo_detector/models/hub/yolov5-panet.yaml
ADDED
@@ -0,0 +1,48 @@
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [ 116,90, 156,198, 373,326 ]  # P5/32
  - [ 30,61, 62,45, 59,119 ]  # P4/16
  - [ 10,13, 16,30, 33,23 ]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
    [ -1, 3, BottleneckCSP, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
    [ -1, 9, BottleneckCSP, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
    [ -1, 9, BottleneckCSP, [ 512 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
  ]

# YOLOv5 PANet head
head:
  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13

    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)

    [ -1, 1, Conv, [ 256, 3, 2 ] ],
    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)

    [ -1, 1, Conv, [ 512, 3, 2 ] ],
    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)

    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P5, P4, P3)
  ]
metadata/predictor_yolo_detector/models/yolo.py
ADDED
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse
|
2 |
+
import logging
|
3 |
+
import sys
|
4 |
+
from copy import deepcopy
|
5 |
+
from pathlib import Path
|
6 |
+
|
7 |
+
import math
|
8 |
+
|
9 |
+
sys.path.append('./') # to run '$ python *.py' files in subdirectories
|
10 |
+
logger = logging.getLogger(__name__)
|
11 |
+
|
12 |
+
import torch
|
13 |
+
import torch.nn as nn
|
14 |
+
|
15 |
+
from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, NMS, autoShape
|
16 |
+
from models.experimental import MixConv2d, CrossConv, C3
|
17 |
+
from utils.general import check_anchor_order, make_divisible, check_file, set_logging
|
18 |
+
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
|
19 |
+
select_device, copy_attr
|
20 |
+
|
21 |
+
|
22 |
+
class Detect(nn.Module):
|
23 |
+
stride = None # strides computed during build
|
24 |
+
export = False # onnx export
|
25 |
+
|
26 |
+
def __init__(self, nc=80, anchors=(), ch=()): # detection layer
|
27 |
+
super(Detect, self).__init__()
|
28 |
+
self.nc = nc # number of classes
|
29 |
+
self.no = nc + 5 # number of outputs per anchor
|
30 |
+
self.nl = len(anchors) # number of detection layers
|
31 |
+
self.na = len(anchors[0]) // 2 # number of anchors
|
32 |
+
self.grid = [torch.zeros(1)] * self.nl # init grid
|
33 |
+
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
|
34 |
+
self.register_buffer('anchors', a) # shape(nl,na,2)
|
35 |
+
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
|
36 |
+
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
|
37 |
+
|
38 |
+
def forward(self, x):
|
39 |
+
# x = x.copy() # for profiling
|
40 |
+
z = [] # inference output
|
41 |
+
self.training |= self.export
|
42 |
+
for i in range(self.nl):
|
43 |
+
x[i] = self.m[i](x[i]) # conv
|
44 |
+
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
|
45 |
+
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
|
46 |
+
|
47 |
+
if not self.training: # inference
|
48 |
+
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
|
49 |
+
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
|
50 |
+
|
51 |
+
y = x[i].sigmoid()
|
52 |
+
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
|
53 |
+
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
|
54 |
+
z.append(y.view(bs, -1, self.no))
|
55 |
+
|
56 |
+
return x if self.training else (torch.cat(z, 1), x)
|
57 |
+
|
58 |
+
@staticmethod
|
59 |
+
def _make_grid(nx=20, ny=20):
|
60 |
+
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
|
61 |
+
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
|
62 |
+
|
63 |
+
|
64 |
+
class Model(nn.Module):
|
65 |
+
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes
|
66 |
+
super(Model, self).__init__()
|
67 |
+
if isinstance(cfg, dict):
|
68 |
+
self.yaml = cfg # model dict
|
69 |
+
else: # is *.yaml
|
70 |
+
import yaml # for torch hub
|
71 |
+
self.yaml_file = Path(cfg).name
|
72 |
+
with open(cfg) as f:
|
73 |
+
self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
|
74 |
+
|
75 |
+
# Define model
|
76 |
+
if nc and nc != self.yaml['nc']:
|
77 |
+
print('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
|
78 |
+
self.yaml['nc'] = nc # override yaml value
|
79 |
+
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist, ch_out
|
80 |
+
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
|
81 |
+
|
82 |
+
# Build strides, anchors
|
83 |
+
m = self.model[-1] # Detect()
|
84 |
+
if isinstance(m, Detect):
|
85 |
+
s = 128 # 2x min stride
|
86 |
+
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
|
87 |
+
m.anchors /= m.stride.view(-1, 1, 1)
|
88 |
+
check_anchor_order(m)
|
89 |
+
self.stride = m.stride
|
90 |
+
self._initialize_biases() # only run once
|
91 |
+
# print('Strides: %s' % m.stride.tolist())
|
92 |
+
|
93 |
+
# Init weights, biases
|
94 |
+
initialize_weights(self)
|
95 |
+
self.info()
|
96 |
+
print('')
|
97 |
+
|
98 |
+
def forward(self, x, augment=False, profile=False):
|
99 |
+
if augment:
|
100 |
+
img_size = x.shape[-2:] # height, width
|
101 |
+
s = [1, 0.83, 0.67] # scales
|
102 |
+
f = [None, 3, None] # flips (2-ud, 3-lr)
|
103 |
+
y = [] # outputs
|
104 |
+
for si, fi in zip(s, f):
|
105 |
+
xi = scale_img(x.flip(fi) if fi else x, si)
|
106 |
+
yi = self.forward_once(xi)[0] # forward
|
107 |
+
# cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
|
108 |
+
yi[..., :4] /= si # de-scale
|
109 |
+
if fi == 2:
|
110 |
+
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
|
111 |
+
elif fi == 3:
|
112 |
+
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
|
113 |
+
y.append(yi)
|
114 |
+
return torch.cat(y, 1), None # augmented inference, train
|
115 |
+
else:
|
116 |
+
return self.forward_once(x, profile) # single-scale inference, train
|
117 |
+
|
118 |
+
def forward_once(self, x, profile=False):
|
119 |
+
y, dt = [], [] # outputs
|
120 |
+
for m in self.model:
|
121 |
+
if m.f != -1: # if not from previous layer
|
122 |
+
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
|
123 |
+
|
124 |
+
if profile:
|
125 |
+
try:
|
126 |
+
import thop
|
127 |
+
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS
|
128 |
+
except:
|
129 |
+
o = 0
|
130 |
+
t = time_synchronized()
|
131 |
+
for _ in range(10):
|
+                _ = m(x)
+                dt.append((time_synchronized() - t) * 100)
+                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
+
+            x = m(x)  # run
+            y.append(x if m.i in self.save else None)  # save output
+
+        if profile:
+            print('%.1fms total' % sum(dt))
+        return x
+
+    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+        # https://arxiv.org/abs/1708.02002 section 3.3
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+        m = self.model[-1]  # Detect() module
+        for mi, s in zip(m.m, m.stride):  # from
+            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+            b[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+            b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
+            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+    def _print_biases(self):
+        m = self.model[-1]  # Detect() module
+        for mi in m.m:  # from
+            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
+            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
+
+    # def _print_weights(self):
+    #     for m in self.model.modules():
+    #         if type(m) is Bottleneck:
+    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
+
+    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+        # print('Fusing layers... ')
+        for m in self.model.modules():
+            if type(m) is Conv and hasattr(m, 'bn'):
+                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                delattr(m, 'bn')  # remove batchnorm
+                m.forward = m.fuseforward  # update forward
+        self.info()
+        return self
+
+    def nms(self, mode=True):  # add or remove NMS module
+        present = type(self.model[-1]) is NMS  # last layer is NMS
+        if mode and not present:
+            print('Adding NMS... ')
+            m = NMS()  # module
+            m.f = -1  # from
+            m.i = self.model[-1].i + 1  # index
+            self.model.add_module(name='%s' % m.i, module=m)  # add
+            self.eval()
+        elif not mode and present:
+            print('Removing NMS... ')
+            self.model = self.model[:-1]  # remove
+        return self
+
+    def autoshape(self):  # add autoShape module
+        print('Adding autoShape... ')
+        m = autoShape(self)  # wrap model
+        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
+        return m
+
+    def info(self, verbose=False):  # print model information
+        model_info(self, verbose)
+
+
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            try:
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+            except:
+                pass
+
+        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
+            c1, c2 = ch[f], args[0]
+
+            # Normal
+            # if i > 0 and args[0] != no:  # channel expansion factor
+            #     ex = 1.75  # exponential (default 2.0)
+            #     e = math.log(c2 / ch[1]) / math.log(2)
+            #     c2 = int(ch[1] * ex ** e)
+            # if m != Focus:
+
+            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+            # Experimental
+            # if i > 0 and args[0] != no:  # channel expansion factor
+            #     ex = 1 + gw  # exponential (default 2.0)
+            #     ch1 = 32  # ch[1]
+            #     e = math.log(c2 / ch1) / math.log(2)  # level 1-n
+            #     c2 = int(ch1 * ex ** e)
+            # if m != Focus:
+            #     c2 = make_divisible(c2, 8) if c2 != no else c2
+
+            args = [c1, c2, *args[1:]]
+            if m in [BottleneckCSP, C3]:
+                args.insert(2, n)
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
+        elif m is Detect:
+            args.append([ch[x + 1] for x in f])
+            if isinstance(args[1], int):  # number of anchors
+                args[1] = [list(range(args[1] * 2))] * len(f)
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        np = sum([x.numel() for x in m_.parameters()])  # number params
+        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    opt = parser.parse_args()
+    opt.cfg = check_file(opt.cfg)  # check file
+    set_logging()
+    device = select_device(opt.device)
+
+    # Create model
+    model = Model(opt.cfg).to(device)
+    model.train()
+
+    # Profile
+    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+    # y = model(img, profile=True)
+
+    # Tensorboard
+    # from torch.utils.tensorboard import SummaryWriter
+    # tb_writer = SummaryWriter()
+    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
+    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
+    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
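The commented-out Profile section above can be exercised directly. A minimal sketch (assuming it runs from this models/ directory so that yolov5s.yaml and the file's imports resolve):

import torch

model = Model('yolov5s.yaml')      # Model is defined earlier in this file
model.train()
img = torch.rand(1, 3, 640, 640)   # dummy NCHW batch
y = model(img, profile=True)       # prints the per-layer timing table from forward_once, then '%.1fms total'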
metadata/predictor_yolo_detector/models/yolov5l.yaml
ADDED
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+
+# anchors
+anchors:
+  - [ 10,13, 16,30, 33,23 ]  # P3/8
+  - [ 30,61, 62,45, 59,119 ]  # P4/16
+  - [ 116,90, 156,198, 373,326 ]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, BottleneckCSP, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, BottleneckCSP, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, BottleneckCSP, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)
+
+    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
+  ]
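The four variant yamls in this commit share this exact layer list and differ only in depth_multiple and width_multiple. parse_model in yolo.py above applies them as n = max(round(n * gd), 1) and c2 = make_divisible(c2 * gw, 8). A standalone sketch of that arithmetic (make_divisible is re-implemented here for illustration; the repo's own version lives in its utils module):

import math

def make_divisible(x, divisor):
    # round channel count up to the nearest multiple of divisor, as parse_model expects
    return math.ceil(x / divisor) * divisor

gd, gw = 1.0, 1.0  # yolov5l: depth_multiple, width_multiple
for n, c in [(3, 128), (9, 256), (9, 512), (3, 1024)]:  # backbone BottleneckCSP stages
    print(f'repeats {n} -> {max(round(n * gd), 1)}, channels {c} -> {make_divisible(c * gw, 8)}')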
metadata/predictor_yolo_detector/models/yolov5m.yaml
ADDED
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 0.67  # model depth multiple
+width_multiple: 0.75  # layer channel multiple
+
+# anchors
+anchors:
+  - [ 10,13, 16,30, 33,23 ]  # P3/8
+  - [ 30,61, 62,45, 59,119 ]  # P4/16
+  - [ 116,90, 156,198, 373,326 ]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, BottleneckCSP, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, BottleneckCSP, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, BottleneckCSP, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)
+
+    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
+  ]
metadata/predictor_yolo_detector/models/yolov5s.yaml
ADDED
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+
+# anchors
+anchors:
+  - [ 10,13, 16,30, 33,23 ]  # P3/8
+  - [ 30,61, 62,45, 59,119 ]  # P4/16
+  - [ 116,90, 156,198, 373,326 ]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, BottleneckCSP, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, BottleneckCSP, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, BottleneckCSP, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)
+
+    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
+  ]
metadata/predictor_yolo_detector/models/yolov5x.yaml
ADDED
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+
+# anchors
+anchors:
+  - [ 10,13, 16,30, 33,23 ]  # P3/8
+  - [ 30,61, 62,45, 59,119 ]  # P4/16
+  - [ 116,90, 156,198, 373,326 ]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, BottleneckCSP, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, BottleneckCSP, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, BottleneckCSP, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, BottleneckCSP, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, BottleneckCSP, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, BottleneckCSP, [ 1024, False ] ],  # 23 (P5/32-large)
+
+    [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
+  ]
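Across the four files the graph is identical; only the two multipliers change. Reusing make_divisible from the sketch above, the 9-repeat stages and the widest (1024-channel) layers scale as follows:

variants = {  # (depth_multiple, width_multiple) taken from the yamls in this commit
    'yolov5s': (0.33, 0.50),
    'yolov5m': (0.67, 0.75),
    'yolov5l': (1.00, 1.00),
    'yolov5x': (1.33, 1.25),
}
for name, (gd, gw) in variants.items():
    print(name, max(round(9 * gd), 1), 'repeats,', make_divisible(1024 * gw, 8), 'channels')
# yolov5s: 3 repeats, 512 channels ... yolov5x: 12 repeats, 1280 channels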
metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/events.out.tfevents.1604565595.828c870bfd5d.342.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:648cd7c2fca5aae280c21bef9cbc4cbce4a09cb0789281dc1da6f6dba71d6036
+size 40
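The events.out.tfevents entries here and under exp1 are stored with Git LFS, so the diff records only the three-line pointer (spec version, sha256 oid, byte size) rather than the binary itself. A sketch of checking a locally fetched object against its pointer (hypothetical local path):

import hashlib

def lfs_oid(path):
    # hex sha256 of the file contents, the same quantity the pointer's oid field records
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

expected = '648cd7c2fca5aae280c21bef9cbc4cbce4a09cb0789281dc1da6f6dba71d6036'  # oid above
assert lfs_oid('events.out.tfevents.1604565595.828c870bfd5d.342.0') == expected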
metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/hyp.yaml
ADDED
@@ -0,0 +1,27 @@
+lr0: 0.01
+lrf: 0.2
+momentum: 0.937
+weight_decay: 0.0005
+warmup_epochs: 3.0
+warmup_momentum: 0.8
+warmup_bias_lr: 0.1
+box: 0.05
+cls: 0.5
+cls_pw: 1.0
+obj: 1.0
+obj_pw: 1.0
+iou_t: 0.2
+anchor_t: 4.0
+fl_gamma: 0.0
+hsv_h: 0.015
+hsv_s: 0.7
+hsv_v: 0.4
+degrees: 0.0
+translate: 0.1
+scale: 0.5
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 1.0
+mixup: 0.0
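hyp.yaml is flat YAML, one scalar per key, covering optimizer settings (lr0, momentum, weight_decay), loss gains (box, cls, obj) and augmentation (hsv_*, mosaic, mixup). Reading it back is a one-liner; a minimal sketch, assuming PyYAML and a local copy of the file:

import yaml

with open('runs/exp0_yolov5s_results/hyp.yaml') as f:
    hyp = yaml.safe_load(f)
print(hyp['lr0'], hyp['mosaic'])  # 0.01 1.0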
metadata/predictor_yolo_detector/runs/exp0_yolov5s_results/opt.yaml
ADDED
@@ -0,0 +1,31 @@
+weights: ''
+cfg: ./models/custom_yolov5s.yaml
+data: /content/drive/My Drive/Factory Security Automation/dataset/Fire_Smoke/data.yaml
+hyp: data/hyp.scratch.yaml
+epochs: 100
+batch_size: 16
+img_size:
+- 416
+- 416
+rect: false
+resume: false
+nosave: false
+notest: false
+noautoanchor: false
+evolve: false
+bucket: ''
+cache_images: true
+image_weights: false
+name: yolov5s_results
+device: ''
+multi_scale: false
+single_cls: false
+adam: false
+sync_bn: false
+local_rank: -1
+logdir: runs/
+log_imgs: 0
+workers: 8
+total_batch_size: 16
+world_size: 1
+global_rank: -1
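opt.yaml snapshots every option the run was launched with (note the Colab-style Google Drive dataset path and the 416x416 training resolution against custom_yolov5s.yaml). The file round-trips cleanly back into an argparse.Namespace; a sketch, with a hypothetical local path:

import argparse
import yaml

with open('runs/exp0_yolov5s_results/opt.yaml') as f:
    opt = argparse.Namespace(**yaml.safe_load(f))  # restore the recorded options
print(opt.epochs, opt.batch_size, opt.img_size)  # 100 16 [416, 416]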
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/events.out.tfevents.1604565658.828c870bfd5d.369.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee160c9e2f42a7bdaa4477128e3f77500754fe1c2f2d2e6740989f8c14132238
+size 70271
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/hyp.yaml
ADDED
@@ -0,0 +1,27 @@
+lr0: 0.01
+lrf: 0.2
+momentum: 0.937
+weight_decay: 0.0005
+warmup_epochs: 3.0
+warmup_momentum: 0.8
+warmup_bias_lr: 0.1
+box: 0.05
+cls: 0.5
+cls_pw: 1.0
+obj: 1.0
+obj_pw: 1.0
+iou_t: 0.2
+anchor_t: 4.0
+fl_gamma: 0.0
+hsv_h: 0.015
+hsv_s: 0.7
+hsv_v: 0.4
+degrees: 0.0
+translate: 0.1
+scale: 0.5
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 1.0
+mixup: 0.0
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/labels.png
ADDED
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/labels_correlogram.png
ADDED
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/opt.yaml
ADDED
@@ -0,0 +1,31 @@
+weights: ''
+cfg: ./models/custom_yolov5s.yaml
+data: /content/drive/My Drive/Factory Security Automation/dataset/Fire_Smoke/data.yaml
+hyp: data/hyp.scratch.yaml
+epochs: 100
+batch_size: 16
+img_size:
+- 416
+- 416
+rect: false
+resume: false
+nosave: false
+notest: false
+noautoanchor: false
+evolve: false
+bucket: ''
+cache_images: true
+image_weights: false
+name: yolov5s_results
+device: ''
+multi_scale: false
+single_cls: false
+adam: false
+sync_bn: false
+local_rank: -1
+logdir: runs/
+log_imgs: 0
+workers: 8
+total_batch_size: 16
+world_size: 1
+global_rank: -1
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/precision-recall_curve.png
ADDED
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/results.png
ADDED
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/results.txt
ADDED
@@ -0,0 +1,100 @@
+0/99 1.81G 0.09893 0.04764 0.02131 0.1679 30 416 0 0 0.001587 0.0002944 0.08504 0.02206 0.03531
+1/99 1.81G 0.09344 0.04973 0.01541 0.1586 40 416 0 0 0.005799 0.001407 0.08099 0.02243 0.03596
+2/99 1.81G 0.08784 0.04993 0.01227 0.15 32 416 0.01432 0.01075 0.003013 0.0005957 0.08129 0.02286 0.03354
+3/99 1.82G 0.08028 0.05145 0.009282 0.141 35 416 0.001481 0.01075 0.002394 0.0005067 0.08033 0.02581 0.02608
+4/99 1.82G 0.07551 0.05114 0.007099 0.1337 34 416 0.01091 0.04139 0.002496 0.0004708 0.07839 0.0237 0.0394
+5/99 1.82G 0.0707 0.05078 0.005377 0.1269 35 416 0.00772 0.06718 0.003629 0.0007497 0.07799 0.02498 0.03243
+6/99 1.82G 0.06663 0.04872 0.004305 0.1197 35 416 0.01284 0.2059 0.01187 0.003024 0.07354 0.03182 0.02271
+7/99 1.82G 0.06207 0.04577 0.003588 0.1114 33 416 0.01773 0.03602 0.005831 0.001438 0.0773 0.02229 0.0292
+8/99 1.82G 0.05921 0.04342 0.00287 0.1055 37 416 0.01854 0.2053 0.01263 0.002853 0.06826 0.03094 0.02368
+9/99 1.82G 0.05657 0.04126 0.002692 0.1005 25 416 0.03702 0.2177 0.05131 0.01846 0.06741 0.02426 0.02812
+10/99 1.82G 0.05436 0.03943 0.002331 0.09612 32 416 0.0588 0.2129 0.03189 0.009211 0.06712 0.02804 0.04081
+11/99 1.82G 0.05269 0.03888 0.002292 0.09386 34 416 0.04649 0.2464 0.05502 0.02069 0.06784 0.02609 0.02809
+12/99 1.82G 0.05104 0.03803 0.001927 0.091 34 416 0.03231 0.2521 0.05141 0.01356 0.06666 0.0252 0.02543
+13/99 1.82G 0.04998 0.03688 0.001914 0.08878 28 416 0.04019 0.2214 0.05516 0.01728 0.06618 0.02353 0.02053
+14/99 1.82G 0.04853 0.03689 0.001747 0.08716 26 416 0.04947 0.2698 0.05401 0.01793 0.06578 0.02403 0.01927
+15/99 1.82G 0.04742 0.03596 0.001539 0.08492 34 416 0.05023 0.2295 0.05609 0.02081 0.06588 0.02474 0.02174
+16/99 1.82G 0.04651 0.03519 0.001496 0.08319 37 416 0.03594 0.1779 0.05644 0.01819 0.06521 0.02446 0.02155
+17/99 1.82G 0.04556 0.03465 0.001401 0.08161 31 416 0.04865 0.2741 0.06832 0.02672 0.06373 0.02337 0.01956
+18/99 1.82G 0.04499 0.03426 0.001472 0.08071 35 416 0.04921 0.2585 0.08185 0.03131 0.06576 0.02329 0.02796
+19/99 1.82G 0.04434 0.03451 0.001371 0.08021 36 416 0.0441 0.2676 0.08527 0.0253 0.0657 0.02383 0.01746
+20/99 1.82G 0.04314 0.03314 0.001416 0.07769 36 416 0.04411 0.2144 0.05299 0.02149 0.06512 0.02501 0.0202
+21/99 1.82G 0.04277 0.03305 0.001311 0.07714 34 416 0.04467 0.2381 0.05648 0.01529 0.06593 0.02413 0.02592
+22/99 1.82G 0.04213 0.03229 0.001334 0.07575 35 416 0.07915 0.236 0.06322 0.01497 0.06624 0.02392 0.03236
+23/99 1.82G 0.04152 0.03256 0.00114 0.07522 34 416 0.03371 0.2182 0.03018 0.009125 0.06527 0.02787 0.02136
+24/99 1.82G 0.04097 0.032 0.00104 0.074 44 416 0.06347 0.1817 0.07005 0.02203 0.06461 0.02434 0.03134
+25/99 1.82G 0.04079 0.03207 0.001136 0.07399 44 416 0.05743 0.2016 0.06142 0.02198 0.06408 0.02473 0.02933
+26/99 1.82G 0.04057 0.03112 0.001092 0.07278 37 416 0.05116 0.1473 0.04281 0.01783 0.06544 0.02432 0.02658
+27/99 1.82G 0.03921 0.0305 0.001197 0.07091 35 416 0.05939 0.2137 0.07203 0.0186 0.06598 0.02327 0.02478
+28/99 1.82G 0.03929 0.03088 0.001088 0.07125 38 416 0.04667 0.1892 0.04484 0.01265 0.06675 0.02642 0.02214
+29/99 1.82G 0.03889 0.03057 0.001078 0.07054 42 416 0.1081 0.2225 0.07418 0.03127 0.06701 0.02537 0.03313
+30/99 1.82G 0.03866 0.03029 0.001163 0.07011 35 416 0.0507 0.2043 0.07113 0.02053 0.06582 0.02442 0.0274
+31/99 1.82G 0.03812 0.02989 0.001115 0.06913 41 416 0.07134 0.2118 0.05145 0.01356 0.06666 0.02828 0.02723
+32/99 1.82G 0.03754 0.02953 0.0009821 0.06805 31 416 0.04466 0.1795 0.05775 0.02495 0.06641 0.02562 0.02646
+33/99 1.82G 0.03749 0.02937 0.001026 0.06789 28 416 0.06486 0.172 0.05876 0.02403 0.06531 0.02484 0.03947
+34/99 1.82G 0.03683 0.02874 0.0008897 0.06646 33 416 0.05251 0.1715 0.07841 0.03514 0.06499 0.02397 0.03911
+35/99 1.82G 0.03678 0.02906 0.0009355 0.06677 33 416 0.05911 0.1849 0.07483 0.0309 0.06571 0.02463 0.03404
+36/99 1.82G 0.03582 0.02807 0.0008915 0.06478 29 416 0.06801 0.2311 0.09068 0.03254 0.06554 0.02431 0.02458
+37/99 1.82G 0.03589 0.02868 0.0009573 0.06552 37 416 0.05897 0.259 0.07025 0.02419 0.06471 0.02421 0.02755
+38/99 1.82G 0.03576 0.02835 0.0009018 0.06501 33 416 0.06878 0.1689 0.05431 0.01683 0.06609 0.02504 0.02782
+39/99 1.82G 0.03548 0.02775 0.001018 0.06425 32 416 0.05994 0.2639 0.07841 0.02884 0.06302 0.02464 0.02572
+40/99 1.82G 0.03471 0.02764 0.001074 0.06342 39 416 0.06073 0.172 0.05433 0.02337 0.06646 0.0243 0.02453
+41/99 1.82G 0.03427 0.0277 0.0007684 0.06273 34 416 0.05228 0.1716 0.06343 0.02321 0.06575 0.02656 0.02478
+42/99 1.82G 0.03409 0.02698 0.000711 0.06178 35 416 0.05419 0.1811 0.07327 0.02578 0.06515 0.02367 0.02294
+43/99 1.82G 0.03413 0.0275 0.0007858 0.06242 37 416 0.06028 0.2633 0.07119 0.02687 0.06336 0.02412 0.02402
+44/99 1.82G 0.03395 0.0273 0.0008348 0.06208 35 416 0.06234 0.2069 0.05342 0.01517 0.06587 0.025 0.02885
+45/99 1.82G 0.03391 0.02725 0.0007883 0.06195 35 416 0.03548 0.1618 0.08076 0.03325 0.06703 0.02426 0.03585
+46/99 1.82G 0.03327 0.02711 0.000814 0.0612 36 416 0.06062 0.15 0.06311 0.02728 0.0669 0.02457 0.03052
+47/99 1.82G 0.03314 0.02687 0.0007741 0.06079 33 416 0.05882 0.1623 0.06726 0.02572 0.06593 0.02574 0.03263
+48/99 1.82G 0.03269 0.02613 0.0007547 0.05958 46 416 0.0668 0.1849 0.0746 0.03231 0.06439 0.02563 0.02913
+49/99 1.82G 0.0325 0.02625 0.0009518 0.0597 34 416 0.04818 0.1881 0.0673 0.02661 0.06696 0.0255 0.03222
+50/99 1.82G 0.03242 0.02669 0.0007682 0.05988 33 416 0.06559 0.1887 0.06419 0.02415 0.06374 0.02534 0.03064
+51/99 1.82G 0.03222 0.02623 0.0008339 0.05928 45 416 0.06583 0.1505 0.05873 0.02629 0.06751 0.02481 0.03065
+52/99 1.82G 0.03218 0.02625 0.0007168 0.05915 40 416 0.07037 0.172 0.07119 0.033 0.06663 0.02512 0.03217
+53/99 1.82G 0.03177 0.0261 0.0006838 0.05856 29 416 0.04996 0.1355 0.0561 0.02383 0.06828 0.02497 0.03567
+54/99 1.82G 0.03157 0.02546 0.0007498 0.05779 29 416 0.0804 0.1822 0.07 0.02763 0.0658 0.02441 0.03856
+55/99 1.82G 0.03091 0.02491 0.0007254 0.05655 29 416 0.1062 0.2268 0.09232 0.03424 0.06595 0.02258 0.02559
+56/99 1.82G 0.03122 0.02539 0.0007681 0.05737 32 416 0.05518 0.1677 0.05497 0.02257 0.06654 0.02742 0.03168
+57/99 1.82G 0.03099 0.02474 0.0006948 0.05642 28 416 0.08698 0.1575 0.0484 0.01746 0.06675 0.02576 0.03131
+58/99 1.82G 0.03033 0.02465 0.0008397 0.05583 35 416 0.08594 0.165 0.06749 0.02868 0.06517 0.0262 0.02958
+59/99 1.82G 0.03043 0.02477 0.0007128 0.05591 28 416 0.05579 0.1994 0.06667 0.02366 0.06477 0.0252 0.03023
+60/99 1.82G 0.02964 0.0239 0.0006062 0.05415 30 416 0.08714 0.1887 0.07176 0.02187 0.06539 0.0253 0.02587
+61/99 1.82G 0.02992 0.02428 0.0008142 0.05502 28 416 0.06426 0.1666 0.07668 0.03018 0.06625 0.02599 0.03827
+62/99 1.82G 0.02937 0.02416 0.0007393 0.05427 31 416 0.06483 0.1672 0.07641 0.03143 0.06552 0.02601 0.02674
+63/99 1.82G 0.02954 0.02407 0.0006106 0.05422 37 416 0.06024 0.136 0.05373 0.02556 0.06761 0.02717 0.03878
+64/99 1.82G 0.02919 0.02371 0.0006706 0.05357 52 416 0.07542 0.1924 0.07466 0.03114 0.06665 0.02434 0.02677
+65/99 1.82G 0.02905 0.02305 0.000703 0.0528 32 416 0.05857 0.1688 0.05535 0.02363 0.06605 0.02567 0.02736
+66/99 1.82G 0.02866 0.02315 0.0005723 0.05238 30 416 0.06874 0.1924 0.06045 0.02389 0.06684 0.02559 0.03497
+67/99 1.82G 0.02882 0.02312 0.000545 0.05248 34 416 0.07738 0.1747 0.06953 0.0303 0.06643 0.02542 0.03153
+68/99 1.82G 0.02845 0.02317 0.0006277 0.05226 34 416 0.09113 0.1677 0.06334 0.02466 0.06758 0.02589 0.05093
+69/99 1.82G 0.02839 0.02344 0.0005977 0.05243 33 416 0.06885 0.1731 0.05824 0.01981 0.06547 0.02576 0.03899
+70/99 1.82G 0.02791 0.0228 0.000616 0.05133 34 416 0.08296 0.2252 0.09107 0.03721 0.06375 0.02519 0.02305
+71/99 1.82G 0.02783 0.02251 0.0006301 0.05098 33 416 0.107 0.1629 0.07936 0.03225 0.06665 0.02669 0.03384
+72/99 1.82G 0.02764 0.02298 0.0006631 0.05128 30 416 0.06204 0.1672 0.06169 0.02566 0.06585 0.02572 0.03274
+73/99 1.82G 0.02749 0.02266 0.0006699 0.05082 32 416 0.06946 0.179 0.06772 0.02775 0.06658 0.02605 0.0337
+74/99 1.82G 0.02697 0.02259 0.0006182 0.05018 31 416 0.08451 0.1838 0.07818 0.03035 0.06688 0.0253 0.03588
+75/99 1.82G 0.02719 0.02252 0.0005216 0.05023 31 416 0.07323 0.1612 0.07149 0.02854 0.06807 0.02594 0.03716
+76/99 1.82G 0.02725 0.02234 0.0004887 0.05008 31 416 0.08067 0.1634 0.07874 0.03363 0.06735 0.02562 0.03068
+77/99 1.82G 0.02745 0.022 0.0005644 0.05002 30 416 0.06627 0.1876 0.07775 0.03286 0.06695 0.02587 0.03243
+78/99 1.82G 0.02653 0.02192 0.0005903 0.04904 36 416 0.09917 0.1812 0.07785 0.02913 0.06707 0.02513 0.03151
+79/99 1.82G 0.02684 0.02257 0.000562 0.04997 39 416 0.05564 0.1526 0.05876 0.02633 0.06746 0.02618 0.03566
+80/99 1.82G 0.02673 0.02188 0.0005288 0.04914 30 416 0.08089 0.1859 0.07233 0.02725 0.0666 0.02622 0.03448
+81/99 1.82G 0.02646 0.02194 0.0005107 0.04892 34 416 0.06028 0.1698 0.06026 0.02502 0.06615 0.0269 0.03563
+82/99 1.82G 0.02617 0.02159 0.000595 0.04835 29 416 0.06385 0.1585 0.06293 0.02799 0.06571 0.02701 0.03528
+83/99 1.82G 0.02604 0.02124 0.0004056 0.04768 28 416 0.06987 0.187 0.06717 0.02929 0.06616 0.02629 0.03006
+84/99 1.82G 0.02573 0.02162 0.0005842 0.04793 33 416 0.07749 0.179 0.0737 0.02904 0.06591 0.02666 0.03001
+85/99 1.82G 0.02559 0.02099 0.0004245 0.047 33 416 0.06499 0.1213 0.06146 0.02877 0.06672 0.02698 0.03723
+86/99 1.82G 0.02543 0.02096 0.0005875 0.04697 34 416 0.07738 0.1752 0.07124 0.02814 0.06586 0.02577 0.03049
+87/99 1.82G 0.02556 0.02081 0.0005253 0.0469 35 416 0.08506 0.1704 0.06562 0.02325 0.06611 0.02683 0.0293
+88/99 1.82G 0.02565 0.02129 0.0005002 0.04744 25 416 0.1032 0.2139 0.07841 0.03021 0.06443 0.027 0.02538
+89/99 1.82G 0.02527 0.02095 0.0004493 0.04668 27 416 0.07521 0.129 0.07 0.0293 0.06717 0.02677 0.037
+90/99 1.82G 0.02508 0.02057 0.0005487 0.0462 38 416 0.0543 0.1317 0.05963 0.02459 0.06709 0.02688 0.03416
+91/99 1.82G 0.02524 0.02082 0.0004508 0.04651 27 416 0.06223 0.1564 0.06508 0.02967 0.06637 0.02682 0.03376
+92/99 1.82G 0.02519 0.02103 0.000424 0.04665 49 416 0.0764 0.1902 0.07643 0.03485 0.0672 0.02529 0.02728
+93/99 1.82G 0.02532 0.02064 0.0005462 0.0465 40 416 0.06959 0.1924 0.07251 0.02783 0.06521 0.02599 0.02777
+94/99 1.82G 0.02496 0.02044 0.000513 0.04591 33 416 0.0755 0.1505 0.05635 0.0219 0.06826 0.02616 0.03474
+95/99 1.82G 0.02479 0.02023 0.0004699 0.04549 37 416 0.08039 0.1419 0.05785 0.02388 0.06745 0.02636 0.03429
+96/99 1.82G 0.02471 0.02026 0.0004353 0.04541 27 416 0.0647 0.114 0.05691 0.02578 0.0689 0.02652 0.03532
+97/99 1.82G 0.02477 0.0202 0.000512 0.04548 39 416 0.07329 0.1859 0.06612 0.0242 0.06666 0.02636 0.03026
+98/99 1.82G 0.02466 0.02006 0.0005136 0.04523 28 416 0.08644 0.1887 0.06246 0.02416 0.06699 0.02501 0.0358
+99/99 1.82G 0.02438 0.02024 0.0004354 0.04505 28 416 0.0765 0.1918 0.06221 0.0229 0.06631 0.0257 0.03321
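results.txt holds one whitespace-separated row per epoch. Matching YOLOv5 logs of this vintage, the fifteen columns should be: epoch, GPU memory, train box/obj/cls loss and their sum, targets in the last batch, image size, then P, R, mAP@0.5, mAP@0.5:0.95 and the three validation losses — treat that mapping as an assumption. A parsing sketch:

COLS = ['epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size',
        'P', 'R', 'mAP@0.5', 'mAP@0.5:0.95', 'val_box', 'val_obj', 'val_cls']  # assumed order

with open('runs/exp1_yolov5s_results/results.txt') as f:
    rows = [dict(zip(COLS, line.split())) for line in f if line.strip()]

best = max(rows, key=lambda r: float(r['mAP@0.5']))
print(best['epoch'], best['mAP@0.5'])  # 55/99 0.09232 for the run above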
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/test_batch0_gt.jpg
ADDED
metadata/predictor_yolo_detector/runs/exp1_yolov5s_results/test_batch0_pred.jpg
ADDED