Commit b64c69e · 1 Parent(s): eb7b8ee
Upload app.py
app.py
ADDED
@@ -0,0 +1,146 @@
import cv2
import torch
import numpy as np
import math
from super_gradients.training import models
from super_gradients.training.processing import (DetectionCenterPadding, StandardizeImage,
                                                 ImagePermute, ComposeProcessing,
                                                 DetectionLongestMaxSizeRescale)
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import streamlit as st
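# Note (an assumption, not stated in this commit): the two deep_sort_pytorch
# imports expect that package to be vendored locally, i.e. a deep_sort_pytorch/
# folder next to app.py containing utils/parser.py, deep_sort/, and
# configs/deep_sort.yaml (that config file is loaded further below).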
file_path = 'coco-labels-paper.txt'
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)

# Load the COCO class names, one per line.
names = []
with open(file_path, 'r') as file:
    for line in file:
        names.append(line.strip())

st.header(":hand: Welcome to YOLO-NAS Object Detection and Tracking")
st.info("""
This app uses the cutting-edge YOLO-NAS algorithm to detect objects in real-time video streams.
But that's not all: it also employs the powerful DeepSORT algorithm to track these objects,
providing you with seamless tracking capabilities. Easily upload a video feed,
and watch as our app identifies and tracks objects with precision.
It's simple, efficient, and ready to help you monitor and analyze moving objects effortlessly!
""")
with st.sidebar:
    device_name = st.selectbox("Device:", ["cpu", "cuda"])
    if device_name == 'cuda':
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    source_name = st.selectbox("Select your source feed:", ["URL"])
    conf = st.slider("Select confidence threshold value:", min_value=0.1, max_value=1.0, value=0.25)
    iou = st.slider("Select Intersection over Union (IoU) value:", min_value=0.1, max_value=1.0, value=0.5)
# model = models.get('yolo_nas_s', num_classes=len(names),
#                    checkpoint_path="yolo_nas_s_coco.pth").to(device)
if source_name == "URL":
    source = st.text_input("Input your URL camera feed and press Enter, e.g. http://IP:8080/video")
    cap = cv2.VideoCapture(source)
model = models.get('yolo_nas_s', pretrained_weights="coco").to(device)
model.set_dataset_processing_params(
    class_names=names,
    image_processor=ComposeProcessing(
        [DetectionLongestMaxSizeRescale(output_shape=(636, 636)),
         DetectionCenterPadding(output_shape=(640, 640),
                                pad_value=114),
         StandardizeImage(max_value=255.0),
         ImagePermute(permutation=(2, 0, 1))]),
    iou=iou, conf=conf)
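# The processing pipeline above mirrors the model's COCO preprocessing:
# rescale the longest image side to 636 px, center-pad to 640x640 with the
# gray value 114, scale pixels to [0, 1], and permute HWC -> CHW for PyTorch.
# The sidebar's conf and iou values are applied at prediction time as the
# score threshold and the NMS IoU threshold.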
cfg_deep = get_config()
cfg_deep.merge_from_file("deep_sort_pytorch/configs/deep_sort.yaml")
deepsort = DeepSort(cfg_deep.DEEPSORT.REID_CKPT,
                    max_dist=cfg_deep.DEEPSORT.MAX_DIST, min_confidence=cfg_deep.DEEPSORT.MIN_CONFIDENCE,
                    nms_max_overlap=cfg_deep.DEEPSORT.NMS_MAX_OVERLAP,
                    max_iou_distance=cfg_deep.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg_deep.DEEPSORT.MAX_AGE, n_init=cfg_deep.DEEPSORT.N_INIT,
                    nn_budget=cfg_deep.DEEPSORT.NN_BUDGET,
                    use_cuda=False)
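# DeepSort consumes center-x/center-y/width/height boxes plus per-detection
# confidences and class ids (built in the loop below). In this deep_sort_pytorch
# fork, each row returned by deepsort.update() holds the tracked box in its
# first four columns, the track id in the second-to-last column, and the class
# id in the last one, which is how the outputs are unpacked further down.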
def compute_color_for_labels(label):
    """
    Simple function that returns a fixed color depending on the class.
    """
    if label == 0:    # person
        color = (85, 45, 255)
    elif label == 2:  # car
        color = (222, 82, 175)
    elif label == 3:  # motorbike
        color = (0, 204, 255)
    elif label == 5:  # bus
        color = (0, 149, 255)
    else:
        color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
    return tuple(color)
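# For example, compute_color_for_labels(2) returns the fixed car color
# (222, 82, 175); any class without a hard-coded entry gets a deterministic
# pseudo-random color derived from the palette constants defined above.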
def draw_boxes(img, bbox, identities=None, categories=None, names=None, offset=(0, 0)):
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(c) for c in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]  # bug fix: the original added offset[0] to y1/y2 as well
        y2 += offset[1]
        cat = int(categories[i]) if categories is not None else 0
        id = int(identities[i]) if identities is not None else 0
        cv2.rectangle(img, (x1, y1), (x2, y2), color=compute_color_for_labels(cat),
                      thickness=2, lineType=cv2.LINE_AA)
        label = str(id) + ":" + names[cat]
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1 / 2, thickness=1)[0]
        c2 = x1 + t_size[0], y1 - t_size[1] - 3
        cv2.rectangle(img, (x1, y1), c2, color=compute_color_for_labels(cat),
                      thickness=-1, lineType=cv2.LINE_AA)
        cv2.putText(img, label, (x1, y1 - 2), cv2.FONT_HERSHEY_SIMPLEX, 1 / 2,
                    [255, 255, 255], thickness=1, lineType=cv2.LINE_AA)
    return img
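# draw_boxes expects pixel-space x1/y1/x2/y2 boxes (as produced by
# deepsort.update) and draws, per track, a class-colored rectangle plus a
# filled "id:class_name" label strip above its top-left corner.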
if st.button("Start Detection and Tracking"):
    frame_window = st.image([])
    while True:
        xywh_bboxs = []
        confs = []
        oids = []
        ret, frame = cap.read()
        if ret:
            result = list(model.predict(frame))[0]
            bbox_xyxys = result.prediction.bboxes_xyxy.tolist()
            confidences = result.prediction.confidence
            labels = result.prediction.labels.tolist()
            for (bbox_xyxy, confidence, cls) in zip(bbox_xyxys, confidences, labels):
                bbox = np.array(bbox_xyxy)
                x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
                # Round the confidence up to two decimals; renamed so the sidebar
                # threshold `conf` is no longer shadowed.
                conf_score = math.ceil(confidence * 100) / 100
                cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
                bbox_width = abs(x1 - x2)
                bbox_height = abs(y1 - y2)
                xywh_bboxs.append([cx, cy, bbox_width, bbox_height])
                confs.append(conf_score)
                oids.append(int(cls))
            if xywh_bboxs:  # bug fix: deepsort.update fails on an empty tensor
                xywhs = torch.tensor(xywh_bboxs)
                confss = torch.tensor(confs)
                outputs = deepsort.update(xywhs, confss, oids, frame)
                if len(outputs) > 0:
                    bbox_xyxy = outputs[:, :4]
                    identities = outputs[:, -2]
                    object_id = outputs[:, -1]
                    draw_boxes(frame, bbox_xyxy, identities, object_id, names=names)
            # output.write(frame)
            # cv2.imshow('Video', frame)
            # if cv2.waitKey(25) & 0xFF == ord('q'):
            #     break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Streamlit expects RGB, OpenCV delivers BGR
            frame_window.image(frame)
        else:
            break
    # out.release()
    cap.release()
    cv2.destroyAllWindows()
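To try this locally (a sketch, assuming the layout described in the comments above: coco-labels-paper.txt, the vendored deep_sort_pytorch/ package, and its ReID checkpoint alongside app.py, with streamlit, opencv-python, torch, numpy, and super-gradients installed), run "streamlit run app.py" and paste an IP-camera URL such as http://IP:8080/video into the text box.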