Commit 5b63380
Parent(s): 5a4c0fb

Associative Tracker

Files changed:
- dataloader.py +1 -1
- inference.py +34 -4
- lib/fish_eye/associative.py +246 -0
- scripts/track_detection.py +3 -0
dataloader.py
CHANGED
@@ -117,7 +117,7 @@ class YOLOFrameDataset(Dataset):
         self.shape = np.ceil(np.array(shape) * img_size / stride + pad).astype(int) * stride
 
         self.batch_indices = []
-        for i in range(0,n,batch_size):
+        for i in range(0, n, batch_size):
             self.batch_indices.append((i, min(n, i+batch_size)))
 
 
inference.py
CHANGED
@@ -239,7 +239,7 @@ def format_predictions(image_shapes, outputs, width, height, gp=None, batch_size
 
     return all_preds, real_width, real_height
 
-def do_confidence_boost(inference, safe_preds, gp=None, batch_size=BATCH_SIZE,
+def do_confidence_boost(inference, safe_preds, gp=None, batch_size=BATCH_SIZE, boost_power=1, boost_decay=1, verbose=True):
     """
     Args:
         frames_dir: a directory containing frames to be evaluated
@@ -254,7 +254,8 @@ def do_confidence_boost(inference, safe_preds, gp=None, batch_size=BATCH_SIZE, c
 
 
     boost_cutoff = 0.01
-    boost_range = math.floor(math.sqrt(1/
+    boost_range = math.floor(math.sqrt(1/boost_decay * math.log(boost_power / boost_cutoff)))
+    boost_scale = boost_power * math.exp(-boost_decay)
 
     outputs = []
     with tqdm(total=len(inference), desc="Running confidence boost", ncols=0, disable=not verbose) as pbar:
@@ -286,7 +287,7 @@ def do_confidence_boost(inference, safe_preds, gp=None, batch_size=BATCH_SIZE, c
                 temp_frame = next_batch[idx - len(infer)]
 
             if temp_frame is not None:
-                boost_frame(safe_frame, temp_frame, dt,
+                boost_frame(safe_frame, temp_frame, dt, scale=boost_scale, decay=boost_decay)
 
             pbar.update(1*batch_size)
 
@@ -297,7 +298,7 @@ def boost_frame(safe_frame, base_frame, dt, power=1, decay=1):
     ious = box_iou(boxes, safe_boxes)
     score = torch.matmul(ious, safe_frame[:, 4])
     # score = iou(safe_box, base_box) * confidence(safe_box)
-    base_frame[:, 4] *= 1 + power*(score)*math.exp(-decay*dt*dt)
+    base_frame[:, 4] *= 1 + power*(score)*math.exp(-decay*(dt*dt-1))
     return base_frame
 
 def do_tracking(all_preds, image_meter_width, image_meter_height, gp=None, max_age=MAX_AGE, iou_thres=IOU_THRES, min_hits=MIN_HITS, min_length=MIN_LENGTH, verbose=True):
@@ -328,6 +329,35 @@ def do_tracking(all_preds, image_meter_width, image_meter_height, gp=None, max_a
 
     return json_data
 
+def do_associative_tracking(raw_detections, image_meter_width, image_meter_height, gp=None, max_age=MAX_AGE, iou_thres=IOU_THRES, min_hits=MIN_HITS, min_length=MIN_LENGTH, verbose=True):
+
+    if (gp): gp(0, "Tracking...")
+
+    print(len(raw_detections))
+
+    # Initialize tracker
+    clip_info = {
+        'start_frame': 0,
+        'end_frame': len(raw_detections),
+        'image_meter_width': image_meter_width,
+        'image_meter_height': image_meter_height
+    }
+    tracker = Tracker(clip_info, args={ 'max_age': max_age, 'min_hits': 0, 'iou_threshold': iou_thres}, min_hits=min_hits)
+
+    # Run tracking
+    with tqdm(total=len(all_preds), desc="Running tracking", ncols=0, disable=not verbose) as pbar:
+        for i, key in enumerate(sorted(all_preds.keys())):
+            if gp: gp(i / len(all_preds), pbar.__str__())
+            boxes = all_preds[key]
+            if boxes is not None:
+                tracker.update(boxes)
+            else:
+                tracker.update()
+            pbar.update(1)
+
+    json_data = tracker.finalize(min_length=min_length)
+
+    return json_data
 
 @patch('json.encoder.c_make_encoder', None)
 def json_dump_round_float(some_object, out_path, num_digits=4):
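For reference (this note and the snippet below are not part of the commit): assuming the per-frame boost term behaves like boost_power * exp(-boost_decay * dt**2), the new boost_range line computes the largest frame offset at which that term is still above boost_cutoff. A quick sanity check with made-up parameter values:

import math

# Illustrative values only; boost_power and boost_decay are parameters of do_confidence_boost.
boost_power, boost_decay, boost_cutoff = 1.0, 0.1, 0.01

# Same expression as the added line in the diff above.
boost_range = math.floor(math.sqrt(1/boost_decay * math.log(boost_power / boost_cutoff)))

# At dt = boost_range the decayed term is still >= boost_cutoff ...
assert boost_power * math.exp(-boost_decay * boost_range**2) >= boost_cutoff
# ... and one frame further out it has dropped below the cutoff.
assert boost_power * math.exp(-boost_decay * (boost_range + 1)**2) < boost_cutoff

print(boost_range)  # 6 for these example values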
lib/fish_eye/associative.py
ADDED
@@ -0,0 +1,246 @@
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
from filterpy.kalman import KalmanFilter
import numpy as np

def linear_assignment(cost_matrix):
  try:
    import lap
    _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
    return np.array([[y[i],i] for i in x if i >= 0]) #
  except ImportError:
    from scipy.optimize import linear_sum_assignment
    x, y = linear_sum_assignment(cost_matrix)
    return np.array(list(zip(x, y)))


def iou_batch(bb_test, bb_gt):
  """
  From SORT: Computes IOU between two bboxes in the form [l,t,w,h]
  """
  bb_gt = np.expand_dims(bb_gt, 0)
  bb_test = np.expand_dims(bb_test, 1)

  xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
  yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
  xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
  yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
  w = np.maximum(0., xx2 - xx1)
  h = np.maximum(0., yy2 - yy1)
  wh = w * h
  o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
    + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
  return(o)


def convert_bbox_to_z(bbox):
  """
  Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
    [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
    the aspect ratio
  """
  w = bbox[2] - bbox[0]
  h = bbox[3] - bbox[1]
  x = bbox[0] + w/2.
  y = bbox[1] + h/2.
  s = w * h    #scale is just area
  r = w / float(h)
  return np.array([x, y, s, r]).reshape((4, 1))


def convert_x_to_bbox(x,score=None):
  """
  Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
    [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
  """
  w = np.sqrt(x[2] * x[3])
  h = x[2] / w
  if(score==None):
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
  else:
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))


class KalmanBoxTracker(object):
  """
  This class represents the internal state of individual tracked objects observed as bbox.
  """
  count = 0
  def __init__(self,bbox):
    """
    Initialises a tracker using initial bounding box.
    """
    #define constant velocity model
    self.kf = KalmanFilter(dim_x=7, dim_z=4)
    self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
    self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])

    self.kf.R[2:,2:] *= 10.
    self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
    self.kf.P *= 10.
    self.kf.Q[-1,-1] *= 0.01
    self.kf.Q[4:,4:] *= 0.01

    self.kf.x[:4] = convert_bbox_to_z(bbox)
    self.time_since_update = 0
    self.id = KalmanBoxTracker.count
    KalmanBoxTracker.count += 1
    self.history = []
    self.hits = 0
    self.hit_streak = 0
    self.age = 0

  def update(self,bbox):
    """
    Updates the state vector with observed bbox.
    """
    self.time_since_update = 0
    self.history = []
    self.hits += 1
    self.hit_streak += 1
    self.kf.update(convert_bbox_to_z(bbox))

  def predict(self):
    """
    Advances the state vector and returns the predicted bounding box estimate.
    """
    if((self.kf.x[6]+self.kf.x[2])<=0):
      self.kf.x[6] *= 0.0
    self.kf.predict()
    self.age += 1
    if(self.time_since_update>0):
      self.hit_streak = 0
    self.time_since_update += 1
    self.history.append(convert_x_to_bbox(self.kf.x))
    return self.history[-1]

  def get_state(self):
    """
    Returns the current bounding box estimate.
    """
    return convert_x_to_bbox(self.kf.x)


def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
  """
  Assigns detections to tracked object (both represented as bounding boxes)

  Returns 3 lists of matches, unmatched_detections and unmatched_trackers
  """
  if(len(trackers)==0):
    return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)

  iou_matrix = iou_batch(detections, trackers)

  if min(iou_matrix.shape) > 0:
    a = (iou_matrix > iou_threshold).astype(np.int32)
    if a.sum(1).max() == 1 and a.sum(0).max() == 1:
      matched_indices = np.stack(np.where(a), axis=1)
    else:
      matched_indices = linear_assignment(-iou_matrix)
  else:
    matched_indices = np.empty(shape=(0,2))

  unmatched_detections = []
  for d, _ in enumerate(detections):
    if(d not in matched_indices[:,0]):
      unmatched_detections.append(d)
  unmatched_trackers = []
  for t, _ in enumerate(trackers):
    if(t not in matched_indices[:,1]):
      unmatched_trackers.append(t)

  #filter out matched with low IOU
  matches = []
  for m in matched_indices:
    if(iou_matrix[m[0], m[1]]<iou_threshold):
      unmatched_detections.append(m[0])
      unmatched_trackers.append(m[1])
    else:
      matches.append(m.reshape(1,2))
  if(len(matches)==0):
    matches = np.empty((0,2),dtype=int)
  else:
    matches = np.concatenate(matches,axis=0)

  return matches, np.array(unmatched_detections), np.array(unmatched_trackers)


class Associate(object):
  def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
    """
    Sets key parameters for SORT
    """
    self.max_age = max_age
    self.min_hits = min_hits
    self.iou_threshold = iou_threshold
    self.trackers = []
    self.frame_count = 0

  def update(self, dets=(np.empty((0, 5)), np.empty((0, 5)))):
    """
    Params:
      dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
    Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
    Returns the a similar array, where the last column is the object ID.

    NOTE: The number of objects returned may differ from the number of detections provided.
    """
    self.frame_count += 1
    # get predicted locations from existing trackers.

    low_dets = dets[0]
    high_dets = dets[1]

    trks = np.zeros((len(self.trackers), 5))
    to_del = []
    ret = []
    for t, trk in enumerate(trks):
      pos = self.trackers[t].predict()[0]
      trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
      if np.any(np.isnan(pos)):
        to_del.append(t)
    trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
    for t in reversed(to_del):
      self.trackers.pop(t)
    high_matched, unmatched_high_dets, unmatched_trks = associate_detections_to_trackers(high_dets, trks, self.iou_threshold)

    low_matched, unmatched_low_dets, unmatched_trks = associate_detections_to_trackers(low_dets, unmatched_trks, self.iou_threshold)

    # update matched trackers with assigned detections
    for m in high_matched:
      self.trackers[m[1]].update(high_dets[m[0], :])
    for m in low_matched:
      self.trackers[m[1]].update(low_dets[m[0], :])

    # create and initialise new trackers for unmatched detections
    for i in unmatched_high_dets:
      trk = KalmanBoxTracker(high_dets[i,:])
      self.trackers.append(trk)
    i = len(self.trackers)
    for trk in reversed(self.trackers):
      d = trk.get_state()[0]
      if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
        ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
      i -= 1
      # remove dead tracklet
      if(trk.time_since_update > self.max_age):
        self.trackers.pop(i)
    if(len(ret)>0):
      return np.concatenate(ret)
    return np.empty((0,5))
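The added module is essentially SORT with a two-stage association: Associate.update() takes a (low_confidence, high_confidence) pair of detection arrays, matches the high-confidence boxes to the existing trackers first, and only then considers the low-confidence ones. The matching itself is done by the helpers above; below is a minimal sketch of how they behave, with made-up boxes, assuming the repository root is on PYTHONPATH (not taken from the commit's own scripts):

import numpy as np
from lib.fish_eye.associative import iou_batch, associate_detections_to_trackers

# Two detections and two predicted tracker boxes, both as [x1, y1, x2, y2, score].
dets = np.array([[10., 10., 50., 50., 0.9],
                 [60., 60., 80., 80., 0.8]])
trks = np.array([[12., 11., 52., 51., 0.0],
                 [200., 200., 220., 220., 0.0]])

print(iou_batch(dets, trks))  # pairwise IOU matrix, shape (2, 2)

matches, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, iou_threshold=0.3)
print(matches)         # [[0 0]]  detection 0 matched to tracker 0
print(unmatched_dets)  # [1]      unmatched high-confidence detections start new KalmanBoxTracker instances in Associate.update()
print(unmatched_trks)  # [1]      unmatched trackers age and are dropped once time_since_update > max_age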
scripts/track_detection.py
CHANGED
@@ -110,6 +110,9 @@ def track(in_loc_dir, out_loc_dir, metadata_path, seq, config, verbose):
 
     all_preds, real_width, real_height = format_predictions(image_shapes, outputs, width, height, verbose=verbose)
 
+    print(len(all_preds))
+    print(all_preds[0][0])
+
     results = do_tracking(all_preds, image_meter_width, image_meter_height, min_length=config['min_length'], max_age=config['max_age'], iou_thres=config['iou_threshold'], min_hits=config['min_hits'], verbose=verbose)
 
     mot_rows = []