Commit 809371f
Parent(s): 59bc184

Tracking script

Files changed:
- scripts/detect_frames.py +15 -63
- scripts/full_detect_frames.py +139 -0
- scripts/track_detection.py +141 -0
- scripts/track_eval.py +47 -0
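The commit splits the old single-pass detect_frames.py into a detection stage that caches raw model output per sequence (pred.json) and a tracking stage that replays it, so tracker settings can be swept without re-running the detector. A sketch of how the two new stages would be invoked, inferred from the argparse definitions below; the directory paths are illustrative, not from the commit:

    # Stage 1: cache raw detections to <output>/<loc>/<seq>/pred.json
    python scripts/detect_frames.py --frames ../frames/images --output ../frames/detections --weights models/v5m_896_300best.pt

    # Stage 2: turn cached detections into MOT-format track files
    python scripts/track_detection.py --detections ../frames/detections --metadata ../frames/metadata --output ../frames/result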
scripts/detect_frames.py
CHANGED

@@ -12,7 +12,7 @@ from tqdm import tqdm
 import numpy as np
 
 
-def main(args, config={}, verbose=True):
+def main(args, verbose=True):
     """
     Main processing task to be run in gradio
     - Writes aris frames to dirname(filepath)/frames/{i}.jpg
@@ -28,16 +28,6 @@ def main(args, config={}, verbose=True):
     print("In task...")
     print("Cuda available in task?", torch.cuda.is_available())
 
-    # setup config
-    if "conf_threshold" not in config: config['conf_threshold'] = 0.001
-    if "nms_iou" not in config: config['nms_iou'] = 0.6
-    if "min_length" not in config: config['min_length'] = 0.3
-    if "max_age" not in config: config['max_age'] = 20
-    if "iou_threshold" not in config: config['iou_threshold'] = 0.01
-    if "min_hits" not in config: config['min_hits'] = 11
-
-    print(config)
-
     model, device = setup_model(args.weights)
 
     locations = [
@@ -50,11 +40,11 @@ def main(args, config={}, verbose=True):
         print(in_loc_dir)
         print(out_loc_dir)
 
-        detect_location(in_loc_dir, out_loc_dir, config, model, device, verbose)
+        detect_location(in_loc_dir, out_loc_dir, model, device, verbose)
 
 
 
-def detect_location(in_loc_dir, out_loc_dir, config, model, device, verbose):
+def detect_location(in_loc_dir, out_loc_dir, model, device, verbose):
 
     seq_list = os.listdir(in_loc_dir)
 
@@ -69,63 +59,25 @@ def detect_location(in_loc_dir, out_loc_dir, config, model, device, verbose):
         out_seq_dir = os.path.join(out_loc_dir, seq)
         os.makedirs(out_seq_dir, exist_ok=True)
 
-        detect_seq(in_seq_dir, out_seq_dir, config, model, device, verbose)
+        detect(in_seq_dir, out_seq_dir, model, device, verbose)
 
-def detect_seq(in_seq_dir, out_seq_dir, config, model, device, verbose):
-
-    ann_list = []
-    frame_list = detect(in_seq_dir, config, model, device, verbose)
-    for frame in frame_list:
-        if frame is not None:
-            for ann in frame:
-                ann_list.append({
-                    'image_id': ann[5],
-                    'category_id': 0,
-                    'bbox': [ann[0], ann[1], ann[2] - ann[0], ann[3] - ann[1]],
-                    'score': ann[4]
-                })
-    result = json.dumps(ann_list)
-
-    with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
-        f.write(result)
-
-def detect(in_dir, config, model, device, verbose):
-
-    #progress_log = lambda p, m: 0
+def detect(in_seq_dir, out_seq_dir, model, device, verbose):
 
     # create dataloader
-    dataloader = create_dataloader_frames_only(in_dir)
+    dataloader = create_dataloader_frames_only(in_seq_dir)
 
     inference, image_shapes, width, height = do_detection(dataloader, model, device, verbose=verbose)
 
-
-    outputs = do_suppression(inference, conf_thres=config['conf_threshold'], iou_thres=config['nms_iou'], verbose=verbose)
-
-    file_names = dataloader.files
-    frame_list = []
-    for batch_i, batch in enumerate(outputs):
-
-        batch_shapes = image_shapes[batch_i]
-
-        # Format results
-        for si, pred in enumerate(batch):
-            (image_shape, original_shape) = batch_shapes[si]
-
-            # Clip boxes to image bounds and resize to input shape
-            clip_boxes(pred, (height, width))
-            boxes = pred[:, :4].clone()  # xyxy
-            confs = pred[:, 4].clone().tolist()
-            scale_boxes(image_shape, boxes, original_shape[0], original_shape[1])  # to original shape
-
-            frame = [ [*bb, conf] for bb, conf in zip(boxes.tolist(), confs) ]
-
-            file_name = file_names[batch_i*32 + si]
-            for ann in frame:
-                ann.append(file_name)
-
-            frame_list.append(frame)
-
-    return frame_list
+    json_obj = {
+        'inference': inference,
+        'image_shapes': image_shapes,
+        'width': width,
+        'height': height
+    }
+    json_string = json.dumps(json_obj)
+
+    with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
+        f.write(json_string)
 
 def argument_parser():
     parser = argparse.ArgumentParser()
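One caveat with the new detect(): do_detection's outputs now go straight into json.dumps. The commit does not show what do_detection returns, but if inference or image_shapes hold torch tensors or numpy arrays, json.dumps raises TypeError; a conversion pass along these lines would be needed first. A sketch under that assumption, not the repo's actual code:

    import json
    import numpy as np
    import torch

    def to_jsonable(obj):
        # Hypothetical helper (not in the repo): recursively convert
        # tensors/arrays into plain lists that json.dumps accepts.
        if isinstance(obj, (torch.Tensor, np.ndarray)):
            return obj.tolist()
        if isinstance(obj, (list, tuple)):
            return [to_jsonable(o) for o in obj]
        if isinstance(obj, dict):
            return {k: to_jsonable(v) for k, v in obj.items()}
        return obj

    json_string = json.dumps(to_jsonable({'width': torch.tensor(100), 'height': 50}))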
scripts/full_detect_frames.py
ADDED
@@ -0,0 +1,139 @@

import project_path
from lib.yolov5.utils.general import clip_boxes, scale_boxes
import argparse
from datetime import datetime
import torch
import os
from dataloader import create_dataloader_frames_only
from inference import setup_model, do_detection, do_suppression, do_confidence_boost, format_predictions, do_tracking
from visualizer import generate_video_batches
import json
from tqdm import tqdm
import numpy as np


def main(args, config={}, verbose=True):
    """
    Main processing task to be run in gradio
    - Writes aris frames to dirname(filepath)/frames/{i}.jpg
    - Writes json output to dirname(filepath)/{filename}_results.json
    - Writes manual marking to dirname(filepath)/{filename}_marking.txt
    - Writes video output to dirname(filepath)/{filename}_results.mp4
    - Zips all results to dirname(filepath)/{filename}_results.zip
    Args:
        filepath (str): path to aris file

    TODO: Separate into subtasks in different queues; have a GPU-only queue.
    """
    print("In task...")
    print("Cuda available in task?", torch.cuda.is_available())

    # setup config
    if "conf_threshold" not in config: config['conf_threshold'] = 0.001
    if "nms_iou" not in config: config['nms_iou'] = 0.6
    if "min_length" not in config: config['min_length'] = 0.3
    if "max_age" not in config: config['max_age'] = 20
    if "iou_threshold" not in config: config['iou_threshold'] = 0.01
    if "min_hits" not in config: config['min_hits'] = 11

    print(config)

    model, device = setup_model(args.weights)

    locations = [
        "kenai-val"
    ]
    for loc in locations:

        in_loc_dir = os.path.join(args.frames, loc)
        out_loc_dir = os.path.join(args.output, loc)
        print(in_loc_dir)
        print(out_loc_dir)

        detect_location(in_loc_dir, out_loc_dir, config, model, device, verbose)



def detect_location(in_loc_dir, out_loc_dir, config, model, device, verbose):

    seq_list = os.listdir(in_loc_dir)

    with tqdm(total=len(seq_list), desc="...", ncols=0) as pbar:
        for seq in seq_list:

            pbar.update(1)
            if (seq.startswith(".")): continue
            pbar.set_description("Processing " + seq)

            in_seq_dir = os.path.join(in_loc_dir, seq)
            out_seq_dir = os.path.join(out_loc_dir, seq)
            os.makedirs(out_seq_dir, exist_ok=True)

            detect_seq(in_seq_dir, out_seq_dir, config, model, device, verbose)

def detect_seq(in_seq_dir, out_seq_dir, config, model, device, verbose):

    ann_list = []
    frame_list = detect(in_seq_dir, config, model, device, verbose)
    for frame in frame_list:
        if frame is not None:
            for ann in frame:
                ann_list.append({
                    'image_id': ann[5],
                    'category_id': 0,
                    'bbox': [ann[0], ann[1], ann[2] - ann[0], ann[3] - ann[1]],
                    'score': ann[4]
                })
    result = json.dumps(ann_list)

    with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
        f.write(result)

def detect(in_dir, config, model, device, verbose):

    #progress_log = lambda p, m: 0

    # create dataloader
    dataloader = create_dataloader_frames_only(in_dir)

    inference, image_shapes, width, height = do_detection(dataloader, model, device, verbose=verbose)

    outputs = do_suppression(inference, conf_thres=config['conf_threshold'], iou_thres=config['nms_iou'], verbose=verbose)

    file_names = dataloader.files
    frame_list = []
    for batch_i, batch in enumerate(outputs):

        batch_shapes = image_shapes[batch_i]

        # Format results
        for si, pred in enumerate(batch):
            (image_shape, original_shape) = batch_shapes[si]

            # Clip boxes to image bounds and resize to input shape
            clip_boxes(pred, (height, width))
            boxes = pred[:, :4].clone()  # xyxy
            confs = pred[:, 4].clone().tolist()
            scale_boxes(image_shape, boxes, original_shape[0], original_shape[1])  # to original shape

            frame = [ [*bb, conf] for bb, conf in zip(boxes.tolist(), confs) ]

            file_name = file_names[batch_i*32 + si]
            for ann in frame:
                ann.append(file_name)

            frame_list.append(frame)

    return frame_list

def argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames", required=True, help="Path to frame directory. Required.")
    parser.add_argument("--output", required=True, help="Path to output directory. Required.")
    parser.add_argument("--weights", default='models/v5m_896_300best.pt', help="Path to saved YOLOv5 weights. Default: ../models/v5m_896_300best.pt")
    return parser

if __name__ == "__main__":
    args = argument_parser().parse_args()
    main(args)
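detect_seq converts each [x1, y1, x2, y2, score, image_id] row produced by detect() into a COCO-style result entry; COCO bboxes are [x, y, width, height], hence the ann[2] - ann[0] and ann[3] - ann[1] subtractions. A worked example with invented values:

    ann = [10.0, 20.0, 50.0, 80.0, 0.93, "frame_0001.jpg"]  # xyxy box, score, file name

    coco_ann = {
        'image_id': ann[5],
        'category_id': 0,
        'bbox': [ann[0], ann[1], ann[2] - ann[0], ann[3] - ann[1]],  # -> [10.0, 20.0, 40.0, 60.0]
        'score': ann[4],
    }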
scripts/track_detection.py
ADDED
@@ -0,0 +1,141 @@

import project_path
from lib.yolov5.utils.general import clip_boxes, scale_boxes
import argparse
from datetime import datetime
import torch
import os
from dataloader import create_dataloader_frames_only
from inference import setup_model, do_detection, do_suppression, do_confidence_boost, format_predictions, do_tracking
from visualizer import generate_video_batches
import json
from tqdm import tqdm
import numpy as np


def main(args, config={}, verbose=True):
    """
    Main processing task to be run in gradio
    - Writes aris frames to dirname(filepath)/frames/{i}.jpg
    - Writes json output to dirname(filepath)/{filename}_results.json
    - Writes manual marking to dirname(filepath)/{filename}_marking.txt
    - Writes video output to dirname(filepath)/{filename}_results.mp4
    - Zips all results to dirname(filepath)/{filename}_results.zip
    Args:
        filepath (str): path to aris file

    TODO: Separate into subtasks in different queues; have a GPU-only queue.
    """

    # setup config
    if "conf_threshold" not in config: config['conf_threshold'] = 0.001
    if "nms_iou" not in config: config['nms_iou'] = 0.6
    if "min_length" not in config: config['min_length'] = 0.3
    if "max_age" not in config: config['max_age'] = 20
    if "iou_threshold" not in config: config['iou_threshold'] = 0.01
    if "min_hits" not in config: config['min_hits'] = 11
    if "use_associative" not in config: config['use_associative'] = False

    print(config)


    locations = [
        "kenai-val"
    ]
    for loc in locations:

        in_loc_dir = os.path.join(args.detections, loc)
        out_loc_dir = os.path.join(args.output, "tracker", "data", loc)
        os.makedirs(out_loc_dir, exist_ok=True)
        metadata_path = os.path.join(args.metadata, loc + ".json")
        print(in_loc_dir)
        print(out_loc_dir)
        print(metadata_path)

        track_location(in_loc_dir, out_loc_dir, metadata_path, config, verbose)



def track_location(in_loc_dir, out_loc_dir, metadata_path, config, verbose):

    seq_list = os.listdir(in_loc_dir)

    with tqdm(total=len(seq_list), desc="...", ncols=0) as pbar:
        for seq in seq_list:

            pbar.update(1)
            if (seq.startswith(".")): continue
            pbar.set_description("Processing " + seq)

            in_path = os.path.join(in_loc_dir, seq, 'pred.json')
            out_path = os.path.join(out_loc_dir, seq + ".txt")

            track(in_path, out_path, metadata_path, seq, config, verbose)

def track(in_path, out_path, metadata_path, seq_name, config, verbose):

    # read cached detection output
    with open(in_path, 'r') as f:
        detection = json.load(f)

    inference = detection['inference']
    image_shapes = detection['image_shapes']
    width = detection['width']
    height = detection['height']

    # read metadata
    image_meter_width = -1
    image_meter_height = -1
    with open(metadata_path, 'r') as f:
        json_object = json.loads(f.read())
        for seq in json_object:
            if seq['clip_name'] == seq_name:
                image_meter_width = seq['x_meter_stop'] - seq['x_meter_start']
                image_meter_height = seq['y_meter_stop'] - seq['y_meter_start']

    outputs = do_suppression(inference, conf_thres=config['conf_threshold'], iou_thres=config['nms_iou'], verbose=verbose)

    if config['use_associative']:
        do_confidence_boost(inference, outputs, verbose=verbose)

        outputs = do_suppression(inference, conf_thres=config['conf_threshold'], iou_thres=config['nms_iou'], verbose=verbose)

    all_preds, real_width, real_height = format_predictions(image_shapes, outputs, width, height)

    results = do_tracking(all_preds, image_meter_width, image_meter_height, min_length=config['min_length'], max_age=config['max_age'], iou_thres=config['iou_threshold'], min_hits=config['min_hits'], verbose=verbose)

    mot_rows = []
    for frame in results['frames']:
        for fish in frame['fish']:
            bbox = fish['bbox']
            row = []
            left = bbox[0]*real_width  # bbox[0] is the x-min of the xyxy box
            top = bbox[1]*real_height
            w = bbox[2]*real_width - bbox[0]*real_width
            h = bbox[3]*real_height - bbox[1]*real_height

            row.append(str(frame['frame_num'] + 1))
            row.append(str(fish['fish_id'] + 1))
            row.append(str(int(left)))
            row.append(str(int(top)))
            row.append(str(int(w)))
            row.append(str(int(h)))
            row.append("-1")
            row.append("-1")
            row.append("-1")
            row.append("-1")
            mot_rows.append(",".join(row))

    mot_text = "\n".join(mot_rows)

    with open(out_path, 'w') as f:
        f.write(mot_text)

def argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--detections", required=True, help="Path to cached detection directory. Required.")
    parser.add_argument("--metadata", required=True, help="Path to metadata directory. Required.")  # consumed in main() via args.metadata
    parser.add_argument("--output", required=True, help="Path to output directory. Required.")
    return parser

if __name__ == "__main__":
    args = argument_parser().parse_args()
    main(args)
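The rows track() writes follow the MOTChallenge text format — frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z — with the last four fields fixed at -1 because confidence and 3D world coordinates are unused here. Assuming format_predictions returns normalized xyxy boxes (the commit does not show its internals), a fish with bbox [0.25, 0.10, 0.50, 0.30], real_width=400, real_height=240, frame_num=0 and fish_id=6 (all values invented) would produce:

    1,7,100,24,100,48,-1,-1,-1,-1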
scripts/track_eval.py
ADDED
@@ -0,0 +1,47 @@

import project_path
import argparse
from infer_frames import main as infer
import sys
sys.path.append('..')
sys.path.append('../caltech-fish-counting')

from evaluate import evaluate

class Object(object):
    pass

def main(args):

    # minimal stand-in for an argparse namespace
    infer_args = Object()
    infer_args.metadata = "../frames/metadata"
    infer_args.frames = "../frames/images"
    infer_args.output = "../frames/result"
    infer_args.weights = "models/v5m_896_300best.pt"

    config = {
        'conf_threshold': float(args.conf_threshold),
        'nms_iou': float(args.nms_iou),
        'min_length': float(args.min_length),
        'max_age': int(args.max_age),
        'iou_threshold': float(args.iou_threshold),
        'min_hits': int(args.min_hits)
    }

    infer(infer_args, config=config, verbose=False)

    evaluate("../frames/result_testing", "../frames/MOT", "../frames/metadata", "tracker", False)


def argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--conf_threshold", default=0.3, help="Detection confidence threshold. Default: 0.3")
    parser.add_argument("--nms_iou", default=0.3, help="NMS IoU threshold. Default: 0.3")
    parser.add_argument("--min_length", default=0.3, help="Minimum track length. Default: 0.3")
    parser.add_argument("--max_age", default=20, help="Max frames a track survives unmatched. Default: 20")
    parser.add_argument("--iou_threshold", default=0.01, help="Tracker association IoU threshold. Default: 0.01")
    parser.add_argument("--min_hits", default=11, help="Minimum hits before a track is kept. Default: 11")
    return parser

if __name__ == "__main__":
    args = argument_parser().parse_args()
    main(args)
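Because track_eval.py exposes every tracker knob as a flag, a hyperparameter sweep reduces to repeated invocations; for example (flag values illustrative):

    python scripts/track_eval.py --conf_threshold 0.05 --nms_iou 0.6 --max_age 20 --min_hits 11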