oskarastrom committed on
Commit
91c1277
·
1 Parent(s): 694b0df

new detection format

Browse files
scripts/detect_frames.py CHANGED
@@ -77,10 +77,8 @@ def detect(in_seq_dir, out_seq_dir, model, device, verbose):
77
 
78
  with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
79
  json.dump(json_obj, f)
80
- print('saved json')
81
-
82
  torch.save(inference, 'inference.pt')
83
- print('saved inference')
84
 
85
  def argument_parser():
86
  parser = argparse.ArgumentParser()
 
77
 
78
  with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
79
  json.dump(json_obj, f)
80
+
 
81
  torch.save(inference, 'inference.pt')
 
82
 
83
  def argument_parser():
84
  parser = argparse.ArgumentParser()
scripts/track_detection.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import project_path
2
  from lib.yolov5.utils.general import clip_boxes, scale_boxes
3
  import argparse
@@ -66,21 +67,27 @@ def track_location(in_loc_dir, out_loc_dir, metadata_path, config, verbose):
66
  if (seq.startswith(".")): continue
67
  pbar.set_description("Processing " + seq)
68
 
69
- in_path = os.path.join(in_loc_dir, seq, 'pred.json')
70
- out_path = os.path.join(out_loc_dir, seq + ".txt")
71
 
72
- track(in_path, out_path, metadata_path, seq, config, verbose)
73
 
74
- def track(in_path, out_path, metadata_path, seq_name, config, verbose):
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  # read detection
77
- with open(in_path, 'r') as f:
78
  detection = json.load(f)
79
- print('inference' in detection)
80
- print('image_shapes' in detection)
81
- print(detection)
82
-
83
- inference = detection['inference']
84
  image_shapes = detection['image_shapes']
85
  width = detection['width']
86
  height = detection['height']
 
1
+ from lib.yolov5.utils.torch_utils import select_device
2
  import project_path
3
  from lib.yolov5.utils.general import clip_boxes, scale_boxes
4
  import argparse
 
67
  if (seq.startswith(".")): continue
68
  pbar.set_description("Processing " + seq)
69
 
 
 
70
 
71
+ track(in_loc_dir, out_loc_dir, metadata_path, seq, config, verbose)
72
 
73
+ def track(in_loc_dir, out_loc_dir, metadata_path, seq_name, config, verbose):
74
+
75
+ json_path = os.path.join(in_loc_dir, seq, 'pred.json')
76
+ inference_path = os.path.join(in_loc_dir, seq, 'inference.pt')
77
+ out_path = os.path.join(out_loc_dir, seq + ".txt")
78
+
79
+
80
+ device_name = '0' if torch.cuda.is_available() else 'cpu'
81
+ device = select_device(device_name, batch_size=32)
82
+ inference = torch.load(inference_path, device=device)
83
+
84
+ print(type(inference))
85
+ print(len(inference))
86
+ print(type(inference[0]))
87
 
88
  # read detection
89
+ with open(json_path, 'r') as f:
90
  detection = json.load(f)
 
 
 
 
 
91
  image_shapes = detection['image_shapes']
92
  width = detection['width']
93
  height = detection['height']