"""Run YOLOv5 detection over directories of extracted frames and save raw results."""
import argparse
import json
import os

import torch
from tqdm import tqdm

import project_path  # imported for its side effects (local import path setup)
from dataloader import create_dataloader_frames_only
from inference import setup_model, do_detection

def main(args, verbose=True):
    """
    Run detection over every sequence in each hard-coded location directory.

    For each sequence at <args.frames>/<location>/<sequence>/, writes to
    <args.output>/<location>/<sequence>/:
    - pred.json: image shapes and frame width/height
    - inference.pt: raw detector output from do_detection()

    Args:
        args (argparse.Namespace): parsed arguments with `frames`, `output`,
            and `weights` attributes (see argument_parser()).
        verbose (bool): forwarded to do_detection() for progress output.
    TODO: Separate into subtasks in different queues; have a GPU-only queue.
    """
    print("In task...")
    print("CUDA available in task?", torch.cuda.is_available())
    model, device = setup_model(args.weights)

    locations = ["kenai-val"]
    for loc in locations:
        in_loc_dir = os.path.join(args.frames, loc)
        out_loc_dir = os.path.join(args.output, loc)
        print(in_loc_dir)
        print(out_loc_dir)
        detect_location(in_loc_dir, out_loc_dir, model, device, verbose)

def detect_location(in_loc_dir, out_loc_dir, model, device, verbose):
    """Run detection on every sequence directory inside a location directory."""
    seq_list = os.listdir(in_loc_dir)
    with tqdm(total=len(seq_list), desc="...", ncols=0) as pbar:
        for seq in seq_list:
            pbar.update(1)
            if seq.startswith("."):  # skip hidden entries such as .DS_Store
                continue
            pbar.set_description("Processing " + seq)
            in_seq_dir = os.path.join(in_loc_dir, seq)
            out_seq_dir = os.path.join(out_loc_dir, seq)
            os.makedirs(out_seq_dir, exist_ok=True)
            detect(in_seq_dir, out_seq_dir, model, device, verbose)

def detect(in_seq_dir, out_seq_dir, model, device, verbose):
    """Run the detector on one sequence of frames and save the raw results."""
    dataloader = create_dataloader_frames_only(in_seq_dir)
    inference, image_shapes, width, height = do_detection(
        dataloader, model, device, verbose=verbose
    )

    # Save frame geometry as JSON and the raw detections as a torch file.
    json_obj = {
        'image_shapes': image_shapes,
        'width': width,
        'height': height,
    }
    with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
        json.dump(json_obj, f)
    torch.save(inference, os.path.join(out_seq_dir, 'inference.pt'))
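
# A minimal sketch of how a downstream step could reload what detect() saves
# for a sequence. This helper is not part of the original script, and
# `load_detection` is a hypothetical name; it only assumes the pred.json and
# inference.pt files written by detect() above.
def load_detection(out_seq_dir):
    """Reload the raw detections and frame metadata saved by detect()."""
    with open(os.path.join(out_seq_dir, 'pred.json')) as f:
        meta = json.load(f)
    inference = torch.load(os.path.join(out_seq_dir, 'inference.pt'))
    return inference, meta['image_shapes'], meta['width'], meta['height']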

def argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames", required=True, help="Path to frame directory. Required.")
    parser.add_argument("--output", required=True, help="Path to output directory. Required.")
    parser.add_argument("--weights", default='models/v5m_896_300best.pt',
                        help="Path to saved YOLOv5 weights. Default: models/v5m_896_300best.pt")
    return parser
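
# Example invocation (paths and the script filename are illustrative):
#   python detect_frames.py --frames /data/frames --output /data/results \
#       --weights models/v5m_896_300best.pt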

if __name__ == "__main__":
    args = argument_parser().parse_args()
    main(args)