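"""
Batch YOLOv5 detection over directories of pre-extracted frames.

For each location directory under --frames, runs the detector on every
sequence directory and writes pred.json (frame metadata) and inference.pt
(raw model outputs) to the mirrored path under --output.
"""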
import project_path  # side-effect import; likely sets up sys.path for the repo-local modules below
import argparse
import json
import os

import torch
from tqdm import tqdm

from dataloader import create_dataloader_frames_only
from inference import setup_model, do_detection


def main(args, verbose=True):
    """
    Main processing task to be run in gradio
        - Writes aris frames to dirname(filepath)/frames/{i}.jpg
        - Writes json output to dirname(filepath)/{filename}_results.json
        - Writes manual marking to dirname(filepath)/{filename}_marking.txt
        - Writes video output to dirname(filepath)/{filename}_results.mp4
        - Zips all results to dirname(filepath)/{filename}_results.zip
    Args:
        filepath (str): path to aris file
        
    TODO: Separate into subtasks in different queues; have a GPU-only queue.
    """
    print("In task...")
    print("Cuda available in task?", torch.cuda.is_available())

    model, device = setup_model(args.weights)
    
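    # Location subdirectories of args.frames to process; currently a hardcoded list.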
    locations = [
        "kenai-val"
    ]
    for loc in locations:

        in_loc_dir = os.path.join(args.frames, loc)
        out_loc_dir = os.path.join(args.output, loc)
        print("Input: ", in_loc_dir)
        print("Output:", out_loc_dir)

        detect_location(in_loc_dir, out_loc_dir, model, device, verbose)


def detect_location(in_loc_dir, out_loc_dir, model, device, verbose):
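    """
    Run detection on every sequence directory under in_loc_dir, skipping
    hidden entries (names starting with '.'), and write per-sequence
    outputs to the mirrored directory under out_loc_dir.
    """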

    seq_list = os.listdir(in_loc_dir)

    with tqdm(total=len(seq_list), desc="...", ncols=0) as pbar:
        for seq in seq_list:

            pbar.update(1)
            if seq.startswith("."):
                continue
            pbar.set_description("Processing " + seq)

            in_seq_dir = os.path.join(in_loc_dir, seq)
            out_seq_dir = os.path.join(out_loc_dir, seq)
            os.makedirs(out_seq_dir, exist_ok=True)

            detect(in_seq_dir, out_seq_dir, model, device, verbose)

def detect(in_seq_dir, out_seq_dir, model, device, verbose):
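    """
    Run the detector over all frames in in_seq_dir and persist the raw
    results to out_seq_dir:
        - pred.json: per-frame image shapes plus frame width/height
        - inference.pt: raw per-frame model predictions
    """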

    # create dataloader
    dataloader = create_dataloader_frames_only(in_seq_dir)

    inference, image_shapes, width, height = do_detection(dataloader, model, device, verbose=verbose)

    json_obj = {
        'image_shapes': image_shapes,
        'width': width,
        'height': height
    }

    with open(os.path.join(out_seq_dir, 'pred.json'), 'w') as f:
        json.dump(json_obj, f)

    torch.save(inference, os.path.join(out_seq_dir, 'inference.pt'))
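
    # The saved artifacts can be reloaded downstream, e.g.:
    #   inference = torch.load(os.path.join(out_seq_dir, 'inference.pt'))
    #   with open(os.path.join(out_seq_dir, 'pred.json')) as f:
    #       meta = json.load(f)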

def argument_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames", required=True, help="Path to frame directory. Required.")
    parser.add_argument("--output", required=True, help="Path to output directory. Required.")
    parser.add_argument("--weights", default='models/v5m_896_300best.pt', help="Path to saved YOLOv5 weights. Default: ../models/v5m_896_300best.pt")
    return parser

if __name__ == "__main__":
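    # Example invocation (script name and paths are illustrative):
    #   python detect_frames.py --frames /data/frames --output /data/results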
    args = argument_parser().parse_args()
    main(args)