import project_path
import os
import torch
from zipfile import ZipFile
from backend.aris import create_manual_marking, BEAM_WIDTH_DIR, add_metadata_to_result, prep_for_mm
from backend.dataloader import create_dataloader_aris
from backend.inference import do_full_inference, json_dump_round_float
from backend.visualizer import generate_video_batches
def predict_task(filepath, config, output_formats=None, gradio_progress=None):
    """
    Main processing task to be run in gradio
    - Writes aris frames to dirname(filepath)/frames/{i}.jpg
    - Writes json output to dirname(filepath)/{filename}_results.json
    - Writes manual marking to dirname(filepath)/{filename}_marking.txt
    - Writes video output to dirname(filepath)/{filename}_results.mp4
    - Zips all results to dirname(filepath)/{filename}_results.zip
    Args:
        filepath (str): path to the input .aris or .ddf file
        config: hyperparameter configuration; must provide a to_dict() method
        output_formats (list[str], optional): extra outputs to generate; recognized
            values are "Generate Manual Marking" and "Generate Annotated Video"
        gradio_progress (callable, optional): gradio progress callback taking
            (fraction, description)
    """
    # Avoid a mutable default argument
    output_formats = output_formats or []
    if gradio_progress: gradio_progress(0, "In task...")
print("Cuda available in task?", torch.cuda.is_available())
# Set up save directory and define file names
dirname = os.path.dirname(filepath)
filename = os.path.basename(filepath).replace(".aris","").replace(".ddf","")
results_filepath = os.path.join(dirname, f"{filename}_results.json")
marking_filepath = os.path.join(dirname, f"{filename}_marking.txt")
video_filepath = os.path.join(dirname, f"{filename}_results.mp4")
zip_filepath = os.path.join(dirname, f"{filename}_results.zip")
os.makedirs(dirname, exist_ok=True)
# Create dataloader
if (gradio_progress): gradio_progress(0, "Initializing Dataloader...")
    dataloader, dataset = create_dataloader_aris(filepath, BEAM_WIDTH_DIR, None)
# Extract aris/didson info. Didson does not yet have pixel-meter info
if ".ddf" in filepath:
image_meter_width = -1
image_meter_height = -1
else:
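        # Physical frame size in meters = pixel dimensions * meters-per-pixel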
image_meter_width = dataset.didson.info['xdim'] * dataset.didson.info['pixel_meter_width']
image_meter_height = dataset.didson.info['ydim'] * dataset.didson.info['pixel_meter_height']
frame_rate = dataset.didson.info['framerate']
# run detection + tracking
results = do_full_inference(dataloader, image_meter_width, image_meter_height, gp=gradio_progress, config=config)
# Generate Metadata and extra inference information
results = prep_for_mm(results)
results = add_metadata_to_result(filepath, results)
results['metadata']['hyperparameters'] = config.to_dict()
# Create JSON result file
json_dump_round_float(results, results_filepath)
# Create Manual Marking file
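    # Only produced when requested and the file's version field indicates a compatible format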
if "Generate Manual Marking" in output_formats and dataset.didson.info['version'][3] == 5:
create_manual_marking(results, out_path=marking_filepath)
# Create Annotated Video
if "Generate Annotated Video" in output_formats:
generate_video_batches(dataset.didson, results, frame_rate, video_filepath,
image_meter_width=image_meter_width, image_meter_height=image_meter_height, gp=gradio_progress)
# Zip up the results
with ZipFile(zip_filepath, 'w') as z:
for file in [results_filepath, marking_filepath, video_filepath, os.path.join(dirname, 'bg_start.jpg')]:
if os.path.exists(file):
z.write(file, arcname=os.path.basename(file))
# Release GPU memory
torch.cuda.empty_cache()
    return results, results_filepath, zip_filepath, video_filepath, marking_filepath
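
# Example usage (a sketch, not part of the original module): the config class,
# import path, and file path below are assumptions -- any object exposing
# .to_dict() and the hyperparameters expected by do_full_inference should work.
#
#   from backend.config import Config   # hypothetical import
#   results, results_json, zip_path, video_path, marking_path = predict_task(
#       "/data/uploads/example.aris",
#       config=Config(),
#       output_formats=["Generate Manual Marking", "Generate Annotated Video"],
#   )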