import gradio as gr
import torch
import cv2
from ultralytics import YOLO
from ultralytics.nn.tasks import DetectionModel

# Load YOLO models
def safe_load_yolo_model(path):
    # Allowlist the Ultralytics DetectionModel class so checkpoints unpickle cleanly
    # when torch.load defaults to weights_only=True (PyTorch >= 2.6).
    torch.serialization.add_safe_globals([DetectionModel])
    return YOLO(path)
# Dictionary of model paths
model_paths = {
    'YOLOv11': './data/yolo11n.pt',
    'Crack & Pothole Detector': './data/best.pt',
    'Toll gates': './data/best2.pt'
}
# Load models into memory
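# Each model stays resident for the app's lifetime; selecting "All" in the UI runs every model on every frame.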
models = {name: safe_load_yolo_model(path) for name, path in model_paths.items()}
# Assign a distinct bounding-box color (BGR order, as used by OpenCV) to each model
model_colors = {
    'YOLOv11': (0, 255, 0),
    'Crack & Pothole Detector': (255, 0, 0),
    'Toll gates': (0, 0, 255)
}
def process_video(video, selected_model):
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 FPS if the container reports none
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('output_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))

    # Run either every loaded model or only the one the user selected
    use_models = models if selected_model == 'All' else {selected_model: models[selected_model]}

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        for model_name, model in use_models.items():
            results = model(frame)
            for result in results:
                for box in result.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
                    class_id = int(box.cls[0])
                    label = f"{model.names[class_id]} - {box.conf[0]:.2f}"
                    color = model_colors.get(model_name, (0, 255, 255))
                    cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)
        out.write(frame)

    cap.release()
    out.release()
    return 'output_video.mp4'
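
# Example (hypothetical clip path): the pipeline can also be exercised without the UI, e.g.
#   annotated = process_video('./data/sample_clip.mp4', 'Crack & Pothole Detector')
#   print(annotated)  # -> 'output_video.mp4'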
# Gradio Interface
iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload a Video"),
        gr.Dropdown(
            choices=["All"] + list(model_paths.keys()),
            label="Select Model(s)",
            value="All"
        )
    ],
    outputs=gr.Video(label="Processed Output"),
    live=False,
    title="Multi-Model YOLO Video Inference"
)
iface.launch()