# Theorem Explain Agent - Video Generation App
# Updated: 2025-06-12 - Fixed Gradio interface
import gradio as gr
import uuid
import subprocess
import threading
import os
import time
from fastapi import FastAPI
from fastapi.responses import FileResponse
import asyncio
import sys

# A simple in-memory dictionary to track task status.
# For a production system, you'd use a database or Redis.
tasks = {}
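# Each task record in `tasks` ends up with roughly this shape:
#   {'status': 'queued' | 'running' | 'completed' | 'failed',
#    'model': '<model name>',
#    'video_path': '<path to .mp4>',  # set only on success
#    'error': '<message>'}            # set only on failure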

sys.path.insert(0, os.getcwd())

# --- Download Kokoro models if they don't exist ---
model_dir = "models"
if not os.path.exists(os.path.join(model_dir, "kokoro-v0_19.onnx")):
    print("Downloading Kokoro TTS models...")
    os.makedirs(model_dir, exist_ok=True)
    os.system(f"wget -P {model_dir} https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files/kokoro-v0_19.onnx")
    os.system(f"wget -P {model_dir} https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files/voices.bin")
    print("Model download complete.")

def run_video_generation(task_id: str, topic: str, context: str, model: str):
    """
    This function runs the main generation script in a separate process.
    """
    tasks[task_id]['status'] = 'running'

    # Sanitize topic to create a valid directory name
    file_prefix = "".join(c if c.isalnum() else "_" for c in topic.lower())
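    # e.g. "The Pythagorean Theorem" -> "the_pythagorean_theorem"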
    output_dir = os.path.join("output", file_prefix)

    command = [
        "python", "generate_video.py",
        "--model", model,
        "--topic", topic,
        "--context", context,
        "--output_dir", "output",
        # "--use_langfuse"
    ]
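    # The resulting call looks like this (argument values are illustrative):
    #   python generate_video.py --model gemini/gemini-1.5-flash \
    #       --topic "The Pythagorean Theorem" --context "..." --output_dir output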

    try:
        # Run the existing generation script as a blocking subprocess
        subprocess.run(command, check=True, capture_output=True, text=True)

        # Look for the output video in the directory
        video_path = None
        if os.path.exists(output_dir):
            for file in os.listdir(output_dir):
                if file.endswith("_combined.mp4"):
                    video_path = os.path.join(output_dir, file)
                    break

        if video_path and os.path.exists(video_path):
            tasks[task_id]['status'] = 'completed'
            tasks[task_id]['video_path'] = video_path
        else:
            tasks[task_id]['status'] = 'failed'
            tasks[task_id]['error'] = "Video file not found after generation."
    except subprocess.CalledProcessError as e:
        tasks[task_id]['status'] = 'failed'
        # Surface the script's stderr so the UI shows why generation failed
        tasks[task_id]['error'] = e.stderr or str(e)
    except Exception as e:
        tasks[task_id]['status'] = 'failed'
        tasks[task_id]['error'] = str(e)

def start_generation(topic: str, context: str, model: str):
    if not all([topic, context, model]):
        return "Topic, Context, and Model cannot be empty.", ""

    task_id = str(uuid.uuid4())
    tasks[task_id] = {'status': 'queued', 'model': model}

    # Use a background thread to run the time-consuming task
    thread = threading.Thread(
        target=run_video_generation,
        args=(task_id, topic, context, model)
    )
    thread.start()

    return f"Task started with model {model}. Your Task ID is: {task_id}", task_id

def check_status(task_id: str):
    if not task_id:
        return "Please provide a Task ID.", None

    task = tasks.get(task_id)
    if not task:
        return "Task not found.", None

    status = task.get('status')
    model = task.get('model', 'Unknown')

    if status == 'completed':
        video_path = task.get('video_path')
        return f"Status: {status} (Model: {model})", video_path
    elif status == 'failed':
        error = task.get('error', 'Unknown error')
        return f"Status: {status} (Model: {model})\nError: {error}", None
    return f"Status: {status} (Model: {model})", None

# Create the Gradio interface
with gr.Blocks(title="Theorem Explain Agent") as demo:
    gr.Markdown("# Theorem-Explain-Agent Video Generation")
    gr.Markdown("Generate educational videos explaining mathematical theorems and concepts.")

    with gr.Tab("Start Generation"):
        gr.Markdown("### Enter the details for your video:")
        model_input = gr.Textbox(
            label="Model",
            placeholder="e.g., gemini/gemini-1.5-flash, openai/gpt-4o",
            value="gemini/gemini-1.5-flash"
        )
        topic_input = gr.Textbox(
            label="Topic",
            placeholder="e.g., The Pythagorean Theorem"
        )
        context_input = gr.Textbox(
            label="Context",
            placeholder="A short explanation of the theorem.",
            lines=3
        )
        start_button = gr.Button("Generate Video", variant="primary")
        with gr.Row():
            status_output = gr.Textbox(label="Status", interactive=False)
            task_id_output = gr.Textbox(label="Task ID", interactive=False)

    with gr.Tab("Check Status"):
        gr.Markdown("### Check the status of your video generation:")
        task_id_input = gr.Textbox(
            label="Task ID",
            placeholder="Enter the Task ID you received"
        )
        check_button = gr.Button("Check Status", variant="secondary")
        status_display = gr.Textbox(label="Status", interactive=False)
        video_output = gr.Video(label="Generated Video")

    # Connect the functions to the interface
    start_button.click(
        fn=start_generation,
        inputs=[topic_input, context_input, model_input],
        outputs=[status_output, task_id_output]
    )
    check_button.click(
        fn=check_status,
        inputs=[task_id_input],
        outputs=[status_display, video_output]
    )

    gr.Markdown("""
    ### How to Use:
    1. **Start Generation**: Enter a Model, Topic, and Context, then click 'Generate Video'
    2. **Copy the Task ID** that appears
    3. **Check Status**: Go to the 'Check Status' tab, paste your Task ID, and click 'Check Status'
    4. **Wait**: Video generation can take several minutes. Check periodically until complete
    5. **Download**: When complete, the video will appear and can be downloaded

    ### Supported Models:
    - `gemini/gemini-1.5-flash` (recommended)
    - `gemini/gemini-1.5-pro`
    - `openai/gpt-4o`
    - `openai/o3-mini`
    - `anthropic/claude-3-opus-20240229`
    """)

# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
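
# Note: for long-running jobs, Gradio's built-in request queue is another option
# (a sketch, not what this app does):
#   demo.queue().launch(server_name="0.0.0.0", server_port=7860, show_error=True)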