# NOTE(review): removed three lines of Hugging Face Spaces page scrape residue
# ("Spaces:" / "Runtime error" / "Runtime error") — they were not part of the code.
# app.py (Corrected for pathing and live logging)
import os
import re
import subprocess
import sys
import threading
import traceback
import urllib.request
import uuid

import gradio as gr

# Add project root to Python path to fix any import issues
sys.path.insert(0, os.getcwd())
# --- Download Kokoro models if they don't exist ---
model_dir = "models"
kokoro_model_path = os.path.join(model_dir, "kokoro-v0_19.onnx")
kokoro_voices_path = os.path.join(model_dir, "voices.bin")
# Release that hosts the pretrained Kokoro TTS model + voice embeddings.
_KOKORO_RELEASE = "https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files"
if not os.path.exists(kokoro_model_path) or not os.path.exists(kokoro_voices_path):
    print("Downloading Kokoro TTS models...")
    os.makedirs(model_dir, exist_ok=True)
    # urllib instead of shelling out to wget: portable, no external binary,
    # and a failed download raises instead of being silently ignored.
    urllib.request.urlretrieve(f"{_KOKORO_RELEASE}/kokoro-v0_19.onnx", kokoro_model_path)
    urllib.request.urlretrieve(f"{_KOKORO_RELEASE}/voices.bin", kokoro_voices_path)
    print("Model download complete.")
# In-memory registry of generation tasks, keyed by task id.
# Each entry tracks: status, model, accumulated log, and (on success) video_path.
tasks = {}
def run_video_generation(task_id: str, topic: str, context: str, model: str):
    """Run generate_video.py in a subprocess, streaming its output into the task log.

    Updates the shared `tasks` entry for `task_id`: status moves from
    'running' to 'completed' or 'failed', `log` accumulates the child's
    merged stdout/stderr live, and on success `video_path` is set.

    Args:
        task_id: Key into the module-level `tasks` dict (entry pre-created by caller).
        topic: Video topic; also sanitized into the output file prefix.
        context: Short explanation passed through to the generation script.
        model: Model identifier passed through to the generation script.
    """
    tasks[task_id]['status'] = 'running'
    tasks[task_id]['log'] = 'Process started...\n'

    # Sanitize the topic into a filesystem-safe, lowercase, underscore prefix.
    file_prefix = re.sub(r'[^a-z0-9_]+', '_', topic.lower())
    # The script creates this specific subfolder inside the general 'output' dir.
    final_output_dir = os.path.join("output", file_prefix)

    # Pass the *general* output directory; generate_video.py creates the
    # file_prefix subfolder itself. Langfuse stays disabled by omitting
    # the --use_langfuse flag.
    command = [
        sys.executable, "-u", "generate_video.py",  # sys.executable: robust when 'python' isn't on PATH
        "--model", model,
        "--topic", topic,
        "--context", context,
        "--output_dir", "output",
    ]
    print(f"Running command: {' '.join(command)}")

    try:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr so one stream carries everything
            text=True,
            bufsize=1,  # line-buffered so the UI log updates in real time
        )
        # Stream line by line; the `with` ensures the pipe is closed afterwards.
        with process.stdout:
            for line in process.stdout:
                print(line, end='')
                tasks[task_id]['log'] += line
        process.wait()

        if process.returncode == 0:
            # Exit code 0 alone isn't proof of success — verify the combined
            # video actually exists in the script's specific output subfolder.
            final_video_path = os.path.join(final_output_dir, f"{file_prefix}_combined.mp4")
            if os.path.exists(final_video_path):
                tasks[task_id]['status'] = 'completed'
                tasks[task_id]['video_path'] = final_video_path
                tasks[task_id]['log'] += f"\nβ Success! Video available at: {final_video_path}"
            else:
                tasks[task_id]['status'] = 'failed'
                tasks[task_id]['error'] = "Script finished, but the final combined video file was not found."
                tasks[task_id]['log'] += f"\nβ Error: Output video not found at {final_video_path}. Check full logs."
        else:
            tasks[task_id]['status'] = 'failed'
            tasks[task_id]['error'] = f"Process failed with return code {process.returncode}."
            tasks[task_id]['log'] += "\nβ Error: Process failed. See logs above for details."
    except Exception as e:
        # Catch-all is deliberate: this runs on a worker thread, so any escape
        # would vanish silently — record it in the task state instead.
        print(f"Caught an exception: {e}")
        tasks[task_id]['status'] = 'failed'
        tasks[task_id]['error'] = str(e)
        tasks[task_id]['log'] += f"\nβ An exception occurred: {traceback.format_exc()}"
def start_generation(topic: str, context: str, model: str):
    """Validate inputs and kick off video generation on a background thread.

    Returns:
        (status_message, task_id) for the two Gradio outputs; on validation
        failure the task-id slot is an empty string.
    """
    if not all([topic, context, model]):
        return "Topic, Context, and Model cannot be empty.", ""
    task_id = str(uuid.uuid4())
    tasks[task_id] = {'status': 'queued', 'model': model, 'log': ''}
    worker = threading.Thread(
        target=run_video_generation,
        args=(task_id, topic, context, model),
        daemon=True,  # a stuck generation must not block interpreter shutdown
    )
    worker.start()
    return f"β Task started with ID: {task_id}. Go to 'Check Status' tab to monitor progress.", task_id
def check_status(task_id: str):
    """Look up a task and report its progress for the status tab.

    Returns:
        (status_message, video_path_or_None, log_text) matching the three
        Gradio outputs; the video path is only set for completed tasks.
    """
    if not task_id:
        return "Please enter a Task ID.", None, "Please enter a Task ID above and click 'Check Status'."
    task = tasks.get(task_id)
    if not task:
        return "Task not found.", None, f"No task found with ID: {task_id}"

    status = task.get('status')
    model = task.get('model', 'Unknown')
    log = task.get('log', 'No logs yet...')

    if status == 'completed':
        # Only the completed state has a playable video to show.
        return f"β Status: {status} (Model: {model})", task.get('video_path'), log
    if status == 'failed':
        return f"β Status: {status} (Model: {model})", None, log
    # queued / running — still in progress.
    return f"π Status: {status} (Model: {model})", None, log
# --- Gradio UI: one tab to launch a generation, one to poll its status ---
with gr.Blocks(title="Theorem Explain Agent") as demo:
    gr.Markdown("# π Theorem Explain Agent: Video Generation")
    gr.Markdown("Generate educational videos explaining mathematical theorems and concepts. This may take several minutes.")

    with gr.Tab("π Start Generation"):
        gr.Markdown("### 1. Enter the details for your video")
        model_input = gr.Dropdown(
            label="Model",
            choices=["gemini/gemini-1.5-flash-001", "gemini/gemini-1.5-pro-002"],
            value="gemini/gemini-1.5-flash-001",
            info="Select the AI model for content generation."
        )
        topic_input = gr.Textbox(label="Topic", placeholder="e.g., The Pythagorean Theorem")
        context_input = gr.Textbox(label="Context", placeholder="A short explanation of the theorem.", lines=3)
        start_button = gr.Button("π¬ Generate Video", variant="primary")
        gr.Markdown("### 2. Monitor your task")
        with gr.Row():
            status_output = gr.Textbox(label="Status", interactive=False)
            task_id_output = gr.Textbox(label="Task ID", interactive=False)

    with gr.Tab("π Check Status & View Video"):
        gr.Markdown("### Paste your Task ID to check progress and view the final video")
        with gr.Row():
            task_id_input = gr.Textbox(label="Task ID", placeholder="Enter the Task ID you received")
            check_button = gr.Button("π Check Status", variant="secondary")
        status_display = gr.Textbox(label="Current Status", interactive=False)
        video_output = gr.Video(label="Generated Video", interactive=False)
        log_display = gr.Textbox(label="Live Generation Logs", lines=15, interactive=False)

    start_button.click(
        fn=start_generation,
        inputs=[topic_input, context_input, model_input],
        outputs=[status_output, task_id_output]
    )
    check_button.click(
        fn=check_status,
        inputs=[task_id_input],
        outputs=[status_display, video_output, log_display],
        every=2  # re-poll status every 2s after the click
    )

# `every=` periodic re-runs only fire when the queue is enabled — without
# queue() the live-log polling never updates (likely cause of the Space's
# runtime error banner).
demo.queue()
demo.launch()