# NOTE: removed non-source residue from a web file-viewer scrape
# (page header, blame commit hashes, and line-number gutter).
# app.py (Corrected for pathing and live logging)
import os
import re
import subprocess
import sys
import threading
import traceback
import urllib.request
import uuid

import gradio as gr
# Add the project root to the Python path so local modules resolve
# regardless of the working directory the app was launched from.
sys.path.insert(0, os.getcwd())

# --- Download Kokoro TTS models if they don't exist ---
model_dir = "models"
kokoro_model_path = os.path.join(model_dir, "kokoro-v0_19.onnx")
kokoro_voices_path = os.path.join(model_dir, "voices.bin")

if not os.path.exists(kokoro_model_path) or not os.path.exists(kokoro_voices_path):
    print("Downloading Kokoro TTS models...")
    os.makedirs(model_dir, exist_ok=True)
    # Use stdlib urllib instead of os.system("wget ..."): portable (no wget
    # dependency) and it raises on failure instead of silently ignoring the
    # shell's non-zero exit status.
    import urllib.request
    _base = "https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files"
    urllib.request.urlretrieve(f"{_base}/kokoro-v0_19.onnx", kokoro_model_path)
    urllib.request.urlretrieve(f"{_base}/voices.bin", kokoro_voices_path)
    print("Model download complete.")
# In-memory registry mapping task IDs to mutable status records
# ({'status', 'model', 'log', and eventually 'video_path' or 'error'}).
tasks = {}


def run_video_generation(task_id: str, topic: str, context: str, model: str):
    """Run the generation script in a subprocess, streaming its output live.

    Mutates ``tasks[task_id]`` in place: sets ``status`` to ``'running'``,
    appends each line of the child's combined stdout/stderr to ``log``, and
    finishes with ``status`` ``'completed'`` (plus ``video_path``) or
    ``'failed'`` (plus ``error``). Returns nothing.
    """
    tasks[task_id]['status'] = 'running'
    tasks[task_id]['log'] = 'Process started...\n'

    # Sanitize the topic into a filesystem-safe directory/file prefix.
    file_prefix = re.sub(r'[^a-z0-9_]+', '_', topic.lower())
    # Directory where the script will create this topic's files.
    final_output_dir = os.path.join("output", file_prefix)

    # Pass only the general 'output' folder: the script itself creates the
    # topic-specific 'file_prefix' subfolder inside it.
    # Use sys.executable rather than a bare "python" so the child runs under
    # the same interpreter even when "python" is not on PATH.
    command = [
        sys.executable, "-u", "generate_video.py",
        "--model", model,
        "--topic", topic,
        "--context", context,
        "--output_dir", "output",
        # Langfuse is disabled by not including the --use_langfuse flag.
    ]
    print(f"Running command: {' '.join(command)}")
    try:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr so one stream feeds the log
            text=True,
            bufsize=1,  # line-buffered: lines arrive as the child prints them
            universal_newlines=True,
        )
        for line in iter(process.stdout.readline, ''):
            print(line, end='')
            tasks[task_id]['log'] += line
        process.wait()

        if process.returncode == 0:
            # Check for the final combined video in the topic-specific directory.
            final_video_path = os.path.join(final_output_dir, f"{file_prefix}_combined.mp4")
            if os.path.exists(final_video_path):
                tasks[task_id]['status'] = 'completed'
                tasks[task_id]['video_path'] = final_video_path
                tasks[task_id]['log'] += f"\n✅ Success! Video available at: {final_video_path}"
            else:
                tasks[task_id]['status'] = 'failed'
                tasks[task_id]['error'] = "Script finished, but the final combined video file was not found."
                tasks[task_id]['log'] += f"\n❌ Error: Output video not found at {final_video_path}. Check full logs."
        else:
            tasks[task_id]['status'] = 'failed'
            tasks[task_id]['error'] = f"Process failed with return code {process.returncode}."
            tasks[task_id]['log'] += "\n❌ Error: Process failed. See logs above for details."
    except Exception as e:
        print(f"Caught an exception: {e}")
        tasks[task_id]['status'] = 'failed'
        tasks[task_id]['error'] = str(e)
        tasks[task_id]['log'] += f"\n❌ An exception occurred: {traceback.format_exc()}"
def start_generation(topic: str, context: str, model: str):
    """Validate inputs, register a new task, and launch generation in the background.

    Returns a ``(status_message, task_id)`` tuple for the UI; ``task_id`` is
    an empty string when validation fails.
    """
    if not all([topic, context, model]):
        return "Topic, Context, and Model cannot be empty.", ""

    task_id = str(uuid.uuid4())
    tasks[task_id] = {'status': 'queued', 'model': model, 'log': ''}

    # Run generation off the request thread so the UI stays responsive.
    thread = threading.Thread(
        target=run_video_generation,
        args=(task_id, topic, context, model)
    )
    thread.start()
    return f"✅ Task started with ID: {task_id}. Go to 'Check Status' tab to monitor progress.", task_id
def check_status(task_id: str):
    """Look up a task and return ``(status_message, video_path_or_None, log_text)``.

    The tuple feeds the three status-tab outputs; ``video_path`` is only
    non-None once the task has completed successfully.
    """
    if not task_id:
        return "Please enter a Task ID.", None, "Please enter a Task ID above and click 'Check Status'."

    task = tasks.get(task_id)
    if not task:
        return "Task not found.", None, f"No task found with ID: {task_id}"

    status = task.get('status')
    model = task.get('model', 'Unknown')
    log = task.get('log', 'No logs yet...')

    if status == 'completed':
        video_path = task.get('video_path')
        status_message = f"✅ Status: {status} (Model: {model})"
        return status_message, video_path, log
    if status == 'failed':
        status_message = f"❌ Status: {status} (Model: {model})"
        return status_message, None, log

    # Still queued or running.
    status_message = f"🔄 Status: {status} (Model: {model})"
    return status_message, None, log
# --- Gradio UI ---
# NOTE(review): the emoji in the labels below were reconstructed from mojibake
# in the scraped source ("π", "π¬", ...) — confirm against the deployed UI.
with gr.Blocks(title="Theorem Explain Agent") as demo:
    gr.Markdown("# 🎓 Theorem Explain Agent: Video Generation")
    gr.Markdown("Generate educational videos explaining mathematical theorems and concepts. This may take several minutes.")

    with gr.Tab("🚀 Start Generation"):
        gr.Markdown("### 1. Enter the details for your video")
        model_input = gr.Dropdown(
            label="Model",
            choices=["gemini/gemini-1.5-flash-001", "gemini/gemini-1.5-pro-002"],
            value="gemini/gemini-1.5-flash-001",
            info="Select the AI model for content generation."
        )
        topic_input = gr.Textbox(label="Topic", placeholder="e.g., The Pythagorean Theorem")
        context_input = gr.Textbox(label="Context", placeholder="A short explanation of the theorem.", lines=3)
        start_button = gr.Button("🎬 Generate Video", variant="primary")

        gr.Markdown("### 2. Monitor your task")
        with gr.Row():
            status_output = gr.Textbox(label="Status", interactive=False)
            task_id_output = gr.Textbox(label="Task ID", interactive=False)

    with gr.Tab("🔍 Check Status & View Video"):
        gr.Markdown("### Paste your Task ID to check progress and view the final video")
        with gr.Row():
            task_id_input = gr.Textbox(label="Task ID", placeholder="Enter the Task ID you received")
            check_button = gr.Button("🔄 Check Status", variant="secondary")
        status_display = gr.Textbox(label="Current Status", interactive=False)
        video_output = gr.Video(label="Generated Video", interactive=False)
        log_display = gr.Textbox(label="Live Generation Logs", lines=15, interactive=False)

    start_button.click(
        fn=start_generation,
        inputs=[topic_input, context_input, model_input],
        outputs=[status_output, task_id_output]
    )
    # every=2: re-run the status check every 2 seconds after the click so the
    # status, video, and log outputs refresh while the task runs.
    check_button.click(
        fn=check_status,
        inputs=[task_id_input],
        outputs=[status_display, video_output, log_display],
        every=2
    )

demo.launch()