dfdfdsfgs committed on
Commit
a12cf12
·
1 Parent(s): d9486d1

Force update with corrected Gradio function

Browse files
Files changed (1) hide show
  1. app.py +84 -79
app.py CHANGED
@@ -1,4 +1,6 @@
 
1
  import gradio as gr
 
2
  import uuid
3
  import subprocess
4
  import threading
@@ -13,7 +15,7 @@ import asyncio
13
  # For a production system, you'd use a database or Redis.
14
  tasks = {}
15
 
16
- def run_video_generation(task_id: str, topic: str, context: str):
17
  """
18
  This function runs the main generation script in a separate process.
19
  """
@@ -25,25 +27,24 @@ def run_video_generation(task_id: str, topic: str, context: str):
25
 
26
  command = [
27
  "python", "generate_video.py",
28
- "--model", "openai/o3-mini", # Or get from request
29
  "--topic", topic,
30
  "--context", context,
31
  "--output_dir", "output",
32
- "--use_langfuse" # Assuming you have secrets set
33
  ]
34
 
35
  try:
36
  # Using subprocess to run the existing script
37
  process = subprocess.run(command, check=True, capture_output=True, text=True)
38
 
39
- # Assume the final video is named based on the topic
40
- # Note: The actual video path might differ. This is an assumption.
41
- # You may need to parse the stdout from generate_video.py to get the exact path.
42
  video_path = None
43
- for file in os.listdir(output_dir):
44
- if file.endswith("_combined.mp4"):
45
- video_path = os.path.join(output_dir, file)
46
- break
 
47
 
48
  if video_path and os.path.exists(video_path):
49
  tasks[task_id]['status'] = 'completed'
@@ -51,34 +52,29 @@ def run_video_generation(task_id: str, topic: str, context: str):
51
  else:
52
  tasks[task_id]['status'] = 'failed'
53
  tasks[task_id]['error'] = "Video file not found after generation."
54
- tasks[task_id]['stdout'] = process.stdout
55
- tasks[task_id]['stderr'] = process.stderr
56
 
57
  except subprocess.CalledProcessError as e:
58
  tasks[task_id]['status'] = 'failed'
59
  tasks[task_id]['error'] = str(e)
60
- tasks[task_id]['stdout'] = e.stdout
61
- tasks[task_id]['stderr'] = e.stderr
62
  except Exception as e:
63
  tasks[task_id]['status'] = 'failed'
64
  tasks[task_id]['error'] = str(e)
65
 
66
- def start_generation_thread(topic: str, context: str):
67
- if not topic or not context:
68
- return "Topic and Context cannot be empty.", "", None
69
 
70
  task_id = str(uuid.uuid4())
71
- tasks[task_id] = {'status': 'queued'}
72
 
73
  # Use a background thread to run the time-consuming task
74
  thread = threading.Thread(
75
  target=run_video_generation,
76
- args=(task_id, topic, context)
77
  )
78
  thread.start()
79
 
80
- return f"Task started. Your Task ID is: {task_id}", task_id, None
81
-
82
 
83
  def check_status(task_id: str):
84
  if not task_id:
@@ -89,79 +85,88 @@ def check_status(task_id: str):
89
  return "Task not found.", None
90
 
91
  status = task.get('status')
 
 
92
  if status == 'completed':
93
  video_path = task.get('video_path')
94
- return f"Status: {status}", video_path
95
  elif status == 'failed':
96
  error = task.get('error', 'Unknown error')
97
- stdout = task.get('stdout', '')
98
- stderr = task.get('stderr', '')
99
- return f"Status: {status}\nError: {error}\nOutput: {stdout}\nStderr: {stderr}", None
100
-
101
- return f"Status: {status}", None
102
-
103
- # We need a lightweight FastAPI app in the background to serve the video files.
104
- # Gradio can't serve files directly from arbitrary paths in a secure way.
105
- fastapi_app = FastAPI()
106
-
107
- @fastapi_app.get("/videos/{task_id}")
108
- def get_video(task_id: str):
109
- """
110
- Serves the final generated video file.
111
- """
112
- task = tasks.get(task_id)
113
- if not task or task.get('status') != 'completed':
114
- return {"error": "Task not completed or not found"}
115
 
116
- video_path = task.get('video_path')
117
- if not os.path.exists(video_path):
118
- return {"error": "Video file not found."}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
- return FileResponse(video_path, media_type="video/mp4", filename=os.path.basename(video_path))
121
-
122
-
123
- # Gradio Interface
124
- with gr.Blocks() as demo:
125
- gr.Markdown("# Theorem-Explain-Agent Video Generation")
126
- gr.Markdown("Start a video generation task and check its status.")
127
-
128
- with gr.Tab("Start Generation"):
129
- topic_input = gr.Textbox(label="Topic", placeholder="e.g., The Pythagorean Theorem")
130
- context_input = gr.Textbox(label="Context", placeholder="A short explanation of the theorem.")
131
- start_button = gr.Button("Generate Video")
132
-
133
- with gr.Column():
134
  task_id_output = gr.Textbox(label="Task ID", interactive=False)
135
- status_output_start = gr.Textbox(label="Status", interactive=False)
136
 
137
- with gr.Tab("Check Status"):
138
- task_id_input = gr.Textbox(label="Task ID", placeholder="Enter the Task ID you received.")
139
- check_button = gr.Button("Check Status")
 
 
 
 
140
 
141
- with gr.Column():
142
- status_output_check = gr.Textbox(label="Status", interactive=False)
143
- video_output = gr.Video(label="Generated Video")
144
 
145
- # Actions
146
  start_button.click(
147
- fn=start_generation_thread,
148
- inputs=[topic_input, context_input],
149
- outputs=[status_output_start, task_id_output, video_output] # Clear video on new task
150
  )
151
 
152
  check_button.click(
153
  fn=check_status,
154
  inputs=[task_id_input],
155
- outputs=[status_output_check, video_output]
156
  )
157
 
158
- gr.Markdown("### How to Use")
159
- gr.Markdown(
160
- "1. Enter a `Topic` and `Context` in the 'Start Generation' tab and click 'Generate Video'.\n"
161
- "2. Copy the `Task ID` that appears.\n"
162
- "3. Go to the 'Check Status' tab, paste the `Task ID`, and click 'Check Status' periodically.\n"
163
- "4. When the generation is complete, the video will appear."
164
- )
165
-
166
- # To run both Gradio and FastAPI, we mount the FastAPI app into Gradio's internal FastAPI app.
167
- app = gr.mount_όπου(demo, fastapi_app, path="/")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Updated to fix Gradio mount function
2
  import gradio as gr
3
+ # ... rest of the file
4
  import uuid
5
  import subprocess
6
  import threading
 
15
  # For a production system, you'd use a database or Redis.
16
  tasks = {}
17
 
18
+ def run_video_generation(task_id: str, topic: str, context: str, model: str):
19
  """
20
  This function runs the main generation script in a separate process.
21
  """
 
27
 
28
  command = [
29
  "python", "generate_video.py",
30
+ "--model", model,
31
  "--topic", topic,
32
  "--context", context,
33
  "--output_dir", "output",
34
+ "--use_langfuse"
35
  ]
36
 
37
  try:
38
  # Using subprocess to run the existing script
39
  process = subprocess.run(command, check=True, capture_output=True, text=True)
40
 
41
+ # Look for the output video in the directory
 
 
42
  video_path = None
43
+ if os.path.exists(output_dir):
44
+ for file in os.listdir(output_dir):
45
+ if file.endswith("_combined.mp4"):
46
+ video_path = os.path.join(output_dir, file)
47
+ break
48
 
49
  if video_path and os.path.exists(video_path):
50
  tasks[task_id]['status'] = 'completed'
 
52
  else:
53
  tasks[task_id]['status'] = 'failed'
54
  tasks[task_id]['error'] = "Video file not found after generation."
 
 
55
 
56
  except subprocess.CalledProcessError as e:
57
  tasks[task_id]['status'] = 'failed'
58
  tasks[task_id]['error'] = str(e)
 
 
59
  except Exception as e:
60
  tasks[task_id]['status'] = 'failed'
61
  tasks[task_id]['error'] = str(e)
62
 
63
def start_generation(topic: str, context: str, model: str):
    """Validate the inputs, register a new task, and kick off generation.

    Returns a ``(status message, task id)`` pair; the task id is the empty
    string when validation fails.
    """
    # Guard clause: every field is required.
    if not (topic and context and model):
        return "Topic, Context, and Model cannot be empty.", ""

    new_id = str(uuid.uuid4())
    tasks[new_id] = {'status': 'queued', 'model': model}

    # The generation script is slow, so run it off the request thread.
    worker = threading.Thread(
        target=run_video_generation,
        args=(new_id, topic, context, model),
    )
    worker.start()

    return f"Task started with model {model}. Your Task ID is: {new_id}", new_id
 
78
 
79
  def check_status(task_id: str):
80
  if not task_id:
 
85
  return "Task not found.", None
86
 
87
  status = task.get('status')
88
+ model = task.get('model', 'Unknown')
89
+
90
  if status == 'completed':
91
  video_path = task.get('video_path')
92
+ return f"Status: {status} (Model: {model})", video_path
93
  elif status == 'failed':
94
  error = task.get('error', 'Unknown error')
95
+ return f"Status: {status} (Model: {model})\nError: {error}", None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
+ return f"Status: {status} (Model: {model})", None
98
+
99
# Create the Gradio interface.
# NOTE(review): the emoji in the UI strings below were mojibake-corrupted
# (UTF-8 decoded as Latin-1, e.g. "πŸŽ“" for 🎓); restored to real emoji.
with gr.Blocks(title="Theorem Explain Agent") as demo:
    gr.Markdown("# 🎓 Theorem-Explain-Agent Video Generation")
    gr.Markdown("Generate educational videos explaining mathematical theorems and concepts.")

    with gr.Tab("🚀 Start Generation"):
        gr.Markdown("### Enter the details for your video:")
        model_input = gr.Textbox(
            label="Model",
            placeholder="e.g., gemini/gemini-1.5-flash, openai/gpt-4o",
            value="gemini/gemini-1.5-flash"
        )
        topic_input = gr.Textbox(
            label="Topic",
            placeholder="e.g., The Pythagorean Theorem"
        )
        context_input = gr.Textbox(
            label="Context",
            placeholder="A short explanation of the theorem.",
            lines=3
        )
        start_button = gr.Button("🎬 Generate Video", variant="primary")

        with gr.Row():
            status_output = gr.Textbox(label="Status", interactive=False)
            task_id_output = gr.Textbox(label="Task ID", interactive=False)

    with gr.Tab("📊 Check Status"):
        gr.Markdown("### Check the status of your video generation:")
        task_id_input = gr.Textbox(
            label="Task ID",
            placeholder="Enter the Task ID you received"
        )
        check_button = gr.Button("🔍 Check Status", variant="secondary")

        status_display = gr.Textbox(label="Status", interactive=False)
        video_output = gr.Video(label="Generated Video")

    # Connect the functions to the interface.
    start_button.click(
        fn=start_generation,
        inputs=[topic_input, context_input, model_input],
        outputs=[status_output, task_id_output]
    )

    check_button.click(
        fn=check_status,
        inputs=[task_id_input],
        outputs=[status_display, video_output]
    )

    gr.Markdown("""
    ### 📋 How to Use:
    1. **Start Generation**: Enter a Model, Topic, and Context, then click 'Generate Video'
    2. **Copy the Task ID** that appears
    3. **Check Status**: Go to the 'Check Status' tab, paste your Task ID, and click 'Check Status'
    4. **Wait**: Video generation can take several minutes. Check periodically until complete
    5. **Download**: When complete, the video will appear and can be downloaded

    ### 🤖 Supported Models:
    - `gemini/gemini-1.5-flash` (recommended)
    - `gemini/gemini-1.5-pro`
    - `openai/gpt-4o`
    - `openai/o3-mini`
    - `anthropic/claude-3-opus-20240229`
    """)

# Launch the app.
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces (needed in containers / HF Spaces)
        server_port=7860,
        show_error=True
    )