fffiloni committed
Commit f266d6a · verified · Parent: b03d37f

Increase queue size and add some info about inference time and settings

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -243,7 +243,7 @@ with gr.Blocks(title="MultiTalk Inference") as demo:
     gr.Markdown("## 🎤 Meigen MultiTalk Inference Demo")
     gr.Markdown("Let Them Talk: Audio-Driven Multi-Person Conversational Video Generation")
     if is_shared_ui:
-        gr.Markdown("Audio will be trimmed to max 5 seconds on fffiloni's shared UI. Duplicate to skip the queue and work with longer audio inference. ")
+        gr.Markdown("Audio will be trimmed to max 5 seconds on fffiloni's shared UI. Sample steps are limited to 12. Gradio queue size is set to 4. Generating a 5 seconds video will take approximatively 20 minutes. Duplicate to skip the queue and work with longer audio inference. ")
     gr.HTML("""
     <div style="display:flex;column-gap:4px;">
         <a href="https://github.com/MeiGen-AI/MultiTalk">
@@ -309,4 +309,4 @@ with gr.Blocks(title="MultiTalk Inference") as demo:
         outputs=output_video
     )
 
-demo.queue(max_size=2).launch(ssr_mode=False, show_error=True, show_api=False)
+demo.queue(max_size=4).launch(ssr_mode=False, show_error=True, show_api=False)
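
For context, a minimal sketch of how the two changed lines sit inside a Gradio app. Only the gr.Markdown notice and the demo.queue(max_size=4) call come from the diff above; the SPACE_ID-based is_shared_ui detection, the exact Space id "fffiloni/MultiTalk", and everything elided with comments are assumptions for illustration, not the actual app.py.

# Minimal sketch, assuming a SPACE_ID-based shared-UI check; not the full app.py.
import os

import gradio as gr

# On Hugging Face Spaces, the SPACE_ID environment variable names the hosting Space,
# so comparing it to the original repo id (assumed here) is one way to tell the
# shared demo apart from a user's duplicate.
is_shared_ui = "fffiloni/MultiTalk" in os.environ.get("SPACE_ID", "")

with gr.Blocks(title="MultiTalk Inference") as demo:
    gr.Markdown("## 🎤 Meigen MultiTalk Inference Demo")
    gr.Markdown("Let Them Talk: Audio-Driven Multi-Person Conversational Video Generation")
    if is_shared_ui:
        # Notice shown only on the shared Space, matching the string added in this commit.
        gr.Markdown(
            "Audio will be trimmed to max 5 seconds on fffiloni's shared UI. "
            "Sample steps are limited to 12. Gradio queue size is set to 4. "
            "Generating a 5 seconds video will take approximatively 20 minutes. "
            "Duplicate to skip the queue and work with longer audio inference."
        )
    # ... inputs, outputs and the generate-button wiring from app.py are omitted here ...

# The commit raises max_size from 2 to 4, i.e. up to four requests can wait in the queue.
demo.queue(max_size=4).launch(ssr_mode=False, show_error=True, show_api=False)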