Update app_lora.py
app_lora.py CHANGED (+2 -2)
@@ -134,8 +134,8 @@ def generate_video(input_image, prompt, height, width,
     return video_path, current_seed
 
 with gr.Blocks() as demo:
-    gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B)
-    gr.Markdown("[CausVid](https://github.com/tianweiy/CausVid) is a distilled version of Wan 2.1 to run faster in just 4-8 steps, [extracted as LoRA by Kijai](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors) and is compatible with 🧨 diffusers")
+    gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B) fusionx-lora")
+    #gr.Markdown("[CausVid](https://github.com/tianweiy/CausVid) is a distilled version of Wan 2.1 to run faster in just 4-8 steps, [extracted as LoRA by Kijai](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors) and is compatible with 🧨 diffusers")
     with gr.Row():
         with gr.Column():
             input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
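For context, below is a minimal sketch of how the two edited lines sit inside the Gradio Blocks layout after this commit. It is not the full app_lora.py: the generate_video stub, the __main__ guard, and demo.launch() are assumptions standing in for the parts of the file the diff does not show.

# Minimal sketch, assuming the rest of app_lora.py is unchanged by this commit.
import gradio as gr

def generate_video(input_image, prompt, height, width):
    # Stub standing in for the real generation function defined earlier in app_lora.py.
    raise NotImplementedError

with gr.Blocks() as demo:
    gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B) fusionx-lora")  # new title from this commit
    # The CausVid/Kijai LoRA description line is commented out by this commit.
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(
                type="pil", label="Input Image (auto-resized to target H/W)"
            )

if __name__ == "__main__":
    demo.launch()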