import os

import gradio as gr
import requests

# Remote FramePack-F1 Space that performs the actual video generation.
API_URL = "https://rahul7star-FramePack-F1-DiffusionForce.hf.space/api/generate/"

def call_framepack_api(
    input_image,
    prompt,
    t2v,
    n_prompt,
    seed,
    total_second_length,
    latent_window_size,
    steps,
    cfg,
    gs,
    rs,
    gpu_memory_preservation,
    use_teacache,
    mp4_crf,
    lora_file,
    lora_multiplier,
    fp8_optimization,
):
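    """Forward one generation request to the FramePack API and return
    (video_url, preview_image_url, raw response text) for the Gradio outputs."""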
    files = {}
    data = {
        "prompt": prompt,
        "t2v": str(t2v).lower(),
        "n_prompt": n_prompt,
        "seed": int(seed),
        "total_second_length": float(total_second_length),
        "latent_window_size": int(latent_window_size),
        "steps": int(steps),
        "cfg": float(cfg),
        "gs": float(gs),
        "rs": float(rs),
        "gpu_memory_preservation": float(gpu_memory_preservation),
        "use_teacache": str(use_teacache).lower(),
        "mp4_crf": int(mp4_crf),
        "lora_multiplier": float(lora_multiplier),
        "fp8_optimization": str(fp8_optimization).lower(),
    }

    # gr.File(type="filepath") hands us local paths, so read the bytes here;
    # posting the path value itself would upload the wrong content.
    if input_image:
        with open(input_image, "rb") as f:
            files["input_image"] = ("input.png", f.read(), "image/png")
    if lora_file:
        with open(lora_file, "rb") as f:
            files["lora_file"] = (os.path.basename(lora_file), f.read(), "application/octet-stream")

    try:
        # Generation can take minutes; use a generous timeout rather than
        # waiting indefinitely on a stalled connection.
        response = requests.post(API_URL, data=data, files=files, timeout=600)
        if response.status_code == 200:
            result = response.json()
            video_url = result.get("video_url")
            preview_url = result.get("preview_image_url")
            return video_url, preview_url, str(result)
        else:
            return None, None, f"API Error: {response.status_code} - {response.text}"
    except Exception as e:
        return None, None, f"Exception: {str(e)}"

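# Client UI: the controls below mirror the parameters accepted by the remote API
# and are forwarded unchanged by call_framepack_api.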
with gr.Blocks() as demo:
    gr.Markdown("# FramePack API Client with Full Options")

    with gr.Row():
        with gr.Column():
            input_image = gr.File(label="Input Image (PNG/JPG, optional)", file_types=[".png", ".jpg", ".jpeg"], type="filepath")
            lora_file = gr.File(label="LoRA File (optional)", file_types=[".safetensors", ".pt", ".bin"], type="filepath")
            prompt = gr.Textbox(label="Prompt")
            n_prompt = gr.Textbox(label="Negative Prompt (optional)", value="")
            t2v = gr.Checkbox(label="Text-to-Video", value=True)
            seed = gr.Number(label="Seed", value=31337, precision=0)
            total_second_length = gr.Slider(label="Video Length (seconds)", minimum=1, maximum=120, value=5, step=0.1)
            latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1)
            cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01)
            gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01)
            rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01)
            gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB)", minimum=6, maximum=128, value=6, step=0.1)
            use_teacache = gr.Checkbox(label="Use TeaCache", value=True)
            mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1)
            lora_multiplier = gr.Slider(label="LoRA Multiplier", minimum=0.0, maximum=1.0, value=0.8, step=0.1)
            fp8_optimization = gr.Checkbox(label="FP8 Optimization", value=False)

            generate_btn = gr.Button("Generate")

        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True)
            preview_output = gr.Image(label="Preview Image")
            api_response = gr.Textbox(label="API JSON Response", lines=10)

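    # The inputs list must stay in the same order as call_framepack_api's parameters.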
    generate_btn.click(
        fn=call_framepack_api,
        inputs=[
            input_image,
            prompt,
            t2v,
            n_prompt,
            seed,
            total_second_length,
            latent_window_size,
            steps,
            cfg,
            gs,
            rs,
            gpu_memory_preservation,
            use_teacache,
            mp4_crf,
            lora_file,
            lora_multiplier,
            fp8_optimization,
        ],
        outputs=[video_output, preview_output, api_response],
    )

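# Start the Gradio server for the client UI.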
demo.launch()