sayakpaul (HF Staff) committed · verified
Commit: ca370fd
1 Parent(s): 441f78e

Sync from GitHub

Files changed (3)
  1. app.py +56 -14
  2. prompts.py +2 -1
  3. utils/pipeline_utils.py +1 -1
app.py CHANGED
@@ -46,14 +46,15 @@ def get_output_code(


# --- Gradio UI Definition ---
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
+with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🧨 Generate Diffusers Inference code snippet tailored to your machine
        Enter a Hugging Face Hub `repo_id` and your system specs to get started for inference.
        This tool uses [Gemini](https://ai.google.dev/gemini-api/docs/models) to generate the code based on your settings. This is based on
        [sayakpaul/auto-diffusers-docs](https://github.com/sayakpaul/auto-diffusers-docs/).
-        """
+        """,
+        elem_id="col-container"
    )

    with gr.Row():
@@ -71,8 +72,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
            info="Select the model to generate the analysis.",
        )
        with gr.Row():
-            system_ram = gr.Number(label="System RAM (GB)", value=20)
-            gpu_vram = gr.Number(label="GPU VRAM (GB)", value=8)
+            system_ram = gr.Number(label="Free System RAM (GB)", value=20)
+            gpu_vram = gr.Number(label="Free GPU VRAM (GB)", value=8)

        with gr.Row():
            disable_bf16 = gr.Checkbox(
@@ -92,6 +93,57 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:

        with gr.Column(scale=1):
            submit_btn = gr.Button("Estimate Memory ☁", variant="primary", scale=1)
+
+    # --- Start of New Code Block ---
+    all_inputs = [
+        repo_id,
+        gemini_model_to_use,
+        disable_bf16,
+        enable_lossy,
+        system_ram,
+        gpu_vram,
+        torch_compile_friendly,
+        fp8_friendly,
+    ]
+
+    with gr.Accordion("Examples (Click to expand)", open=False):
+        gr.Examples(
+            examples=[
+                [
+                    "stabilityai/stable-diffusion-xl-base-1.0",
+                    "gemini-2.5-pro",
+                    False,
+                    False,
+                    64,
+                    24,
+                    True,
+                    True,
+                ],
+                [
+                    "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
+                    "gemini-2.5-flash",
+                    False,
+                    True,
+                    16,
+                    8,
+                    False,
+                    False,
+                ],
+                [
+                    "stabilityai/stable-diffusion-3-medium-diffusers",
+                    "gemini-2.5-pro",
+                    False,
+                    False,
+                    32,
+                    16,
+                    True,
+                    False,
+                ],
+            ],
+            inputs=all_inputs,
+            label="Examples (Click to try)",
+        )
+    # --- End of New Code Block ---

    with gr.Accordion("💡 Tips", open=False):
        gr.Markdown(
@@ -126,16 +178,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
    )

    # --- Event Handling ---
-    all_inputs = [
-        repo_id,
-        gemini_model_to_use,
-        disable_bf16,
-        enable_lossy,
-        system_ram,
-        gpu_vram,
-        torch_compile_friendly,
-        fp8_friendly,
-    ]
    submit_btn.click(fn=get_output_code, inputs=all_inputs, outputs=[output_markdown, prompt_output])

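The moved `all_inputs` list now sits above the new `gr.Examples` accordion, so the example rows and the `submit_btn.click` handler share one component list, with each example row supplying one value per component in that order. The sketch below shows that wiring in isolation; it is a minimal illustration rather than the Space's actual code, and the component names and example values are made up.

# Minimal sketch: sharing one input list between gr.Examples and a click handler.
# Component names and values are illustrative only.
import gradio as gr

def report(repo_id, ram, vram):
    return f"`{repo_id}` with {ram} GB free RAM and {vram} GB free VRAM"

with gr.Blocks() as demo:
    repo_id = gr.Textbox(label="repo_id")
    ram = gr.Number(label="Free System RAM (GB)", value=20)
    vram = gr.Number(label="Free GPU VRAM (GB)", value=8)
    out = gr.Markdown()

    # Defined before gr.Examples so both the examples and the button reference it.
    all_inputs = [repo_id, ram, vram]

    gr.Examples(
        examples=[["stabilityai/stable-diffusion-xl-base-1.0", 64, 24]],
        inputs=all_inputs,  # clicking a row fills these components in order
    )

    btn = gr.Button("Run")
    btn.click(fn=report, inputs=all_inputs, outputs=out)

demo.launch()

Clicking an example row only pre-fills the inputs; the function still runs when the button is pressed, which matches how the Space keeps `submit_btn.click` as the single event handler.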
prompts.py CHANGED
@@ -163,7 +163,8 @@ pipe.transformer.compile(fullgraph=True)

* When the available VRAM > pipeline loading memory, you should suggest using `pipe = pipe.to("cuda")`.
* If the user prefers not to use quantization and further reduce memory, then suggest using:
-  `pipe.transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)`.
+  `pipe.transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)`. Note
+  that this is different from using FP8. In FP8, we use quantization like shown above.
* Do NOT add any extra imports or lines of code that will not be used.
* Do NOT try to be too creative about combining the optimization techniques laid out above.
* Do NOT add extra arguments to the `pipe` call other than the `prompt`.
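The clarified rule distinguishes layerwise casting from FP8 quantization: `enable_layerwise_casting` stores a module's weights in `torch.float8_e4m3fn` and upcasts them to the compute dtype layer by layer during the forward pass, without applying any quantization scheme. A minimal sketch of how generated code might use it; this is not the Space's actual output, the model id is illustrative, and it assumes a recent diffusers release that exposes `enable_layerwise_casting` on the transformer.

# Minimal sketch, assuming diffusers exposes enable_layerwise_casting on the transformer.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.bfloat16
)
# Weights are stored in FP8 (float8_e4m3fn) but upcast to BF16 layer by layer at
# compute time; this trims memory without quantizing the model.
pipe.transformer.enable_layerwise_casting(
    storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16
)
pipe = pipe.to("cuda")
image = pipe(prompt="a photo of an astronaut riding a horse on mars").images[0]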
utils/pipeline_utils.py CHANGED
@@ -12,7 +12,7 @@ import requests
import struct
from huggingface_hub import hf_hub_url

-DTYPE_MAP = {"FP32": torch.float32, "FP16": torch.float16, "BF16": torch.bfloat16}
+DTYPE_MAP = {"FP32": torch.float32, "F16": torch.float16, "BF16": torch.bfloat16}


# https://huggingface.co/docs/safetensors/v0.3.2/metadata_parsing#python
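For context on why the key changed: the safetensors header reports dtype strings such as "F16" and "BF16", so a lookup keyed on "FP16" never matched half-precision checkpoints. Below is a minimal sketch of where those strings come from, following the metadata-parsing doc linked above; the repo and filename are illustrative and network access is assumed.

# Minimal sketch: reading the safetensors header to get the dtype string that
# DTYPE_MAP translates into a torch dtype. Repo/filename are illustrative.
import json
import struct

import requests
import torch
from huggingface_hub import hf_hub_url

DTYPE_MAP = {"FP32": torch.float32, "F16": torch.float16, "BF16": torch.bfloat16}

url = hf_hub_url(
    "stabilityai/stable-diffusion-xl-base-1.0",
    "unet/diffusion_pytorch_model.fp16.safetensors",
)
# The first 8 bytes encode the JSON header length as a little-endian uint64.
head = requests.get(url, headers={"Range": "bytes=0-7"}).content
header_len = struct.unpack("<Q", head)[0]
header = json.loads(
    requests.get(url, headers={"Range": f"bytes=8-{7 + header_len}"}).content
)

first_tensor = next(k for k in header if k != "__metadata__")
dtype_str = header[first_tensor]["dtype"]  # e.g. "F16" for an fp16 checkpoint
print(dtype_str, "->", DTYPE_MAP.get(dtype_str, torch.float32))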