#!/usr/bin/env python
import os
import gradio as gr
import numpy as np
import PIL.Image
import spaces
import torch
from diffusers import AutoencoderKL, DiffusionPipeline
# Markdown heading shown at the top of the Gradio UI.
DESCRIPTION = "# SDXL"
# Upper bound for the seed slider / random seed draws (max 32-bit signed int).
MAX_SEED = np.iinfo(np.int32).max
# Max width/height in pixels for the output sliders; overridable via env var.
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
# Prefer CUDA when available; the fp16 weights below will still load on CPU
# but inference will be very slow there.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Patched SDXL VAE that stays numerically stable when decoding in float16;
# shared by both the base and refiner pipelines below.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
# SDXL base pipeline (text -> image or latents).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to(device)
# SDXL refiner pipeline (latents -> refined image), used when "Apply refiner"
# is enabled in the UI.
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to(device)
def get_seed(randomize_seed: bool, seed: int) -> int:
    """Return the seed to use for the next generation run.

    Used to make image generation either reproducible (fixed seed) or varied
    (fresh random seed per run).

    Args:
        randomize_seed (bool): When True, draw a fresh random integer in
            [0, MAX_SEED) with NumPy's default RNG and ignore ``seed``.
        seed (int): The seed passed through unchanged when ``randomize_seed``
            is False.

    Returns:
        int: The chosen seed value.
    """
    if randomize_seed:
        # Lazily touch MAX_SEED only on this branch; cast to a plain int so
        # downstream torch.Generator.manual_seed gets a native Python int.
        return int(np.random.default_rng().integers(0, MAX_SEED))
    return seed
@spaces.GPU
def generate(
    prompt: str,
    negative_prompt: str = "",
    prompt_2: str = "",
    negative_prompt_2: str = "",
    use_negative_prompt: bool = False,
    use_prompt_2: bool = False,
    use_negative_prompt_2: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale_base: float = 5.0,
    guidance_scale_refiner: float = 5.0,
    num_inference_steps_base: int = 25,
    num_inference_steps_refiner: int = 25,
    apply_refiner: bool = False,
    progress: gr.Progress = gr.Progress(track_tqdm=True), # noqa: ARG001, B008
) -> PIL.Image.Image:
    """Generate an image from a text prompt with SDXL, optionally refined.

    Runs the base SDXL pipeline; when ``apply_refiner`` is True, the base
    model outputs latents that are then denoised further by the SDXL refiner.

    Note:
        All prompt-related inputs (`prompt`, `negative_prompt`, `prompt_2`,
        `negative_prompt_2`) should be in English for proper model performance.

    Args:
        prompt (str): Main text prompt guiding image generation.
        negative_prompt (str, optional): Elements to exclude from the image;
            only applied when ``use_negative_prompt`` is True.
        prompt_2 (str, optional): Secondary prompt (SDXL's second text
            encoder); only applied when ``use_prompt_2`` is True.
        negative_prompt_2 (str, optional): Secondary negative prompt; only
            applied when ``use_negative_prompt_2`` is True.
        use_negative_prompt (bool, optional): Whether to apply `negative_prompt`.
        use_prompt_2 (bool, optional): Whether to apply `prompt_2`.
        use_negative_prompt_2 (bool, optional): Whether to apply `negative_prompt_2`.
        seed (int, optional): Seed for the torch generator. The same seed and
            settings reproduce the same image; randomization, if any, is done
            by the caller (see ``get_seed``) before this function is invoked.
        width (int, optional): Output image width in pixels.
        height (int, optional): Output image height in pixels.
        guidance_scale_base (float, optional): Classifier-free guidance scale
            for the base model; higher values follow the prompt more closely.
        guidance_scale_refiner (float, optional): Guidance scale for the refiner.
        num_inference_steps_base (int, optional): Denoising steps for the base model.
        num_inference_steps_refiner (int, optional): Denoising steps for the refiner.
        apply_refiner (bool, optional): Whether to run the refiner stage on the
            base model's latents.
        progress (gr.Progress, optional): Gradio progress tracker; unused in
            the body, but ``track_tqdm=True`` relays the pipelines' tqdm bars
            to the UI.

    Returns:
        PIL.Image.Image: The generated image.
    """
    generator = torch.Generator().manual_seed(seed)
    # Drop optional prompts unless their checkboxes are ticked, so the
    # pipeline falls back to its internal defaults instead of seeing "".
    if not use_negative_prompt:
        negative_prompt = None # type: ignore
    if not use_prompt_2:
        prompt_2 = None # type: ignore
    if not use_negative_prompt_2:
        negative_prompt_2 = None # type: ignore
    if not apply_refiner:
        # Single-stage path: base model decodes straight to a PIL image.
        return pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            prompt_2=prompt_2,
            negative_prompt_2=negative_prompt_2,
            width=width,
            height=height,
            guidance_scale=guidance_scale_base,
            num_inference_steps=num_inference_steps_base,
            generator=generator,
            output_type="pil",
        ).images[0]
    # Two-stage path: keep the base output as latents so the refiner can
    # continue denoising without an intermediate decode/encode round-trip.
    latents = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        prompt_2=prompt_2,
        negative_prompt_2=negative_prompt_2,
        width=width,
        height=height,
        guidance_scale=guidance_scale_base,
        num_inference_steps=num_inference_steps_base,
        generator=generator,
        output_type="latent",
    ).images
    images = refiner(
        prompt=prompt,
        negative_prompt=negative_prompt,
        prompt_2=prompt_2,
        negative_prompt_2=negative_prompt_2,
        guidance_scale=guidance_scale_refiner,
        num_inference_steps=num_inference_steps_refiner,
        image=latents,
        generator=generator,
    ).images
    return images[0]
# Sample prompts surfaced in the gr.Examples widget below.
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
]
# ---------------------------------------------------------------------------
# Gradio UI: a prompt box + result image, with advanced options (secondary
# prompts, seed, size, guidance/steps for base and refiner) in an accordion.
# ---------------------------------------------------------------------------
with gr.Blocks(css_paths="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Group():
        with gr.Row():
            # Single-line prompt box; submit_btn=True renders an inline
            # submit button whose click fires prompt.submit.
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                submit_btn=True,
            )
        result = gr.Image(label="Result", show_label=False)
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
            use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
        # The three optional prompt boxes start hidden; the .change handlers
        # below toggle their visibility from the matching checkboxes.
        negative_prompt = gr.Textbox(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
            value="",
        )
        prompt_2 = gr.Textbox(
            label="Prompt 2",
            max_lines=1,
            placeholder="Enter your prompt",
            visible=False,
            value="",
        )
        negative_prompt_2 = gr.Textbox(
            label="Negative prompt 2",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=False,
            value="",
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
        apply_refiner = gr.Checkbox(label="Apply refiner", value=True)
        with gr.Row():
            guidance_scale_base = gr.Slider(
                label="Guidance scale for base",
                minimum=1,
                maximum=20,
                step=0.1,
                value=5.0,
            )
            num_inference_steps_base = gr.Slider(
                label="Number of inference steps for base",
                minimum=10,
                maximum=100,
                step=1,
                value=25,
            )
        # Refiner-only controls, grouped so they can be hidden as a unit
        # when "Apply refiner" is unchecked.
        with gr.Row() as refiner_params:
            guidance_scale_refiner = gr.Slider(
                label="Guidance scale for refiner",
                minimum=1,
                maximum=20,
                step=0.1,
                value=5.0,
            )
            num_inference_steps_refiner = gr.Slider(
                label="Number of inference steps for refiner",
                minimum=10,
                maximum=100,
                step=1,
                value=25,
            )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=result,
        fn=generate,
    )
    # Show/hide each optional prompt box when its checkbox changes.
    use_negative_prompt.change(
        fn=lambda x: gr.Textbox(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
        api_name=False,
    )
    use_prompt_2.change(
        fn=lambda x: gr.Textbox(visible=x),
        inputs=use_prompt_2,
        outputs=prompt_2,
        queue=False,
        api_name=False,
    )
    use_negative_prompt_2.change(
        fn=lambda x: gr.Textbox(visible=x),
        inputs=use_negative_prompt_2,
        outputs=negative_prompt_2,
        queue=False,
        api_name=False,
    )
    # Show/hide the refiner parameter row with the "Apply refiner" checkbox.
    apply_refiner.change(
        fn=lambda x: gr.Row(visible=x),
        inputs=apply_refiner,
        outputs=refiner_params,
        queue=False,
        api_name=False,
    )
    # On submit from any prompt box: first resolve the seed (possibly random)
    # and write it back to the slider, then run generation with that seed.
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            prompt_2.submit,
            negative_prompt_2.submit,
        ],
        fn=get_seed,
        inputs=[randomize_seed, seed],
        outputs=seed,
        queue=False,
    ).then(
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            prompt_2,
            negative_prompt_2,
            use_negative_prompt,
            use_prompt_2,
            use_negative_prompt_2,
            seed,
            width,
            height,
            guidance_scale_base,
            guidance_scale_refiner,
            num_inference_steps_base,
            num_inference_steps_refiner,
            apply_refiner,
        ],
        outputs=result,
        api_name="predict",
    )
if __name__ == "__main__":
    # mcp_server=True additionally exposes the app's API as an MCP server.
    demo.launch(mcp_server=True)