# NOTE(review): the lines that were here were non-Python residue from a scraped
# hosting-page header (status banner, file size, hex/column rulers). They are
# removed/commented so the module parses; no program content was lost.
import os
import random
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
# Configuration: model identifier and local weight cache.
MODEL_ID = "OFA-Sys/small-stable-diffusion-v0"  # small SD variant, feasible on CPU
MODEL_CACHE = "model_cache"  # local directory for downloaded model weights
os.makedirs(MODEL_CACHE, exist_ok=True)  # ensure cache dir exists before first download
# Build the text-to-image pipeline with an explicitly configured scheduler.
def get_pipeline():
    """Construct a CPU-bound StableDiffusionPipeline for MODEL_ID.

    The Euler-ancestral scheduler is loaded separately so that
    ``steps_offset=1`` can be set, silencing the diffusers deprecation
    warning about the legacy default. The safety checker is disabled and
    weights are kept in float32 for CPU inference.
    """
    sampler = EulerAncestralDiscreteScheduler.from_pretrained(
        MODEL_ID,
        subfolder="scheduler",
        cache_dir=MODEL_CACHE,
        steps_offset=1,  # avoids the steps_offset deprecation warning
    )
    pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        scheduler=sampler,
        torch_dtype=torch.float32,  # full precision: CPU has no fp16 benefit
        cache_dir=MODEL_CACHE,
        safety_checker=None,
        local_files_only=False,
    )
    return pipe.to("cpu")
# Load the model once at import time (first run downloads weights — slow).
pipeline = get_pipeline()
def generate_image(
    prompt: str,
    width: int,
    height: int,
    seed: int,
    randomize_seed: bool,
    guidance_scale: float,
    num_inference_steps: int,
):
    """Run the module-level `pipeline` and return (PIL image, seed used).

    Parameters mirror the Gradio inputs. Gradio ``Number``/``Slider``
    components may deliver floats, so integer-valued parameters are
    coerced defensively before use.
    """
    # Pick a fresh seed when requested; otherwise honor the supplied one.
    if randomize_seed:
        seed = random.randint(0, 2147483647)
    # torch.Generator.manual_seed requires an int; gr.Number passes floats.
    seed = int(seed)
    generator = torch.Generator(device="cpu").manual_seed(seed)
    # Generate under no_grad: inference only, no autograd bookkeeping.
    with torch.no_grad():
        image = pipeline(
            prompt,
            width=int(width),
            height=int(height),
            guidance_scale=guidance_scale,
            num_inference_steps=int(num_inference_steps),
            generator=generator,
        ).images[0]
    return image, seed
# Style presets: keyword strings prepended to the user prompt by apply_style.
# Keys are the Dropdown choices shown in the UI — do not rename casually.
STYLE_PRESETS = {
"Realistic": "photorealistic, 8k, detailed, sharp focus",
"Anime": "anime style, vibrant colors, cel shading",
"Oil Painting": "oil painting, brush strokes, textured",
"Cyberpunk": "neon lights, cyberpunk, futuristic, rain",
"Minimalist": "minimalist, simple shapes, flat colors"
}
def apply_style(prompt, style_name):
    """Prefix *prompt* with the preset keywords for *style_name*.

    Unknown style names fall back to returning the prompt unchanged
    instead of raising KeyError — Dropdown values come from the UI,
    but be defensive against stale/custom values.
    """
    preset = STYLE_PRESETS.get(style_name)
    return f"{preset}, {prompt}" if preset else prompt
# ---- Gradio interface ----------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# ⚡ FLUX Turbo Generator
**CPU-Optimized Image Generation** · No APIs · No Limits
""")

    with gr.Row():
        # Left column: prompt entry and controls.
        with gr.Column(scale=3):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe what you want to create...",
                lines=3,
            )
            style = gr.Dropdown(
                label="Style Preset",
                choices=list(STYLE_PRESETS.keys()),
                value="Realistic",
            )
            generate_btn = gr.Button("Generate", variant="primary")

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    width = gr.Slider(384, 768, value=512, step=64, label="Width")
                    height = gr.Slider(384, 768, value=512, step=64, label="Height")
                guidance = gr.Slider(1.0, 20.0, value=7.5, step=0.5, label="Creativity")
                steps = gr.Slider(5, 50, value=20, step=5, label="Generation Steps")
                with gr.Row():
                    seed = gr.Number(label="Seed", value=0)
                    random_seed = gr.Checkbox(label="Random Seed", value=True)

        # Right column: result display.
        with gr.Column(scale=2):
            output_image = gr.Image(label="Generated Image", type="pil")
            used_seed = gr.Textbox(label="Used Seed", interactive=False)
            gr.Markdown("**Tip:** Use specific descriptions for better results")

    # Rewrite the prompt whenever the style preset changes.
    style.change(fn=apply_style, inputs=[prompt, style], outputs=prompt)

    # Main generation handler.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, width, height, seed, random_seed, guidance, steps],
        outputs=[output_image, used_seed],
    )

    # Clickable example prompts.
    gr.Examples(
        examples=[
            ["majestic mountain landscape at sunset, snow-capped peaks", "Realistic", 512, 512],
            ["cyberpunk city street at night, neon signs, rain puddles", "Cyberpunk", 512, 512],
            ["cute anime cat warrior wearing armor, fantasy setting", "Anime", 512, 512],
        ],
        inputs=[prompt, style, width, height],
        label="Example Prompts",
    )
# Launch with corrected parameters
if __name__ == "__main__":
demo.queue() # Enable queuing
demo.launch(
server_name="0.0.0.0",
server_port=int(os.getenv("PORT", 7860)) |