yeshog50 committed
Commit 3067775 · verified · 1 Parent(s): e1e1a92

Update app.py

Files changed (1)
  1. app.py +23 -63
app.py CHANGED
@@ -3,16 +3,18 @@ import random
 import time
 import gradio as gr
 import torch
-from diffusers import DiffusionPipeline, LCMScheduler
+from diffusers import StableDiffusionPipeline, LCMScheduler
+from huggingface_hub import hf_hub_download
 
-# Configuration - Using optimized Flux model with LCM
-MODEL_ID = "ByteDance/Hyper-SD"
+# Configuration - Using optimized model
+MODEL_ID = "Lykon/dreamshaper-8-lcm"
 MODEL_CACHE = "model_cache"
 os.makedirs(MODEL_CACHE, exist_ok=True)
 
 # Load model with CPU optimizations
 def get_pipeline():
-    pipe = DiffusionPipeline.from_pretrained(
+    # Use traditional StableDiffusionPipeline instead of DiffusionPipeline
+    pipe = StableDiffusionPipeline.from_pretrained(
         MODEL_ID,
         torch_dtype=torch.float32,
         cache_dir=MODEL_CACHE,
@@ -36,18 +38,16 @@ print(f"Model loaded in {load_time:.2f} seconds")
 def generate_image(
     prompt: str,
     negative_prompt: str = "blurry, low quality, cartoon, drawing, text",
-    width: int = 768,
-    height: int = 768,
+    width: int = 512,   # Reduced for CPU performance
+    height: int = 512,  # Reduced for CPU performance
     seed: int = -1,
     guidance_scale: float = 2.0,
     num_inference_steps: int = 4
 ):
-    # Set seed if not provided
     if seed == -1:
         seed = random.randint(0, 2147483647)
     generator = torch.Generator(device="cpu").manual_seed(seed)
 
-    # Generate image with timing
     start_gen = time.time()
     with torch.no_grad():
         image = pipeline(
@@ -64,73 +64,33 @@ def generate_image(
     print(f"Generated {width}x{height} image in {gen_time:.2f} seconds")
     return image, seed, f"Generated in {gen_time:.2f}s | Loaded in {load_time:.2f}s"
 
-# Create optimized interface
-with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal")) as demo:
-    gr.Markdown("""
-    # ⚡ FLUX Turbo Generator
-    **Professional Quality Images · Lightning Fast CPU Generation**
-    """)
+# Create interface
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# ⚡ Turbo Image Generator")
 
     with gr.Row():
-        with gr.Column(scale=1):
-            prompt = gr.Textbox(
-                label="Prompt",
-                placeholder="Describe a professional, high-quality image...",
-                lines=3
-            )
-            negative_prompt = gr.Textbox(
-                label="Negative Prompt",
-                value="blurry, low quality, cartoon, drawing, text"
-            )
-            generate_btn = gr.Button("Generate Image", variant="primary")
+        with gr.Column():
+            prompt = gr.Textbox(label="Prompt", lines=3)
+            negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, low quality")
+            generate_btn = gr.Button("Generate", variant="primary")
 
-            with gr.Accordion("Advanced Settings", open=False):
-                with gr.Row():
-                    width = gr.Slider(512, 1024, value=768, step=64, label="Width")
-                    height = gr.Slider(512, 1024, value=768, step=64, label="Height")
-                with gr.Row():
-                    guidance = gr.Slider(1.0, 5.0, value=2.0, step=0.1, label="Guidance")
-                    steps = gr.Slider(1, 8, value=4, step=1, label="Steps")
+            with gr.Accordion("Advanced", open=False):
+                width = gr.Slider(384, 768, value=512, step=64, label="Width")
+                height = gr.Slider(384, 768, value=512, step=64, label="Height")
+                guidance = gr.Slider(1.0, 5.0, value=2.0, step=0.1, label="Guidance")
+                steps = gr.Slider(1, 8, value=4, step=1, label="Steps")
                 seed = gr.Number(label="Seed", value=-1)
 
-        with gr.Column(scale=1):
-            output_image = gr.Image(label="Result", type="pil", height=500)
+        with gr.Column():
+            output_image = gr.Image(label="Result", type="pil")
             used_seed = gr.Textbox(label="Used Seed")
             perf_info = gr.Textbox(label="Performance Info")
-
-    # Generation handler
+
     generate_btn.click(
         generate_image,
         inputs=[prompt, negative_prompt, width, height, seed, guidance, steps],
        outputs=[output_image, used_seed, perf_info]
     )
-
-    # Professional examples
-    gr.Examples(
-        examples=[
-            [
-                "Professional photograph of a futuristic city at golden hour, cinematic lighting, ultra-detailed",
-                "blurry, cartoon, drawing, text, watermark",
-                768,
-                768
-            ],
-            [
-                "Hyperrealistic portrait of a wise elderly man, detailed wrinkles, studio lighting, 8k resolution",
-                "anime, cartoon, deformed, ugly",
-                768,
-                1024
-            ],
-            [
-                "Majestic mountain landscape with crystal clear lake reflection, autumn colors, sharp focus",
-                "low quality, blurry, people, buildings",
-                1024,
-                768
-            ]
-        ],
-        inputs=[prompt, negative_prompt, width, height],
-        label="Professional Examples"
-    )
 
-# Launch the app
 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))
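
Note: the hunks above are truncated (the body of get_pipeline() cuts off after cache_dir=MODEL_CACHE, and the pipeline(...) call inside generate_image is not shown in full), so the commit does not display how the new Lykon/dreamshaper-8-lcm checkpoint is actually hooked up to LCMScheduler. The sketch below is an illustration only, based on the public diffusers API and the defaults visible in this diff; it is not taken from the hidden lines of app.py.

# Illustrative sketch -- not lines from this commit. Shows a typical way to run an
# LCM-distilled Stable Diffusion checkpoint on CPU with few steps and low guidance.
import torch
from diffusers import StableDiffusionPipeline, LCMScheduler

MODEL_ID = "Lykon/dreamshaper-8-lcm"  # model id used in the new version of app.py

pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,  # float32 for CPU inference
)
# LCM checkpoints are meant to be sampled with the LCM scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cpu")

generator = torch.Generator(device="cpu").manual_seed(42)
image = pipe(
    prompt="a watercolor painting of a lighthouse at dawn",
    negative_prompt="blurry, low quality",
    width=512,                 # matches the reduced CPU-friendly defaults
    height=512,
    guidance_scale=2.0,        # same default as generate_image()
    num_inference_steps=4,     # LCM models typically need only 4-8 steps
    generator=generator,
).images[0]
image.save("out.png")

The hypothetical generator seed (42) and prompt are placeholders; in the app the seed is randomized when -1 is passed, as shown in the hunk for generate_image().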