yeshog50 committed on
Commit
2073989
·
verified ·
1 Parent(s): 3cbcaed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -25
app.py CHANGED
@@ -1,43 +1,54 @@
1
  import os
2
  import random
 
3
  import gradio as gr
4
  import torch
5
- from diffusers import StableDiffusionPipeline # Changed import
6
- from transformers import CLIPTextModel, CLIPTokenizer
7
 
8
- # Configuration
9
- MODEL_ID = "CompVis/stable-diffusion-v1-4" # Changed to working model
10
  MODEL_CACHE = "model_cache"
11
  os.makedirs(MODEL_CACHE, exist_ok=True)
12
 
 
13
  def get_pipeline():
14
- pipe = StableDiffusionPipeline.from_pretrained(
15
  MODEL_ID,
16
  torch_dtype=torch.float32,
17
  cache_dir=MODEL_CACHE,
18
  safety_checker=None,
19
  use_safetensors=True
20
  )
 
 
 
21
  pipe = pipe.to("cpu")
22
  pipe.enable_attention_slicing()
23
  return pipe
24
 
25
  # Load model
 
 
26
  pipeline = get_pipeline()
 
 
27
 
28
  def generate_image(
29
  prompt: str,
30
- negative_prompt: str = "",
31
  width: int = 768,
32
  height: int = 768,
33
  seed: int = -1,
34
- guidance_scale: float = 7.5,
35
- num_inference_steps: int = 25
36
  ):
 
37
  if seed == -1:
38
  seed = random.randint(0, 2147483647)
39
  generator = torch.Generator(device="cpu").manual_seed(seed)
40
 
 
 
41
  with torch.no_grad():
42
  image = pipeline(
43
  prompt=prompt,
@@ -48,34 +59,78 @@ def generate_image(
48
  num_inference_steps=num_inference_steps,
49
  generator=generator
50
  ).images[0]
 
51
 
52
- return image, seed
 
53
 
54
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
55
- gr.Markdown("# 🌀 FLUX-Pro Image Generator")
 
 
 
 
56
 
57
  with gr.Row():
58
- with gr.Column():
59
- prompt = gr.Textbox(label="Prompt", lines=3)
60
- negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, low quality")
61
- generate_btn = gr.Button("Generate", variant="primary")
 
 
 
 
 
 
 
62
 
63
- with gr.Accordion("Advanced", open=False):
64
- width = gr.Slider(512, 1024, value=768, step=64, label="Width")
65
- height = gr.Slider(512, 1024, value=768, step=64, label="Height")
66
- guidance = gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance")
67
- steps = gr.Slider(15, 50, value=25, step=1, label="Steps")
 
 
68
  seed = gr.Number(label="Seed", value=-1)
69
 
70
- with gr.Column():
71
- output_image = gr.Image(label="Result", type="pil")
72
  used_seed = gr.Textbox(label="Used Seed")
73
-
 
 
74
  generate_btn.click(
75
  generate_image,
76
  inputs=[prompt, negative_prompt, width, height, seed, guidance, steps],
77
- outputs=[output_image, used_seed]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  )
79
 
 
80
  if __name__ == "__main__":
81
- demo.launch()
 
1
  import os
2
  import random
3
+ import time
4
  import gradio as gr
5
  import torch
6
+ from diffusers import DiffusionPipeline, LCMScheduler
 
7
 
8
+ # Configuration - Using optimized Flux model with LCM
9
+ MODEL_ID = "ByteDance/Hyper-SD"
10
  MODEL_CACHE = "model_cache"
11
  os.makedirs(MODEL_CACHE, exist_ok=True)
12
 
13
# Load model with CPU optimizations
def get_pipeline():
    """Build the diffusion pipeline this app serves.

    Loads MODEL_ID (cached under MODEL_CACHE) in float32, replaces the
    default scheduler with an LCM scheduler so very-few-step sampling
    works, moves the pipeline to CPU, and enables attention slicing to
    reduce peak memory.
    """
    # NOTE(review): MODEL_ID is "ByteDance/Hyper-SD", which appears to host
    # accelerator checkpoints rather than a full diffusers pipeline —
    # confirm from_pretrained() can actually resolve it.
    loaded = DiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,  # CPU inference: stay in fp32
        cache_dir=MODEL_CACHE,
        safety_checker=None,
        use_safetensors=True,
    )
    # Configure for fast generation
    loaded.scheduler = LCMScheduler.from_config(loaded.scheduler.config)
    loaded = loaded.to("cpu")
    loaded.enable_attention_slicing()
    return loaded
28
 
29
# Load model once at import time and record how long it took;
# `load_time` is later reported to users by generate_image().
print("Loading model...")
_t0 = time.time()
pipeline = get_pipeline()
load_time = time.time() - _t0
print(f"Model loaded in {load_time:.2f} seconds")
35
 
36
  def generate_image(
37
  prompt: str,
38
+ negative_prompt: str = "blurry, low quality, cartoon, drawing, text",
39
  width: int = 768,
40
  height: int = 768,
41
  seed: int = -1,
42
+ guidance_scale: float = 2.0,
43
+ num_inference_steps: int = 4
44
  ):
45
+ # Set seed if not provided
46
  if seed == -1:
47
  seed = random.randint(0, 2147483647)
48
  generator = torch.Generator(device="cpu").manual_seed(seed)
49
 
50
+ # Generate image with timing
51
+ start_gen = time.time()
52
  with torch.no_grad():
53
  image = pipeline(
54
  prompt=prompt,
 
59
  num_inference_steps=num_inference_steps,
60
  generator=generator
61
  ).images[0]
62
+ gen_time = time.time() - start_gen
63
 
64
+ print(f"Generated {width}x{height} image in {gen_time:.2f} seconds")
65
+ return image, seed, f"Generated in {gen_time:.2f}s | Loaded in {load_time:.2f}s"
66
 
67
# Create optimized interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal")) as demo:
    gr.Markdown("""
    # ⚡ FLUX Turbo Generator
    **Professional Quality Images · Lightning Fast CPU Generation**
    """)

    with gr.Row():
        # Left column: prompt inputs and generation controls.
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe a professional, high-quality image...",
                lines=3,
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="blurry, low quality, cartoon, drawing, text",
            )
            generate_btn = gr.Button("Generate Image", variant="primary")

            # Collapsed by default; exposes size, guidance, steps and seed.
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    width = gr.Slider(512, 1024, value=768, step=64, label="Width")
                    height = gr.Slider(512, 1024, value=768, step=64, label="Height")
                with gr.Row():
                    guidance = gr.Slider(1.0, 5.0, value=2.0, step=0.1, label="Guidance")
                    steps = gr.Slider(1, 8, value=4, step=1, label="Steps")
                seed = gr.Number(label="Seed", value=-1)

        # Right column: generated image plus seed/perf readouts.
        with gr.Column(scale=1):
            output_image = gr.Image(label="Result", type="pil", height=500)
            used_seed = gr.Textbox(label="Used Seed")
            perf_info = gr.Textbox(label="Performance Info")

    # Generation handler — argument order must match generate_image's signature.
    generate_btn.click(
        generate_image,
        inputs=[prompt, negative_prompt, width, height, seed, guidance, steps],
        outputs=[output_image, used_seed, perf_info],
    )

    # Professional examples
    gr.Examples(
        examples=[
            [
                "Professional photograph of a futuristic city at golden hour, cinematic lighting, ultra-detailed",
                "blurry, cartoon, drawing, text, watermark",
                768,
                768,
            ],
            [
                "Hyperrealistic portrait of a wise elderly man, detailed wrinkles, studio lighting, 8k resolution",
                "anime, cartoon, deformed, ugly",
                768,
                1024,
            ],
            [
                "Majestic mountain landscape with crystal clear lake reflection, autumn colors, sharp focus",
                "low quality, blurry, people, buildings",
                1024,
                768,
            ],
        ],
        inputs=[prompt, negative_prompt, width, height],
        label="Professional Examples",
    )
133
 
134
# Launch the app
if __name__ == "__main__":
    # Bind on all interfaces and honor the PORT env var (container/Spaces hosting).
    port = int(os.getenv("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)