aiqtech committed
Commit 2490d71 · verified · 1 Parent(s): 9eede63

Update app.py

Files changed (1):
  1. app.py +184 -105
app.py CHANGED
@@ -9,18 +9,71 @@ import torch
 from diffusers import DiffusionPipeline
 from PIL import Image
 
+# Apply more comprehensive patches to Gradio's utility functions
+import gradio_client.utils
+import types
+
+# Patch 1: Fix the _json_schema_to_python_type function
+original_json_schema = gradio_client.utils._json_schema_to_python_type
+
+def patched_json_schema(schema, defs=None):
+    # Handle boolean values directly
+    if isinstance(schema, bool):
+        return "bool"
+
+    # Handle cases where 'additionalProperties' is a boolean
+    try:
+        if "additionalProperties" in schema and isinstance(schema["additionalProperties"], bool):
+            schema["additionalProperties"] = {"type": "any"}
+    except (TypeError, KeyError):
+        pass
+
+    # Call the original function
+    try:
+        return original_json_schema(schema, defs)
+    except Exception as e:
+        # Fallback to a safe value when the schema can't be parsed
+        return "any"
+
+# Replace the original function with our patched version
+gradio_client.utils._json_schema_to_python_type = patched_json_schema
+
 # Create permanent storage directory
 SAVE_DIR = "saved_images"  # Gradio will handle the persistence
 if not os.path.exists(SAVE_DIR):
     os.makedirs(SAVE_DIR, exist_ok=True)
 
+# Safe settings for model loading
 device = "cuda" if torch.cuda.is_available() else "cpu"
 repo_id = "black-forest-labs/FLUX.1-dev"
 adapter_id = "openfree/flux-chatgpt-ghibli-lora"
 
-pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
-pipeline.load_lora_weights(adapter_id)
-pipeline = pipeline.to(device)
+def load_model_with_retry(max_retries=5):
+    for attempt in range(max_retries):
+        try:
+            print(f"Loading model attempt {attempt+1}/{max_retries}...")
+            pipeline = DiffusionPipeline.from_pretrained(
+                repo_id,
+                torch_dtype=torch.bfloat16,
+                use_safetensors=True,
+                resume_download=True
+            )
+            print("Model loaded successfully, loading LoRA weights...")
+            pipeline.load_lora_weights(adapter_id)
+            pipeline = pipeline.to(device)
+            print("Pipeline ready!")
+            return pipeline
+        except Exception as e:
+            if attempt < max_retries - 1:
+                wait_time = 10 * (attempt + 1)
+                print(f"Error loading model: {e}. Retrying in {wait_time} seconds...")
+                import time
+                time.sleep(wait_time)
+            else:
+                raise Exception(f"Failed to load model after {max_retries} attempts: {e}")
+
+# Load the model
+pipeline = load_model_with_retry()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
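
The monkey-patch above works around gradio_client versions that crash on boolean JSON schemas while building the API docs. A minimal sketch of how the patched parser behaves (the schemas are hypothetical; assumes the patch has already replaced `gradio_client.utils._json_schema_to_python_type`):

```python
import gradio_client.utils as gcu

# A bare boolean schema (in JSON Schema, true/false are valid schemas) is
# mapped straight to "bool" by the patch instead of raising inside the parser.
print(gcu._json_schema_to_python_type(True))  # -> "bool"

# A boolean "additionalProperties" is coerced to {"type": "any"} first, and
# any remaining parse failure falls back to the string "any" instead of raising.
schema = {"type": "object", "additionalProperties": True}
print(gcu._json_schema_to_python_type(schema))
```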
@@ -53,10 +106,6 @@ def load_generated_images():
     image_files.sort(key=lambda x: os.path.getctime(x), reverse=True)
     return image_files
 
-def load_predefined_images():
-    # Return empty list since we're not using predefined images
-    return []
-
 @spaces.GPU(duration=120)
 def inference(
     prompt: str,
@@ -73,21 +122,28 @@ def inference(
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    image = pipeline(
-        prompt=prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-        joint_attention_kwargs={"scale": lora_scale},
-    ).images[0]
-
-    # Save the generated image
-    filepath = save_generated_image(image, prompt)
-
-    # Return the image, seed, and updated gallery
-    return image, seed, load_generated_images()
+    # Error handling for the inference process
+    try:
+        image = pipeline(
+            prompt=prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator,
+            joint_attention_kwargs={"scale": lora_scale},
+        ).images[0]
+
+        # Save the generated image
+        filepath = save_generated_image(image, prompt)
+
+        # Return the image, seed, and updated gallery
+        return image, seed, load_generated_images()
+    except Exception as e:
+        # Log the error and return a simple error image
+        print(f"Error during inference: {e}")
+        error_img = Image.new('RGB', (width, height), color='red')
+        return error_img, seed, load_generated_images()
 
 examples = [
     "Ghibli style futuristic stormtrooper with glossy white armor and a sleek helmet, standing heroically on a lush alien planet, vibrant flowers blooming around, soft sunlight illuminating the scene, a gentle breeze rustling the leaves. The armor reflects the pink and purple hues of the alien sunset, creating an ethereal glow around the figure. [trigger]",
@@ -109,105 +165,121 @@ footer {
 }
 """
 
-with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, analytics_enabled=False) as demo:
+# Use a simpler UI configuration that is less likely to cause issues
+with gr.Blocks(css=css, analytics_enabled=False) as demo:
     gr.HTML('<div class="title"> FLUX Ghibli LoRA</div>')
-    gr.HTML('<div class="title">😄Image to Video Explore: <a href="https://huggingface.co/spaces/ginigen/theater" target="_blank">https://huggingface.co/spaces/ginigen/theater</a></div>')
 
-    with gr.Tabs() as tabs:
-        with gr.Tab("Generation"):
-            with gr.Column(elem_id="col-container"):
-                with gr.Row():
-                    prompt = gr.Text(
-                        label="Prompt",
-                        show_label=False,
-                        max_lines=1,
-                        placeholder="Enter your prompt",
-                        container=False,
-                    )
-                    run_button = gr.Button("Run", scale=0)
-
-                result = gr.Image(label="Result", show_label=False)
-
-                with gr.Accordion("Advanced Settings", open=False):
-                    seed = gr.Slider(
-                        label="Seed",
-                        minimum=0,
-                        maximum=MAX_SEED,
-                        step=1,
-                        value=42,
-                    )
-                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-                    with gr.Row():
-                        width = gr.Slider(
-                            label="Width",
-                            minimum=256,
-                            maximum=MAX_IMAGE_SIZE,
-                            step=32,
-                            value=1024,
-                        )
-                        height = gr.Slider(
-                            label="Height",
-                            minimum=256,
-                            maximum=MAX_IMAGE_SIZE,
-                            step=32,
-                            value=768,
-                        )
-
-                    with gr.Row():
-                        guidance_scale = gr.Slider(
-                            label="Guidance scale",
-                            minimum=0.0,
-                            maximum=10.0,
-                            step=0.1,
-                            value=3.5,
-                        )
-                        num_inference_steps = gr.Slider(
-                            label="Number of inference steps",
-                            minimum=1,
-                            maximum=50,
-                            step=1,
-                            value=30,
-                        )
-                        lora_scale = gr.Slider(
-                            label="LoRA scale",
-                            minimum=0.0,
-                            maximum=1.0,
-                            step=0.1,
-                            value=1.0,
-                        )
-
-                gr.Examples(
-                    examples=examples,
-                    inputs=[prompt],
-                    outputs=[result, seed],
-                )
-
-        with gr.Tab("Gallery"):
-            gallery_header = gr.Markdown("### Generated Images Gallery")
-            generated_gallery = gr.Gallery(
-                label="Generated Images",
-                columns=6,
-                show_label=False,
-                value=load_generated_images(),
-                elem_id="generated_gallery",
-                height="auto"
-            )
-            refresh_btn = gr.Button("🔄 Refresh Gallery")
+    with gr.Row():
+        with gr.Column(scale=3):
+            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt")
+
+            with gr.Row():
+                run_button = gr.Button("Generate Image")
+                clear_button = gr.Button("Clear")
+
+            with gr.Accordion("Settings", open=False):
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=42,
+                )
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                with gr.Row():
+                    width = gr.Slider(
+                        label="Width",
+                        minimum=256,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=32,
+                        value=1024,
+                    )
+                    height = gr.Slider(
+                        label="Height",
+                        minimum=256,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=32,
+                        value=768,
+                    )
+
+                with gr.Row():
+                    guidance_scale = gr.Slider(
+                        label="Guidance scale",
+                        minimum=0.0,
+                        maximum=10.0,
+                        step=0.1,
+                        value=3.5,
+                    )
+                    num_inference_steps = gr.Slider(
+                        label="Steps",
+                        minimum=1,
+                        maximum=50,
+                        step=1,
+                        value=30,
+                    )
+                    lora_scale = gr.Slider(
+                        label="LoRA scale",
+                        minimum=0.0,
+                        maximum=1.0,
+                        step=0.1,
+                        value=1.0,
+                    )
+
+            gr.Examples(
+                examples=examples,
+                inputs=prompt,
+            )
+
+        with gr.Column(scale=4):
+            result = gr.Image(label="Generated Image")
+            seed_text = gr.Number(label="Used Seed", value=42)
+
+    with gr.Tab("Gallery"):
+        gallery_header = gr.Markdown("### Generated Images Gallery")
+        generated_gallery = gr.Gallery(
+            label="Generated Images",
+            columns=3,
+            value=load_generated_images(),
+            height="auto"
+        )
+        refresh_btn = gr.Button("🔄 Refresh Gallery")
 
     # Event handlers
     def refresh_gallery():
         return load_generated_images()
 
+    def clear_output():
+        return "", gr.update(value=None), seed
+
     refresh_btn.click(
        fn=refresh_gallery,
        inputs=None,
        outputs=generated_gallery,
     )
+
+    clear_button.click(
+        fn=clear_output,
+        inputs=None,
+        outputs=[prompt, result, seed_text]
+    )
 
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
+    run_button.click(
+        fn=inference,
+        inputs=[
+            prompt,
+            seed,
+            randomize_seed,
+            width,
+            height,
+            guidance_scale,
+            num_inference_steps,
+            lora_scale,
+        ],
+        outputs=[result, seed_text, generated_gallery],
+    )
+
+    prompt.submit(
         fn=inference,
         inputs=[
             prompt,
@@ -219,8 +291,15 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css, analytics_enabled=Fa
             num_inference_steps,
             lora_scale,
         ],
-        outputs=[result, seed, generated_gallery],
+        outputs=[result, seed_text, generated_gallery],
     )
 
-demo.queue()
-demo.launch()
+# Launch with fallback options
+try:
+    demo.queue(concurrency_count=1, max_size=10)
+    demo.launch(debug=True, show_api=False)
+except Exception as e:
+    print(f"Error during launch: {e}")
+    print("Trying alternative launch configuration...")
+    # Skip queue and simplify launch parameters
+    demo.launch(debug=True, show_api=False, share=False)
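
In the UI hunks above, the single `gr.on(...)` registration is split into separate `run_button.click` and `prompt.submit` bindings with identical inputs and outputs. The two forms are equivalent wirings; a stripped-down sketch of the consolidated one, assuming a Gradio version that ships `gr.on` (placeholder handler and components, not the real inference):

```python
import gradio as gr

def handle(prompt_text):
    # Stand-in for the real inference function.
    return f"Generated for: {prompt_text}"

with gr.Blocks() as sketch:
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Generate")
    result = gr.Textbox(label="Result")

    # One registration covers both the button click and pressing Enter
    # in the textbox, instead of two separate .click/.submit calls.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=handle,
        inputs=prompt,
        outputs=result,
    )
```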
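The launch fallback at the end of the diff exists because `queue(concurrency_count=...)` is a Gradio 3.x signature that newer Gradio versions reject; when it raises, the except branch relaunches with simpler parameters. An alternative sketch that probes the installed signature instead of catching the failure (`queue_compat` is a hypothetical helper, and the parameter split is our assumption about the two major versions):

```python
import inspect

def queue_compat(demo, max_size=10):
    # Gradio 3.x: Blocks.queue(concurrency_count=..., max_size=...)
    # Gradio 4.x: concurrency_count was removed; max_size remains.
    params = inspect.signature(demo.queue).parameters
    if "concurrency_count" in params:
        return demo.queue(concurrency_count=1, max_size=max_size)
    return demo.queue(max_size=max_size)
```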