TheAIBoi committed
Commit eb46155 · verified · 1 Parent(s): edff4a8

Update app.py

Files changed (1): app.py (+51 -13)
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import random
 
 import spaces #[uncomment to use ZeroGPU]
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL
+from diffusers import StableDiffusionXLPipeline, AutoencoderKL, StableDiffusionXLImg2ImgPipeline
 import torch
 from typing import Tuple
 
@@ -27,6 +27,17 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 )
 pipe.to(device)
 
+pipe_img2img = StableDiffusionXLImg2ImgPipeline.from_pretrained(
+    "RunDiffusion/Juggernaut-XL-v9",
+    vae=vae,
+    torch_dtype=torch.float16,
+    custom_pipeline="lpw_stable_diffusion_xl",
+    use_safetensors=True,
+    add_watermarker=False,
+    variant="fp16",
+)
+pipe_img2img.to(device)
+
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 4096
 
@@ -104,6 +115,8 @@ def infer(
     height,
     guidance_scale,
     num_inference_steps,
+    input_image=None,  # New parameter for input image
+    strength=0.8,  # New parameter for img2img strength
     progress=gr.Progress(track_tqdm=True),
 ):
     if randomize_seed:
@@ -111,15 +124,28 @@ def infer(
     prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
     generator = torch.Generator().manual_seed(seed)
 
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
+    if input_image is not None:
+        # Use img2img pipeline if an image is provided
+        image = pipe_img2img(
+            prompt=prompt,
+            image=input_image,  # Pass the input image
+            strength=strength,  # Control how much the image is changed
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            generator=generator,
+        ).images[0]
+    else:
+        # Use text2img pipeline otherwise
+        image = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator,
+        ).images[0]
 
     return image, seed
 
@@ -140,7 +166,6 @@ css = """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown(" # ImageGen, the fastest and most precise image generator")
-
         with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
@@ -149,11 +174,22 @@ with gr.Blocks(css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-
             run_button = gr.Button("Run", scale=0, variant="primary")
-
         result = gr.Image(label="Result", show_label=False)
 
+        # Add image input and strength slider
+        with gr.Row():
+            input_image = gr.Image(type="pil", label="Input Image (Optional)", show_label=True, height=200)
+            with gr.Column():
+                strength = gr.Slider(
+                    label="Image Strength",
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.01,
+                    value=0.8,  # Default strength for img2img
+                    visible=True,  # Make it visible if you want it always there, or toggle visibility with JS
+                )
+
         with gr.Row(visible=True):
             style_selection = gr.Radio(
                 show_label=True,
@@ -230,6 +266,8 @@ with gr.Blocks(css=css) as demo:
             height,
             guidance_scale,
             num_inference_steps,
+            input_image,  # Add input_image to inputs
+            strength,  # Add strength to inputs
         ],
         outputs=[result, seed],
     )
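
For reference, a minimal sketch of the img2img path this commit adds, run outside the Gradio UI. It assumes the same "RunDiffusion/Juggernaut-XL-v9" checkpoint, an available CUDA device, and a hypothetical local file input.png; the prompt, seed, and step count are placeholders, and strength mirrors the slider default of 0.8. It is not the Space's exact code.

# Standalone sketch of the new img2img branch (assumptions: CUDA available,
# "input.png" is a hypothetical local image, prompt/seed/steps are placeholders).
import torch
from PIL import Image
from diffusers import StableDiffusionXLImg2ImgPipeline

pipe_img2img = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to("cuda")

init_image = Image.open("input.png").convert("RGB")  # hypothetical input image
generator = torch.Generator().manual_seed(42)         # placeholder seed

image = pipe_img2img(
    prompt="a watercolor painting of a lighthouse at dusk",  # placeholder prompt
    image=init_image,      # starting image for img2img
    strength=0.8,          # matches the slider default added in this commit
    guidance_scale=7.0,
    num_inference_steps=30,
    generator=generator,
).images[0]
image.save("output.png")

Lower strength values keep the result closer to the input image; values near 1.0 let the prompt dominate, which is why the commit exposes it as a slider next to the optional image input.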