George Krupenchenkov committed on
Commit
983c23d
·
1 Parent(s): 5dd224a

add hw6 markups

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -123,12 +123,14 @@ def infer(
123
 
124
 
125
  else:
126
- print("in infer 2")
127
  pipe = StableDiffusionPipeline.from_pretrained(model_id,
128
  torch_dtype=torch_dtype,
129
  safety_checker=None) #.to(device)
 
130
 
131
  if lora_enable:
 
132
  unet_sub_dir = os.path.join(CKPT_DIR, "unet")
133
  text_encoder_sub_dir = os.path.join(CKPT_DIR, "text_encoder")
134
  adapter_name="sd-14-lora"
@@ -138,23 +140,27 @@ def infer(
138
  pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
139
  )
140
  params['cross_attention_kwargs']={"scale": lora_scale}
 
141
 
142
  if torch_dtype in (torch.float16, torch.bfloat16):
143
  pipe.unet.half()
144
  pipe.text_encoder.half()
145
 
146
  if ip_adapter_enable:
 
147
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
148
  pipe.set_ip_adapter_scale(ip_adapter_scale)
149
  params['ip_adapter_image'] = process_control_image(ip_image, "")
 
150
 
151
 
152
  # pipe.to(device)
153
 
154
-
 
155
  image = pipe(**params
156
  ).images[0]
157
-
158
  return image, seed
159
 
160
 
@@ -210,6 +216,7 @@ with gr.Blocks(css=css) as demo:
210
  max_lines=1,
211
  placeholder="Enter your prompt",
212
  container=False,
 
213
  )
214
 
215
  negative_prompt = gr.Textbox(
 
123
 
124
 
125
  else:
126
+ print("step: basic pipeline")
127
  pipe = StableDiffusionPipeline.from_pretrained(model_id,
128
  torch_dtype=torch_dtype,
129
  safety_checker=None) #.to(device)
130
+ print("step: basic pipeline done!")
131
 
132
  if lora_enable:
133
+ print("step: lora")
134
  unet_sub_dir = os.path.join(CKPT_DIR, "unet")
135
  text_encoder_sub_dir = os.path.join(CKPT_DIR, "text_encoder")
136
  adapter_name="sd-14-lora"
 
140
  pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
141
  )
142
  params['cross_attention_kwargs']={"scale": lora_scale}
143
+ print("step: lora done!")
144
 
145
  if torch_dtype in (torch.float16, torch.bfloat16):
146
  pipe.unet.half()
147
  pipe.text_encoder.half()
148
 
149
  if ip_adapter_enable:
150
+ print("step: ip_adapter_enable")
151
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
152
  pipe.set_ip_adapter_scale(ip_adapter_scale)
153
  params['ip_adapter_image'] = process_control_image(ip_image, "")
154
+ print("step: ip_adapter_enable done!")
155
 
156
 
157
  # pipe.to(device)
158
 
159
+ print("step: start generating")
160
+ print(**params)
161
  image = pipe(**params
162
  ).images[0]
163
+ print("step: generating done!")
164
  return image, seed
165
 
166
 
 
216
  max_lines=1,
217
  placeholder="Enter your prompt",
218
  container=False,
219
+ value="kawaiicat. The cat is having fun, is smiling."
220
  )
221
 
222
  negative_prompt = gr.Textbox(