jihuayang3 committed
Commit 968f90f · verified · 1 Parent(s): a3265bd

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -27,14 +27,14 @@ model_file = hf_hub_download(
     filename="diffusion_pytorch_model_promax.safetensors",
 )
 state_dict = load_state_dict(model_file)
-model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
+model = ControlNetModel_Union._load_pretrained_model(
     controlnet_model, state_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
 )
-model.to(device="cuda", dtype=torch.float16)
+model.to(device="cpu", dtype=torch.float16)
 
 vae = AutoencoderKL.from_pretrained(
     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
-).to("cuda")
+).to("cpu")
 
 pipe = StableDiffusionXLFillPipeline.from_pretrained(
     "SG161222/RealVisXL_V5.0_Lightning",
@@ -42,7 +42,7 @@ pipe = StableDiffusionXLFillPipeline.from_pretrained(
     vae=vae,
     controlnet=model,
     variant="fp16",
-).to("cuda")
+).to("cpu")
 
 pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
 
@@ -91,7 +91,7 @@ def infer(image, model_selection, width, height, overlap_width, num_inference_st
         negative_prompt_embeds,
         pooled_prompt_embeds,
         negative_pooled_prompt_embeds,
-    ) = pipe.encode_prompt(final_prompt, "cuda", True)
+    ) = pipe.encode_prompt(final_prompt, "cpu", True)
 
     for image in pipe(
         prompt_embeds=prompt_embeds,
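
The diff above replaces every hardcoded "cuda" device string with "cpu", moving model, VAE, pipeline, and prompt encoding onto the CPU. As a minimal sketch (not part of this commit, assuming only that torch is already imported in app.py), the same placement could instead be resolved once and reused, so the app runs on whichever device is available:

import torch

# Resolve the target device once; falls back to CPU when no GPU is visible.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical usage mirroring the lines this commit touches:
#   model.to(device=device, dtype=torch.float16)
#   vae = AutoencoderKL.from_pretrained(...).to(device)
#   pipe = StableDiffusionXLFillPipeline.from_pretrained(...).to(device)
#   ... = pipe.encode_prompt(final_prompt, device, True)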