Update app.py
app.py CHANGED

@@ -3,18 +3,19 @@ import numpy as np
 import random
 
 import spaces #[uncomment to use ZeroGPU]
-from diffusers import StableDiffusionXLPipeline
+from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "RunDiffusion/Juggernaut-XL-v9"  # Replace to the model you would like to use
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = StableDiffusionXLPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = StableDiffusionXLPipeline.from_pretrained(model_repo_id, vae=vae, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
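For context: the stock SDXL VAE is known to produce NaNs (rendered as black images) when run in float16, and madebyollin/sdxl-vae-fp16-fix is a finetuned VAE that decodes safely at that precision, which is presumably why this commit attaches it to the pipeline. Below is a minimal, self-contained sketch of how the patched pipeline might be exercised after this change; the prompt, step count, and output filename are illustrative assumptions, not part of the commit.

import random

import numpy as np
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# fp16-safe VAE, as added by the commit; loaded in float16 just as the diff does.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9", vae=vae, torch_dtype=torch_dtype
).to(device)

# Randomize the seed within the same int32 bound the app uses (MAX_SEED).
MAX_SEED = np.iinfo(np.int32).max
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(seed)

# Prompt and inference settings are hypothetical, chosen only to demonstrate a call.
image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=25,
    generator=generator,
).images[0]
image.save("output.png")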