Update app.py
app.py
CHANGED
@@ -5,14 +5,17 @@ import gradio as gr
 from diffusers import StableDiffusionPipeline
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from transformers import CLIPFeatureExtractor
+from huggingface_hub import hf_hub_download
 
 # Configuration
-BASE_MODEL = "
+BASE_MODEL = "stabilityai/stable-diffusion-2-1-base"
+LORA_REPO = "Norod78/Flux-LoRA"
+LORA_FILENAME = "flux_lora.safetensors"
 MODEL_CACHE = "model_cache"
 os.makedirs(MODEL_CACHE, exist_ok=True)
 
 def get_pipeline():
-    # Load
+    # Load safety checker + feature extractor
     safety_checker = StableDiffusionSafetyChecker.from_pretrained(
         "CompVis/stable-diffusion-safety-checker"
     )
@@ -20,7 +23,7 @@ def get_pipeline():
         "openai/clip-vit-base-patch32"
     )
 
-    # Load
+    # Load base pipeline
     pipe = StableDiffusionPipeline.from_pretrained(
         BASE_MODEL,
         torch_dtype=torch.float32,
@@ -30,14 +33,20 @@ def get_pipeline():
         use_safetensors=True
    )
 
-    #
-
+    # Load and apply LoRA weights
+    lora_path = hf_hub_download(
+        repo_id=LORA_REPO,
+        filename=LORA_FILENAME,
+        cache_dir=MODEL_CACHE
+    )
+    pipe.load_lora_weights(lora_path)
+
+    pipe.to("cpu")
     pipe.enable_attention_slicing()
-    pipe.enable_model_cpu_offload()  # Requires accelerate>=0.17.0
 
     return pipe
 
-# Load once
+# Load model once
 pipeline = get_pipeline()
 
 def generate_image(prompt, negative_prompt="", width=768, height=768, seed=-1, guidance_scale=7.5, num_inference_steps=25):
@@ -47,7 +56,7 @@ def generate_image(prompt, negative_prompt="", width=768, height=768, seed=-1, guidance_scale=7.5, num_inference_steps=25):
 
     with torch.no_grad():
         output = pipeline(
-            prompt=f"
+            prompt=f"flux style, {prompt}",  # Stylized prompt
             negative_prompt=negative_prompt,
             width=width,
             height=height,
@@ -60,7 +69,7 @@ def generate_image(prompt, negative_prompt="", width=768, height=768, seed=-1, guidance_scale=7.5, num_inference_steps=25):
 
 # Gradio UI
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# Anime Image Generator (
+    gr.Markdown("# Flux-LoRA Anime Image Generator (CPU only)")
     with gr.Row():
         with gr.Column():
             prompt = gr.Textbox(label="Prompt", lines=3)
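For reference, a minimal standalone sketch of the loading path this commit introduces, reusing the same constants as app.py (BASE_MODEL, LORA_REPO, LORA_FILENAME, MODEL_CACHE). Running it outside the Gradio app is a quick way to check that the LoRA file downloads and attaches to the base pipeline; any incompatibility between the LoRA weights and the base model surfaces here rather than at Space startup.

import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import hf_hub_download

BASE_MODEL = "stabilityai/stable-diffusion-2-1-base"
LORA_REPO = "Norod78/Flux-LoRA"
LORA_FILENAME = "flux_lora.safetensors"
MODEL_CACHE = "model_cache"

# Download the LoRA file into the local cache directory
lora_path = hf_hub_download(
    repo_id=LORA_REPO,
    filename=LORA_FILENAME,
    cache_dir=MODEL_CACHE,
)

# Build the base pipeline on CPU and attach the LoRA weights,
# mirroring get_pipeline() in app.py
pipe = StableDiffusionPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float32,
    use_safetensors=True,
)
pipe.load_lora_weights(lora_path)
pipe.to("cpu")
pipe.enable_attention_slicing()

print("LoRA loaded from:", lora_path)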