Update raw.py
raw.py CHANGED
@@ -6,6 +6,7 @@ from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
 from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
 from transformers import T5EncoderModel
 from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
+from peft import PeftModel, PeftConfig
 
 import gradio as gr
 huggingface_token = os.getenv("HUGGINFACE_TOKEN")
@@ -36,12 +37,14 @@ pipe = FluxControlNetPipeline.from_pretrained(
 )
 adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
 adapter_id2 = "XLabs-AI/flux-RealismLora"
+adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
 
 pipe.to("cuda")
 pipe.load_lora_weights(adapter_id, adapter_name="turbo")
 pipe.load_lora_weights(adapter_id2, adapter_name="real")
-pipe.
-pipe.
+pipe.load_lora_weights(adapter_id3, weight_name="lora.safetensors", adapter_name="enhance")
+pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
+pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
 pipe.unload_lora_weights()
 # pipe.enable_xformers_memory_efficient_attention()
 # save to the Hub