LPX55 committed
Commit 8c9f898 · verified · 1 Parent(s): 0d9cf7e

Update raw.py

Files changed (1): raw.py +5 -6
raw.py CHANGED
@@ -7,11 +7,11 @@ from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
 from transformers import T5EncoderModel
 from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
 from peft import PeftModel, PeftConfig
-from attention_map_diffusers import (
-    attn_maps,
-    init_pipeline,
-    save_attention_maps
-)
+# from attention_map_diffusers import (
+#     attn_maps,
+#     init_pipeline,
+#     save_attention_maps
+# )
 import gradio as gr
 huggingface_token = os.getenv("HUGGINFACE_TOKEN")
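For context on the aliased imports left untouched by this hunk: transformers and diffusers each ship their own BitsAndBytesConfig, which is why both are imported under distinct names — one quantizes the T5 text encoder, the other the diffusion model itself. Below is a minimal sketch of how the pair is typically wired up; a FLUX-style pipeline, the FLUX.1-dev repo ID, and the 4-bit settings are illustrative assumptions, not values read from raw.py.

import torch
from transformers import T5EncoderModel, BitsAndBytesConfig as TransformersBitsAndBytesConfig
from diffusers import FluxPipeline, FluxTransformer2DModel, BitsAndBytesConfig as DiffusersBitsAndBytesConfig

base = "black-forest-labs/FLUX.1-dev"  # assumed base checkpoint, not shown in this diff

# The transformers-side config quantizes the T5 text encoder...
text_encoder_2 = T5EncoderModel.from_pretrained(
    base,
    subfolder="text_encoder_2",
    quantization_config=TransformersBitsAndBytesConfig(load_in_4bit=True),
    torch_dtype=torch.bfloat16,
)

# ...while the diffusers-side config quantizes the diffusion transformer.
transformer = FluxTransformer2DModel.from_pretrained(
    base,
    subfolder="transformer",
    quantization_config=DiffusersBitsAndBytesConfig(load_in_4bit=True),
    torch_dtype=torch.bfloat16,
)

pipe = FluxPipeline.from_pretrained(
    base,
    text_encoder_2=text_encoder_2,
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)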
 
@@ -50,7 +50,6 @@ pipe.load_lora_weights(adapter_id3, weight_name="lora.safetensors", adapter_name
 pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
 pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
 pipe.unload_lora_weights()
-pipe = init_pipeline(pipe)
 # pipe.enable_xformers_memory_efficient_attention()
 # save to the Hub
 # pipe.push_to_hub("fused-t-r")
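The unchanged lines of this hunk are the tail of diffusers' multi-LoRA flow: each LoRA is loaded under a named adapter, set_adapters blends them with per-adapter weights, fuse_lora bakes the blend into the base weights, and unload_lora_weights then discards the now-redundant adapter modules. The sketch below shows that flow end to end; the pipeline class, base checkpoint, and adapter repo IDs are hypothetical stand-ins for the adapter_id1/adapter_id2/adapter_id3 values defined earlier in raw.py.

import torch
from diffusers import FluxPipeline  # assumed pipeline class for illustration

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# Hypothetical adapter repos; the real adapter_id1/2/3 live outside this hunk.
pipe.load_lora_weights("user/turbo-lora", weight_name="lora.safetensors", adapter_name="turbo")
pipe.load_lora_weights("user/real-lora", weight_name="lora.safetensors", adapter_name="real")
pipe.load_lora_weights("user/enhance-lora", weight_name="lora.safetensors", adapter_name="enhance")

# Blend the adapters, bake the blend into the base weights, then drop the LoRA modules.
pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
pipe.unload_lora_weights()

# Optionally publish the fused result, as the commented-out line above suggests.
# pipe.push_to_hub("fused-t-r")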
 
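Taken together, the two hunks switch off the attention_map_diffusers instrumentation: the import block is commented out and the init_pipeline(pipe) wrapping is removed, so inference no longer records cross-attention maps. Should it need to come back, the usual pattern looks roughly like the sketch below; the base pipeline and the save_attention_maps arguments follow that package's published example and are assumptions, not code taken from this repo.

import torch
from diffusers import FluxPipeline  # assumed base pipeline for illustration
from attention_map_diffusers import attn_maps, init_pipeline, save_attention_maps

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe = init_pipeline(pipe)  # patch the attention processors so maps are recorded during inference

prompts = ["a photo of a cat wearing a hat"]  # placeholder prompt
images = pipe(prompts, num_inference_steps=28).images

# Dump the recorded cross-attention maps; argument names here are assumed from the
# package's documented usage and may differ between versions.
save_attention_maps(attn_maps, pipe.tokenizer, prompts, base_dir="attn_maps", unconditional=True)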