Upload 10 files

Files changed:
- README.md (+1 −1)
- app.py (+16 −12)
- convert_url_to_diffusers_flux_gr.py (+42 −28)
README.md
CHANGED
```diff
@@ -4,7 +4,7 @@ emoji: 🎨➡️🧨
 colorFrom: indigo
 colorTo: purple
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.44.0
 app_file: app.py
 pinned: false
 license: mit
```
app.py
CHANGED
```diff
@@ -33,18 +33,22 @@ It saves you the trouble of typing them in.<br>
         )
         with gr.Column():
             dl_url = gr.Textbox(label="URL to download", placeholder="https://huggingface.co/marduk191/Flux.1_collection/blob/main/flux.1_dev_fp8_fp16t5-marduk191.safetensors", value="", max_lines=1)
-
-
-
-
-
-
-
-
-
-
-
-
+            with gr.Row():
+                hf_user = gr.Textbox(label="Your HF user ID", placeholder="username", value="", max_lines=1)
+                hf_repo = gr.Textbox(label="New repo name", placeholder="reponame", info="If empty, auto-complete", value="", max_lines=1)
+            with gr.Row():
+                hf_token = gr.Textbox(label="Your HF write token", placeholder="hf_...", value="", max_lines=1)
+                civitai_key = gr.Textbox(label="Your Civitai API Key (Optional)", info="If you download model from Civitai...", placeholder="", value="", max_lines=1)
+            with gr.Row():
+                data_type = gr.Radio(label="Output data type", choices=["bf16", "fp8"], value="fp8")
+                model_type = gr.Radio(label="Original model repo", choices=["dev", "schnell", "dev fp8", "schnell fp8"], value="dev")
+                use_original = gr.CheckboxGroup(label="Use original repo version", choices=["vae", "text_encoder", "text_encoder_2"], value=["vae", "text_encoder"])
+            with gr.Row():
+                is_dequat = gr.Checkbox(label="Dequantization", info="Deadly slow", value=False)
+                is_upload_sf = gr.Checkbox(label="Upload single safetensors file into new repo", value=False, visible=False)
+                is_fix_only = gr.Checkbox(label="Only fixing", value=False)
+                is_private = gr.Checkbox(label="Create private repo", value=True)
+                is_overwrite = gr.Checkbox(label="Overwrite repo", value=True)
             run_button = gr.Button(value="Submit")
             repo_urls = gr.CheckboxGroup(visible=False, choices=[], value=None)
             output_md = gr.Markdown(label="Output")
```
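These new inputs are presumably fed to the conversion handler when the Submit button is clicked. The `.click()` registration sits outside this hunk, so the sketch below is an assumption about the wiring: the handler name comes from `convert_url_to_diffusers_flux_gr.py`, and the argument order is illustrative only.

```python
# Hypothetical wiring sketch -- the actual .click() call is not shown in this
# diff; handler name and argument order are assumptions.
run_button.click(
    fn=convert_url_to_diffusers_repo_flux,
    inputs=[dl_url, hf_user, hf_repo, hf_token, civitai_key, is_upload_sf,
            data_type, model_type, use_original, is_dequat, is_fix_only,
            is_private, is_overwrite, repo_urls],
    outputs=[repo_urls, output_md],
)
```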
convert_url_to_diffusers_flux_gr.py
CHANGED
```diff
@@ -1,3 +1,4 @@
+import spaces
 import json
 import torch
 from safetensors.torch import load_file, save_file
```
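Hoisting `import spaces` to the first line matters on ZeroGPU Spaces: the `spaces` package must be imported before `torch` (or anything else that initializes CUDA) for the `@spaces.GPU()` decorator to work. A minimal layout:

```python
import spaces   # must come before torch on ZeroGPU Spaces
import torch

@spaces.GPU()
def spaces_dummy():
    pass
```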
```diff
@@ -5,6 +6,7 @@ from pathlib import Path
 import gc
 import gguf
 from dequant import dequantize_tensor # https://github.com/city96/ComfyUI-GGUF
+from huggingface_hub import HfFolder
 
 import os
 import argparse
```
```diff
@@ -14,13 +16,16 @@ import gradio as gr
 import subprocess
 subprocess.run('pip cache purge', shell=True)
 
-import spaces
 @spaces.GPU()
 def spaces_dummy():
     pass
 
-
-
+flux_diffusers_repos = {
+    "dev": "ChuckMcSneed/FLUX.1-dev",
+    "schnell": "black-forest-labs/FLUX.1-schnell",
+    "dev fp8": "John6666/flux1-dev-fp8-flux",
+    "schnell fp8": "John6666/flux1-schnell-fp8-flux",
+}
 system_temp_dir = "temp"
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
```
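The new `flux_diffusers_repos` dict maps each "Original model repo" UI choice to a Diffusers-format source repo. Later in the file it is queried with a "dev" fallback; the same lookup can be written more compactly (an equivalent sketch, not what the commit uses):

```python
# Equivalent to the conditional expression added in save_flux_other_diffusers():
# unknown model_type values fall back to the "dev" repo.
repo = flux_diffusers_repos.get(model_type, flux_diffusers_repos["dev"])
```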
```diff
@@ -38,6 +43,13 @@ TORCH_DTYPE = [torch.float32, torch.float, torch.float64, torch.double, torch.fl
 
 TORCH_QUANTIZED_DTYPE = [torch.quint8, torch.qint8, torch.qint32, torch.quint4x2]
 
+def get_token():
+    try:
+        token = HfFolder.get_token()
+    except Exception:
+        token = ""
+    return token
+
 def list_sub(a, b):
     return [e for e in a if e not in b]
 
```
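`get_token()` pairs with the `HfFolder.save_token()` calls added further down: the token is persisted to the local Hugging Face cache and read back wherever the upload code needs it. A usage sketch (the token string is a placeholder):

```python
from huggingface_hub import HfFolder

HfFolder.save_token("hf_xxxxxxxx")  # placeholder token, written to the HF cache
token = HfFolder.get_token()        # reads it back; returns None if unset
assert token == "hf_xxxxxxxx"
```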
```diff
@@ -47,6 +59,8 @@ def is_repo_name(s):
 
 def clear_cache():
     torch.cuda.empty_cache()
+    #torch.cuda.reset_max_memory_allocated()
+    #torch.cuda.reset_peak_memory_stats()
     gc.collect()
 
 def clear_sd(sd: dict):
```
```diff
@@ -54,6 +68,8 @@ def clear_sd(sd: dict):
         sd.pop(k)
     del sd
     torch.cuda.empty_cache()
+    #torch.cuda.reset_max_memory_allocated()
+    #torch.cuda.reset_peak_memory_stats()
     gc.collect()
 
 def clone_sd(sd: dict):
```
```diff
@@ -181,8 +197,7 @@ def is_repo_exists(repo_id):
 
 def create_diffusers_repo(new_repo_id, diffusers_folder, is_private, is_overwrite, progress=gr.Progress(track_tqdm=True)):
     from huggingface_hub import HfApi
-
-    hf_token = os.environ.get("HF_TOKEN")
+    hf_token = get_token()
     api = HfApi()
     try:
         progress(0, desc="Start uploading...")
```
```diff
@@ -440,12 +455,11 @@ with torch.no_grad():
             print(f"Saving quantized FLUX.1 {name} to {path}")
         else:
             progress(0.5, desc=f"Saving FLUX.1 {name} to: {path}")
-            if False and path.endswith("/transformer"):
+            if False and path.endswith("/transformer"): # omitted
                 from diffusers import FluxTransformer2DModel
                 has_guidance = any("guidance" in k for k in sd)
-                with init_empty_weights():
-
-                model.to("cpu")
+                #with init_empty_weights():
+                model = FluxTransformer2DModel(guidance_embeds=has_guidance).to("cpu")
                 model.load_state_dict(sd, strict=True)
                 print(f"Saving FLUX.1 {name} to: {path} (FluxTransformer2DModel)")
                 if metadata is not None:
```
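Context for the (disabled) branch above: `init_empty_weights()` from `accelerate` creates the model with meta-device parameters that hold no storage, so a subsequent `load_state_dict(sd, strict=True)` cannot copy into them; constructing the model directly on the CPU, as the new line does, avoids that. A sketch of the difference (the `accelerate` import is an assumption here, and the whole branch is dead code behind `if False` either way):

```python
from accelerate import init_empty_weights
from diffusers import FluxTransformer2DModel

with init_empty_weights():
    model = FluxTransformer2DModel(guidance_embeds=True)  # params on "meta", no storage
# model.load_state_dict(sd, strict=True) would fail here ("Cannot copy out of meta tensor")

model = FluxTransformer2DModel(guidance_embeds=True).to("cpu")  # real CPU tensors
# model.load_state_dict(sd, strict=True) now works
```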
```diff
@@ -658,49 +672,48 @@ def download_repo(repo_name, path, use_original=["vae", "text_encoder"], progres
     print(f"Downloading {repo_name}.")
     try:
         if "text_encoder_2" in use_original:
-            snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/diffusion*.*", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"])
+            snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/diffusion*.*", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.jpeg", "*.png", "*.webp"])
         else:
-            snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/diffusion*.*", "text_encoder_2/model*.*", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"])
+            snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/diffusion*.*", "text_encoder_2/model*.*", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.jpeg", "*.png", "*.webp"])
     except Exception as e:
         print(e)
 
-def
+def copy_missing_files(from_path, to_path, use_original=["vae", "text_encoder"]):
     import shutil
     if "text_encoder_2" in use_original:
         te_from = str(Path(from_path, "text_encoder_2"))
         te_to = str(Path(to_path, "text_encoder_2"))
         print(f"Copying Text Encoder 2 files {te_from} to {te_to}")
-        shutil.copytree(te_from, te_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+        shutil.copytree(te_from, te_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.jpeg", "*.png", "*.webp"), dirs_exist_ok=True)
     if "text_encoder" in use_original:
         te1_from = str(Path(from_path, "text_encoder"))
         te1_to = str(Path(to_path, "text_encoder"))
         print(f"Copying Text Encoder 1 files {te1_from} to {te1_to}")
-        shutil.copytree(te1_from, te1_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+        shutil.copytree(te1_from, te1_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.jpeg", "*.png", "*.webp"), dirs_exist_ok=True)
     if "vae" in use_original:
         vae_from = str(Path(from_path, "vae"))
         vae_to = str(Path(to_path, "vae"))
         print(f"Copying VAE files {vae_from} to {vae_to}")
-        shutil.copytree(vae_from, vae_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+        shutil.copytree(vae_from, vae_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.jpeg", "*.png", "*.webp"), dirs_exist_ok=True)
     tn2_from = str(Path(from_path, "tokenizer_2"))
     tn2_to = str(Path(to_path, "tokenizer_2"))
     print(f"Copying Tokenizer 2 files {tn2_from} to {tn2_to}")
-    shutil.copytree(tn2_from, tn2_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+    shutil.copytree(tn2_from, tn2_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.jpeg", "*.png", "*.webp"), dirs_exist_ok=True)
     print(f"Copying non-tensor files {from_path} to {to_path}")
-    shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp", "*.index.json"), dirs_exist_ok=True)
+    shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.jpeg", "*.png", "*.webp", "*.index.json"), dirs_exist_ok=True)
 
 def save_flux_other_diffusers(path: str, model_type: str = "dev", use_original: list = ["vae", "text_encoder"], progress=gr.Progress(track_tqdm=True)):
     import shutil
     progress(0, desc="Loading FLUX.1 Components.")
     print("Loading FLUX.1 Components.")
     temppath = system_temp_dir
-    if model_type
-    else: repo = flux_dev_repo
+    repo = flux_diffusers_repos.get(model_type, None) if model_type in flux_diffusers_repos else flux_diffusers_repos.get("dev", None)
     os.makedirs(temppath, exist_ok=True)
     os.makedirs(path, exist_ok=True)
     download_repo(repo, temppath, use_original)
     progress(0.5, desc="Saving FLUX.1 Components.")
     print("Saving FLUX.1 Components.")
-
+    copy_missing_files(temppath, path, use_original)
     shutil.rmtree(temppath)
 
 with torch.no_grad():
```
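The new `copy_missing_files()` fills the output folder with everything the converted transformer does not provide (text encoders, VAE, tokenizer, configs) while skipping weights and docs via ignore patterns. The core pattern, stripped down (the paths are illustrative):

```python
import shutil

# Merge one component folder into the destination; ignore_patterns() filters
# entries by name, and dirs_exist_ok=True (Python 3.8+) allows copying into a
# directory that already exists.
shutil.copytree(
    "temp/vae", "output/vae",
    ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.jpeg", "*.png", "*.webp"),
    dirs_exist_ok=True,
)
```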
```diff
@@ -855,10 +868,10 @@ def convert_url_to_diffusers_flux(url, civitai_key="", is_upload_sf=False, data_
     if hf_repo != "": new_repo_id = f"{hf_user}/{hf_repo}"
     flux_to_diffusers_lowmem(new_file, new_repo_name, dtype, quantization, model_type, dequant, use_original, new_repo_id)
 
-
+    if is_upload_sf:
         import shutil
         shutil.move(str(Path(new_file).resolve()), str(Path(new_repo_name, Path(new_file).name).resolve()))
-    else: os.remove(new_file)
+    else: os.remove(new_file)
 
     progress(1, desc="Converted.")
     q.put(new_repo_name)
```
```diff
@@ -902,7 +915,8 @@ def convert_url_to_diffusers_repo_flux(dl_url, hf_user, hf_repo, hf_token, civit
         print(f"Invalid user name: {hf_user}")
         progress(1, desc=f"Invalid user name: {hf_user}")
         return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
-    if hf_token and
+    if not hf_token and os.environ.get("HF_TOKEN"): HfFolder.save_token(os.environ.get("HF_TOKEN"))
+    else: HfFolder.save_token(hf_token)
     if not civitai_key and os.environ.get("CIVITAI_API_KEY"): civitai_key = os.environ.get("CIVITAI_API_KEY")
     q = mp.Queue()
     if fix_only:
```
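The added token handling prefers the form field and falls back to the `HF_TOKEN` Space secret, persisting whichever wins with `HfFolder.save_token()` so that `get_token()` (and `HfApi`) can pick it up later. Note that when both sources are empty, this still saves an empty string. Behaviorally, it amounts to this hypothetical helper:

```python
import os
from huggingface_hub import HfFolder

def persist_token(hf_token: str) -> None:
    # Mirrors the added lines: env fallback only when the field is empty.
    if not hf_token and os.environ.get("HF_TOKEN"):
        HfFolder.save_token(os.environ.get("HF_TOKEN"))
    else:
        HfFolder.save_token(hf_token)  # saves "" if neither source is set
```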
```diff
@@ -927,7 +941,7 @@ def convert_url_to_diffusers_repo_flux(dl_url, hf_user, hf_repo, hf_token, civit
         print(f"Repo already exists: {new_repo_id}")
         progress(1, desc=f"Repo already exists: {new_repo_id}")
         return gr.update(value=repo_urls, choices=repo_urls), gr.update(value="")
-
+    save_readme_md(new_path, dl_url)
     repo_url = create_diffusers_repo(new_repo_id, new_path, is_private, is_overwrite)
     shutil.rmtree(new_path)
     if not repo_urls: repo_urls = []
```
```diff
@@ -953,12 +967,12 @@ if __name__ == "__main__":
     quantization = False
     if args.dtype == "fp8": dtype = torch.float8_e4m3fn
     elif args.dtype == "fp16": dtype = torch.float16
-    elif args.dtype == "qfloat8":
-        dtype = torch.bfloat16
-        quantization = True
+    #elif args.dtype == "qfloat8":
+    #    dtype = torch.bfloat16
+    #    quantization = True
     else: dtype = torch.bfloat16
 
-    use_original = ["vae", "text_encoder"]
+    use_original = ["vae", "text_encoder", "text_encoder_2"]
     new_repo_id = ""
     use_local = True
 
```
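With the `qfloat8` branch commented out, the `__main__` dtype selection reduces to a three-way mapping; a hypothetical helper equivalent to the code above:

```python
import torch

def pick_dtype(name: str) -> torch.dtype:
    # "qfloat8" no longer has its own branch; it falls through to bfloat16
    # with quantization left False.
    if name == "fp8": return torch.float8_e4m3fn
    if name == "fp16": return torch.float16
    return torch.bfloat16
```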