import gradio as gr
from convert_diffusion_to_gguf import SUPPORTED_ARCHS, qconfig_map, convert
from huggingface_hub import create_repo, upload_file
from argparse import Namespace
from pathlib import Path


def upload(args, outfile):
    url = ""
    if args.host_repo_id and args.hf_token:
        # Create (or reuse) the target model repo, then push the GGUF file.
        repo_id = create_repo(args.host_repo_id, repo_type="model", exist_ok=True, token=args.hf_token).repo_id
        info = upload_file(repo_id=repo_id, path_in_repo=str(outfile), path_or_fileobj=str(outfile), token=args.hf_token)
        url = info.commit_url
        print(f"Uploaded to {url}")
    return url


def go_gguf(model_repo_id, subfolder, arch, outtype, outfile_name, bigendian, verbose, host_repo_id, hf_token):
    # Pack the UI inputs into the argparse-style Namespace the converter expects.
    args = Namespace(
        model=model_repo_id,
        subfolder=subfolder,
        arch=arch,
        outtype=outtype,
        outfile=Path(outfile_name),
        bigendian=bigendian,
        verbose=verbose,
        host_repo_id=host_repo_id,
        hf_token=hf_token,
    )
    convert(args)
    # Assumes convert() writes the GGUF to args.outfile; upload() returns the commit URL,
    # or an empty string when no upload is configured.
    return upload(args, args.outfile)
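

# A rough sketch of invoking the conversion without the UI (values borrowed from the
# FLUX.1-schnell example defined below; leave host_repo_id/hf_token empty to skip the
# upload step):
#
#     url = go_gguf(
#         model_repo_id="black-forest-labs/FLUX.1-schnell",
#         subfolder="transformer",
#         arch="flux",
#         outtype="Q4_0",
#         outfile_name="flux-schnell-q4.gguf",
#         bigendian=False,
#         verbose=False,
#         host_repo_id="",
#         hf_token="",
#     )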

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("<h1><center>GGUF Converter for Diffusers format model checkpoints</center></h1>")
    gr.Markdown(
        "Convert `diffusers` format model checkpoints from the Hub to GGUF format and optionally upload them back. Based on [this repo](https://github.com/ngxson/diffusion-to-gguf)."
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 📥 Input Model")
            model_repo_id = gr.Textbox(label="Model Repo ID", placeholder="e.g., Qwen/Qwen-Image")
            subfolder = gr.Textbox(label="Subfolder (Optional)", placeholder="e.g., transformer")

            gr.Markdown("### ⚙️ Conversion Settings")
            arch = gr.Dropdown(choices=SUPPORTED_ARCHS, label="Architecture")
            outtype = gr.Dropdown(choices=list(qconfig_map.keys()), label="Quantization Type", value="F16")
            # "{ftype}" appears to be a placeholder the converter expands to the chosen quantization type.
            outfile_name = gr.Textbox(label="Output Filename", value="{ftype}.gguf")

            with gr.Accordion("Advanced Settings", open=False):
                bigendian = gr.Checkbox(label="Use Big Endian")
                verbose = gr.Checkbox(label="Verbose Logging", value=True)

            gr.Markdown("### 🤗 Upload to Hub (Optional)")
            host_repo_id = gr.Textbox(label="Your Hub Repo ID", placeholder="e.g., YourUsername/My-GGUFs")
            hf_token = gr.Textbox(label="Hugging Face Token", type="password", placeholder="hf_...")

            convert_btn = gr.Button("Convert & Upload", variant="primary")

        with gr.Column(scale=2):
            gr.Markdown("### Result")
            url_output = gr.Markdown()

    gr.Examples(
        examples=[
            [
                "black-forest-labs/FLUX.1-schnell",
                "transformer",
                "flux",
                "Q4_0",
                "flux-schnell-q4.gguf",
                False,
                False,
                "YourUsername/MyGGUFs",
                "hf_...",
            ],
            [
                "Qwen/Qwen-Image",
                "transformer",
                "flux",
                "Q8_0",
                "qwen-q8.gguf",
                False,
                False,
                "YourUsername/MyGGUFs",
                "hf_...",
            ],
        ],
        inputs=[model_repo_id, subfolder, arch, outtype, outfile_name, bigendian, verbose, host_repo_id, hf_token],
    )

    convert_btn.click(
        fn=go_gguf,
        inputs=[model_repo_id, subfolder, arch, outtype, outfile_name, bigendian, verbose, host_repo_id, hf_token],
        outputs=[url_output],
    )


if __name__ == "__main__":
    demo.launch()
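    # Conversions of large checkpoints can take a while; depending on the Gradio version,
    # enabling request queuing before launching (e.g. demo.queue().launch()) may be preferable.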