Files changed (9)
  1. .gitignore +0 -207
  2. README.md +1 -1
  3. app.py +0 -0
  4. constants.py +23 -49
  5. image_processor.py +2 -2
  6. packages.txt +1 -1
  7. pre-requirements.txt +0 -1
  8. requirements.txt +3 -11
  9. utils.py +485 -714
.gitignore DELETED
@@ -1,207 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[codz]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py.cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # UV
- # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- #uv.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
- #poetry.toml
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
- # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
- #pdm.lock
- #pdm.toml
- .pdm-python
- .pdm-build/
-
- # pixi
- # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
- #pixi.lock
- # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
- # in the .venv directory. It is recommended not to include this directory in version control.
- .pixi
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .envrc
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- #.idea/
-
- # Abstra
- # Abstra is an AI-powered process automation framework.
- # Ignore directories containing user credentials, local state, and settings.
- # Learn more at https://abstra.io/docs
- .abstra/
-
- # Visual Studio Code
- # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
- # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
- # and can be added to the global gitignore or merged into this file. However, if you prefer,
- # you could uncomment the following to ignore the entire vscode folder
- # .vscode/
-
- # Ruff stuff:
- .ruff_cache/
-
- # PyPI configuration file
- .pypirc
-
- # Cursor
- # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
- # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
- # refer to https://docs.cursor.com/context/ignore-files
- .cursorignore
- .cursorindexingignore
-
- # Marimo
- marimo/_static/
- marimo/_lsp/
- __marimo__/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧩🖼️
  colorFrom: red
  colorTo: pink
  sdk: gradio
- sdk_version: 5.44.1
+ sdk_version: 4.31.3
  app_file: app.py
  pinned: true
  license: mit
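For context, `sdk_version` lives in the YAML front matter at the top of `README.md`, which Hugging Face Spaces reads to decide which Gradio release to launch. A minimal, stdlib-only sketch (not part of the PR) for extracting the pinned value locally:

```python
import re
from pathlib import Path

def read_sdk_version(readme_path="README.md"):
    """Return the sdk_version pinned in the Spaces front matter, or None."""
    text = Path(readme_path).read_text(encoding="utf-8")
    # The front matter is the block delimited by the leading '---' markers.
    match = re.search(r"^---\n(.*?)\n---", text, flags=re.S)
    if not match:
        return None
    version = re.search(r"^sdk_version:\s*(\S+)", match.group(1), flags=re.M)
    return version.group(1) if version else None

if __name__ == "__main__":
    print(read_sdk_version())  # expected to print 4.31.3 after this change
```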
app.py CHANGED
The diff for this file is too large to render. See raw diff
 
constants.py CHANGED
@@ -9,8 +9,6 @@ from stablepy import (
  IP_ADAPTERS_SDXL,
  )

- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
-
  # - **Download Models**
  DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"

@@ -25,12 +23,12 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'Laxhar/noobai-XL-1.1',
  'Laxhar/noobai-XL-Vpred-1.0',
  'black-forest-labs/FLUX.1-dev',
- 'black-forest-labs/FLUX.1-Krea-dev',
  'John6666/blue-pencil-flux1-v021-fp8-flux',
  'John6666/wai-ani-flux-v10forfp8-fp8-flux',
  'John6666/xe-anime-flux-v04-fp8-flux',
  'John6666/lyh-anime-flux-v2a1-fp8-flux',
  'John6666/carnival-unchained-v10-fp8-flux',
+ 'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
  'Freepik/flux.1-lite-8B-alpha',
  'shauray/FluxDev-HyperSD-merged',
  'mikeyandfriends/PixelWave_FLUX.1-dev_03',
@@ -39,19 +37,23 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  # 'ostris/OpenFLUX.1',
  'shuttleai/shuttle-3-diffusion',
  'Laxhar/noobai-XL-1.0',
+ 'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
  'Laxhar/noobai-XL-0.77',
  'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
  'Laxhar/noobai-XL-0.6',
  'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
  'John6666/noobai-cyberfix-v10-sdxl',
  'John6666/noobaiiter-xl-vpred-v075-sdxl',
- 'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
- 'John6666/sigmaih-15-sdxl',
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
+ 'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
  'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
  'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
  'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
  'John6666/mistoon-anime-v10illustrious-sdxl',
- 'John6666/hassaku-xl-illustrious-v22-sdxl',
+ 'John6666/hassaku-xl-illustrious-v10-sdxl',
+ 'John6666/hassaku-xl-illustrious-v10style-sdxl',
  'John6666/haruki-mix-illustrious-v10-sdxl',
  'John6666/noobreal-v10-sdxl',
  'John6666/complicated-noobai-merge-vprediction-sdxl',
@@ -62,7 +64,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'Laxhar/noobai-XL-Vpred-0.65',
  'Laxhar/noobai-XL-Vpred-0.6',
  'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
- 'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
  'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
  'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
  'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -73,34 +74,19 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'John6666/obsession-illustriousxl-v21-sdxl',
  'John6666/obsession-illustriousxl-v30-sdxl',
  'John6666/obsession-illustriousxl-v31-sdxl',
- 'John6666/one-obsession-13-sdxl',
- 'John6666/one-obsession-14-24d-sdxl',
- 'John6666/one-obsession-15-noobai-sdxl',
- 'John6666/one-obsession-v16-noobai-sdxl',
- 'John6666/prefect-illustrious-xl-v3-sdxl',
  'John6666/wai-nsfw-illustrious-v70-sdxl',
- 'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
  'John6666/illustrious-pony-mix-v3-sdxl',
- 'John6666/nova-anime-xl-il-v90-sdxl',
- 'John6666/nova-anime-xl-il-v110-sdxl',
- 'John6666/nova-orange-xl-re-v10-sdxl',
- 'John6666/nova-orange-xl-v110-sdxl',
- 'John6666/nova-orange-xl-re-v20-sdxl',
- 'John6666/nova-unreal-xl-v60-sdxl',
- 'John6666/nova-unreal-xl-v70-sdxl',
- 'John6666/nova-unreal-xl-v80-sdxl',
- 'John6666/nova-cartoon-xl-v40-sdxl',
+ 'John6666/nova-anime-xl-illustriousv10-sdxl',
+ 'John6666/nova-orange-xl-v30-sdxl',
  'John6666/silvermoon-mix03-illustrious-v10-sdxl',
  'eienmojiki/Anything-XL',
  'eienmojiki/Starry-XL-v5.2',
- 'votepurchase/plantMilkModelSuite_walnut',
  'John6666/meinaxl-v2-sdxl',
  'Eugeoter/artiwaifu-diffusion-2.0',
  'comin/IterComp',
- 'John6666/epicrealism-xl-v8kiss-sdxl',
- 'John6666/epicrealism-xl-v10kiss2-sdxl',
  'John6666/epicrealism-xl-vxiabeast-sdxl',
- 'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
+ 'John6666/epicrealism-xl-v10kiss2-sdxl',
+ 'John6666/epicrealism-xl-v8kiss-sdxl',
  'misri/zavychromaxl_v80',
  'SG161222/RealVisXL_V4.0',
  'SG161222/RealVisXL_V5.0',
@@ -116,10 +102,8 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'John6666/ebara-mfcg-pony-mix-v12-sdxl',
  'John6666/t-ponynai3-v51-sdxl',
  'John6666/t-ponynai3-v65-sdxl',
- 'John6666/t-ponynai3-v7-sdxl',
  'John6666/prefect-pony-xl-v3-sdxl',
  'John6666/prefect-pony-xl-v4-sdxl',
- 'John6666/prefect-pony-xl-v50-sdxl',
  'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
  'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
  'John6666/wai-real-mix-v11-sdxl',
@@ -127,14 +111,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'John6666/wai-c-v6-sdxl',
  'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
  'John6666/sifw-annihilation-xl-v2-sdxl',
- 'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
  'John6666/photo-realistic-pony-v5-sdxl',
  'John6666/pony-realism-v21main-sdxl',
  'John6666/pony-realism-v22main-sdxl',
- 'John6666/pony-realism-v23-ultra-sdxl',
+ 'John6666/cyberrealistic-pony-v63-sdxl',
+ 'John6666/cyberrealistic-pony-v64-sdxl',
  'John6666/cyberrealistic-pony-v65-sdxl',
  'John6666/cyberrealistic-pony-v7-sdxl',
- 'John6666/cyberrealistic-pony-v127-alternative-sdxl',
  'GraydientPlatformAPI/realcartoon-pony-diffusion',
  'John6666/nova-anime-xl-pony-v5-sdxl',
  'John6666/autismmix-sdxl-autismmix-pony-sdxl',
@@ -144,15 +127,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'John6666/duchaiten-pony-real-v11fix-sdxl',
  'John6666/duchaiten-pony-real-v20-sdxl',
  'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
+ 'odyssey-labs/OdysseyXL-3.0',
+ 'odyssey-labs/OdysseyXL-4.0',
  'KBlueLeaf/Kohaku-XL-Zeta',
  'cagliostrolab/animagine-xl-3.1',
- 'cagliostrolab/animagine-xl-4.0',
  'yodayo-ai/kivotos-xl-2.0',
  'yodayo-ai/holodayo-xl-2.1',
  'yodayo-ai/clandestine-xl-1.0',
- 'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
- 'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
- 'https://civitai.com/models/30240?modelVersionId=125771',
  'digiplay/majicMIX_sombre_v2',
  'digiplay/majicMIX_realistic_v6',
  'digiplay/majicMIX_realistic_v7',
@@ -178,9 +159,9 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
  'GraydientPlatformAPI/realcartoon3d-17',
  'GraydientPlatformAPI/realcartoon-pixar11',
  'GraydientPlatformAPI/realcartoon-real17',
+ 'nitrosocke/Ghibli-Diffusion',
  ]

-
  DIFFUSERS_FORMAT_LORAS = [
  "nerijs/animation2k-flux",
  "XLabs-AI/flux-RealismLora",
@@ -202,11 +183,8 @@ DIRECTORY_VAES = 'vaes'
  DIRECTORY_EMBEDS = 'embedings'
  DIRECTORY_UPSCALERS = 'upscalers'

+ CACHE_HF = "/home/user/.cache/huggingface/hub/"
  STORAGE_ROOT = "/home/user/"
- CACHE_HF_ROOT = os.path.expanduser("~/.cache/huggingface")
- CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
- if IS_ZERO_GPU:
- os.environ["HF_HOME"] = CACHE_HF

  TASK_STABLEPY = {
  'txt2img': 'txt2img',
@@ -248,7 +226,6 @@ UPSCALER_DICT_GUI = {
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
- "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
@@ -382,11 +359,9 @@ SUBTITLE_GUI = (
  " to perform different tasks in image generation."
  )

- msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
-
  HELP_GUI = (
- f"""### Help:
- {msg_zero}
+ """### Help:
+ - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'.
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
  """
@@ -510,7 +485,7 @@ EXAMPLES_GUI = [
  20,
  4.0,
  -1,
- ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
+ "loras/Coloring_book_-_LineArt.safetensors",
  1.0,
  "DPM++ 2M SDE",
  1024,
@@ -605,7 +580,6 @@ EXAMPLES_GUI = [
  RESOURCES = (
  """### Resources
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
- - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
- - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
+ - You can also try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
  """
- )
+ )
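The hard-coded `CACHE_HF` path matters because Diffusers repos downloaded from the Hub land under it using the standard `models--{org}--{name}` cache folder naming. A rough illustrative sketch (not part of the PR) of how a repo id from `LOAD_DIFFUSERS_FORMAT_MODEL` maps to its expected cache folder:

```python
import os

CACHE_HF = "/home/user/.cache/huggingface/hub/"

def cached_repo_path(repo_id: str) -> str:
    """Map a repo id like 'Laxhar/noobai-XL-1.1' to its expected hub cache folder."""
    return os.path.join(CACHE_HF, "models--" + repo_id.replace("/", "--"))

def is_cached(repo_id: str) -> bool:
    # True if the repo's cache folder already exists locally.
    return os.path.isdir(cached_repo_path(repo_id))

print(cached_repo_path("Laxhar/noobai-XL-1.1"))
# /home/user/.cache/huggingface/hub/models--Laxhar--noobai-XL-1.1
```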
image_processor.py CHANGED
@@ -92,8 +92,8 @@ def preprocessor_tab():
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
- pre_value_threshold = gr.Slider(minimum=0., maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
- pre_distance_threshold = gr.Slider(minimum=0., maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+ pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+ pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
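Since several slider ranges are retuned here, a small standalone sanity check (illustrative only, not part of the PR) can flag a default `value` that falls outside the new `minimum`/`maximum` bounds, e.g. the MLSD thresholds now start at 1 while keeping `value=0.1`:

```python
def check_slider(label, minimum, maximum, value, step):
    """Warn when a slider's default value lies outside its allowed range."""
    if not (minimum <= value <= maximum):
        print(f"[warn] {label}: default {value} is outside [{minimum}, {maximum}]")

# Values taken from the slider definitions above.
check_slider("'MLSD' Hough value threshold", 1, 2.0, 0.1, 0.01)
check_slider("'MLSD' Hough distance threshold", 1, 20.0, 0.1, 0.01)
check_slider("'CANNY' low threshold", 1, 255, 100, 1)
```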
packages.txt CHANGED
@@ -1,3 +1,3 @@
  git-lfs
- aria2
+ aria2 -y
  ffmpeg
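packages.txt lists the apt packages installed for the Space; aria2 is kept because the download helpers in utils.py shell out to aria2c. A hedged sketch of the same invocation pattern using subprocess instead of os.system (the flags mirror the commands in utils.py; the example URL and filename are placeholders):

```python
import subprocess

def aria2_download(url, directory, filename, token=None):
    """Download a file with aria2c, mirroring the flags used in utils.py."""
    cmd = [
        "aria2c", "--console-log-level=error", "--summary-interval=10",
        "-c", "-x", "16", "-k", "1M", "-s", "16",
        "-d", directory, "-o", filename, url,
    ]
    if token:
        # Same bearer-token header utils.py passes for gated Hugging Face files.
        cmd.insert(1, f"--header=Authorization: Bearer {token}")
    subprocess.run(cmd, check=True)

# aria2_download("https://example.com/model.safetensors", "models", "model.safetensors")
```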
pre-requirements.txt DELETED
@@ -1 +0,0 @@
- pip>=23.0.0
requirements.txt CHANGED
@@ -1,13 +1,5 @@
- stablepy==0.6.2
- torch==2.5.1
- diffusers
+ stablepy==0.6.0
+ torch==2.2.0
  gdown
  opencv-python
- unidecode
- pydantic==2.10.6
- huggingface_hub
- hf_transfer
- hf_xet
- spaces
- gradio==5.44.1
- matplotlib-inline
+ unidecode
utils.py CHANGED
@@ -1,714 +1,485 @@
1
- import os
2
- import re
3
- import gradio as gr
4
- from constants import (
5
- DIFFUSERS_FORMAT_LORAS,
6
- CIVITAI_API_KEY,
7
- HF_TOKEN,
8
- MODEL_TYPE_CLASS,
9
- DIRECTORY_LORAS,
10
- DIRECTORY_MODELS,
11
- DIFFUSECRAFT_CHECKPOINT_NAME,
12
- CACHE_HF_ROOT,
13
- CACHE_HF,
14
- STORAGE_ROOT,
15
- )
16
- from huggingface_hub import HfApi, get_hf_file_metadata, snapshot_download
17
- from diffusers import DiffusionPipeline
18
- from huggingface_hub import model_info as model_info_data
19
- from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
- from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
- from pathlib import PosixPath
22
- from unidecode import unidecode
23
- import urllib.parse
24
- import copy
25
- import requests
26
- from requests.adapters import HTTPAdapter
27
- from urllib3.util import Retry
28
- import shutil
29
- import subprocess
30
- import json
31
- import html as _html
32
-
33
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
34
- USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
35
- MODEL_ARCH = {
36
- 'stable-diffusion-xl-v1-base/lora': "Stable Diffusion XL (Illustrious, Pony, NoobAI)",
37
- 'stable-diffusion-v1/lora': "Stable Diffusion 1.5",
38
- 'flux-1-dev/lora': "Flux",
39
- }
40
-
41
-
42
- def read_safetensors_header_from_url(url: str):
43
- """Read safetensors header from a remote Hugging Face file."""
44
- meta = get_hf_file_metadata(url)
45
-
46
- # Step 1: first 8 bytes → header length
47
- resp = requests.get(meta.location, headers={"Range": "bytes=0-7"})
48
- resp.raise_for_status()
49
- header_len = int.from_bytes(resp.content, "little")
50
-
51
- # Step 2: fetch full header JSON
52
- end = 8 + header_len - 1
53
- resp = requests.get(meta.location, headers={"Range": f"bytes=8-{end}"})
54
- resp.raise_for_status()
55
- header_json = resp.content.decode("utf-8")
56
-
57
- return json.loads(header_json)
58
-
59
-
60
- def read_safetensors_header_from_file(path: str):
61
- """Read safetensors header from a local file."""
62
- with open(path, "rb") as f:
63
- # Step 1: first 8 bytes → header length
64
- header_len = int.from_bytes(f.read(8), "little")
65
-
66
- # Step 2: read header JSON
67
- header_json = f.read(header_len).decode("utf-8")
68
-
69
- return json.loads(header_json)
70
-
71
-
72
- class LoraHeaderInformation:
73
- """
74
- Encapsulates parsed info from a LoRA JSON header and provides
75
- a compact HTML summary via .to_html().
76
- """
77
-
78
- def __init__(self, json_data):
79
- self.original_json = copy.deepcopy(json_data or {})
80
-
81
- # Check if text encoder was trained
82
- # guard for json_data being a mapping
83
- try:
84
- self.text_encoder_trained = any("text_model" in ln for ln in json_data)
85
- except Exception:
86
- self.text_encoder_trained = False
87
-
88
- # Metadata (may be None)
89
- metadata = (json_data or {}).get("__metadata__", None)
90
- self.metadata = metadata
91
-
92
- # Default values
93
- self.architecture = "undefined"
94
- self.prediction_type = "undefined"
95
- self.base_model = "undefined"
96
- self.author = "undefined"
97
- self.title = "undefined"
98
- self.common_tags_list = []
99
-
100
- if metadata:
101
- self.architecture = MODEL_ARCH.get(
102
- metadata.get('modelspec.architecture', None),
103
- "undefined"
104
- )
105
-
106
- self.prediction_type = metadata.get('modelspec.prediction_type', "undefined")
107
- self.base_model = metadata.get('ss_sd_model_name', "undefined")
108
- self.author = metadata.get('modelspec.author', "undefined")
109
- self.title = metadata.get('modelspec.title', "undefined")
110
-
111
- base_model_hash = metadata.get('ss_new_sd_model_hash', None) # SHA256
112
- # AUTOV1 ss_sd_model_hash
113
- # https://civitai.com/api/v1/model-versions/by-hash/{base_model_hash} # Info
114
- if base_model_hash:
115
- self.base_model += f" hash={base_model_hash}"
116
-
117
- # Extract tags
118
- try:
119
- tags = metadata.get('ss_tag_frequency') if "ss_tag_frequency" in metadata else metadata.get('ss_datasets', "")
120
- tags = json.loads(tags) if tags else ""
121
-
122
- if isinstance(tags, list):
123
- tags = tags[0].get("tag_frequency", {})
124
-
125
- if tags:
126
- self.common_tags_list = list(tags[list(tags.keys())[0]].keys())
127
- except Exception:
128
- self.common_tags_list = []
129
-
130
- def to_dict(self):
131
- """Return a plain dict summary of parsed fields."""
132
- return {
133
- "architecture": self.architecture,
134
- "prediction_type": self.prediction_type,
135
- "base_model": self.base_model,
136
- "author": self.author,
137
- "title": self.title,
138
- "text_encoder_trained": bool(self.text_encoder_trained),
139
- "common_tags": self.common_tags_list,
140
- }
141
-
142
- def to_html(self, limit_tags=20):
143
- """
144
- Return a compact HTML snippet (string) showing the parsed info
145
- in a small font. Values are HTML-escaped.
146
- """
147
- # helper to escape
148
- esc = _html.escape
149
-
150
- rows = [
151
- ("Title", esc(str(self.title))),
152
- ("Author", esc(str(self.author))),
153
- ("Architecture", esc(str(self.architecture))),
154
- ("Base model", esc(str(self.base_model))),
155
- ("Prediction type", esc(str(self.prediction_type))),
156
- ("Text encoder trained", esc(str(self.text_encoder_trained))),
157
- ("Reference tags", esc(str(", ".join(self.common_tags_list[:limit_tags])))),
158
- ]
159
-
160
- # small, compact table with inline styling (small font)
161
- html_rows = "".join(
162
- f"<tr><th style='text-align:left;padding:2px 6px;white-space:nowrap'>{k}</th>"
163
- f"<td style='padding:2px 6px'>{v}</td></tr>"
164
- for k, v in rows
165
- )
166
-
167
- html_snippet = (
168
- "<div style='font-family:system-ui, -apple-system, \"Segoe UI\", Roboto, "
169
- "Helvetica, Arial, \"Noto Sans\", sans-serif; font-size:12px; line-height:1.2; "
170
- "'>"
171
- f"<table style='border-collapse:collapse; font-size:12px;'>"
172
- f"{html_rows}"
173
- "</table>"
174
- "</div>"
175
- )
176
-
177
- return html_snippet
178
-
179
-
180
- def request_json_data(url):
181
- model_version_id = url.split('/')[-1]
182
- if "?modelVersionId=" in model_version_id:
183
- match = re.search(r'modelVersionId=(\d+)', url)
184
- model_version_id = match.group(1)
185
-
186
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
187
-
188
- params = {}
189
- headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
190
- session = requests.Session()
191
- retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
192
- session.mount("https://", HTTPAdapter(max_retries=retries))
193
-
194
- try:
195
- result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
196
- result.raise_for_status()
197
- json_data = result.json()
198
- return json_data if json_data else None
199
- except Exception as e:
200
- print(f"Error: {e}")
201
- return None
202
-
203
-
204
- class ModelInformation:
205
- def __init__(self, json_data):
206
- self.model_version_id = json_data.get("id", "")
207
- self.model_id = json_data.get("modelId", "")
208
- self.download_url = json_data.get("downloadUrl", "")
209
- self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
210
- self.filename_url = next(
211
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
212
- )
213
- self.filename_url = self.filename_url if self.filename_url else ""
214
- self.description = json_data.get("description", "")
215
- if self.description is None:
216
- self.description = ""
217
- self.model_name = json_data.get("model", {}).get("name", "")
218
- self.model_type = json_data.get("model", {}).get("type", "")
219
- self.nsfw = json_data.get("model", {}).get("nsfw", False)
220
- self.poi = json_data.get("model", {}).get("poi", False)
221
- self.images = [img.get("url", "") for img in json_data.get("images", [])]
222
- self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
223
- self.original_json = copy.deepcopy(json_data)
224
-
225
-
226
- def get_civit_params(url):
227
- try:
228
- json_data = request_json_data(url)
229
- mdc = ModelInformation(json_data)
230
- if mdc.download_url and mdc.filename_url:
231
- return mdc.download_url, mdc.filename_url, mdc.model_url
232
- else:
233
- ValueError("Invalid Civitai model URL")
234
- except Exception as e:
235
- print(f"Error retrieving Civitai metadata: {e} — fallback to direct download")
236
- return url, None, None
237
-
238
-
239
- def civ_redirect_down(url, dir_, civitai_api_key, romanize, alternative_name):
240
- filename_base = filename = None
241
-
242
- if alternative_name:
243
- output_path = os.path.join(dir_, alternative_name)
244
- if os.path.exists(output_path):
245
- return output_path, alternative_name
246
-
247
- # Follow the redirect to get the actual download URL
248
- curl_command = (
249
- f'curl -L -sI --connect-timeout 5 --max-time 5 '
250
- f'-H "Content-Type: application/json" '
251
- f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
252
- )
253
-
254
- headers = os.popen(curl_command).read()
255
-
256
- # Look for the redirected "Location" URL
257
- location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
258
-
259
- if location_match:
260
- redirect_url = location_match.group(1).strip()
261
-
262
- # Extract the filename from the redirect URL's "Content-Disposition"
263
- filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
264
- if filename_match:
265
- encoded_filename = filename_match.group(1)
266
- # Decode the URL-encoded filename
267
- decoded_filename = urllib.parse.unquote(encoded_filename)
268
-
269
- filename = unidecode(decoded_filename) if romanize else decoded_filename
270
- # print(f"Filename redirect: {filename}")
271
-
272
- filename_base = alternative_name if alternative_name else filename
273
- if not filename_base:
274
- return None, None
275
- elif os.path.exists(os.path.join(dir_, filename_base)):
276
- return os.path.join(dir_, filename_base), filename_base
277
-
278
- aria2_command = (
279
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
280
- f'-k 1M -s 16 -d "{dir_}" -o "{filename_base}" "{redirect_url}"'
281
- )
282
- r_code = os.system(aria2_command) # noqa
283
-
284
- # if r_code != 0:
285
- # raise RuntimeError(f"Failed to download file: {filename_base}. Error code: {r_code}")
286
-
287
- output_path = os.path.join(dir_, filename_base)
288
- if not os.path.exists(output_path):
289
- return None, filename_base
290
-
291
- return output_path, filename_base
292
-
293
-
294
- def civ_api_down(url, dir_, civitai_api_key, civ_filename):
295
- """
296
- This method is susceptible to being blocked because it generates a lot of temp redirect links with aria2c.
297
- If an API key limit is reached, generating a new API key and using it can fix the issue.
298
- """
299
- output_path = None
300
-
301
- url_dl = url + f"?token={civitai_api_key}"
302
- if not civ_filename:
303
- aria2_command = f'aria2c -c -x 1 -s 1 -d "{dir_}" "{url_dl}"'
304
- os.system(aria2_command)
305
- else:
306
- output_path = os.path.join(dir_, civ_filename)
307
- if not os.path.exists(output_path):
308
- aria2_command = (
309
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
310
- f'-k 1M -s 16 -d "{dir_}" -o "{civ_filename}" "{url_dl}"'
311
- )
312
- os.system(aria2_command)
313
-
314
- return output_path
315
-
316
-
317
- def drive_down(url, dir_):
318
- import gdown
319
-
320
- output_path = None
321
-
322
- drive_id, _ = gdown.parse_url.parse_url(url, warning=False)
323
- dir_files = os.listdir(dir_)
324
-
325
- for dfile in dir_files:
326
- if drive_id in dfile:
327
- output_path = os.path.join(dir_, dfile)
328
- break
329
-
330
- if not output_path:
331
- original_path = gdown.download(url, f"{dir_}/", fuzzy=True)
332
-
333
- dir_name, base_name = os.path.split(original_path)
334
- name, ext = base_name.rsplit(".", 1)
335
- new_name = f"{name}_{drive_id}.{ext}"
336
- output_path = os.path.join(dir_name, new_name)
337
-
338
- os.rename(original_path, output_path)
339
-
340
- return output_path
341
-
342
-
343
- def hf_down(url, dir_, hf_token, romanize):
344
- url = url.replace("?download=true", "")
345
- # url = urllib.parse.quote(url, safe=':/') # fix encoding
346
-
347
- filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
348
- output_path = os.path.join(dir_, filename)
349
-
350
- if os.path.exists(output_path):
351
- return output_path
352
-
353
- if "/blob/" in url:
354
- url = url.replace("/blob/", "/resolve/")
355
-
356
- if hf_token:
357
- user_header = f'"Authorization: Bearer {hf_token}"'
358
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
359
- else:
360
- os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
361
-
362
- return output_path
363
-
364
-
365
- def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
366
- url = url.strip()
367
- downloaded_file_path = None
368
-
369
- if "drive.google.com" in url:
370
- downloaded_file_path = drive_down(url, directory)
371
- elif "huggingface.co" in url:
372
- downloaded_file_path = hf_down(url, directory, hf_token, romanize)
373
- elif "civitai.com" in url:
374
- if not civitai_api_key:
375
- msg = "You need an API key to download Civitai models."
376
- print(f"\033[91m{msg}\033[0m")
377
- gr.Warning(msg)
378
- return None
379
-
380
- url, civ_filename, civ_page = get_civit_params(url)
381
- if civ_page and not IS_ZERO_GPU:
382
- print(f"\033[92mCivitai model: {civ_filename} [page: {civ_page}]\033[0m")
383
-
384
- downloaded_file_path, civ_filename = civ_redirect_down(url, directory, civitai_api_key, romanize, civ_filename)
385
-
386
- if not downloaded_file_path:
387
- msg = (
388
- "Download failed.\n"
389
- "If this is due to an API limit, generating a new API key may resolve the issue.\n"
390
- "Attempting to download using the old method..."
391
- )
392
- print(msg)
393
- gr.Warning(msg)
394
- downloaded_file_path = civ_api_down(url, directory, civitai_api_key, civ_filename)
395
- else:
396
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
397
-
398
- return downloaded_file_path
399
-
400
-
401
- def get_model_list(directory_path):
402
- model_list = []
403
- valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
404
-
405
- for filename in os.listdir(directory_path):
406
- if os.path.splitext(filename)[1] in valid_extensions:
407
- # name_without_extension = os.path.splitext(filename)[0]
408
- file_path = os.path.join(directory_path, filename)
409
- # model_list.append((name_without_extension, file_path))
410
- model_list.append(file_path)
411
- print('\033[34mFILE: ' + file_path + '\033[0m')
412
- return model_list
413
-
414
-
415
- def extract_parameters(input_string):
416
- parameters = {}
417
- input_string = input_string.replace("\n", "")
418
-
419
- if "Negative prompt:" not in input_string:
420
- if "Steps:" in input_string:
421
- input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
422
- else:
423
- msg = "Generation data is invalid."
424
- gr.Warning(msg)
425
- print(msg)
426
- parameters["prompt"] = input_string
427
- return parameters
428
-
429
- parm = input_string.split("Negative prompt:")
430
- parameters["prompt"] = parm[0].strip()
431
- if "Steps:" not in parm[1]:
432
- parameters["neg_prompt"] = parm[1].strip()
433
- return parameters
434
- parm = parm[1].split("Steps:")
435
- parameters["neg_prompt"] = parm[0].strip()
436
- input_string = "Steps:" + parm[1]
437
-
438
- # Extracting Steps
439
- steps_match = re.search(r'Steps: (\d+)', input_string)
440
- if steps_match:
441
- parameters['Steps'] = int(steps_match.group(1))
442
-
443
- # Extracting Size
444
- size_match = re.search(r'Size: (\d+x\d+)', input_string)
445
- if size_match:
446
- parameters['Size'] = size_match.group(1)
447
- width, height = map(int, parameters['Size'].split('x'))
448
- parameters['width'] = width
449
- parameters['height'] = height
450
-
451
- # Extracting other parameters
452
- other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
453
- for param in other_parameters:
454
- parameters[param[0].strip()] = param[1].strip('"')
455
-
456
- return parameters
457
-
458
-
459
- def get_my_lora(link_url, romanize):
460
- l_name = ""
461
- for url in [url.strip() for url in link_url.split(',')]:
462
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
463
- l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
464
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
465
- new_lora_model_list.insert(0, "None")
466
- new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
467
- msg_lora = "Downloaded"
468
- if l_name:
469
- msg_lora += f": <b>{l_name}</b>"
470
- print(msg_lora)
471
-
472
- try:
473
- # Works with non-Civitai loras.
474
- json_data = read_safetensors_header_from_file(l_name)
475
- metadata_lora = LoraHeaderInformation(json_data)
476
- msg_lora += "<br>" + metadata_lora.to_html()
477
- except Exception:
478
- pass
479
-
480
- return gr.update(
481
- choices=new_lora_model_list
482
- ), gr.update(
483
- choices=new_lora_model_list
484
- ), gr.update(
485
- choices=new_lora_model_list
486
- ), gr.update(
487
- choices=new_lora_model_list
488
- ), gr.update(
489
- choices=new_lora_model_list
490
- ), gr.update(
491
- choices=new_lora_model_list
492
- ), gr.update(
493
- choices=new_lora_model_list
494
- ), gr.update(
495
- value=msg_lora
496
- )
497
-
498
-
499
- def info_html(json_data, title, subtitle):
500
- return f"""
501
- <div style='padding: 0; border-radius: 10px;'>
502
- <p style='margin: 0; font-weight: bold;'>{title}</p>
503
- <details>
504
- <summary>Details</summary>
505
- <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
506
- </details>
507
- </div>
508
- """
509
-
510
-
511
- def get_model_type(repo_id: str):
512
- api = HfApi(token=os.environ.get("HF_TOKEN")) # if use private or gated model
513
- default = "SD 1.5"
514
- try:
515
- if os.path.exists(repo_id):
516
- tag, _, _, _ = checkpoint_model_type(repo_id)
517
- return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
518
- else:
519
- model = api.model_info(repo_id=repo_id, timeout=5.0)
520
- tags = model.tags
521
- for tag in tags:
522
- if tag in MODEL_TYPE_CLASS.keys():
523
- return MODEL_TYPE_CLASS.get(tag, default)
524
-
525
- except Exception:
526
- return default
527
- return default
528
-
529
-
530
- def restart_space(repo_id: str, factory_reboot: bool):
531
- api = HfApi(token=os.environ.get("HF_TOKEN"))
532
- try:
533
- runtime = api.get_space_runtime(repo_id=repo_id)
534
- if runtime.stage == "RUNNING":
535
- api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
536
- print(f"Restarting space: {repo_id}")
537
- else:
538
- print(f"Space {repo_id} is in stage: {runtime.stage}")
539
- except Exception as e:
540
- print(e)
541
-
542
-
543
- def extract_exif_data(image):
544
- if image is None:
545
- return ""
546
-
547
- try:
548
- metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
549
-
550
- for key in metadata_keys:
551
- if key in image.info:
552
- return image.info[key]
553
-
554
- return str(image.info)
555
-
556
- except Exception as e:
557
- return f"Error extracting metadata: {str(e)}"
558
-
559
-
560
- def create_mask_now(img, invert):
561
- import numpy as np
562
- import time
563
-
564
- time.sleep(0.5)
565
-
566
- transparent_image = img["layers"][0]
567
-
568
- # Extract the alpha channel
569
- alpha_channel = np.array(transparent_image)[:, :, 3]
570
-
571
- # Create a binary mask by thresholding the alpha channel
572
- binary_mask = alpha_channel > 1
573
-
574
- if invert:
575
- print("Invert")
576
- # Invert the binary mask so that the drawn shape is white and the rest is black
577
- binary_mask = np.invert(binary_mask)
578
-
579
- # Convert the binary mask to a 3-channel RGB mask
580
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
581
-
582
- # Convert the mask to uint8
583
- rgb_mask = rgb_mask.astype(np.uint8) * 255
584
-
585
- return img["background"], rgb_mask
586
-
587
-
588
- def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
589
-
590
- variant = None
591
- if token is True and not os.environ.get("HF_TOKEN"):
592
- token = None
593
-
594
- if model_type == "SDXL":
595
- info = model_info_data(
596
- repo_name,
597
- token=token,
598
- revision=revision,
599
- timeout=5.0,
600
- )
601
-
602
- filenames = {sibling.rfilename for sibling in info.siblings}
603
- model_filenames, variant_filenames = variant_compatible_siblings(
604
- filenames, variant="fp16"
605
- )
606
-
607
- if len(variant_filenames):
608
- variant = "fp16"
609
-
610
- if model_type == "FLUX":
611
- cached_folder = snapshot_download(
612
- repo_id=repo_name,
613
- allow_patterns="transformer/*"
614
- )
615
- else:
616
- cached_folder = DiffusionPipeline.download(
617
- pretrained_model_name=repo_name,
618
- force_download=False,
619
- token=token,
620
- revision=revision,
621
- # mirror="https://hf-mirror.com",
622
- variant=variant,
623
- use_safetensors=True,
624
- trust_remote_code=False,
625
- timeout=5.0,
626
- )
627
-
628
- if isinstance(cached_folder, PosixPath):
629
- cached_folder = cached_folder.as_posix()
630
-
631
- # Task model
632
- # from huggingface_hub import hf_hub_download
633
- # hf_hub_download(
634
- # task_model,
635
- # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
636
- # )
637
-
638
- return cached_folder
639
-
640
-
641
- def get_folder_size_gb(folder_path):
642
- result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
643
-
644
- total_size_kb = int(result.stdout.split()[0])
645
- total_size_gb = total_size_kb / (1024 ** 2)
646
-
647
- return total_size_gb
648
-
649
-
650
- def get_used_storage_gb(path_storage=STORAGE_ROOT):
651
- try:
652
- used_gb = get_folder_size_gb(path_storage)
653
- print(f"Used Storage: {used_gb:.2f} GB")
654
- except Exception as e:
655
- used_gb = 999
656
- print(f"Error while retrieving the used storage: {e}.")
657
-
658
- return used_gb
659
-
660
-
661
- def delete_model(removal_candidate):
662
- print(f"Removing: {removal_candidate}")
663
-
664
- if os.path.exists(removal_candidate):
665
- os.remove(removal_candidate)
666
- else:
667
- diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
668
- if os.path.isdir(diffusers_model):
669
- shutil.rmtree(diffusers_model)
670
-
671
-
672
- def clear_hf_cache():
673
- """
674
- Clears the entire Hugging Face cache at ~/.cache/huggingface.
675
- Hugging Face will re-download models as needed later.
676
- """
677
- try:
678
- if os.path.exists(CACHE_HF):
679
- shutil.rmtree(CACHE_HF, ignore_errors=True)
680
- print(f"Hugging Face cache cleared: {CACHE_HF}")
681
- else:
682
- print(f"No Hugging Face cache found at: {CACHE_HF}")
683
- except Exception as e:
684
- print(f"Error clearing Hugging Face cache: {e}")
685
-
686
-
687
- def progress_step_bar(step, total):
688
- # Calculate the percentage for the progress bar width
689
- percentage = min(100, ((step / total) * 100))
690
-
691
- return f"""
692
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
693
- <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
694
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
695
- {int(percentage)}%
696
- </div>
697
- </div>
698
- """
699
-
700
-
701
- def html_template_message(msg):
702
- return f"""
703
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
704
- <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
705
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
706
- {msg}
707
- </div>
708
- </div>
709
- """
710
-
711
-
712
- def escape_html(text):
713
- """Escapes HTML special characters in the input text."""
714
- return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
 
1
+ import os
2
+ import re
3
+ import gradio as gr
4
+ from constants import (
5
+ DIFFUSERS_FORMAT_LORAS,
6
+ CIVITAI_API_KEY,
7
+ HF_TOKEN,
8
+ MODEL_TYPE_CLASS,
9
+ DIRECTORY_LORAS,
10
+ DIRECTORY_MODELS,
11
+ DIFFUSECRAFT_CHECKPOINT_NAME,
12
+ CACHE_HF,
13
+ STORAGE_ROOT,
14
+ )
15
+ from huggingface_hub import HfApi
16
+ from huggingface_hub import snapshot_download
17
+ from diffusers import DiffusionPipeline
18
+ from huggingface_hub import model_info as model_info_data
19
+ from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
+ from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
+ from pathlib import PosixPath
22
+ from unidecode import unidecode
23
+ import urllib.parse
24
+ import copy
25
+ import requests
26
+ from requests.adapters import HTTPAdapter
27
+ from urllib3.util import Retry
28
+ import shutil
29
+ import subprocess
30
+
31
+ USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
32
+
33
+
34
+ def request_json_data(url):
35
+ model_version_id = url.split('/')[-1]
36
+ if "?modelVersionId=" in model_version_id:
37
+ match = re.search(r'modelVersionId=(\d+)', url)
38
+ model_version_id = match.group(1)
39
+
40
+ endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
41
+
42
+ params = {}
43
+ headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
44
+ session = requests.Session()
45
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
46
+ session.mount("https://", HTTPAdapter(max_retries=retries))
47
+
48
+ try:
49
+ result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
50
+ result.raise_for_status()
51
+ json_data = result.json()
52
+ return json_data if json_data else None
53
+ except Exception as e:
54
+ print(f"Error: {e}")
55
+ return None
56
+
57
+
58
+ class ModelInformation:
59
+ def __init__(self, json_data):
60
+ self.model_version_id = json_data.get("id", "")
61
+ self.model_id = json_data.get("modelId", "")
62
+ self.download_url = json_data.get("downloadUrl", "")
63
+ self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
64
+ self.filename_url = next(
65
+ (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
66
+ )
67
+ self.filename_url = self.filename_url if self.filename_url else ""
68
+ self.description = json_data.get("description", "")
69
+ if self.description is None: self.description = ""
70
+ self.model_name = json_data.get("model", {}).get("name", "")
71
+ self.model_type = json_data.get("model", {}).get("type", "")
72
+ self.nsfw = json_data.get("model", {}).get("nsfw", False)
73
+ self.poi = json_data.get("model", {}).get("poi", False)
74
+ self.images = [img.get("url", "") for img in json_data.get("images", [])]
75
+ self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
76
+ self.original_json = copy.deepcopy(json_data)
77
+
78
+
79
+ def retrieve_model_info(url):
80
+ json_data = request_json_data(url)
81
+ if not json_data:
82
+ return None
83
+ model_descriptor = ModelInformation(json_data)
84
+ return model_descriptor
85
+
86
+
87
+ def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
+     """Download a file from Google Drive, Hugging Face, Civitai, or a direct URL into `directory`."""
+     url = url.strip()
+     downloaded_file_path = None
+
+     if "drive.google.com" in url:
+         original_dir = os.getcwd()
+         os.chdir(directory)
+         # gdown picks the filename itself; downloaded_file_path stays None for this branch
+         os.system(f"gdown --fuzzy {url}")
+         os.chdir(original_dir)
+     elif "huggingface.co" in url:
+         url = url.replace("?download=true", "")
+         # url = urllib.parse.quote(url, safe=':/')  # fix encoding
+         if "/blob/" in url:
+             url = url.replace("/blob/", "/resolve/")
+         user_header = f'"Authorization: Bearer {hf_token}"'
+
+         filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
+
+         if hf_token:
+             os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
+         else:
+             os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
+
+         downloaded_file_path = os.path.join(directory, filename)
+
+     elif "civitai.com" in url:
+
+         if not civitai_api_key:
+             print("\033[91mYou need an API key to download Civitai models.\033[0m")
+
+         model_profile = retrieve_model_info(url)
+         if (
+             model_profile is not None
+             and model_profile.download_url
+             and model_profile.filename_url
+         ):
+             url = model_profile.download_url
+             filename = unidecode(model_profile.filename_url) if romanize else model_profile.filename_url
+         else:
+             if "?" in url:
+                 url = url.split("?")[0]
+             filename = ""
+
+         url_dl = url + f"?token={civitai_api_key}"
+         print(f"Filename: {filename}")
+
+         param_filename = ""
+         if filename:
+             param_filename = f"-o '{filename}'"
+
+         aria2_command = (
+             f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
+             f'-k 1M -s 16 -d "{directory}" {param_filename} "{url_dl}"'
+         )
+         os.system(aria2_command)
+
+         if param_filename and os.path.exists(os.path.join(directory, filename)):
+             downloaded_file_path = os.path.join(directory, filename)
+
+         # # PLAN B
+         # # Follow the redirect to get the actual download URL
+         # curl_command = (
+         #     f'curl -L -sI --connect-timeout 5 --max-time 5 '
+         #     f'-H "Content-Type: application/json" '
+         #     f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
+         # )
+
+         # headers = os.popen(curl_command).read()
+
+         # # Look for the redirected "Location" URL
+         # location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
+
+         # if location_match:
+         #     redirect_url = location_match.group(1).strip()
+
+         #     # Extract the filename from the redirect URL's "Content-Disposition"
+         #     filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
+         #     if filename_match:
+         #         encoded_filename = filename_match.group(1)
+         #         # Decode the URL-encoded filename
+         #         decoded_filename = urllib.parse.unquote(encoded_filename)
+
+         #         filename = unidecode(decoded_filename) if romanize else decoded_filename
+         #         print(f"Filename: {filename}")
+
+         #         aria2_command = (
+         #             f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
+         #             f'-k 1M -s 16 -d "{directory}" -o "{filename}" "{redirect_url}"'
+         #         )
+         #         return_code = os.system(aria2_command)
+
+         #         # if return_code != 0:
+         #         #     raise RuntimeError(f"Failed to download file: {filename}. Error code: {return_code}")
+         #         downloaded_file_path = os.path.join(directory, filename)
+         #         if not os.path.exists(downloaded_file_path):
+         #             downloaded_file_path = None
+
+         # if not downloaded_file_path:
+         #     # Old method
+         #     if "?" in url:
+         #         url = url.split("?")[0]
+         #     url = url + f"?token={civitai_api_key}"
+         #     os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+
+     else:
+         os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+
+     return downloaded_file_path
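+
+ # Example usage (a minimal sketch; the repo URL is a placeholder and the token is
+ # assumed to be available in the environment):
+ #   lora_path = download_things(
+ #       "./loras",
+ #       "https://huggingface.co/user/repo/blob/main/model.safetensors",
+ #       hf_token=os.environ.get("HF_TOKEN", ""),
+ #   )
+ #   # Returns the local path for Hugging Face and resolved Civitai downloads;
+ #   # Google Drive files are saved by gdown but the returned path is None.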
+
+
+ def get_model_list(directory_path):
+     model_list = []
+     valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
+
+     for filename in os.listdir(directory_path):
+         if os.path.splitext(filename)[1] in valid_extensions:
+             # name_without_extension = os.path.splitext(filename)[0]
+             file_path = os.path.join(directory_path, filename)
+             # model_list.append((name_without_extension, file_path))
+             model_list.append(file_path)
+             print('\033[34mFILE: ' + file_path + '\033[0m')
+     return model_list
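+
+ # Example (sketch, assuming DIRECTORY_LORAS points to a folder containing LoRA files):
+ #   lora_files = get_model_list(DIRECTORY_LORAS)  # e.g. ['./loras/detail.safetensors', ...]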
+
+
+ def extract_parameters(input_string):
+     parameters = {}
+     input_string = input_string.replace("\n", "")
+
+     if "Negative prompt:" not in input_string:
+         if "Steps:" in input_string:
+             input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
+         else:
+             print("Invalid metadata")
+             parameters["prompt"] = input_string
+             return parameters
+
+     parm = input_string.split("Negative prompt:")
+     parameters["prompt"] = parm[0].strip()
+     if "Steps:" not in parm[1]:
+         print("Steps not detected")
+         parameters["neg_prompt"] = parm[1].strip()
+         return parameters
+     parm = parm[1].split("Steps:")
+     parameters["neg_prompt"] = parm[0].strip()
+     input_string = "Steps:" + parm[1]
+
+     # Extracting Steps
+     steps_match = re.search(r'Steps: (\d+)', input_string)
+     if steps_match:
+         parameters['Steps'] = int(steps_match.group(1))
+
+     # Extracting Size
+     size_match = re.search(r'Size: (\d+x\d+)', input_string)
+     if size_match:
+         parameters['Size'] = size_match.group(1)
+         width, height = map(int, parameters['Size'].split('x'))
+         parameters['width'] = width
+         parameters['height'] = height
+
+     # Extracting other parameters
+     other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
+     for param in other_parameters:
+         parameters[param[0].strip()] = param[1].strip('"')
+
+     return parameters
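+
+ # Example with an A1111-style parameter string (illustrative values):
+ #   meta = "1girl, masterpiece\nNegative prompt: lowres\nSteps: 28, Sampler: Euler a, Size: 832x1216"
+ #   params = extract_parameters(meta)
+ #   # params includes 'prompt', 'neg_prompt', 'Steps', 'Sampler', 'Size',
+ #   # plus integer 'width' and 'height' parsed from 'Size'.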
+
+
+ def get_my_lora(link_url, romanize):
+     l_name = ""
+     for url in [url.strip() for url in link_url.split(',')]:
+         if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+             l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
+     new_lora_model_list = get_model_list(DIRECTORY_LORAS)
+     new_lora_model_list.insert(0, "None")
+     new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
+     msg_lora = "Downloaded"
+     if l_name:
+         msg_lora += f": <b>{l_name}</b>"
+     print(msg_lora)
+
+     return (
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(choices=new_lora_model_list),
+         gr.update(value=msg_lora),
+     )
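+
+ # Wiring sketch (assumes the UI binds this to seven LoRA dropdowns plus one status
+ # component; the URL is a placeholder):
+ #   updates = get_my_lora("https://civitai.com/models/000000", romanize=False)
+ #   # -> eight gr.update() objects: seven refreshed choice lists and one status message.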
+
+
+ def info_html(json_data, title, subtitle):
+     # json_data is currently unused in the rendered HTML
+     return f"""
+     <div style='padding: 0; border-radius: 10px;'>
+         <p style='margin: 0; font-weight: bold;'>{title}</p>
+         <details>
+             <summary>Details</summary>
+             <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
+         </details>
+     </div>
+     """
+
+
+ def get_model_type(repo_id: str):
+     api = HfApi(token=os.environ.get("HF_TOKEN"))  # token needed for private or gated models
+     default = "SD 1.5"
+     try:
+         if os.path.exists(repo_id):
+             tag, _, _, _ = checkpoint_model_type(repo_id)
+             return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
+         else:
+             model = api.model_info(repo_id=repo_id, timeout=5.0)
+             tags = model.tags
+             for tag in tags:
+                 if tag in MODEL_TYPE_CLASS.keys():
+                     return MODEL_TYPE_CLASS.get(tag, default)
+     except Exception:
+         return default
+     return default
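+
+ # Example (sketch; the repo id is a placeholder):
+ #   get_model_type("author/some-model")  # -> a label from MODEL_TYPE_CLASS, or "SD 1.5" as fallback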
+
+
+ def restart_space(repo_id: str, factory_reboot: bool):
+     api = HfApi(token=os.environ.get("HF_TOKEN"))
+     try:
+         runtime = api.get_space_runtime(repo_id=repo_id)
+         if runtime.stage == "RUNNING":
+             api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
+             print(f"Restarting space: {repo_id}")
+         else:
+             print(f"Space {repo_id} is in stage: {runtime.stage}")
+     except Exception as e:
+         print(e)
+
+
+ def extract_exif_data(image):
+     if image is None:
+         return ""
+
+     try:
+         metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
+
+         for key in metadata_keys:
+             if key in image.info:
+                 return image.info[key]
+
+         return str(image.info)
+
+     except Exception as e:
+         return f"Error extracting metadata: {str(e)}"
+
+
+ def create_mask_now(img, invert):
+     import numpy as np
+     import time
+
+     time.sleep(0.5)
+
+     transparent_image = img["layers"][0]
+
+     # Extract the alpha channel
+     alpha_channel = np.array(transparent_image)[:, :, 3]
+
+     # Create a binary mask by thresholding the alpha channel
+     binary_mask = alpha_channel > 1
+
+     if invert:
+         print("Invert")
+         # Invert the binary mask so that the drawn shape is white and the rest is black
+         binary_mask = np.invert(binary_mask)
+
+     # Convert the binary mask to a 3-channel RGB mask
+     rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
+
+     # Convert the mask to uint8
+     rgb_mask = rgb_mask.astype(np.uint8) * 255
+
+     return img["background"], rgb_mask
+
+
+ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
+
+     variant = None
+     if token is True and not os.environ.get("HF_TOKEN"):
+         token = None
+
+     if model_type == "SDXL":
+         info = model_info_data(
+             repo_name,
+             token=token,
+             revision=revision,
+             timeout=5.0,
+         )
+
+         filenames = {sibling.rfilename for sibling in info.siblings}
+         model_filenames, variant_filenames = variant_compatible_siblings(
+             filenames, variant="fp16"
+         )
+
+         if len(variant_filenames):
+             variant = "fp16"
+
+     if model_type == "FLUX":
+         cached_folder = snapshot_download(
+             repo_id=repo_name,
+             allow_patterns="transformer/*"
+         )
+     else:
+         cached_folder = DiffusionPipeline.download(
+             pretrained_model_name=repo_name,
+             force_download=False,
+             token=token,
+             revision=revision,
+             # mirror="https://hf-mirror.com",
+             variant=variant,
+             use_safetensors=True,
+             trust_remote_code=False,
+             timeout=5.0,
+         )
+
+     if isinstance(cached_folder, PosixPath):
+         cached_folder = cached_folder.as_posix()
+
+     # Task model
+     # from huggingface_hub import hf_hub_download
+     # hf_hub_download(
+     #     task_model,
+     #     filename="diffusion_pytorch_model.safetensors",  # fix fp16 variant
+     # )
+
+     return cached_folder
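+
+ # Example (sketch; assumes network access and, for gated repos, HF_TOKEN in the environment):
+ #   folder = download_diffuser_repo("stabilityai/stable-diffusion-xl-base-1.0", "SDXL")
+ #   # -> local snapshot folder, preferring the fp16 variant when one is published.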
+
+
+ def get_folder_size_gb(folder_path):
+     result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
+
+     # `du -s` typically reports usage in 1 KiB blocks on GNU/Linux, so dividing by 1024**2 yields GiB
+     total_size_kb = int(result.stdout.split()[0])
+     total_size_gb = total_size_kb / (1024 ** 2)
+
+     return total_size_gb
+
+
+ def get_used_storage_gb():
+     try:
+         used_gb = get_folder_size_gb(STORAGE_ROOT)
+         print(f"Used Storage: {used_gb:.2f} GB")
+     except Exception as e:
+         used_gb = 999  # pessimistic fallback when the size cannot be determined
+         print(f"Error while retrieving the used storage: {e}.")
+
+     return used_gb
+
+
+ def delete_model(removal_candidate):
+     print(f"Removing: {removal_candidate}")
+
+     if os.path.exists(removal_candidate):
+         os.remove(removal_candidate)
+     else:
+         diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
+         if os.path.isdir(diffusers_model):
+             shutil.rmtree(diffusers_model)
+
+
+ def progress_step_bar(step, total):
+     # Calculate the percentage for the progress bar width
+     percentage = min(100, ((step / total) * 100))
+
+     return f"""
+     <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
+         <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
+         <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
+             {int(percentage)}%
+         </div>
+     </div>
+     """
+
+
+ def html_template_message(msg):
+     return f"""
+     <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
+         <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
+         <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
+             {msg}
+         </div>
+     </div>
+     """
+
+
+ def escape_html(text):
+     """Escape '<' and '>' and convert newlines to <br> for HTML display (note: '&' is left unescaped)."""
+     return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
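+
+ # Example:
+ #   escape_html("<lora:detail:1> line1\nline2")  # -> "&lt;lora:detail:1&gt; line1<br>line2"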