sayakpaul (HF Staff) committed · verified · commit ec3f4e3 · 1 parent: 9304828

Sync from GitHub

.github/workflows/publish-to-hf-spaces.yml ADDED
@@ -0,0 +1,39 @@
1
+ name: Publish to HF Spaces
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ publish:
11
+ runs-on: ubuntu-latest
12
+ env:
13
+ HF_REPO: diffusers/optimized-diffusers-code
14
+ steps:
15
+ - name: Checkout repository
16
+ uses: actions/checkout@v3
17
+
18
+ - name: Install HF CLI
19
+ run: |
20
+ python -m pip install --upgrade pip
21
+ pip install huggingface_hub
22
+
23
+ - name: Log in to Hugging Face
24
+ run: |
25
+ huggingface-cli login --token "${{ secrets.HF_TOKEN }}"
26
+
27
+ - name: Create (or reuse) the Spaces repo
28
+ run: |
29
+ huggingface-cli repo create ${{env.HF_REPO}} \
30
+ --type space --space_sdk gradio \
31
+ --exist-ok
32
+
33
+ - name: Push current repo to HF Spaces
34
+ run: |
35
+ huggingface-cli upload ${{env.HF_REPO}} \
36
+ . . \
37
+ --repo-type space \
38
+ --commit-message "Sync from GitHub" \
39
+ --token "${{ secrets.HF_TOKEN }}"
.gitignore ADDED
@@ -0,0 +1,195 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ __pycache__/
10
+ # Distribution / packaging
11
+ .Python
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ wheels/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+ cover/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # UV
99
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ #uv.lock
103
+
104
+ # poetry
105
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
106
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
107
+ # commonly ignored for libraries.
108
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
109
+ #poetry.lock
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ #pdm.lock
114
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
115
+ # in version control.
116
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
117
+ .pdm.toml
118
+ .pdm-python
119
+ .pdm-build/
120
+
121
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
122
+ __pypackages__/
123
+
124
+ # Celery stuff
125
+ celerybeat-schedule
126
+ celerybeat.pid
127
+
128
+ # SageMath parsed files
129
+ *.sage.py
130
+
131
+ # Environments
132
+ .env
133
+ .venv
134
+ env/
135
+ venv/
136
+ ENV/
137
+ env.bak/
138
+ venv.bak/
139
+
140
+ # Spyder project settings
141
+ .spyderproject
142
+ .spyproject
143
+
144
+ # Rope project settings
145
+ .ropeproject
146
+
147
+ # mkdocs documentation
148
+ /site
149
+
150
+ # mypy
151
+ .mypy_cache/
152
+ .dmypy.json
153
+ dmypy.json
154
+
155
+ # Pyre type checker
156
+ .pyre/
157
+
158
+ # pytype static type analyzer
159
+ .pytype/
160
+
161
+ # Cython debug symbols
162
+ cython_debug/
163
+
164
+ # PyCharm
165
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
166
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
167
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
168
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
169
+ #.idea/
170
+
171
+ # Abstra
172
+ # Abstra is an AI-powered process automation framework.
173
+ # Ignore directories containing user credentials, local state, and settings.
174
+ # Learn more at https://abstra.io/docs
175
+ .abstra/
176
+
177
+ # Visual Studio Code
178
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
179
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
180
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
181
+ # you could uncomment the following to ignore the enitre vscode folder
182
+ # .vscode/
183
+
184
+ # Ruff stuff:
185
+ .ruff_cache/
186
+
187
+ # PyPI configuration file
188
+ .pypirc
189
+
190
+ # Cursor
191
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
192
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
193
+ # refer to https://docs.cursor.com/context/ignore-files
194
+ .cursorignore
195
+ .cursorindexingignore
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,353 @@
1
- ---
2
- title: Optimized Diffusers Code
3
- emoji: 📈
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.34.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
+ # auto-diffusers-docs
2
+
3
+ Still a WIP. Uses an LLM to generate reasonable, hardware-aware code snippets for Diffusers.
4
+
5
+ ### Motivation
6
+
7
+ Within Diffusers, we support a number of optimization techniques (refer [here](https://huggingface.co/docs/diffusers/main/en/optimization/memory), [here](https://huggingface.co/docs/diffusers/main/en/optimization/cache), and [here](https://huggingface.co/docs/diffusers/main/en/optimization/fp16)). However, it can be
8
+ daunting for users to determine when to use what. Hence, this repository takes a stab
9
+ at using an LLM to generate reasonable code snippets for a given pipeline checkpoint that respect the
10
+ user's hardware configuration.
11
+
12
+ ## Getting started
13
+
14
+ Install the requirements from `requirements.txt`.
15
+
16
+ Configure `GOOGLE_API_KEY` in the environment: `export GOOGLE_API_KEY=...`.
17
+
18
+ Then run:
19
+
20
+ ```bash
21
+ python e2e_example.py
22
+ ```
23
+
24
+ By default, the `e2e_example.py` script uses Flux.1-Dev, but this can be configured through the `--ckpt_id` argument.
25
+
26
+ Full usage:
27
+
28
+ ```sh
29
+ usage: e2e_example.py [-h] [--ckpt_id CKPT_ID] [--gemini_model GEMINI_MODEL] [--variant VARIANT] [--enable_lossy]
30
+
31
+ options:
32
+ -h, --help show this help message and exit
33
+ --ckpt_id CKPT_ID Can be a repo id from the Hub or a local path where the checkpoint is stored.
34
+ --gemini_model GEMINI_MODEL
35
+ Gemini model to use. Choose from https://ai.google.dev/gemini-api/docs/models.
36
+ --variant VARIANT If the `ckpt_id` has variants, supply this flag to estimate compute. Example: 'fp16'.
37
+ --enable_lossy When enabled, the code will include snippets for enabling quantization.
38
+ ```
39
+
40
+ ## Example outputs
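The same flow can also be driven programmatically. The sketch below mirrors what `e2e_example.py` wires together (it assumes `GOOGLE_API_KEY` is set, as above); the hard-coded RAM/VRAM numbers are illustrative stand-ins for the values the script detects automatically.

```python
# Minimal programmatic sketch of the e2e_example.py flow.
from prompts import system_prompt, generate_prompt
from utils.llm_utils import LLMCodeOptimizer
from utils.pipeline_utils import determine_pipe_loading_memory

ckpt_id = "black-forest-labs/FLUX.1-dev"
load_memory = determine_pipe_loading_memory(ckpt_id, None, False)["total_loading_memory_gb"]

llm = LLMCodeOptimizer(model_name="gemini-2.5-flash", system_prompt=system_prompt)
prompt = generate_prompt.format(
    ckpt_id=ckpt_id,
    pipeline_loading_memory=load_memory,
    available_system_ram=64.0,   # illustrative value
    available_gpu_vram=24.0,     # illustrative value
    enable_lossy_outputs=False,
    is_fp8_supported=False,
    enable_torch_compile=True,
)
print(llm(prompt))
```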
41
+
42
+ <details>
43
+ <summary>python e2e_example.py (ran on an H100)</summary>
44
+
45
+ ````sh
46
+ System RAM: 1999.99 GB
47
+ RAM Category: large
48
+
49
+ GPU VRAM: 79.65 GB
50
+ VRAM Category: large
51
+ current_generate_prompt='\npipeline_loading_memory_GB: 31.424\navailable_system_ram_GB: 1999.9855346679688\navailable_gpu_vram_GB: 79.6474609375\nenable_lossy_outputs: False\nenable_torch_compile: True\n'
52
+ Sending request to Gemini...
53
+ ```python
54
+ from diffusers import DiffusionPipeline
55
+ import torch
56
+
57
+ # User-provided information:
58
+ # pipeline_loading_memory_GB: 31.424
59
+ # available_system_ram_GB: 1999.9855346679688 (Large RAM)
60
+ # available_gpu_vram_GB: 79.6474609375 (Large VRAM)
61
+ # enable_lossy_outputs: False
62
+ # enable_torch_compile: True
63
+
64
+ # --- Configuration based on user needs and system capabilities ---
65
+
66
+ # Placeholder for the actual checkpoint ID
67
+ # Please replace this with your desired model checkpoint ID.
68
+ CKPT_ID = "black-forest-labs/FLUX.1-dev"
69
+
70
+ # Determine dtype. bfloat16 is generally recommended for performance on compatible GPUs.
71
+ # Ensure your GPU supports bfloat16 for optimal performance.
72
+ dtype = torch.bfloat16
73
+
74
+ # 1. Pipeline Loading and Device Placement:
75
+ # Available VRAM (79.64 GB) is significantly greater than the pipeline's loading memory (31.42 GB).
76
+ # Therefore, the entire pipeline can comfortably fit and run on the GPU.
77
+ print(f"Loading pipeline '{CKPT_ID}' with {dtype} precision...")
78
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, torch_dtype=dtype)
79
+
80
+ print("Moving pipeline to CUDA (GPU) as VRAM is sufficient...")
81
+ pipe = pipe.to("cuda")
82
+
83
+ # 2. Quantization:
84
+ # User specified `enable_lossy_outputs: False`, so no quantization is applied.
85
+ print("Quantization is NOT applied as per user's preference for lossless outputs.")
86
+
87
+ # 3. Torch Compile:
88
+ # User specified `enable_torch_compile: True`.
89
+ # Since no offloading was applied (the entire model is on GPU), we can use `fullgraph=True`
90
+ # for potentially greater performance benefits.
91
+ print("Applying torch.compile() to the transformer for accelerated inference...")
92
+ # The transformer is typically the most compute-intensive part of the diffusion pipeline.
93
+ # Compiling it can lead to significant speedups.
94
+ pipe.transformer.compile(fullgraph=True)
95
+
96
+ # --- Inference ---
97
+ print("Starting inference...")
98
+ prompt = "photo of a dog sitting beside a river, high quality, 4k"
99
+ image = pipe(prompt).images[0]
100
+
101
+ print("Inference completed. Displaying image.")
102
+ # Save or display the image
103
+ image.save("generated_image.png")
104
+ print("Image saved as generated_image.png")
105
+
106
+ # You can also display the image directly if running in an environment that supports it
107
+ # image.show()
108
+ ```
109
+ ````
110
+ <br>
111
+ </details>
112
+ <br>
113
+ <details>
114
+ <summary>python e2e_example.py --enable_lossy</summary>
115
+
116
+ ````sh
117
+ System RAM: 1999.99 GB
118
+ RAM Category: large
119
+
120
+ GPU VRAM: 79.65 GB
121
+ VRAM Category: large
122
+ current_generate_prompt='\npipeline_loading_memory_GB: 31.424\navailable_system_ram_GB: 1999.9855346679688\navailable_gpu_vram_GB: 79.6474609375\nenable_lossy_outputs: True\nenable_torch_compile: True\n'
123
+ Sending request to Gemini...
124
+ ```python
125
+ import torch
126
+ from diffusers import DiffusionPipeline
127
+ from diffusers.quantizers import PipelineQuantizationConfig
128
+ import os
129
+
130
+ # --- User-provided information and derived constants ---
131
+ # Checkpoint ID (assuming a placeholder since it was not provided in the user input)
132
+ # Using the example CKPT_ID from the problem description
133
+ CKPT_ID = "black-forest-labs/FLUX.1-dev"
134
+
135
+ # Derived from available_gpu_vram_GB (79.64 GB) and pipeline_loading_memory_GB (31.424 GB)
136
+ # VRAM is ample to load the entire pipeline
137
+ use_cuda_direct_load = True
138
+
139
+ # Derived from enable_lossy_outputs (True)
140
+ enable_quantization = True
141
+
142
+ # Derived from enable_torch_compile (True)
143
+ enable_torch_compile = True
144
+
145
+ # --- Inference Code ---
146
+
147
+ print(f"Loading pipeline: {CKPT_ID}")
148
+
149
+ # 1. Quantization Configuration (since enable_lossy_outputs is True)
150
+ quant_config = None
151
+ if enable_quantization:
152
+ # Default to bitsandbytes 4-bit as per guidance
153
+ print("Enabling bitsandbytes 4-bit quantization for 'transformer' component.")
154
+ quant_config = PipelineQuantizationConfig(
155
+ quant_backend="bitsandbytes_4bit",
156
+ quant_kwargs={"load_in_4bit": True, "bnb_4bit_compute_dtype": torch.bfloat16, "bnb_4bit_quant_type": "nf4"},
157
+ # For FLUX.1-dev, the main generative component is typically 'transformer'.
158
+ # For other pipelines, you might include 'unet', 'text_encoder', 'text_encoder_2', etc.
159
+ components_to_quantize=["transformer"]
160
+ )
161
+
162
+ # 2. Load the Diffusion Pipeline
163
+ # Use bfloat16 for better performance and modern GPU compatibility
164
+ pipe = DiffusionPipeline.from_pretrained(
165
+ CKPT_ID,
166
+ torch_dtype=torch.bfloat16,
167
+ quantization_config=quant_config if enable_quantization else None
168
+ )
169
+
170
+ # 3. Move Pipeline to GPU (since VRAM is ample)
171
+ if use_cuda_direct_load:
172
+ print("Moving the entire pipeline to CUDA (GPU).")
173
+ pipe = pipe.to("cuda")
174
+
175
+ # 4. Apply torch.compile() (since enable_torch_compile is True)
176
+ if enable_torch_compile:
177
+ print("Applying torch.compile() for speedup.")
178
+ # This setting is beneficial when bitsandbytes is used
179
+ torch._dynamo.config.capture_dynamic_output_shape_ops = True
180
+
181
+ # Since no offloading is applied (model fits fully in VRAM), use fullgraph=True
182
+ # The primary component for compilation in FLUX.1-dev is 'transformer'
183
+ print("Compiling pipe.transformer with fullgraph=True.")
184
+ pipe.transformer = torch.compile(pipe.transformer, fullgraph=True)
185
+
186
+ # 5. Perform Inference
187
+ print("Starting image generation...")
188
+ prompt = "photo of a dog sitting beside a river"
189
+ num_inference_steps = 28 # A reasonable number of steps for good quality
190
+
191
+ # Ensure all inputs are on the correct device for inference after compilation
192
+ with torch.no_grad():
193
+ image = pipe(prompt, num_inference_steps=num_inference_steps).images[0]
194
+
195
+ print("Image generation complete.")
196
+ # Save or display the image
197
+ output_path = "generated_image.png"
198
+ image.save(output_path)
199
+ print(f"Image saved to {output_path}")
200
+
201
+ ```
202
+ ````
203
+
204
+ </details>
205
+ <br>
206
+ When invoked on an RTX 4090, it outputs:
207
+
208
+ <details>
209
+ <summary>Expand</summary>
210
+
211
+ ````sh
212
+ System RAM: 125.54 GB
213
+ RAM Category: large
214
+
215
+ GPU VRAM: 23.99 GB
216
+ VRAM Category: medium
217
+ current_generate_prompt='\npipeline_loading_memory_GB: 31.424\navailable_system_ram_GB: 125.54026794433594\navailable_gpu_vram_GB: 23.98828125\nenable_lossy_outputs: False\nenable_torch_compile: True\n'
218
+ Sending request to Gemini...
219
+ ```python
220
+ import torch
221
+ from diffusers import DiffusionPipeline
222
+ import os # For creating offload directories if needed, though not directly used in this solution
223
+
224
+ # --- User-provided information (interpreted) ---
225
+ # Checkpoint ID will be a placeholder as it's not provided by the user directly in the input.
226
+ # pipeline_loading_memory_GB: 31.424 GB
227
+ # available_system_ram_GB: 125.54 GB (Categorized as "large": > 40GB)
228
+ # available_gpu_vram_GB: 23.98 GB (Categorized as "medium": > 8GB <= 24GB)
229
+ # enable_lossy_outputs: False (User prefers no quantization)
230
+ # enable_torch_compile: True (User wants to enable torch.compile)
231
+
232
+ # --- Configuration ---
233
+ # Placeholder for the actual checkpoint ID. Replace with the desired model ID.
234
+ CKPT_ID = "black-forest-labs/FLUX.1-dev" # Example from Diffusers library.
235
+ PROMPT = "photo of a dog sitting beside a river"
236
+
237
+ print(f"--- Optimizing inference for CKPT_ID: {CKPT_ID} ---")
238
+ print(f"Pipeline loading memory: {31.424} GB")
239
+ print(f"Available System RAM: {125.54} GB (Large)")
240
+ print(f"Available GPU VRAM: {23.98} GB (Medium)")
241
+ print(f"Lossy outputs (quantization): {'Disabled' if not False else 'Enabled'}")
242
+ print(f"Torch.compile: {'Enabled' if True else 'Disabled'}")
243
+ print("-" * 50)
244
+
245
+ # --- 1. Load the Diffusion Pipeline ---
246
+ # Use bfloat16 for a good balance of memory and performance.
247
+ print(f"Loading pipeline '{CKPT_ID}' with torch_dtype=torch.bfloat16...")
248
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, torch_dtype=torch.bfloat16)
249
+ print("Pipeline loaded.")
250
+
251
+ # --- 2. Apply Memory Optimizations ---
252
+ # Analysis:
253
+ # - Pipeline memory (31.424 GB) exceeds available GPU VRAM (23.98 GB).
254
+ # - System RAM (125.54 GB) is large.
255
+ # Strategy: Use `enable_model_cpu_offload()`. This moves model components to CPU when not
256
+ # in use, swapping them to GPU on demand. This is ideal when VRAM is insufficient but system
257
+ # RAM is abundant.
258
+
259
+ print("Applying memory optimization: `pipe.enable_model_cpu_offload()`...")
260
+ pipe.enable_model_cpu_offload()
261
+ print("Model CPU offloading enabled. Components will dynamically move between CPU and GPU.")
262
+
263
+ # --- 3. Apply Speed Optimizations (torch.compile) ---
264
+ # Analysis:
265
+ # - `enable_torch_compile` is True.
266
+ # - Model offloading (`enable_model_cpu_offload`) is applied.
267
+ # Strategy: Enable torch.compile with `recompile_limit` as offloading is used.
268
+ # Do not use `fullgraph=True` when offloading is active.
269
+
270
+ print("Applying speed optimization: `torch.compile()`...")
271
+ torch._dynamo.config.recompile_limit = 1000 # Recommended when offloading is applied.
272
+ # torch._dynamo.config.capture_dynamic_output_shape_ops = True # Only for bitsandbytes, not applicable here.
273
+
274
+ # Compile the main computational component (e.g., transformer or unet).
275
+ # FLUX models primarily use a transformer. For other models, it might be `pipe.unet`.
276
+ if hasattr(pipe, "transformer"):
277
+ print("Compiling `pipe.transformer`...")
278
+ pipe.transformer.compile()
279
+ elif hasattr(pipe, "unet"):
280
+ print("Compiling `pipe.unet`...")
281
+ pipe.unet.compile()
282
+ else:
283
+ print("Warning: Neither `pipe.transformer` nor `pipe.unet` found for compilation. Skipping `torch.compile` for core component.")
284
+
285
+ print("Speed optimizations applied.")
286
+
287
+ # --- 4. Perform Inference ---
288
+ print(f"Starting image generation for prompt: '{PROMPT}'")
289
+ # The pipeline handles component swapping automatically due to `enable_model_cpu_offload()`.
290
+ image = pipe(PROMPT).images[0]
291
+ print("Image generation complete.")
292
+
293
+ # --- 5. Save or Display the Result ---
294
+ output_path = "generated_image.png"
295
+ image.save(output_path)
296
+ print(f"Generated image saved to '{output_path}'")
297
+
298
+ print("\n--- Inference process finished successfully ---")
299
+ ```
300
+ ````
301
+ </details>
302
+
303
+ ### More outputs
304
+
305
+ <details>
306
+ <summary>"Wan-AI/Wan2.1-T2V-14B-Diffusers" with lossy outputs enabled</summary>
307
+
308
+ ````sh
309
+ System RAM: 125.54 GB
310
+ RAM Category: large
311
+
312
+ GPU VRAM: 23.99 GB
313
+ VRAM Category: medium
314
+ ("current_generate_prompt='\\nckpt_id: "
315
+ 'Wan-AI/Wan2.1-T2V-14B-Diffusers\\npipeline_loading_memory_GB: '
316
+ '37.432\\navailable_system_ram_GB: '
317
+ '125.54026794433594\\navailable_gpu_vram_GB: '
318
+ '23.98828125\\nenable_lossy_outputs: True\\nis_fp8_supported: '
319
+ "True\\nenable_torch_compile: True\\n'")
320
+ Sending request to Gemini...
321
+ ```python
322
+ from diffusers import DiffusionPipeline
323
+ from diffusers.quantizers import PipelineQuantizationConfig
324
+ import torch
325
+
326
+ ckpt_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
327
+
328
+ quant_config = PipelineQuantizationConfig(
329
+ quant_backend="torchao",
330
+ quant_kwargs={"quant_type": "float8dq_e4m3_row"},
331
+ components_to_quantize=["transformer"]
332
+ )
333
+ pipe = DiffusionPipeline.from_pretrained(ckpt_id, quantization_config=quant_config, torch_dtype=torch.bfloat16)
334
+
335
+ # Apply model CPU offload due to VRAM constraints
336
+ pipe.enable_model_cpu_offload()
337
+
338
+ # torch.compile() configuration
339
+ torch._dynamo.config.recompile_limit = 1000
340
+ pipe.transformer.compile()
341
+ # pipe.vae.decode = torch.compile(pipe.vae.decode) # Uncomment if you want to compile VAE decode as well
342
+
343
+ prompt = "photo of a dog sitting beside a river"
344
+
345
+ # Modify the pipe call arguments as needed.
346
+ image = pipe(prompt).images[0]
347
+
348
+ # You can save the image or perform further operations here
349
+ # image.save("generated_image.png")
350
+ ```
351
+ ````
352
+ </details>
353
+ <small>Ran on an RTX 4090</small>
app.py ADDED
@@ -0,0 +1,128 @@
1
+ import gradio as gr
2
+ from utils.pipeline_utils import determine_pipe_loading_memory
3
+ from utils.llm_utils import LLMCodeOptimizer
4
+ from prompts import system_prompt, generate_prompt
5
+ from utils.hardware_utils import categorize_ram, categorize_vram
6
+
7
+ LLM_CACHE = {}
8
+
9
+
10
+ def get_output_code(
11
+ repo_id,
12
+ gemini_model_to_use,
13
+ disable_bf16,
14
+ enable_lossy,
15
+ system_ram,
16
+ gpu_vram,
17
+ torch_compile_friendly,
18
+ fp8_friendly,
19
+ ):
20
+ loading_mem_out = determine_pipe_loading_memory(repo_id, None, disable_bf16)
21
+ load_memory = loading_mem_out["total_loading_memory_gb"]
22
+ ram_category = categorize_ram(system_ram)
23
+ vram_category = categorize_vram(gpu_vram)
24
+
25
+ print(f"RAM Category: {ram_category}")
26
+ print(f"VRAM Category: {vram_category}")
27
+
28
+ if gemini_model_to_use not in LLM_CACHE:
29
+ print(f"Initializing new LLM instance for: {gemini_model_to_use}")
30
+ # If not, create it and add it to the cache
31
+ LLM_CACHE[gemini_model_to_use] = LLMCodeOptimizer(model_name=gemini_model_to_use, system_prompt=system_prompt)
32
+
33
+ llm = LLM_CACHE[gemini_model_to_use]
34
+ current_generate_prompt = generate_prompt.format(
35
+ ckpt_id=repo_id,
36
+ pipeline_loading_memory=load_memory,
37
+ available_system_ram=system_ram,
38
+ available_gpu_vram=gpu_vram,
39
+ enable_lossy_outputs=enable_lossy,
40
+ is_fp8_supported=fp8_friendly,
41
+ enable_torch_compile=torch_compile_friendly,
42
+ )
43
+ generated_prompt = current_generate_prompt
44
+ llm_output = llm(current_generate_prompt)
45
+ return llm_output, generated_prompt
46
+
47
+
48
+ # --- Gradio UI Definition ---
49
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
50
+ gr.Markdown(
51
+ """
52
+ # 🧨 Generate Diffusers Inference code snippet tailored to your machine
53
+ Enter a Hugging Face Hub `repo_id` and your system specs to get a tailored inference snippet.
54
+ This tool uses [Gemini](https://ai.google.dev/gemini-api/docs/models) to generate the code based on your settings. This is based on
55
+ [sayakpaul/auto-diffusers-docs](https://github.com/sayakpaul/auto-diffusers-docs/).
56
+ """
57
+ )
58
+
59
+ with gr.Row():
60
+ with gr.Column(scale=3):
61
+ repo_id = gr.Textbox(
62
+ label="Hugging Face Repo ID",
63
+ placeholder="e.g., black-forest-labs/FLUX.1-dev",
64
+ info="The model repository you want to analyze.",
65
+ value="black-forest-labs/FLUX.1-dev",
66
+ )
67
+ gemini_model_to_use = gr.Dropdown(
68
+ ["gemini-2.5-flash", "gemini-2.5-pro"],
69
+ value="gemini-2.5-flash",
70
+ label="Gemini Model",
71
+ info="Select the model to generate the analysis.",
72
+ )
73
+ with gr.Row():
74
+ system_ram = gr.Number(label="System RAM (GB)", value=20)
75
+ gpu_vram = gr.Number(label="GPU VRAM (GB)", value=8)
76
+
77
+ with gr.Row():
78
+ disable_bf16 = gr.Checkbox(
79
+ label="Disable BF16 (Use FP32)",
80
+ value=False,
81
+ info="Calculate using 32-bit precision instead of 16-bit.",
82
+ )
83
+ enable_lossy = gr.Checkbox(
84
+ label="Allow Lossy Quantization", value=False, info="Consider 8-bit/4-bit quantization."
85
+ )
86
+ torch_compile_friendly = gr.Checkbox(
87
+ label="torch.compile() friendly", value=False, info="Model is compatible with torch.compile."
88
+ )
89
+ fp8_friendly = gr.Checkbox(
90
+ label="fp8 friendly", value=False, info="Model and hardware support FP8 precision."
91
+ )
92
+
93
+ with gr.Column(scale=1):
94
+ submit_btn = gr.Button("Estimate Memory ☁", variant="primary", scale=1)
95
+
96
+ with gr.Accordion("Generated LLM Prompt (for debugging)", open=False):
97
+ prompt_output = gr.Textbox(label="Prompt", show_copy_button=True, lines=10, interactive=False)
98
+
99
+ gr.Markdown("---")
100
+ gr.Markdown("### Estimation Result")
101
+
102
+ output_markdown = gr.Markdown(label="LLM Output", value="*Your results will appear here...*")
103
+
104
+ gr.Markdown(
105
+ """
106
+ ---
107
+ > ⛔️ **Disclaimer:** Large Language Models (LLMs) can make mistakes. The information provided
108
+ > is an estimate and should be verified. Always test the model on your target hardware to confirm
109
+ > actual memory requirements.
110
+ """
111
+ )
112
+
113
+ # --- Event Handling ---
114
+ all_inputs = [
115
+ repo_id,
116
+ gemini_model_to_use,
117
+ disable_bf16,
118
+ enable_lossy,
119
+ system_ram,
120
+ gpu_vram,
121
+ torch_compile_friendly,
122
+ fp8_friendly,
123
+ ]
124
+ submit_btn.click(fn=get_output_code, inputs=all_inputs, outputs=[output_markdown, prompt_output])
125
+
126
+
127
+ if __name__ == "__main__":
128
+ demo.launch()
e2e_example.py ADDED
@@ -0,0 +1,92 @@
1
+ import argparse
2
+ from utils.llm_utils import LLMCodeOptimizer
3
+ from prompts import system_prompt, generate_prompt
4
+ from utils.pipeline_utils import determine_pipe_loading_memory
5
+ from utils.hardware_utils import (
6
+ categorize_vram,
7
+ categorize_ram,
8
+ get_gpu_vram_gb,
9
+ get_system_ram_gb,
10
+ is_compile_friendly_gpu,
11
+ is_fp8_friendly,
12
+ )
13
+ import torch
14
+ from pprint import pprint
15
+
16
+
17
+ def create_parser():
18
+ parser = argparse.ArgumentParser()
19
+ parser.add_argument(
20
+ "--ckpt_id",
21
+ type=str,
22
+ default="black-forest-labs/FLUX.1-dev",
23
+ help="Can be a repo id from the Hub or a local path where the checkpoint is stored.",
24
+ )
25
+ parser.add_argument(
26
+ "--gemini_model",
27
+ type=str,
28
+ default="gemini-2.5-flash-preview-05-20",
29
+ help="Gemini model to use. Choose from https://ai.google.dev/gemini-api/docs/models.",
30
+ )
31
+ parser.add_argument(
32
+ "--variant",
33
+ type=str,
34
+ default=None,
35
+ help="If the `ckpt_id` has variants, supply this flag to estimate compute. Example: 'fp16'.",
36
+ )
37
+ parser.add_argument(
38
+ "--disable_bf16",
39
+ action="store_true",
40
+ help="When enabled the load memory is affected. Prefer not enabling this flag.",
41
+ )
42
+ parser.add_argument(
43
+ "--enable_lossy",
44
+ action="store_true",
45
+ help="When enabled, the code will include snippets for enabling quantization.",
46
+ )
47
+ return parser
48
+
49
+
50
+ def main(args):
51
+ if not torch.cuda.is_available():
52
+ raise ValueError("Not supported for non-CUDA devices for now.")
53
+
54
+ loading_mem_out = determine_pipe_loading_memory(args.ckpt_id, args.variant, args.disable_bf16)
55
+ load_memory = loading_mem_out["total_loading_memory_gb"]
56
+ ram_gb = get_system_ram_gb()
57
+ ram_category = categorize_ram(ram_gb)
58
+ if ram_gb is not None:
59
+ print(f"\nSystem RAM: {ram_gb:.2f} GB")
60
+ print(f"RAM Category: {ram_category}")
61
+ else:
62
+ print("\nCould not determine System RAM.")
63
+
64
+ vram_gb = get_gpu_vram_gb()
65
+ vram_category = categorize_vram(vram_gb)
66
+ if vram_gb is not None:
67
+ print(f"\nGPU VRAM: {vram_gb:.2f} GB")
68
+ print(f"VRAM Category: {vram_category}")
69
+ else:
70
+ print("\nCould not determine GPU VRAM.")
71
+
72
+ is_compile_friendly = is_compile_friendly_gpu()
73
+ is_fp8_compatible = is_fp8_friendly()
74
+
75
+ llm = LLMCodeOptimizer(model_name=args.gemini_model, system_prompt=system_prompt)
76
+ current_generate_prompt = generate_prompt.format(
77
+ ckpt_id=args.ckpt_id,
78
+ pipeline_loading_memory=load_memory,
79
+ available_system_ram=ram_gb,
80
+ available_gpu_vram=vram_gb,
81
+ enable_lossy_outputs=args.enable_lossy,
82
+ is_fp8_supported=is_fp8_compatible,
83
+ enable_torch_compile=is_compile_friendly,
84
+ )
85
+ pprint(f"{current_generate_prompt=}")
86
+ print(llm(current_generate_prompt))
87
+
88
+
89
+ if __name__ == "__main__":
90
+ parser = create_parser()
91
+ args = parser.parse_args()
92
+ main(args)
prompts.py ADDED
@@ -0,0 +1,183 @@
1
+ system_prompt = """
2
+ Consider yourself an expert at optimizing inference code for diffusion-based image and video generation models.
3
+ For this project, you will be working with the Diffusers library. The library is built on top of PyTorch. Therefore,
4
+ it's essential for you to exercise your PyTorch knowledge.
5
+
6
+ Below is the simplest example of how a diffusion pipeline is usually used in Diffusers:
7
+
8
+ ```py
9
+ from diffusers import DiffusionPipeline
10
+ import torch
11
+
12
+ ckpt_id = "black-forest-labs/FLUX.1-dev"
13
+ pipe = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=torch.bfloat16).to("cuda")
14
+ image = pipe("photo of a dog sitting beside a river").images[0]
15
+ ```
16
+
17
+ Your task will be to output reasonable inference code in Python from user-supplied information about their
18
+ needs. More specifically, you will be provided with the following information (in no particular order):
19
+
20
+ * `ckpt_id` of the diffusion pipeline
21
+ * Loading memory of a diffusion pipeline in GB
22
+ * Available system RAM in GB
23
+ * Available GPU VRAM in GB
24
+ * If the user can afford to have lossy outputs (the likes of quantization)
25
+ * If FP8 is supported
26
+ * If the available GPU supports the latest `torch.compile()` knobs
27
+
28
+ There are three categories of system RAM, broadly:
29
+
30
+ * "small": <= 20GB
31
+ * "medium": > 20GB <= 40GB
32
+ * "large": > 40GB
33
+
34
+ Similarly, there are three categories of VRAM, broadly:
35
+
36
+ * "small": <= 8GB
37
+ * "medium": > 8GB <= 24GB
38
+ * "large": > 24GB
39
+
40
+ Here is a high-level overview of what optimizations to apply for typical use cases.
41
+
42
+ * Small VRAM, small system RAM
43
+
44
+ Depending on the loading memory of the underlying diffusion pipeline, if the available VRAM and system RAM
45
+ are both small, you apply an offloading technique called group offloading with disk serialization/deserialization
46
+ support.
47
+
48
+ Consider the code has an underlying component called `pipe` which has all the components needed
49
+ to perform inference. So, the code for realizing the above solution would look something
50
+ like so:
51
+
52
+ ```py
53
+ from transformers import PreTrainedModel
+ from diffusers.hooks import apply_group_offloading
54
+ # other imports go here.
55
+ ...
56
+
57
+ onload_device = torch.device("cuda")
58
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, torch_dtype=torch.bfloat16)
59
+
60
+ offload_dir = "DIRECTORY" # change me
61
+ for name, module in pipe.components.items():
62
+ if hasattr(module, "_supports_group_offloading") and module._supports_group_offloading:
63
+ module.enable_group_offload(
64
+ onload_device=onload_device,
65
+ offload_type="leaf_level",
66
+ use_stream=True,
67
+ offload_to_disk_path=f"{offload_dir}/{name}"
68
+ )
69
+ elif isinstance(module, PreTrainedModel):
70
+ apply_group_offloading(
71
+ module,
72
+ onload_device=onload_device,
73
+ offload_type="leaf_level",
74
+ use_stream=True,
75
+ offload_to_disk_path=f"{offload_dir}/{name}"
76
+ )
77
+
78
+ # Inference goes here.
79
+ ...
80
+ ```
81
+
82
+ * Small VRAM, medium system RAM
83
+
84
+ Here, we can make use of model offloading:
85
+
86
+ ```py
87
+ # other imports go here.
88
+ ...
89
+
90
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, torch_dtype=torch.bfloat16)
91
+ pipe.enable_model_cpu_offload()
92
+
93
+ # Inference goes here.
94
+ ...
95
+ ```
96
+
97
+ * Large VRAM, large system RAM
98
+
99
+ In this case, the `pipe` can be placed directly on CUDA if and only if the loading memory requirements are
100
+ satisfied by the available VRAM:
101
+
102
+ ```py
103
+ pipe = pipe.to("cuda")
104
+ ```
105
+
106
+ ## Guidance on using quantization
107
+
108
+ If the user specifies to use quantization, then you should default to using bitsandbytes 4bit. The code here
109
+ would look like so:
110
+
111
+ ```py
112
+ from diffusers.quantizers import PipelineQuantizationConfig
113
+ # other imports go here.
114
+ ...
115
+
116
+ quant_config = PipelineQuantizationConfig(
117
+ quant_backend="bitsandbytes_4bit",
118
+ quant_kwargs={"load_in_4bit": True, "bnb_4bit_compute_dtype": torch.bfloat16, "bnb_4bit_quant_type": "nf4"},
119
+ components_to_quantize=["transformer"] # Can add a heavy text encoder here too.
120
+ )
121
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, quantization_config=quant_config, torch_dtype=torch.bfloat16)
122
+
123
+ # Rest of the code goes here.
124
+ ...
125
+ ```
126
+
127
+ If there's support for performing FP8 computation, then we should use `torchao`:
128
+
129
+ ```py
130
+ from diffusers.quantizers import PipelineQuantizationConfig
131
+ # other imports go here.
132
+ ...
133
+
134
+ quant_config = PipelineQuantizationConfig(
135
+ quant_backend="torchao",
136
+ quant_kwargs={"quant_type": "float8dq_e4m3_row"},
137
+ components_to_quantize=["transformer"]
138
+ )
139
+ pipe = DiffusionPipeline.from_pretrained(CKPT_ID, quantization_config=quant_config, torch_dtype=torch.bfloat16)
140
+
141
+ # Rest of the code goes here.
142
+ ...
143
+ ```
144
+
145
+ ## Guidance on using `torch.compile()`
146
+
147
+ If the user wants to additionally boost inference speed, then you should add the following lines of code just before
148
+ inference:
149
+
150
+ * Add the following when offloading was applied: `torch._dynamo.config.recompile_limit = 1000`.
151
+ * ONLY add the following when `bitsandbytes` was used for `quant_backend`: `torch._dynamo.config.capture_dynamic_output_shape_ops = True`.
152
+ * Finally, add `pipe.transformer.compile()`.
153
+ * Add `pipe.vae.decode = torch.compile(pipe.vae.decode)` as a comment.
154
+
155
+ In case no offloading was applied, then the line should be:
156
+
157
+ ```py
158
+ pipe.transformer.compile(fullgraph=True)
159
+ ```
160
+
161
+ ## Other guidelines
162
+
163
+ * When the available VRAM > pipeline loading memory, you should suggest using `pipe = pipe.to("cuda")`.
164
+ * If the user prefers not to use quantization but wants to further reduce memory, then suggest using:
165
+ `pipe.transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)`.
166
+ * Do NOT add any extra imports or lines of code that will not be used.
167
+ * Do NOT try to be too creative about combining the optimization techniques laid out above.
168
+ * Do NOT add extra arguments to the `pipe` call other than the `prompt`.
169
+ * Add a comment before the `pipe` call, saying "Modify the pipe call arguments as needed."
170
+ * Do NOT add any serialization step after the pipe call.
171
+
172
+ Please think about these guidelines carefully before producing the outputs.
173
+ """
174
+
175
+ generate_prompt = """
176
+ ckpt_id: {ckpt_id}
177
+ pipeline_loading_memory_GB: {pipeline_loading_memory}
178
+ available_system_ram_GB: {available_system_ram}
179
+ available_gpu_vram_GB: {available_gpu_vram}
180
+ enable_lossy_outputs: {enable_lossy_outputs}
181
+ is_fp8_supported: {is_fp8_supported}
182
+ enable_torch_compile: {enable_torch_compile}
183
+ """
requirements.txt ADDED
@@ -0,0 +1,5 @@
1
+ torch>=2.1
2
+ safetensors>=0.5.3
3
+ huggingface_hub>=0.30.2
4
+ hf_xet
5
+ google-generativeai>=0.8.5
utils/hardware_utils.py ADDED
@@ -0,0 +1,128 @@
1
+ import subprocess
2
+ import psutil
3
+ import functools
4
+ from torch._inductor.runtime.hints import DeviceProperties
5
+ from torch._inductor.utils import get_gpu_type
6
+ from typing import Union
7
+ import torch
8
+
9
+
10
+ @functools.cache
11
+ def get_system_ram_gb():
12
+ """
13
+ Gets the total physical system RAM in Gigabytes.
14
+
15
+ Returns:
16
+ float: Total system RAM in GB, or None if it cannot be determined.
17
+ """
18
+ try:
19
+ # Get virtual memory details
20
+ virtual_memory = psutil.virtual_memory()
21
+ # Total physical memory in bytes
22
+ total_ram_bytes = virtual_memory.total
23
+ # Convert bytes to gigabytes (1 GB = 1024^3 bytes)
24
+ total_ram_gb = total_ram_bytes / (1024**3)
25
+ return total_ram_gb
26
+ except Exception as e:
27
+ print(f"Error getting system RAM: {e}")
28
+ return None
29
+
30
+
31
+ @functools.cache
32
+ def get_gpu_vram_gb():
33
+ """
34
+ Gets the total GPU VRAM in Gigabytes using the nvidia-smi command.
35
+ This function is intended for NVIDIA GPUs.
36
+
37
+ Returns:
38
+ float: Total GPU VRAM in GB, or None if it cannot be determined.
39
+ """
40
+ try:
41
+ # Execute the nvidia-smi command to get GPU memory info
42
+ # The command queries for the total memory and outputs it in MiB
43
+ result = subprocess.run(
44
+ ["nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits"],
45
+ capture_output=True,
46
+ text=True,
47
+ check=True,
48
+ )
49
+ # The output will be a string like "12288\n" for the first GPU
50
+ # We take the first line in case there are multiple GPUs
51
+ vram_mib = int(result.stdout.strip().split("\n")[0])
52
+ # Convert MiB to Gigabytes (1 GB = 1024 MiB)
53
+ vram_gb = vram_mib / 1024
54
+ return vram_gb
55
+ except FileNotFoundError:
56
+ # This error occurs if nvidia-smi is not installed or not in the PATH
57
+ print("INFO: 'nvidia-smi' command not found. Cannot determine GPU VRAM.")
58
+ print(" This is expected if you don't have an NVIDIA GPU or drivers installed.")
59
+ return None
60
+ except (subprocess.CalledProcessError, ValueError, IndexError) as e:
61
+ # Handles other potential errors like command failure or parsing issues
62
+ print(f"Error getting GPU VRAM: {e}")
63
+ return None
64
+
65
+
66
+ def categorize_ram(ram_gb):
67
+ """
68
+ Categorizes RAM into 'small', 'medium', or 'large'.
69
+
70
+ Args:
71
+ ram_gb (float): The amount of RAM in GB.
72
+
73
+ Returns:
74
+ str: The category ('small', 'medium', 'large') or 'unknown'.
75
+ """
76
+ if ram_gb is None:
77
+ return "unknown"
78
+ if ram_gb <= 20:
79
+ return "small"
80
+ elif 20 < ram_gb <= 40:
81
+ return "medium"
82
+ else: # ram_gb > 40
83
+ return "large"
84
+
85
+
86
+ def categorize_vram(vram_gb):
87
+ """
88
+ Categorizes VRAM into 'small', 'medium', or 'large'.
89
+
90
+ Args:
91
+ vram_gb (float): The amount of VRAM in GB.
92
+
93
+ Returns:
94
+ str: The category ('small', 'medium', 'large') or 'not applicable/unknown'.
95
+ """
96
+ if vram_gb is None:
97
+ return "not applicable/unknown"
98
+ if vram_gb <= 8:
99
+ return "small"
100
+ elif 8 < vram_gb <= 24:
101
+ return "medium"
102
+ else: # vram_gb > 24
103
+ return "large"
104
+
105
+
106
+ @functools.cache
107
+ def is_compile_friendly_gpu(index_or_device: Union[int, str, torch.device] = 0) -> bool:
108
+ """Hand-coded rules from experiments. Don't take seriously."""
109
+ if isinstance(index_or_device, torch.device):
110
+ device = index_or_device
111
+ elif isinstance(index_or_device, str):
112
+ device = torch.device(index_or_device)
113
+ else:
114
+ device = torch.device(get_gpu_type(), index_or_device)
115
+
116
+ prop = DeviceProperties.create(device)
117
+ return prop.major >= 8
118
+
119
+
120
+ @functools.lru_cache()
121
+ def is_sm_version(major: int, minor: int) -> bool:
122
+ """Check if the GPU compute capability (SM version) is exactly major.minor."""
123
+ is_cuda = torch.cuda.is_available() and torch.version.cuda
124
+ return torch.cuda.get_device_capability() == (major, minor) if is_cuda else False
125
+
126
+
127
+ def is_fp8_friendly():
128
+ return is_sm_version(8, 9)
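A quick, hypothetical smoke test of these heuristics (assumes an NVIDIA GPU with `nvidia-smi` on the PATH and the `utils` package importable), not part of the committed file:

```python
# Illustrative only: print the detected hardware and its categories.
from utils.hardware_utils import (
    categorize_ram,
    categorize_vram,
    get_gpu_vram_gb,
    get_system_ram_gb,
    is_compile_friendly_gpu,
    is_fp8_friendly,
)

ram_gb = get_system_ram_gb()
vram_gb = get_gpu_vram_gb()
print(f"RAM: {ram_gb} GB -> {categorize_ram(ram_gb)}")
print(f"VRAM: {vram_gb} GB -> {categorize_vram(vram_gb)}")
if vram_gb is not None:
    print(f"compile-friendly GPU: {is_compile_friendly_gpu()}")
    print(f"fp8-friendly GPU: {is_fp8_friendly()}")
```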
utils/llm_utils.py ADDED
@@ -0,0 +1,26 @@
1
+ import google.generativeai as genai
2
+ import os
3
+
4
+ # https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview
5
+ MODEL_NAME = "gemini-2.5-flash-preview-05-20"
6
+
7
+
8
+ class LLMCodeOptimizer:
9
+ def __init__(self, model_name: str, system_prompt: str):
10
+ api_key = os.getenv("GOOGLE_API_KEY")
11
+ if api_key is None:
12
+ raise ValueError("Must provide an API key for Gemini through the `GOOGLE_API_KEY` env variable.")
13
+ genai.configure(api_key=api_key)
14
+ self.model = genai.GenerativeModel(model_name=model_name, system_instruction=system_prompt)
15
+
16
+ def __call__(self, generation_prompt):
17
+ try:
18
+ print("Sending request to Gemini...")
19
+ response = self.model.generate_content(generation_prompt)
20
+
21
+ return response.text
22
+
23
+ except Exception as e:
24
+ # Handle potential exceptions, such as invalid API keys,
25
+ # network issues, or content moderation errors.
26
+ return f"An error occurred: {e}"
utils/pipeline_utils.py ADDED
@@ -0,0 +1,194 @@
1
+ import collections
2
+ from pathlib import Path
3
+ import functools
4
+ import os
5
+ import safetensors.torch
6
+ from huggingface_hub import model_info, hf_hub_download
7
+ import tempfile
8
+ import torch
11
+ import requests
12
+ import struct
13
+ from huggingface_hub import hf_hub_url
14
+
15
+ DTYPE_MAP = {"FP32": torch.float32, "FP16": torch.float16, "BF16": torch.bfloat16}
16
+
17
+
18
+ # https://huggingface.co/docs/safetensors/v0.3.2/metadata_parsing#python
19
+ def _parse_single_file(url):
20
+ print(f"{url=}")
21
+ token = os.getenv("HF_TOKEN")
22
+ auth = {"Authorization": f"Bearer {token}"} if token else {}  # only authenticate when a token is set
+ headers = {"Range": "bytes=0-7", **auth}
23
+ response = requests.get(url, headers=headers)
24
+ length_of_header = struct.unpack("<Q", response.content)[0]
25
+ headers = {"Range": f"bytes=8-{7 + length_of_header}", **auth}
26
+ response = requests.get(url, headers=headers)
27
+ header = response.json()
28
+ return header
29
+
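+ # For reference, the returned header maps tensor names to entries of the form
+ # {"dtype": "F32" | "F16" | "BF16" | ..., "shape": [...], "data_offsets": [...]},
+ # plus an optional "__metadata__" entry (see the safetensors metadata docs linked above).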
30
+
31
+ def _get_dtype_from_safetensor_file(file_path):
32
+ """Inspects a safetensors file and returns the dtype of the first tensor.
33
+
34
+ If it's a URL instead of a local file, we query only its header remotely.
35
+ """
36
+ if "https" in file_path:
37
+ metadata = _parse_single_file(file_path)
38
+ except_format_metadata_keys = sorted({k for k in metadata if k != "__metadata__"})
39
+ string_dtype = metadata[except_format_metadata_keys[0]]["dtype"]
40
+ return DTYPE_MAP[string_dtype]
41
+ try:
42
+ # load_file is simple and sufficient for this info-gathering purpose.
43
+ state_dict = safetensors.torch.load_file(file_path)
44
+ if not state_dict:
45
+ return "N/A (empty)"
46
+
47
+ # Get the dtype from the first tensor in the state dict
48
+ first_tensor = next(iter(state_dict.values()))
49
+ return first_tensor.dtype
50
+ except Exception as e:
51
+ print(f"Warning: Could not determine dtype from {file_path}. Error: {e}")
52
+ return "N/A (error)"
53
+
54
+
55
+ def _process_components(component_files, file_accessor_fn, disable_bf16=False):
56
+ """
57
+ Generic function to process components, calculate size, and determine dtype.
58
+
59
+ Args:
60
+ component_files (dict): A dictionary mapping component names to lists of file objects.
61
+ file_accessor_fn (function): A function that takes a file object and returns
62
+ a tuple of (path_or_url_for_inspection, size_in_bytes, relative_filename).
63
+ disable_bf16 (bool): If True, fp32 components are not assumed to load in `torch.bfloat16`, so their size is not halved.
64
+
65
+ Returns:
66
+ dict: A dictionary containing the total memory and detailed component info.
67
+ """
68
+ components_info = {}
69
+ total_size_bytes = 0
70
+
71
+ for name, files in component_files.items():
72
+ # Get dtype by inspecting the first file of the component
73
+ first_file = files[0]
74
+
75
+ # The accessor function handles how to get the path (download vs local)
76
+ # and its size and relative name.
77
+ inspection_path, _, _ = file_accessor_fn(first_file)
78
+ dtype = _get_dtype_from_safetensor_file(inspection_path)
79
+
80
+ component_size_bytes = 0
81
+ component_file_details = []
82
+ for f in files:
83
+ _, size_bytes, rel_filename = file_accessor_fn(f)
84
+ component_size_bytes += size_bytes
85
+ component_file_details.append({"filename": rel_filename, "size_mb": size_bytes / (1024**2)})
86
+
87
+ if dtype == torch.float32 and not disable_bf16:
88
+ print(
89
+ f"The `dtype` for component ({name}) is torch.float32. Since bf16 computation is not disabled "
90
+ "we will slash the total size of this component by 2."
91
+ )
92
+ total_size_bytes += component_size_bytes / 2
93
+ else:
94
+ total_size_bytes += component_size_bytes
95
+
96
+ components_info[name] = {
97
+ "size_gb": round(component_size_bytes / (1024**3), 3),
98
+ "dtype": dtype,
99
+ "files": sorted(component_file_details, key=lambda x: x["filename"]),
100
+ }
101
+
102
+ return {
103
+ "total_loading_memory_gb": round(total_size_bytes / (1024**3), 3),
104
+ "components": components_info,
105
+ }
106
+
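+ # The returned dict looks roughly like this (values are illustrative):
+ # {
+ #     "total_loading_memory_gb": 28.6,
+ #     "components": {
+ #         "transformer": {"size_gb": 26.0, "dtype": torch.bfloat16, "files": [...]},
+ #         ...
+ #     },
+ # }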
107
+
108
+ @functools.lru_cache()
109
+ def _determine_memory_from_hub_ckpt(ckpt_id, variant=None, disable_bf16=False):
110
+ """
111
+ Determines memory and dtypes for a checkpoint on the Hugging Face Hub.
112
+ """
113
+ files_in_repo = model_info(ckpt_id, files_metadata=True, token=os.getenv("HF_TOKEN")).siblings
114
+ all_safetensors_siblings = [
115
+ s for s in files_in_repo if s.rfilename.endswith(".safetensors") and "/" in s.rfilename
116
+ ]
117
+ if variant:
118
+ all_safetensors_siblings = [f for f in all_safetensors_siblings if variant in f.rfilename]
119
+
120
+ component_files = collections.defaultdict(list)
121
+ for sibling in all_safetensors_siblings:
122
+ component_name = Path(sibling.rfilename).parent.name
123
+ component_files[component_name].append(sibling)
124
+
125
+ with tempfile.TemporaryDirectory() as temp_dir:
126
+
127
+ def hub_file_accessor(file_obj):
128
+ """Accessor for Hub files: downloads them and returns path/size."""
129
+ print(f"Querying '{file_obj.rfilename}' for inspection...")
130
+ url = hf_hub_url(ckpt_id, file_obj.rfilename)
131
+ return url, file_obj.size, file_obj.rfilename
132
+
133
+ # We only need to inspect one file per component for dtype detection.
134
+ # To keep this cheap, we create a specialized accessor for the processing loop
135
+ # that only queries the header of the *first* file encountered for a component.
136
+ downloaded_for_inspection = {}
137
+
138
+ def efficient_hub_accessor(file_obj):
139
+ component_name = Path(file_obj.rfilename).parent.name
140
+ if component_name not in downloaded_for_inspection:
141
+ path, _, _ = hub_file_accessor(file_obj)
142
+ downloaded_for_inspection[component_name] = path
143
+
144
+ inspection_path = downloaded_for_inspection[component_name]
145
+ return inspection_path, file_obj.size, file_obj.rfilename
146
+
147
+ return _process_components(component_files, efficient_hub_accessor, disable_bf16)
148
+
149
+
150
+ @functools.lru_cache()
151
+ def _determine_memory_from_local_ckpt(path: str, variant=None, disable_bf16=False):
152
+ """
153
+ Determines memory and dtypes for a local checkpoint.
154
+ """
155
+ ckpt_path = Path(path)
156
+ if not ckpt_path.is_dir():
157
+ return {"error": f"Checkpoint path '{path}' not found or is not a directory."}
158
+
159
+ all_safetensors_paths = list(ckpt_path.glob("**/*.safetensors"))
160
+ if variant:
161
+ all_safetensors_paths = [p for p in all_safetensors_paths if variant in p.name]
162
+
163
+ component_files = collections.defaultdict(list)
164
+ for file_path in all_safetensors_paths:
165
+ component_name = file_path.parent.name
166
+ component_files[component_name].append(file_path)
167
+
168
+ def local_file_accessor(file_path):
169
+ """Accessor for local files: just returns their path and size."""
170
+ return file_path, file_path.stat().st_size, str(file_path.relative_to(ckpt_path))
171
+
172
+ return _process_components(component_files, local_file_accessor, disable_bf16)
173
+
174
+
175
+ def determine_pipe_loading_memory(ckpt_id: str, variant=None, disable_bf16=False):
176
+ """
177
+ Determines the memory and dtypes for a pipeline, whether it's local or on the Hub.
178
+ """
179
+ if os.path.isdir(ckpt_id):
180
+ return _determine_memory_from_local_ckpt(ckpt_id, variant, disable_bf16)
181
+ else:
182
+ return _determine_memory_from_hub_ckpt(ckpt_id, variant, disable_bf16)
183
+
184
+
185
+ if __name__ == "__main__":
186
+ output = _determine_memory_from_hub_ckpt("Wan-AI/Wan2.1-T2V-14B-Diffusers")
187
+ total_size_gb = output["total_loading_memory_gb"]
188
+ safetensor_files = output["components"]
189
+ print(f"{total_size_gb=} GB")
190
+ print(f"{safetensor_files=}")
191
+ print("\n")
192
+ # output = _determine_memory_from_local_ckpt("LOCAL_DIR")  # change me.
193
+ # print(f"{output['total_loading_memory_gb']=} GB")
194
+ # print(f"{output['components']=}")