Datasets: slz1 /
ArXiv:
slz1 committed on
Commit 7b4c6f4 · verified · 1 Parent(s): 39bd69c

Add files using upload-large-folder tool

Files changed (50):
  1. .gitignore +20 -0
  2. LICENSE +21 -0
  3. README.md +217 -0
  4. VAR.code-workspace +10 -0
  5. a-next_token.py +101 -0
  6. demo_sample.ipynb +127 -0
  7. demo_zero_shot_edit.ipynb +0 -0
  8. dist.py +214 -0
  9. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep10adamlr8e-05wd0.05/events.out.tfevents.1749554045.slz-gpu2-0.240780.0__0610_1914 +3 -0
  10. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep10adamlr8e-05wd0.05/events.out.tfevents.1749554096.slz-gpu2-0.241049.0__0610_1914 +3 -0
  11. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep130adamlr8e-05wd0.05/events.out.tfevents.1750037349.slz-gpu2-0.1463.0__0616_0929 +3 -0
  12. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep130adamlr8e-05wd0.05/events.out.tfevents.1750038434.slz-gpu2-0.5620.0__0616_0947 +3 -0
  13. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep35adamlr8e-05wd0.05/events.out.tfevents.1749719042.slz-gpu2-0.60611.0__0612_1704 +3 -0
  14. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep45adamlr8e-05wd0.05/events.out.tfevents.1749730258.slz-gpu2-0.102283.0__0612_2010 +3 -0
  15. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep50adamlr8e-05wd0.05/events.out.tfevents.1750040923.slz-gpu2-0.20209.0__0616_1028 +3 -0
  16. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep81adamlr8e-05wd0.05/events.out.tfevents.1750040357.slz-gpu2-0.18949.0__0616_1019 +3 -0
  17. local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep83adamlr8e-05wd0.05/events.out.tfevents.1750040814.slz-gpu2-0.19484.0__0616_1026 +3 -0
  18. metric/FID_score.py +355 -0
  19. metric/clip_score.py +76 -0
  20. metric/eval_is.py +69 -0
  21. metric/inception.py +344 -0
  22. models/__init__.py +40 -0
  23. models/__pycache__/__init__.cpython-310.pyc +0 -0
  24. models/__pycache__/basic_vae.cpython-310.pyc +0 -0
  25. models/__pycache__/basic_var.cpython-310.pyc +0 -0
  26. models/__pycache__/helpers.cpython-310.pyc +0 -0
  27. models/__pycache__/quant.cpython-310.pyc +0 -0
  28. models/__pycache__/var.cpython-310.pyc +0 -0
  29. models/__pycache__/vqvae.cpython-310.pyc +0 -0
  30. models/basic_vae.py +226 -0
  31. models/basic_var.py +406 -0
  32. models/helpers.py +59 -0
  33. models/new_gpt_t2i.py +569 -0
  34. models/quant.py +243 -0
  35. models/var.py +429 -0
  36. models/vqvae.py +96 -0
  37. requirements.txt +41 -0
  38. train.py +575 -0
  39. trainer.py +262 -0
  40. utils/amp_sc.py +94 -0
  41. utils/arg_util.py +296 -0
  42. utils/canny.py +16 -0
  43. utils/data.py +55 -0
  44. utils/data_sampler.py +103 -0
  45. utils/freeze_utils.py +38 -0
  46. utils/lr_control.py +108 -0
  47. utils/misc.py +385 -0
  48. utils/model_args.py +36 -0
  49. utils/t2i_control.py +200 -0
  50. vae_ch160v4096z32.pth +3 -0
.gitignore ADDED
@@ -0,0 +1,20 @@
1
+ *.swp
2
+ **/__pycache__/**
3
+ **/.ipynb_checkpoints/**
4
+ .DS_Store
5
+ .idea/*
6
+ .vscode/*
7
+ llava/
8
+ _vis_cached/
9
+ _auto_*
10
+ ckpt/
11
+ log/
12
+ tb*/
13
+ img*/
14
+ local_output*
15
+ *.pth
16
+ *.pth.tar
17
+ *.ckpt
18
+ *.log
19
+ *.txt
20
+ *.ipynb
LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 FoundationVision
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,217 @@
1
+ # VAR: a new visual generation method elevates GPT-style models beyond diffusion🚀 & Scaling laws observed📈
2
+
3
+ <div align="center">
4
+
5
+ [![demo platform](https://img.shields.io/badge/Play%20with%20VAR%21-VAR%20demo%20platform-lightblue)](https://opensource.bytedance.com/gmpt/t2i/invite)&nbsp;
6
+ [![arXiv](https://img.shields.io/badge/arXiv%20paper-2404.02905-b31b1b.svg)](https://arxiv.org/abs/2404.02905)&nbsp;
7
+ [![huggingface weights](https://img.shields.io/badge/%F0%9F%A4%97%20Weights-FoundationVision/var-yellow)](https://huggingface.co/FoundationVision/var)&nbsp;
8
+ [![SOTA](https://img.shields.io/badge/State%20of%20the%20Art-Image%20Generation%20on%20ImageNet%20%28AR%29-32B1B4?logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB3aWR0aD0iNjA2IiBoZWlnaHQ9IjYwNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgb3ZlcmZsb3c9ImhpZGRlbiI%2BPGRlZnM%2BPGNsaXBQYXRoIGlkPSJjbGlwMCI%2BPHJlY3QgeD0iLTEiIHk9Ii0xIiB3aWR0aD0iNjA2IiBoZWlnaHQ9IjYwNiIvPjwvY2xpcFBhdGg%2BPC9kZWZzPjxnIGNsaXAtcGF0aD0idXJsKCNjbGlwMCkiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDEgMSkiPjxyZWN0IHg9IjUyOSIgeT0iNjYiIHdpZHRoPSI1NiIgaGVpZ2h0PSI0NzMiIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSIxOSIgeT0iNjYiIHdpZHRoPSI1NyIgaGVpZ2h0PSI0NzMiIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSIyNzQiIHk9IjE1MSIgd2lkdGg9IjU3IiBoZWlnaHQ9IjMwMiIgZmlsbD0iIzQ0RjJGNiIvPjxyZWN0IHg9IjEwNCIgeT0iMTUxIiB3aWR0aD0iNTciIGhlaWdodD0iMzAyIiBmaWxsPSIjNDRGMkY2Ii8%2BPHJlY3QgeD0iNDQ0IiB5PSIxNTEiIHdpZHRoPSI1NyIgaGVpZ2h0PSIzMDIiIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSIzNTkiIHk9IjE3MCIgd2lkdGg9IjU2IiBoZWlnaHQ9IjI2NCIgZmlsbD0iIzQ0RjJGNiIvPjxyZWN0IHg9IjE4OCIgeT0iMTcwIiB3aWR0aD0iNTciIGhlaWdodD0iMjY0IiBmaWxsPSIjNDRGMkY2Ii8%2BPHJlY3QgeD0iNzYiIHk9IjY2IiB3aWR0aD0iNDciIGhlaWdodD0iNTciIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSI0ODIiIHk9IjY2IiB3aWR0aD0iNDciIGhlaWdodD0iNTciIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSI3NiIgeT0iNDgyIiB3aWR0aD0iNDciIGhlaWdodD0iNTciIGZpbGw9IiM0NEYyRjYiLz48cmVjdCB4PSI0ODIiIHk9IjQ4MiIgd2lkdGg9IjQ3IiBoZWlnaHQ9IjU3IiBmaWxsPSIjNDRGMkY2Ii8%2BPC9nPjwvc3ZnPg%3D%3D)](https://paperswithcode.com/sota/image-generation-on-imagenet-256x256?tag_filter=485&p=visual-autoregressive-modeling-scalable-image)
9
+
10
+
11
+ </div>
12
+ <p align="center" style="font-size: larger;">
13
+ <a href="https://arxiv.org/abs/2404.02905">Visual Autoregressive Modeling: Scalable Image Generation via Next-Scale Prediction</a>
14
+ </p>
15
+
16
+ <div>
17
+ <p align="center" style="font-size: larger;">
18
+ <strong>NeurIPS 2024 Best Paper</strong>
19
+ </p>
20
+ </div>
21
+
22
+ <p align="center">
23
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/9850df90-20b1-4f29-8592-e3526d16d755" width=95%>
24
+ <p>
25
+
26
+ <br>
27
+
28
+ ## News
29
+
30
+ * **2024-12:** 🏆 VAR received **NeurIPS 2024 Best Paper Award**.
31
+ * **2024-12:** 🔥 We Release our Text-to-Image research based on VAR, please check [Infinity](https://github.com/FoundationVision/Infinity).
32
+ * **2024-09:** VAR is accepted as a **NeurIPS 2024 Oral** presentation.
33
+ * **2024-04:** [Visual AutoRegressive modeling](https://github.com/FoundationVision/VAR) is released.
34
+
35
+ ## 🕹️ Try and Play with VAR!
36
+
37
+ ~~We provide a [demo website](https://var.vision/demo) for you to play with VAR models and generate images interactively. Enjoy the fun of visual autoregressive modeling!~~
38
+
39
+ We provide a [demo website](https://opensource.bytedance.com/gmpt/t2i/invite) for you to play with VAR Text-to-Image and generate images interactively. Enjoy the fun of visual autoregressive modeling!
40
+
41
+ We also provide [demo_sample.ipynb](demo_sample.ipynb) for you to see more technical details about VAR.
42
+
43
+ [//]: # (<p align="center">)
44
+ [//]: # (<img src="https://user-images.githubusercontent.com/39692511/226376648-3f28a1a6-275d-4f88-8f3e-cd1219882488.png" width=50%)
45
+ [//]: # (<p>)
46
+
47
+
48
+ ## What's New?
49
+
50
+ ### 🔥 Introducing VAR: a new paradigm in autoregressive visual generation✨:
51
+
52
+ Visual Autoregressive Modeling (VAR) redefines autoregressive learning on images as coarse-to-fine "next-scale prediction" (or "next-resolution prediction"), diverging from the standard raster-scan "next-token prediction".
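To make the contrast concrete, here is a minimal, illustrative sketch with dummy tensors (not the repository's implementation, which lives in `models/var.py`): raster-scan AR takes one sequential step per token, while next-scale AR takes one sequential step per scale, predicting all tokens of that scale in parallel conditioned on the coarser scales.

```python
# Illustrative sketch only; shapes are dummies and no real model is involved.
import torch

patch_nums = (1, 2, 3, 4)   # token-map side lengths, coarse -> fine
C = 16                      # dummy embedding width

# Raster-scan next-token prediction: sum(pn*pn) sequential steps.
total_tokens = sum(pn * pn for pn in patch_nums)

# Next-scale prediction: len(patch_nums) sequential steps.
context = []
for pn in patch_nums:
    cond = torch.cat(context, dim=1) if context else torch.zeros(1, 0, C)
    # a transformer would turn `cond` into logits for all pn*pn tokens of this scale
    new_scale = torch.randn(1, pn * pn, C)   # stand-in for the model's parallel prediction
    context.append(new_scale)

print(f"{total_tokens} token-by-token steps vs. {len(patch_nums)} scale-by-scale steps")
```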
53
+
54
+ <p align="center">
55
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/3e12655c-37dc-4528-b923-ec6c4cfef178" width=93%>
56
+ <p>
57
+
58
+ ### 🔥 For the first time, GPT-style autoregressive models surpass diffusion models🚀:
59
+ <p align="center">
60
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/cc30b043-fa4e-4d01-a9b1-e50650d5675d" width=55%>
61
+ <p>
62
+
63
+
64
+ ### 🔥 Discovering power-law Scaling Laws in VAR transformers📈:
65
+
66
+
67
+ <p align="center">
68
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/c35fb56e-896e-4e4b-9fb9-7a1c38513804" width=85%>
69
+ <p>
70
+ <p align="center">
71
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/91d7b92c-8fc3-44d9-8fb4-73d6cdb8ec1e" width=85%>
72
+ <p>
73
+
74
+
75
+ ### 🔥 Zero-shot generalizability🛠️:
76
+
77
+ <p align="center">
78
+ <img src="https://github.com/FoundationVision/VAR/assets/39692511/a54a4e52-6793-4130-bae2-9e459a08e96a" width=70%>
79
+ <p>
80
+
81
+ #### For a deep dive into our analyses, discussions, and evaluations, check out our [paper](https://arxiv.org/abs/2404.02905).
82
+
83
+
84
+ ## VAR zoo
85
+ We provide VAR models for you to play with, which are on <a href='https://huggingface.co/FoundationVision/var'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Huggingface-FoundationVision/var-yellow'></a> or can be downloaded from the following links:
86
+
87
+ | model | reso. | FID | rel. cost | #params | HF weights🤗 |
88
+ |:----------:|:-----:|:--------:|:---------:|:-------:|:------------------------------------------------------------------------------------|
89
+ | VAR-d16 | 256 | 3.55 | 0.4 | 310M | [var_d16.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d16.pth) |
90
+ | VAR-d20 | 256 | 2.95 | 0.5 | 600M | [var_d20.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d20.pth) |
91
+ | VAR-d24 | 256 | 2.33 | 0.6 | 1.0B | [var_d24.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d24.pth) |
92
+ | VAR-d30 | 256 | 1.97 | 1 | 2.0B | [var_d30.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d30.pth) |
93
+ | VAR-d30-re | 256 | **1.80** | 1 | 2.0B | [var_d30.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d30.pth) |
94
+ | VAR-d36 | 512 | **2.63** | - | 2.3B | [var_d36.pth](https://huggingface.co/FoundationVision/var/resolve/main/var_d36.pth) |
95
+
96
+ You can load these models to generate images via the code in [demo_sample.ipynb](demo_sample.ipynb). Note: you need to download [vae_ch160v4096z32.pth](https://huggingface.co/FoundationVision/var/resolve/main/vae_ch160v4096z32.pth) first.
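For reference, the checkpoint-loading logic in [demo_sample.ipynb](demo_sample.ipynb) (included in this commit) boils down to roughly the following; `MODEL_DEPTH` and the download paths are placeholders you should adjust:

```python
# Condensed from demo_sample.ipynb; adjust MODEL_DEPTH and paths as needed.
import os, os.path as osp, torch
from models import build_vae_var

MODEL_DEPTH = 16                                   # one of {16, 20, 24, 30}
hf_home = 'https://huggingface.co/FoundationVision/var/resolve/main'
vae_ckpt, var_ckpt = 'vae_ch160v4096z32.pth', f'var_d{MODEL_DEPTH}.pth'
for ckpt in (vae_ckpt, var_ckpt):
    if not osp.exists(ckpt):
        os.system(f'wget {hf_home}/{ckpt}')        # download once

device = 'cuda' if torch.cuda.is_available() else 'cpu'
vae, var = build_vae_var(
    V=4096, Cvae=32, ch=160, share_quant_resi=4,   # hard-coded VQVAE hyperparameters
    device=device, patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16),
    num_classes=1000, depth=MODEL_DEPTH, shared_aln=False,
)
vae.load_state_dict(torch.load(vae_ckpt, map_location='cpu'), strict=True)
var.load_state_dict(torch.load(var_ckpt, map_location='cpu'), strict=True)
vae.eval(); var.eval()
```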
97
+
98
+
99
+ ## Installation
100
+
101
+ 1. Install `torch>=2.0.0`.
102
+ 2. Install other pip packages via `pip3 install -r requirements.txt`.
103
+ 3. Prepare the [ImageNet](http://image-net.org/) dataset
104
+ <details>
105
+ <summary> Assuming ImageNet is in `/path/to/imagenet`, it should look like this:</summary>
106
+
107
+ ```
108
+ /path/to/imagenet/:
109
+ train/:
110
+ n01440764:
111
+ many_images.JPEG ...
112
+ n01443537:
113
+ many_images.JPEG ...
114
+ val/:
115
+ n01440764:
116
+ ILSVRC2012_val_00000293.JPEG ...
117
+ n01443537:
118
+ ILSVRC2012_val_00000236.JPEG ...
119
+ ```
120
+ **NOTE: The arg `--data_path=/path/to/imagenet` should be passed to the training script.**
121
+ </details>
122
+
123
+ 4. (Optional) Install and compile `flash-attn` and `xformers` for faster attention computation. Our code will automatically use them if installed; see [models/basic_var.py#L15-L30](models/basic_var.py#L15-L30). A minimal sketch of this optional-import pattern is shown below.
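The actual detection logic lives in [models/basic_var.py#L15-L30](models/basic_var.py#L15-L30); as a hedged sketch (the flag names and fallback below are illustrative, not the repository's exact code), an optional fast-attention backend is typically picked up like this:

```python
# Hedged sketch of optional fast-attention backends; see models/basic_var.py for the real logic.
try:
    from flash_attn import flash_attn_func      # available only if flash-attn is installed
    HAS_FLASH_ATTN = True
except ImportError:
    flash_attn_func, HAS_FLASH_ATTN = None, False

try:
    import xformers.ops as xops                 # available only if xformers is installed
    HAS_XFORMERS = True
except ImportError:
    xops, HAS_XFORMERS = None, False

# Attention modules can branch on these flags and otherwise fall back to
# torch.nn.functional.scaled_dot_product_attention.
```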
124
+
125
+
126
+ ## Training Scripts
127
+
128
+ To train VAR-{d16, d20, d24, d30, d36-s} on ImageNet 256x256 or 512x512, you can run the following command:
129
+ ```shell
130
+ # d16, 256x256
131
+ torchrun --nproc_per_node=8 --nnodes=... --node_rank=... --master_addr=... --master_port=... train.py \
132
+ --depth=16 --bs=768 --ep=200 --fp16=1 --alng=1e-3 --wpe=0.1
133
+ # d20, 256x256
134
+ torchrun --nproc_per_node=8 --nnodes=... --node_rank=... --master_addr=... --master_port=... train.py \
135
+ --depth=20 --bs=768 --ep=250 --fp16=1 --alng=1e-3 --wpe=0.1
136
+ # d24, 256x256
137
+ torchrun --nproc_per_node=8 --nnodes=... --node_rank=... --master_addr=... --master_port=... train.py \
138
+ --depth=24 --bs=768 --ep=350 --tblr=8e-5 --fp16=1 --alng=1e-4 --wpe=0.01
139
+ # d30, 256x256
140
+ torchrun --nproc_per_node=8 --nnodes=... --node_rank=... --master_addr=... --master_port=... train.py \
141
+ --depth=30 --bs=1024 --ep=350 --tblr=8e-5 --fp16=1 --alng=1e-5 --wpe=0.01 --twde=0.08
142
+ # d36-s, 512x512 (-s means saln=1, shared AdaLN)
143
+ torchrun --nproc_per_node=8 --nnodes=... --node_rank=... --master_addr=... --master_port=... train.py \
144
+ --depth=36 --saln=1 --pn=512 --bs=768 --ep=350 --tblr=8e-5 --fp16=1 --alng=5e-6 --wpe=0.01 --twde=0.08
145
+ ```
146
+ A folder named `local_output` will be created to save the checkpoints and logs.
147
+ You can monitor the training process by checking the logs in `local_output/log.txt` and `local_output/stdout.txt`, or using `tensorboard --logdir=local_output/`.
148
+
149
+ If your experiment is interrupted, just rerun the command, and the training will **automatically resume** from the last checkpoint in `local_output/ckpt*.pth` (see [utils/misc.py#L344-L357](utils/misc.py#L344-L357)).
150
+
151
+ ## Sampling & Zero-shot Inference
152
+
153
+ For FID evaluation, use `var.autoregressive_infer_cfg(..., cfg=1.5, top_p=0.96, top_k=900, more_smooth=False)` to sample 50,000 images (50 per class) and save them as PNG (not JPEG) files in a folder. Pack them into a `.npz` file via `create_npz_from_sample_folder(sample_folder)` in [utils/misc.py#L360](utils/misc.py#L360).
154
+ Then use [OpenAI's FID evaluation toolkit](https://github.com/openai/guided-diffusion/tree/main/evaluations) and the reference ground-truth npz file for [256x256](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/VIRTUAL_imagenet256_labeled.npz) or [512x512](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/VIRTUAL_imagenet512.npz) to evaluate FID, IS, precision, and recall.
155
+
156
+ Note that a relatively small `cfg=1.5` is used to trade off image quality and diversity. You can adjust it to `cfg=5.0`, or sample with `autoregressive_infer_cfg(..., more_smooth=True)` for **better visual quality**.
157
+ We'll provide the sampling script later.
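A hedged sketch of that evaluation loop follows; it assumes `var` is a loaded VAR model (as in [demo_sample.ipynb](demo_sample.ipynb)), and the folder name and the exact `create_npz_from_sample_folder` signature are assumptions based on the description above:

```python
# Illustrative only: sample 50 PNGs per ImageNet class, then pack them into an .npz.
import os, torch, torchvision
from utils.misc import create_npz_from_sample_folder   # signature assumed from the text above

sample_folder, n_per_class, idx = 'samples_50k', 50, 0
os.makedirs(sample_folder, exist_ok=True)
with torch.inference_mode():
    for cls in range(1000):
        label_B = torch.full((n_per_class,), cls, device='cuda')
        imgs = var.autoregressive_infer_cfg(
            B=n_per_class, label_B=label_B,
            cfg=1.5, top_p=0.96, top_k=900, more_smooth=False,
        )
        for img in imgs:                                # save as PNG, not JPEG
            torchvision.utils.save_image(img, f'{sample_folder}/{idx:06d}.png')
            idx += 1

create_npz_from_sample_folder(sample_folder)            # produces the .npz for the FID toolkit
```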
158
+
159
+
160
+ ## Third-party Usage and Research
161
+
162
+ ***In this paragraph, we cross-link third-party repositories or research that use VAR and report results. You can let us know by raising an issue.***
163
+
164
+ (`Note: please report accuracy numbers and provide trained models in your new repository, to help others get a sense of correctness and model behavior.`)
165
+
166
+ | **Time** | **Research** | **Link** |
167
+ |--------------|--------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
168
+ | [12/30/2024] | Next Token Prediction Towards Multimodal Intelligence | https://github.com/LMM101/Awesome-Multimodal-Next-Token-Prediction |
169
+ | [12/30/2024] | Varformer: Adapting VAR’s Generative Prior for Image Restoration |https://arxiv.org/abs/2412.21063 |
170
+ | [12/22/2024] | Distilled Decoding 1: One-step Sampling of Image Auto-regressive Models with Flow Matching | https://github.com/imagination-research/distilled-decoding |
171
+ | [12/19/2024] | FlowAR: Scale-wise Autoregressive Image Generation Meets Flow Matching | https://github.com/OliverRensu/FlowAR |
172
+ | [12/13/2024] | 3D representation in 512-Byte: Variational tokenizer is the key for autoregressive 3D generation | https://github.com/sparse-mvs-2/VAT |
173
+ | [12/9/2024] | CARP: Visuomotor Policy Learning via Coarse-to-Fine Autoregressive Prediction | https://carp-robot.github.io/ |
174
+ | [12/5/2024] | Infinity ∞: Scaling Bitwise AutoRegressive Modeling for High-Resolution Image Synthesis | https://github.com/FoundationVision/Infinity |
175
+ | [12/5/2024] | Switti: Designing Scale-Wise Transformers for Text-to-Image Synthesis | https://github.com/yandex-research/switti |
176
+ | [12/4/2024] | TokenFlow🚀: Unified Image Tokenizer for Multimodal Understanding and Generation | https://github.com/ByteFlow-AI/TokenFlow |
177
+ | [12/3/2024] | XQ-GAN🚀: An Open-source Image Tokenization Framework for Autoregressive Generation | https://github.com/lxa9867/ImageFolder |
178
+ | [11/28/2024] | CoDe: Collaborative Decoding Makes Visual Auto-Regressive Modeling Efficient | https://github.com/czg1225/CoDe |
179
+ | [11/28/2024] | Scalable Autoregressive Monocular Depth Estimation | https://arxiv.org/abs/2411.11361 |
180
+ | [11/27/2024] | SAR3D: Autoregressive 3D Object Generation and Understanding via Multi-scale 3D VQVAE | https://github.com/cyw-3d/SAR3D |
181
+ | [11/26/2024] | LiteVAR: Compressing Visual Autoregressive Modelling with Efficient Attention and Quantization | https://arxiv.org/abs/2411.17178 |
182
+ | [11/15/2024] | M-VAR: Decoupled Scale-wise Autoregressive Modeling for High-Quality Image Generation | https://github.com/OliverRensu/MVAR |
183
+ | [10/14/2024] | HART: Efficient Visual Generation with Hybrid Autoregressive Transformer | https://github.com/mit-han-lab/hart |
184
+ | [10/3/2024] | ImageFolder🚀: Autoregressive Image Generation with Folded Tokens | https://github.com/lxa9867/ImageFolder |
185
+ | [07/25/2024] | ControlVAR: Exploring Controllable Visual Autoregressive Modeling | https://github.com/lxa9867/ControlVAR |
186
+ | [07/3/2024] | VAR-CLIP: Text-to-Image Generator with Visual Auto-Regressive Modeling | https://github.com/daixiangzi/VAR-CLIP |
187
+ | [06/16/2024] | STAR: Scale-wise Text-to-image generation via Auto-Regressive representations | https://arxiv.org/abs/2406.10797 |
188
+
189
+
190
+ ## License
191
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
192
+
193
+
194
+ ## Citation
195
+ If our work assists your research, feel free to give us a star ⭐ or cite us using:
196
+ ```
197
+ @Article{VAR,
198
+ title={Visual Autoregressive Modeling: Scalable Image Generation via Next-Scale Prediction},
199
+ author={Keyu Tian and Yi Jiang and Zehuan Yuan and Bingyue Peng and Liwei Wang},
200
+ year={2024},
201
+ eprint={2404.02905},
202
+ archivePrefix={arXiv},
203
+ primaryClass={cs.CV}
204
+ }
205
+ ```
206
+
207
+ ```
208
+ @misc{Infinity,
209
+ title={Infinity: Scaling Bitwise AutoRegressive Modeling for High-Resolution Image Synthesis},
210
+ author={Jian Han and Jinlai Liu and Yi Jiang and Bin Yan and Yuqi Zhang and Zehuan Yuan and Bingyue Peng and Xiaobing Liu},
211
+ year={2024},
212
+ eprint={2412.04431},
213
+ archivePrefix={arXiv},
214
+ primaryClass={cs.CV},
215
+ url={https://arxiv.org/abs/2412.04431},
216
+ }
217
+ ```
VAR.code-workspace ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ "folders": [
3
+ {
4
+ "path": "."
5
+ },
6
+ {
7
+ "path": "../ControlAR_old"
8
+ }
9
+ ]
10
+ }
a-next_token.py ADDED
@@ -0,0 +1,101 @@
1
+ import math
2
+ import random
3
+ from functools import partial
4
+ from typing import Optional, Tuple, Union
5
+ from itertools import chain
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.nn import functional as F
10
+
11
+ from models.basic_var import AdaLNBeforeHead, AdaLNSelfAttn
12
+ from models.helpers import gumbel_softmax_with_rng, sample_with_top_k_top_p_
13
+ from models.vqvae import VQVAE, VectorQuantizer2
14
+
15
+
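# Scratch walkthrough of VAR's next-scale autoregressive input preparation: a tiny
# patch_nums pyramid is used below so the intermediate token-map shapes are easy to print.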
16
+ patch_nums=(1, 2, 3)
17
+ L = sum(pn ** 2 for pn in patch_nums)
18
+ first_l = patch_nums[0] ** 2
19
+ num_stages_minus_1 = len(patch_nums) - 1
20
+
21
+
22
+ B=8
23
+ C=1024
24
+ Cvae=32
25
+ init_std = math.sqrt(1 / C / 3)
26
+ num_classes=1000
27
+ class_emb = nn.Embedding(num_classes + 1, C)
28
+
29
+ # input
30
+ vae_local = VQVAE()
31
+ quant: VectorQuantizer2 = vae_local.quantize
32
+ vae_proxy: Tuple[VQVAE] = (vae_local,)
33
+ vae_quant_proxy: Tuple[VectorQuantizer2] = (quant,)
34
+ word_embed = nn.Linear(Cvae, C)
35
+ uniform_prob = torch.full((1, num_classes), fill_value=1.0 / num_classes, dtype=torch.float32)
36
+
37
+ # Initialize random number generator
38
+ rng = torch.Generator()
39
+ rng.manual_seed(42) # Set a fixed seed for reproducibility
40
+
41
+ label_B = torch.multinomial(uniform_prob, num_samples=B, replacement=True, generator=rng).reshape(B)
42
+
43
+ sos = cond_BD = class_emb(torch.cat((label_B, torch.full_like(label_B, fill_value=num_classes)), dim=0))
44
+
45
+ # absolute position embedding
46
+ pos_1LC = []
47
+ for i, pn in enumerate(patch_nums):
48
+ pe = torch.empty(1, pn*pn, C)
49
+ nn.init.trunc_normal_(pe, mean=0, std=init_std)
50
+ pos_1LC.append(pe)
51
+ pos_1LC = torch.cat(pos_1LC, dim=1) # 1, L, C
52
+ assert tuple(pos_1LC.shape) == (1, L, C)
53
+ pos_1LC = nn.Parameter(pos_1LC)
54
+ # level embedding (similar to GPT's segment embedding, used to distinguish different levels of token pyramid)
55
+ lvl_embed = nn.Embedding(len(patch_nums), C)
56
+ nn.init.trunc_normal_(lvl_embed.weight.data, mean=0, std=init_std)
57
+
58
+ pos_start = nn.Parameter(torch.empty(1, first_l, C))
59
+
60
+ # attention mask
61
+ d: torch.Tensor = torch.cat([torch.full((pn*pn,), i) for i, pn in enumerate(patch_nums)]).view(1, L, 1)
62
+ dT = d.transpose(1, 2) # dT: 11L
63
+ lvl_1L = dT[:, 0].contiguous()
64
+ attn_bias_for_masking = torch.where(d >= dT, 0., -torch.inf).reshape(1, 1, L, L)
65
+
66
+
67
+ lvl_pos = lvl_embed(lvl_1L) + pos_1LC
68
+ next_token_map = sos.unsqueeze(1).expand(2 * B, first_l, -1) + pos_start.expand(2 * B, first_l, -1) + lvl_pos[:, :first_l]
69
+
70
+ cur_L = 0
71
+ f_hat = sos.new_zeros(B, Cvae, patch_nums[-1], patch_nums[-1])
72
+
73
+
74
+ for si, pn in enumerate(patch_nums): # si: i-th segment
75
+ print("si pn")
76
+ print(si, pn)
77
+ print()
78
+ ratio = si / num_stages_minus_1
79
+ # last_L = cur_L
80
+ print("cur_L")
81
+ cur_L += pn*pn
82
+ print(cur_L)
83
+ print()
84
+
85
+
86
+ h_BChw = torch.randn(B, pn * pn, Cvae)  # stand-in for the Cvae-dim embeddings sampled at this scale
87
+ h_BChw = h_BChw.transpose_(1, 2).reshape(B, Cvae, pn, pn)
88
+ f_hat, next_token_map = vae_quant_proxy[0].get_next_autoregressive_input(si, len(patch_nums), f_hat, h_BChw)
89
+
90
+ if si != num_stages_minus_1: # prepare for next stage
91
+ next_token_map = next_token_map.view(B, Cvae, -1).transpose(1, 2)
92
+ print(next_token_map)
93
+ print()
94
+ next_token_map = word_embed(next_token_map) + lvl_pos[:, cur_L:cur_L + patch_nums[si+1] ** 2]
95
+ print(next_token_map)
96
+ print()
97
+ next_token_map = next_token_map.repeat(2, 1, 1) # double the batch sizes due to CFG
98
+ print(next_token_map)
99
+ print()
100
+
101
+ #print(f_hat)
demo_sample.ipynb ADDED
@@ -0,0 +1,127 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "source": [
6
+ "### 🚀 For an interactive experience, head over to our [demo platform](https://var.vision/demo) and dive right in! 🌟"
7
+ ],
8
+ "metadata": {
9
+ "collapsed": false
10
+ }
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": null,
15
+ "outputs": [],
16
+ "source": [
17
+ "################## 1. Download checkpoints and build models\n",
18
+ "import os\n",
19
+ "import os.path as osp\n",
20
+ "import torch, torchvision\n",
21
+ "import random\n",
22
+ "import numpy as np\n",
23
+ "import PIL.Image as PImage, PIL.ImageDraw as PImageDraw\n",
24
+ "setattr(torch.nn.Linear, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed\n",
25
+ "setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed\n",
26
+ "from models import VQVAE, build_vae_var\n",
27
+ "\n",
28
+ "MODEL_DEPTH = 16 # TODO: =====> please specify MODEL_DEPTH <=====\n",
29
+ "assert MODEL_DEPTH in {16, 20, 24, 30}\n",
30
+ "\n",
31
+ "\n",
32
+ "# download checkpoint\n",
33
+ "hf_home = 'https://huggingface.co/FoundationVision/var/resolve/main'\n",
34
+ "vae_ckpt, var_ckpt = 'vae_ch160v4096z32.pth', f'var_d{MODEL_DEPTH}.pth'\n",
35
+ "if not osp.exists(vae_ckpt): os.system(f'wget {hf_home}/{vae_ckpt}')\n",
36
+ "if not osp.exists(var_ckpt): os.system(f'wget {hf_home}/{var_ckpt}')\n",
37
+ "\n",
38
+ "# build vae, var\n",
39
+ "patch_nums = (1, 2, 3, 4, 5, 6, 8, 10, 13, 16)\n",
40
+ "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
41
+ "if 'vae' not in globals() or 'var' not in globals():\n",
42
+ " vae, var = build_vae_var(\n",
43
+ " V=4096, Cvae=32, ch=160, share_quant_resi=4, # hard-coded VQVAE hyperparameters\n",
44
+ " device=device, patch_nums=patch_nums,\n",
45
+ " num_classes=1000, depth=MODEL_DEPTH, shared_aln=False,\n",
46
+ " )\n",
47
+ "\n",
48
+ "# load checkpoints\n",
49
+ "vae.load_state_dict(torch.load(vae_ckpt, map_location='cpu'), strict=True)\n",
50
+ "var.load_state_dict(torch.load(var_ckpt, map_location='cpu'), strict=True)\n",
51
+ "vae.eval(), var.eval()\n",
52
+ "for p in vae.parameters(): p.requires_grad_(False)\n",
53
+ "for p in var.parameters(): p.requires_grad_(False)\n",
54
+ "print(f'prepare finished.')"
55
+ ],
56
+ "metadata": {
57
+ "collapsed": false,
58
+ "is_executing": true
59
+ }
60
+ },
61
+ {
62
+ "cell_type": "code",
63
+ "execution_count": null,
64
+ "outputs": [],
65
+ "source": [
66
+ "############################# 2. Sample with classifier-free guidance\n",
67
+ "\n",
68
+ "# set args\n",
69
+ "seed = 0 #@param {type:\"number\"}\n",
70
+ "torch.manual_seed(seed)\n",
71
+ "num_sampling_steps = 250 #@param {type:\"slider\", min:0, max:1000, step:1}\n",
72
+ "cfg = 4 #@param {type:\"slider\", min:1, max:10, step:0.1}\n",
73
+ "class_labels = (980, 980, 437, 437, 22, 22, 562, 562) #@param {type:\"raw\"}\n",
74
+ "more_smooth = False # True for more smooth output\n",
75
+ "\n",
76
+ "# seed\n",
77
+ "torch.manual_seed(seed)\n",
78
+ "random.seed(seed)\n",
79
+ "np.random.seed(seed)\n",
80
+ "torch.backends.cudnn.deterministic = True\n",
81
+ "torch.backends.cudnn.benchmark = False\n",
82
+ "\n",
83
+ "# run faster\n",
84
+ "tf32 = True\n",
85
+ "torch.backends.cudnn.allow_tf32 = bool(tf32)\n",
86
+ "torch.backends.cuda.matmul.allow_tf32 = bool(tf32)\n",
87
+ "torch.set_float32_matmul_precision('high' if tf32 else 'highest')\n",
88
+ "\n",
89
+ "# sample\n",
90
+ "B = len(class_labels)\n",
91
+ "label_B: torch.LongTensor = torch.tensor(class_labels, device=device)\n",
92
+ "with torch.inference_mode():\n",
93
+ " with torch.autocast('cuda', enabled=True, dtype=torch.float16, cache_enabled=True): # using bfloat16 can be faster\n",
94
+ " recon_B3HW = var.autoregressive_infer_cfg(B=B, label_B=label_B, cfg=cfg, top_k=900, top_p=0.95, g_seed=seed, more_smooth=more_smooth)\n",
95
+ "\n",
96
+ "chw = torchvision.utils.make_grid(recon_B3HW, nrow=8, padding=0, pad_value=1.0)\n",
97
+ "chw = chw.permute(1, 2, 0).mul_(255).cpu().numpy()\n",
98
+ "chw = PImage.fromarray(chw.astype(np.uint8))\n",
99
+ "chw.show()\n"
100
+ ],
101
+ "metadata": {
102
+ "collapsed": false
103
+ }
104
+ }
105
+ ],
106
+ "metadata": {
107
+ "kernelspec": {
108
+ "display_name": "Python 3",
109
+ "language": "python",
110
+ "name": "python3"
111
+ },
112
+ "language_info": {
113
+ "codemirror_mode": {
114
+ "name": "ipython",
115
+ "version": 2
116
+ },
117
+ "file_extension": ".py",
118
+ "mimetype": "text/x-python",
119
+ "name": "python",
120
+ "nbconvert_exporter": "python",
121
+ "pygments_lexer": "ipython2",
122
+ "version": "2.7.6"
123
+ }
124
+ },
125
+ "nbformat": 4,
126
+ "nbformat_minor": 0
127
+ }
demo_zero_shot_edit.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
dist.py ADDED
@@ -0,0 +1,214 @@
1
+ import datetime
2
+ import functools
3
+ import os
4
+ import sys
5
+ from typing import List
6
+ from typing import Union
7
+
8
+ import torch
9
+ import torch.distributed as tdist
10
+ import torch.multiprocessing as mp
11
+
12
+ __rank, __local_rank, __world_size, __device = 0, 0, 1, 'cuda' if torch.cuda.is_available() else 'cpu'
13
+ __initialized = False
14
+
15
+
16
+ def initialized():
17
+ return __initialized
18
+
19
+
20
+ def initialize(fork=False, backend='nccl', gpu_id_if_not_distibuted=0, timeout=30):
21
+ global __device, __initialized
22
+ if torch.distributed.is_initialized():
23
+ print("[dist initialize] skipped because already initialized")
24
+ return
25
+ if not torch.cuda.is_available():
26
+ print(f'[dist initialize] cuda is not available, use cpu instead', file=sys.stderr)
27
+ return
28
+ elif 'RANK' not in os.environ:
29
+ torch.cuda.set_device(gpu_id_if_not_distibuted)
30
+ __device = torch.empty(1).cuda().device
31
+ print(f'[dist initialize] env variable "RANK" is not set, use {__device} as the device', file=sys.stderr)
32
+ return
33
+ # then 'RANK' must exist
34
+ global_rank, num_gpus = int(os.environ['RANK']), torch.cuda.device_count()
35
+ local_rank = global_rank % num_gpus
36
+ torch.cuda.set_device(local_rank)
37
+
38
+ # ref: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py#L29
39
+ if mp.get_start_method(allow_none=True) is None:
40
+ method = 'fork' if fork else 'spawn'
41
+ print(f'[dist initialize] mp method={method}')
42
+ mp.set_start_method(method)
43
+ tdist.init_process_group(backend=backend, timeout=datetime.timedelta(seconds=timeout*60))
44
+
45
+ global __rank, __local_rank, __world_size, __initialized
46
+ __local_rank = local_rank
47
+ __rank, __world_size = tdist.get_rank(), tdist.get_world_size()
48
+ __device = torch.empty(1).cuda().device
49
+ __initialized = True
50
+
51
+ assert tdist.is_initialized(), 'torch.distributed is not initialized!'
52
+ print(f'[lrk={get_local_rank()}, rk={get_rank()}]')
53
+
54
+
55
+ def get_rank():
56
+ return __rank
57
+
58
+
59
+ def get_local_rank():
60
+ return __local_rank
61
+
62
+
63
+ def get_world_size():
64
+ return __world_size
65
+
66
+
67
+ def get_device():
68
+ return __device
69
+
70
+
71
+ def set_gpu_id(gpu_id: int):
72
+ if gpu_id is None: return
73
+ global __device
74
+ if isinstance(gpu_id, (str, int)):
75
+ torch.cuda.set_device(int(gpu_id))
76
+ __device = torch.empty(1).cuda().device
77
+ else:
78
+ raise NotImplementedError
79
+
80
+
81
+ def is_master():
82
+ return __rank == 0
83
+
84
+
85
+ def is_local_master():
86
+ return __local_rank == 0
87
+
88
+
89
+ def new_group(ranks: List[int]):
90
+ if __initialized:
91
+ return tdist.new_group(ranks=ranks)
92
+ return None
93
+
94
+
95
+ def barrier():
96
+ if __initialized:
97
+ tdist.barrier()
98
+
99
+
100
+ def allreduce(t: torch.Tensor, async_op=False):
101
+ if __initialized:
102
+ if not t.is_cuda:
103
+ cu = t.detach().cuda()
104
+ ret = tdist.all_reduce(cu, async_op=async_op)
105
+ t.copy_(cu.cpu())
106
+ else:
107
+ ret = tdist.all_reduce(t, async_op=async_op)
108
+ return ret
109
+ return None
110
+
111
+
112
+ def allgather(t: torch.Tensor, cat=True) -> Union[List[torch.Tensor], torch.Tensor]:
113
+ if __initialized:
114
+ if not t.is_cuda:
115
+ t = t.cuda()
116
+ ls = [torch.empty_like(t) for _ in range(__world_size)]
117
+ tdist.all_gather(ls, t)
118
+ else:
119
+ ls = [t]
120
+ if cat:
121
+ ls = torch.cat(ls, dim=0)
122
+ return ls
123
+
124
+
125
+ def allgather_diff_shape(t: torch.Tensor, cat=True) -> Union[List[torch.Tensor], torch.Tensor]:
126
+ if __initialized:
127
+ if not t.is_cuda:
128
+ t = t.cuda()
129
+
130
+ t_size = torch.tensor(t.size(), device=t.device)
131
+ ls_size = [torch.empty_like(t_size) for _ in range(__world_size)]
132
+ tdist.all_gather(ls_size, t_size)
133
+
134
+ max_B = max(size[0].item() for size in ls_size)
135
+ pad = max_B - t_size[0].item()
136
+ if pad:
137
+ pad_size = (pad, *t.size()[1:])
138
+ t = torch.cat((t, t.new_empty(pad_size)), dim=0)
139
+
140
+ ls_padded = [torch.empty_like(t) for _ in range(__world_size)]
141
+ tdist.all_gather(ls_padded, t)
142
+ ls = []
143
+ for t, size in zip(ls_padded, ls_size):
144
+ ls.append(t[:size[0].item()])
145
+ else:
146
+ ls = [t]
147
+ if cat:
148
+ ls = torch.cat(ls, dim=0)
149
+ return ls
150
+
151
+
152
+ def broadcast(t: torch.Tensor, src_rank) -> None:
153
+ if __initialized:
154
+ if not t.is_cuda:
155
+ cu = t.detach().cuda()
156
+ tdist.broadcast(cu, src=src_rank)
157
+ t.copy_(cu.cpu())
158
+ else:
159
+ tdist.broadcast(t, src=src_rank)
160
+
161
+
162
+ def dist_fmt_vals(val: float, fmt: Union[str, None] = '%.2f') -> Union[torch.Tensor, List]:
163
+ if not initialized():
164
+ return torch.tensor([val]) if fmt is None else [fmt % val]
165
+
166
+ ts = torch.zeros(__world_size)
167
+ ts[__rank] = val
168
+ allreduce(ts)
169
+ if fmt is None:
170
+ return ts
171
+ return [fmt % v for v in ts.cpu().numpy().tolist()]
172
+
173
+
174
+ def master_only(func):
175
+ @functools.wraps(func)
176
+ def wrapper(*args, **kwargs):
177
+ force = kwargs.pop('force', False)
178
+ if force or is_master():
179
+ ret = func(*args, **kwargs)
180
+ else:
181
+ ret = None
182
+ barrier()
183
+ return ret
184
+ return wrapper
185
+
186
+
187
+ def local_master_only(func):
188
+ @functools.wraps(func)
189
+ def wrapper(*args, **kwargs):
190
+ force = kwargs.pop('force', False)
191
+ if force or is_local_master():
192
+ ret = func(*args, **kwargs)
193
+ else:
194
+ ret = None
195
+ barrier()
196
+ return ret
197
+ return wrapper
198
+
199
+
200
+ def for_visualize(func):
201
+ @functools.wraps(func)
202
+ def wrapper(*args, **kwargs):
203
+ if is_master():
204
+ # with torch.no_grad():
205
+ ret = func(*args, **kwargs)
206
+ else:
207
+ ret = None
208
+ return ret
209
+ return wrapper
210
+
211
+
212
+ def finalize():
213
+ if __initialized:
214
+ tdist.destroy_process_group()
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep10adamlr8e-05wd0.05/events.out.tfevents.1749554045.slz-gpu2-0.240780.0__0610_1914 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fda15414c85da2af772c4da3042a7983d74af607c005b80143ca157b38cf1e7a
3
+ size 88
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep10adamlr8e-05wd0.05/events.out.tfevents.1749554096.slz-gpu2-0.241049.0__0610_1914 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d8c5b7e379db6037ff1815ac136c5d11c6cf98ee09e3ea2c31f19d4f6d31706
3
+ size 88
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep130adamlr8e-05wd0.05/events.out.tfevents.1750037349.slz-gpu2-0.1463.0__0616_0929 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d951137e0aca9798454c857db0b4f2443c778ddd04ed47b6b297a3f9065ff0ee
3
+ size 88
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep130adamlr8e-05wd0.05/events.out.tfevents.1750038434.slz-gpu2-0.5620.0__0616_0947 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:015208b488ebcf395d58c64ca44fce4e02b1c99429a777f9ea5575251d9fa8cf
3
+ size 4888
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep35adamlr8e-05wd0.05/events.out.tfevents.1749719042.slz-gpu2-0.60611.0__0612_1704 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56953e2938c03b4aa430efcf21cfa1fdf298d35955fa971d4e23515c38704875
3
+ size 18210
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep45adamlr8e-05wd0.05/events.out.tfevents.1749730258.slz-gpu2-0.102283.0__0612_2010 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5df427ef96c08b76adbed6f62f78fc42cf0aca6a8419dbc0522dcc85cc85ff6
3
+ size 16354
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep50adamlr8e-05wd0.05/events.out.tfevents.1750040923.slz-gpu2-0.20209.0__0616_1028 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3f3be869cd32ea4a9587b3726c12f66bf0ee4e62134d8562f7ecd16b50c4dd5
3
+ size 50278
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep81adamlr8e-05wd0.05/events.out.tfevents.1750040357.slz-gpu2-0.18949.0__0616_1019 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad2a9a51afe59a91af5e349ea64f1a6abb83de067d400b62a9177f340488f8a8
3
+ size 88
local_output/tb-VARd12__pn1_2_3_4_5_6_8_10_13_16__b64ep83adamlr8e-05wd0.05/events.out.tfevents.1750040814.slz-gpu2-0.19484.0__0616_1026 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97deeca05116167bf98fa46dbe40b19e4ae8fb305a96ec2f46847eb75f1863f6
3
+ size 88
metric/FID_score.py ADDED
@@ -0,0 +1,355 @@
1
+ """Calculates the Frechet Inception Distance (FID) to evaluate GANs
2
+
3
+ The FID metric calculates the distance between two distributions of images.
4
+ Typically, we have summary statistics (mean & covariance matrix) of one
5
+ of these distributions, while the 2nd distribution is given by a GAN.
6
+
7
+ When run as a stand-alone program, it compares the distribution of
8
+ images that are stored as PNG/JPEG at a specified location with a
9
+ distribution given by summary statistics (in pickle format).
10
+
11
+ The FID is calculated by assuming that X_1 and X_2 are the activations of
12
+ the pool_3 layer of the inception net for generated samples and real world
13
+ samples respectively.
14
+
15
+ See --help to see further details.
16
+
17
+ Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
18
+ of Tensorflow
19
+
20
+ Copyright 2018 Institute of Bioinformatics, JKU Linz
21
+
22
+ Licensed under the Apache License, Version 2.0 (the "License");
23
+ you may not use this file except in compliance with the License.
24
+ You may obtain a copy of the License at
25
+
26
+ http://www.apache.org/licenses/LICENSE-2.0
27
+
28
+ Unless required by applicable law or agreed to in writing, software
29
+ distributed under the License is distributed on an "AS IS" BASIS,
30
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
31
+ See the License for the specific language governing permissions and
32
+ limitations under the License.
33
+ """
34
+
35
+ import os
36
+ import pathlib
37
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
38
+
39
+ import numpy as np
40
+ import torch
41
+ import torchvision.transforms as TF
42
+ from PIL import Image
43
+ from scipy import linalg
44
+ from torch.nn.functional import adaptive_avg_pool2d
45
+
46
+ try:
47
+ from tqdm import tqdm
48
+ except ImportError:
49
+ # If tqdm is not available, provide a mock version of it
50
+ def tqdm(x):
51
+ return x
52
+
53
+
54
+ from metric.inception import InceptionV3
55
+
56
+ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
57
+ parser.add_argument("--batch-size", type=int, default=50, help="Batch size to use")
58
+ parser.add_argument(
59
+ "--num-workers",
60
+ type=int,
61
+ help=(
62
+ "Number of processes to use for data loading. " "Defaults to `min(8, num_cpus)`"
63
+ ),
64
+ )
65
+ parser.add_argument(
66
+ "--device", type=str, default=None, help="Device to use. Like cuda, cuda:0 or cpu"
67
+ )
68
+ parser.add_argument(
69
+ "--dims",
70
+ type=int,
71
+ default=2048,
72
+ choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
73
+ help=(
74
+ "Dimensionality of Inception features to use. "
75
+ "By default, uses pool3 features"
76
+ ),
77
+ )
78
+ parser.add_argument(
79
+ "--save-stats",
80
+ action="store_true",
81
+ help=(
82
+ "Generate an npz archive from a directory of "
83
+ "samples. The first path is used as input and the "
84
+ "second as output."
85
+ ),
86
+ )
87
+ parser.add_argument(
88
+ "path",
89
+ type=str,
90
+ nargs=2,
91
+ help=("Paths to the generated images or " "to .npz statistic files"),
92
+ )
93
+
94
+ IMAGE_EXTENSIONS = {"bmp", "jpg", "jpeg", "pgm", "png", "ppm", "tif", "tiff", "webp"}
95
+
96
+
97
+ class ImagePathDataset(torch.utils.data.Dataset):
98
+ def __init__(self, files, transforms=None):
99
+ self.files = files
100
+ self.transforms = transforms
101
+
102
+ def __len__(self):
103
+ return len(self.files)
104
+
105
+ def __getitem__(self, i):
106
+ path = self.files[i]
107
+ img = Image.open(path).convert("RGB")
108
+ if self.transforms is not None:
109
+ img = self.transforms(img)
110
+ return img
111
+
112
+
113
+ def get_activations(
114
+ files, model, batch_size=50, dims=2048, device="cpu", num_workers=1
115
+ ):
116
+ """Calculates the activations of the pool_3 layer for all images.
117
+
118
+ Params:
119
+ -- files : List of image files paths
120
+ -- model : Instance of inception model
121
+ -- batch_size : Batch size of images for the model to process at once.
122
+ Make sure that the number of samples is a multiple of
123
+ the batch size, otherwise some samples are ignored. This
124
+ behavior is retained to match the original FID score
125
+ implementation.
126
+ -- dims : Dimensionality of features returned by Inception
127
+ -- device : Device to run calculations
128
+ -- num_workers : Number of parallel dataloader workers
129
+
130
+ Returns:
131
+ -- A numpy array of dimension (num images, dims) that contains the
132
+ activations of the given tensor when feeding inception with the
133
+ query tensor.
134
+ """
135
+ model.eval()
136
+
137
+ if batch_size > len(files):
138
+ print(
139
+ (
140
+ "Warning: batch size is bigger than the data size. "
141
+ "Setting batch size to data size"
142
+ )
143
+ )
144
+ batch_size = len(files)
145
+
146
+ dataset = ImagePathDataset(files, transforms=TF.ToTensor())
147
+ dataloader = torch.utils.data.DataLoader(
148
+ dataset,
149
+ batch_size=batch_size,
150
+ shuffle=False,
151
+ drop_last=False,
152
+ num_workers=num_workers,
153
+ )
154
+
155
+ pred_arr = np.empty((len(files), dims))
156
+
157
+ start_idx = 0
158
+
159
+ for batch in tqdm(dataloader):
160
+ batch = batch.to(device)
161
+
162
+ with torch.no_grad():
163
+ pred = model(batch)[0]
164
+
165
+ # If model output is not scalar, apply global spatial average pooling.
166
+ # This happens if you choose a dimensionality not equal 2048.
167
+ if pred.size(2) != 1 or pred.size(3) != 1:
168
+ pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
169
+
170
+ pred = pred.squeeze(3).squeeze(2).cpu().numpy()
171
+
172
+ pred_arr[start_idx : start_idx + pred.shape[0]] = pred
173
+
174
+ start_idx = start_idx + pred.shape[0]
175
+
176
+ return pred_arr
177
+
178
+
179
+ def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
180
+ """Numpy implementation of the Frechet Distance.
181
+ The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
182
+ and X_2 ~ N(mu_2, C_2) is
183
+ d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
184
+
185
+ Stable version by Dougal J. Sutherland.
186
+
187
+ Params:
188
+ -- mu1 : Numpy array containing the activations of a layer of the
189
+ inception net (like returned by the function 'get_predictions')
190
+ for generated samples.
191
+ -- mu2 : The sample mean over activations, precalculated on a
192
+ representative data set.
193
+ -- sigma1: The covariance matrix over activations for generated samples.
194
+ -- sigma2: The covariance matrix over activations, precalculated on a
195
+ representative data set.
196
+
197
+ Returns:
198
+ -- : The Frechet Distance.
199
+ """
200
+
201
+ mu1 = np.atleast_1d(mu1)
202
+ mu2 = np.atleast_1d(mu2)
203
+
204
+ sigma1 = np.atleast_2d(sigma1)
205
+ sigma2 = np.atleast_2d(sigma2)
206
+
207
+ assert (
208
+ mu1.shape == mu2.shape
209
+ ), "Training and test mean vectors have different lengths"
210
+ assert (
211
+ sigma1.shape == sigma2.shape
212
+ ), "Training and test covariances have different dimensions"
213
+
214
+ diff = mu1 - mu2
215
+
216
+ # Product might be almost singular
217
+ covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
218
+ if not np.isfinite(covmean).all():
219
+ msg = (
220
+ "fid calculation produces singular product; "
221
+ "adding %s to diagonal of cov estimates"
222
+ ) % eps
223
+ print(msg)
224
+ offset = np.eye(sigma1.shape[0]) * eps
225
+ covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
226
+
227
+ # Numerical error might give slight imaginary component
228
+ if np.iscomplexobj(covmean):
229
+ if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
230
+ m = np.max(np.abs(covmean.imag))
231
+ raise ValueError("Imaginary component {}".format(m))
232
+ covmean = covmean.real
233
+
234
+ tr_covmean = np.trace(covmean)
235
+
236
+ return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
237
+
238
+
239
+ def calculate_activation_statistics(
240
+ files, model, batch_size=50, dims=2048, device="cpu", num_workers=1
241
+ ):
242
+ """Calculation of the statistics used by the FID.
243
+ Params:
244
+ -- files : List of image files paths
245
+ -- model : Instance of inception model
246
+ -- batch_size : The images numpy array is split into batches with
247
+ batch size batch_size. A reasonable batch size
248
+ depends on the hardware.
249
+ -- dims : Dimensionality of features returned by Inception
250
+ -- device : Device to run calculations
251
+ -- num_workers : Number of parallel dataloader workers
252
+
253
+ Returns:
254
+ -- mu : The mean over samples of the activations of the pool_3 layer of
255
+ the inception model.
256
+ -- sigma : The covariance matrix of the activations of the pool_3 layer of
257
+ the inception model.
258
+ """
259
+ act = get_activations(files, model, batch_size, dims, device, num_workers)
260
+ mu = np.mean(act, axis=0)
261
+ sigma = np.cov(act, rowvar=False)
262
+ return mu, sigma
263
+
264
+
265
+ def compute_statistics_of_path(path, model, batch_size, dims, device, num_workers=1):
266
+ if path.endswith(".npz"):
267
+ with np.load(path) as f:
268
+ m, s = f["mu"][:], f["sigma"][:]
269
+ else:
270
+ path = pathlib.Path(path)
271
+ files = sorted(
272
+ [file for ext in IMAGE_EXTENSIONS for file in path.glob("*.{}".format(ext))]
273
+ )
274
+ m, s = calculate_activation_statistics(
275
+ files, model, batch_size, dims, device, num_workers
276
+ )
277
+
278
+ return m, s
279
+
280
+
281
+ def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1):
282
+ """Calculates the FID of two paths"""
283
+ for p in paths:
284
+ if not os.path.exists(p):
285
+ raise RuntimeError("Invalid path: %s" % p)
286
+
287
+ block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
288
+
289
+ model = InceptionV3([block_idx]).to(device)
290
+
291
+ m1, s1 = compute_statistics_of_path(
292
+ paths[0], model, batch_size, dims, device, num_workers
293
+ )
294
+ m2, s2 = compute_statistics_of_path(
295
+ paths[1], model, batch_size, dims, device, num_workers
296
+ )
297
+ fid_value = calculate_frechet_distance(m1, s1, m2, s2)
298
+
299
+ return fid_value
300
+
301
+
302
+ def save_fid_stats(paths, batch_size, device, dims, num_workers=1):
303
+ """Saves FID statistics of one path"""
304
+ if not os.path.exists(paths[0]):
305
+ raise RuntimeError("Invalid path: %s" % paths[0])
306
+
307
+ if os.path.exists(paths[1]):
308
+ raise RuntimeError("Existing output file: %s" % paths[1])
309
+
310
+ block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
311
+
312
+ model = InceptionV3([block_idx]).to(device)
313
+
314
+ print(f"Saving statistics for {paths[0]}")
315
+
316
+ m1, s1 = compute_statistics_of_path(
317
+ paths[0], model, batch_size, dims, device, num_workers
318
+ )
319
+
320
+ np.savez_compressed(paths[1], mu=m1, sigma=s1)
321
+
322
+
323
+ def main():
324
+ args = parser.parse_args()
325
+
326
+ if args.device is None:
327
+ device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
328
+ else:
329
+ device = torch.device(args.device)
330
+
331
+ if args.num_workers is None:
332
+ try:
333
+ num_cpus = len(os.sched_getaffinity(0))
334
+ except AttributeError:
335
+ # os.sched_getaffinity is not available under Windows, use
336
+ # os.cpu_count instead (which may not return the *available* number
337
+ # of CPUs).
338
+ num_cpus = os.cpu_count()
339
+
340
+ num_workers = min(num_cpus, 8) if num_cpus is not None else 0
341
+ else:
342
+ num_workers = args.num_workers
343
+
344
+ if args.save_stats:
345
+ save_fid_stats(args.path, args.batch_size, device, args.dims, num_workers)
346
+ return
347
+
348
+ fid_value = calculate_fid_given_paths(
349
+ args.path, args.batch_size, device, args.dims, num_workers
350
+ )
351
+ print("FID: ", fid_value)
352
+
353
+
354
+ if __name__ == "__main__":
355
+ main()
metric/clip_score.py ADDED
@@ -0,0 +1,76 @@
1
+ import os
2
+ from torch import autocast, nn
3
+ import clip
4
+ import torch
5
+ from PIL import Image
6
+ import numpy as np
7
+
8
+
9
+ def clip_iterator(img_root,cap_root, batch_size):
10
+ images = []
11
+ captions = []
12
+ img_files=os.listdir(img_root)
13
+ for item in img_files:
14
+ images.append(Image.open(os.path.join(img_root,item)))
15
+ cap=open(os.path.join(cap_root,item.replace('.png','.txt')),'r').readline().strip()
16
+ captions.append(cap)
17
+
18
+ if len(captions) == batch_size:
19
+ yield images, captions
20
+ # clear the lists to prepare the next batch
21
+ images = []
22
+ captions = []
23
+ # process any remaining items
24
+ if images:
25
+ yield images, captions
26
+
27
+
28
+
29
+ def get_clip_score_batched(clip_model, image_features, prompts,device):
30
+
31
+ tokens = clip.tokenize(prompts, truncate=True).to(device)
32
+
33
+ with torch.no_grad():
34
+ if len(image_features) != len(prompts):
35
+ assert len(image_features) % len(prompts) == 0
36
+ tokens = (
37
+ tokens.unsqueeze(1)
38
+ .expand(-1, 1, -1)
39
+ .reshape(-1, tokens.shape[-1])
40
+ )
41
+
42
+ text_features = clip_model.encode_text(tokens)
43
+ image_features = image_features / image_features.norm(dim=1, keepdim=True)
44
+ text_features = text_features / text_features.norm(dim=1, keepdim=True)
45
+ logit = image_features @ text_features.t()
46
+ scores = logit.diag().tolist()
47
+ return scores
48
+
49
+
50
+ def get_clip_features(clip_model, clip_preprocess, pil_image):
51
+
52
+ images = [clip_preprocess(i) for i in pil_image]
53
+ image = torch.stack(images)
54
+
55
+ image = image.to(device)
56
+ with torch.no_grad():
57
+ image_features = clip_model.encode_image(image)
58
+ return image_features
59
+
60
+
61
+
62
+ if __name__ == '__main__':
63
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
64
+ img_root=str(input('input image path:'));print('img root=',img_root)
65
+ cap_root=str(input('input cap path:'));print('cap root=',cap_root)
66
+ clip_model, clip_preprocess = clip.load("ViT-L/14", device=device, jit=False)
67
+ # clip_model, clip_preprocess = clip.load("ViT-B/32", device=device, jit=False)
68
+
69
+ all_scores=[]
70
+ for images,prompts in clip_iterator(img_root=img_root,cap_root=cap_root,batch_size=16):
71
+ image_features = get_clip_features(clip_model, clip_preprocess, images)
72
+ clip_scores = get_clip_score_batched(clip_model ,image_features, prompts,device)
73
+ all_scores=all_scores+clip_scores
74
+ print(clip_scores)
75
+ final=np.mean(np.array(all_scores))
76
+ print('length=',len(all_scores),' final score=',final)
metric/eval_is.py ADDED
@@ -0,0 +1,69 @@
1
+ import os
2
+ import numpy as np
3
+ import torch
4
+ from torchvision import models, transforms
5
+ from torch.nn.functional import softmax
6
+ from PIL import Image
7
+ import argparse
8
+
9
+ def load_inception_model():
10
+ model = models.inception_v3(pretrained=False)
11
+ local_inception_path='/home/disk2/nfs/maxiaoxiao/ckpts/inceptionv3_torch/inception_v3_google-0cc3c7bd.pth'
12
+ state_dict=torch.load(local_inception_path)
13
+ model.load_state_dict(state_dict)
14
+ model.eval()
15
+ return model
16
+
17
+ def calculate_inception_score(images, model, splits=10):
18
+ preprocess = transforms.Compose([
19
+ transforms.Resize(299),
20
+ transforms.CenterCrop(299),
21
+ transforms.ToTensor(),
22
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
23
+ ])
24
+
25
+ all_probs = []
26
+ for img in images:
27
+ img_tensor = preprocess(img).unsqueeze(0)
28
+ with torch.no_grad():
29
+ preds = model(img_tensor)
30
+ probs = softmax(preds, dim=1).cpu().numpy()
31
+ all_probs.append(probs)
32
+
33
+ all_probs = np.vstack(all_probs)
34
+
35
+ scores = []
36
+ for i in range(splits):
37
+ part = all_probs[i * (len(all_probs) // splits): (i + 1) * (len(all_probs) // splits)]
38
+ kl_div = np.mean(np.sum(part * (np.log(part) - np.log(np.mean(all_probs, axis=0))), axis=1))
39
+ scores.append(np.exp(kl_div))
40
+
41
+ return np.mean(scores), np.std(scores)
42
+
43
+ def load_images_from_folders(prediction_folder, reference_folder):
44
+ images = []
45
+ for filename in os.listdir(prediction_folder):
46
+ pred_path = os.path.join(prediction_folder, filename)
47
+ ref_path = os.path.join(reference_folder, filename)
48
+
49
+ if os.path.isfile(pred_path) and os.path.isfile(ref_path):
50
+ pred_image = Image.open(pred_path).convert('RGB')
51
+ ref_image = Image.open(ref_path).convert('RGB')
52
+ images.append(pred_image) # you can use only the predicted images, or both
53
+ images.append(ref_image) # include the reference image if you want to score both together
54
+
55
+ return images
56
+
57
+ # parse command-line arguments with argparse
58
+ if __name__ == "__main__":
59
+ parser = argparse.ArgumentParser(description='Calculate Inception Score for images.')
60
+ parser.add_argument('--prediction_folder', type=str, required=True, help='Path to the prediction_total folder.')
61
+ parser.add_argument('--reference_folder', type=str, required=True, help='Path to the reference_total folder.')
62
+
63
+ args = parser.parse_args()
64
+
65
+ model = load_inception_model()
66
+
67
+ images = load_images_from_folders(args.prediction_folder, args.reference_folder)
68
+ is_mean, is_std = calculate_inception_score(images, model)
69
+ print(f"Inception Score: {is_mean:.2f} ± {is_std:.2f}")
metric/inception.py ADDED
@@ -0,0 +1,344 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import torchvision
5
+
6
+ try:
7
+ from torchvision.models.utils import load_state_dict_from_url
8
+ except ImportError:
9
+ from torch.utils.model_zoo import load_url as load_state_dict_from_url
10
+
11
+ # Inception weights ported to Pytorch from
12
+ # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
13
+ FID_WEIGHTS_URL = "https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth" # noqa: E501
14
+
15
+
16
+ class InceptionV3(nn.Module):
17
+ """Pretrained InceptionV3 network returning feature maps"""
18
+
19
+ # Index of default block of inception to return,
20
+ # corresponds to output of final average pooling
21
+ DEFAULT_BLOCK_INDEX = 3
22
+
23
+ # Maps feature dimensionality to their output blocks indices
24
+ BLOCK_INDEX_BY_DIM = {
25
+ 64: 0, # First max pooling features
26
+ 192: 1, # Second max pooling features
27
+ 768: 2, # Pre-aux classifier features
28
+ 2048: 3, # Final average pooling features
29
+ }
30
+
31
+ def __init__(
32
+ self,
33
+ output_blocks=(DEFAULT_BLOCK_INDEX,),
34
+ resize_input=True,
35
+ normalize_input=True,
36
+ requires_grad=False,
37
+ use_fid_inception=True,
38
+ ):
39
+ """Build pretrained InceptionV3
40
+
41
+ Parameters
42
+ ----------
43
+ output_blocks : list of int
44
+ Indices of blocks to return features of. Possible values are:
45
+ - 0: corresponds to output of first max pooling
46
+ - 1: corresponds to output of second max pooling
47
+ - 2: corresponds to output which is fed to aux classifier
48
+ - 3: corresponds to output of final average pooling
49
+ resize_input : bool
50
+ If true, bilinearly resizes input to width and height 299 before
51
+ feeding input to model. As the network without fully connected
52
+ layers is fully convolutional, it should be able to handle inputs
53
+ of arbitrary size, so resizing might not be strictly needed
54
+ normalize_input : bool
55
+ If true, scales the input from range (0, 1) to the range the
56
+ pretrained Inception network expects, namely (-1, 1)
57
+ requires_grad : bool
58
+ If true, parameters of the model require gradients. Possibly useful
59
+ for finetuning the network
60
+ use_fid_inception : bool
61
+ If true, uses the pretrained Inception model used in Tensorflow's
62
+ FID implementation. If false, uses the pretrained Inception model
63
+ available in torchvision. The FID Inception model has different
64
+ weights and a slightly different structure from torchvision's
65
+ Inception model. If you want to compute FID scores, you are
66
+ strongly advised to set this parameter to true to get comparable
67
+ results.
68
+ """
69
+ super(InceptionV3, self).__init__()
70
+
71
+ self.resize_input = resize_input
72
+ self.normalize_input = normalize_input
73
+ self.output_blocks = sorted(output_blocks)
74
+ self.last_needed_block = max(output_blocks)
75
+
76
+ assert self.last_needed_block <= 3, "Last possible output block index is 3"
77
+
78
+ self.blocks = nn.ModuleList()
79
+
80
+ if use_fid_inception:
81
+ inception = fid_inception_v3()
82
+ else:
83
+ inception = _inception_v3(weights="DEFAULT")
84
+
85
+ # Block 0: input to maxpool1
86
+ block0 = [
87
+ inception.Conv2d_1a_3x3,
88
+ inception.Conv2d_2a_3x3,
89
+ inception.Conv2d_2b_3x3,
90
+ nn.MaxPool2d(kernel_size=3, stride=2),
91
+ ]
92
+ self.blocks.append(nn.Sequential(*block0))
93
+
94
+ # Block 1: maxpool1 to maxpool2
95
+ if self.last_needed_block >= 1:
96
+ block1 = [
97
+ inception.Conv2d_3b_1x1,
98
+ inception.Conv2d_4a_3x3,
99
+ nn.MaxPool2d(kernel_size=3, stride=2),
100
+ ]
101
+ self.blocks.append(nn.Sequential(*block1))
102
+
103
+ # Block 2: maxpool2 to aux classifier
104
+ if self.last_needed_block >= 2:
105
+ block2 = [
106
+ inception.Mixed_5b,
107
+ inception.Mixed_5c,
108
+ inception.Mixed_5d,
109
+ inception.Mixed_6a,
110
+ inception.Mixed_6b,
111
+ inception.Mixed_6c,
112
+ inception.Mixed_6d,
113
+ inception.Mixed_6e,
114
+ ]
115
+ self.blocks.append(nn.Sequential(*block2))
116
+
117
+ # Block 3: aux classifier to final avgpool
118
+ if self.last_needed_block >= 3:
119
+ block3 = [
120
+ inception.Mixed_7a,
121
+ inception.Mixed_7b,
122
+ inception.Mixed_7c,
123
+ nn.AdaptiveAvgPool2d(output_size=(1, 1)),
124
+ ]
125
+ self.blocks.append(nn.Sequential(*block3))
126
+
127
+ for param in self.parameters():
128
+ param.requires_grad = requires_grad
129
+
130
+ def forward(self, inp):
131
+ """Get Inception feature maps
132
+
133
+ Parameters
134
+ ----------
135
+ inp : torch.autograd.Variable
136
+ Input tensor of shape Bx3xHxW. Values are expected to be in
137
+ range (0, 1)
138
+
139
+ Returns
140
+ -------
141
+ List of torch.autograd.Variable, corresponding to the selected output
142
+ block, sorted ascending by index
143
+ """
144
+ outp = []
145
+ x = inp
146
+
147
+ if self.resize_input:
148
+ x = F.interpolate(x, size=(299, 299), mode="bilinear", align_corners=False)
149
+
150
+ if self.normalize_input:
151
+ x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
152
+
153
+ for idx, block in enumerate(self.blocks):
154
+ x = block(x)
155
+ if idx in self.output_blocks:
156
+ outp.append(x)
157
+
158
+ if idx == self.last_needed_block:
159
+ break
160
+
161
+ return outp
162
+
163
+
164
+ def _inception_v3(*args, **kwargs):
165
+ """Wraps `torchvision.models.inception_v3`"""
166
+ try:
167
+ version = tuple(map(int, torchvision.__version__.split(".")[:2]))
168
+ except ValueError:
169
+ # Just a caution against weird version strings
170
+ version = (0,)
171
+
172
+ # Skips default weight initialization if supported by torchvision
173
+ # version. See https://github.com/mseitzer/pytorch-fid/issues/28.
174
+ if version >= (0, 6):
175
+ kwargs["init_weights"] = False
176
+
177
+ # Backwards compatibility: `weights` argument was handled by `pretrained`
178
+ # argument prior to version 0.13.
179
+ if version < (0, 13) and "weights" in kwargs:
180
+ if kwargs["weights"] == "DEFAULT":
181
+ kwargs["pretrained"] = True
182
+ elif kwargs["weights"] is None:
183
+ kwargs["pretrained"] = False
184
+ else:
185
+ raise ValueError(
186
+ "weights=={} not supported in torchvision {}".format(
187
+ kwargs["weights"], torchvision.__version__
188
+ )
189
+ )
190
+ del kwargs["weights"]
191
+
192
+ return torchvision.models.inception_v3(*args, **kwargs)
193
+
194
+
195
+ def fid_inception_v3():
196
+ """Build pretrained Inception model for FID computation
197
+
198
+ The Inception model for FID computation uses a different set of weights
199
+ and has a slightly different structure than torchvision's Inception.
200
+
201
+ This method first constructs torchvision's Inception and then patches the
202
+ necessary parts that are different in the FID Inception model.
203
+ """
204
+ inception = _inception_v3(num_classes=1008, aux_logits=False, weights=None)
205
+ inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
206
+ inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
207
+ inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
208
+ inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
209
+ inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
210
+ inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
211
+ inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
212
+ inception.Mixed_7b = FIDInceptionE_1(1280)
213
+ inception.Mixed_7c = FIDInceptionE_2(2048)
214
+
215
+ state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
216
+ inception.load_state_dict(state_dict)
217
+ return inception
218
+
219
+
220
+ class FIDInceptionA(torchvision.models.inception.InceptionA):
221
+ """InceptionA block patched for FID computation"""
222
+
223
+ def __init__(self, in_channels, pool_features):
224
+ super(FIDInceptionA, self).__init__(in_channels, pool_features)
225
+
226
+ def forward(self, x):
227
+ branch1x1 = self.branch1x1(x)
228
+
229
+ branch5x5 = self.branch5x5_1(x)
230
+ branch5x5 = self.branch5x5_2(branch5x5)
231
+
232
+ branch3x3dbl = self.branch3x3dbl_1(x)
233
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
234
+ branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
235
+
236
+ # Patch: Tensorflow's average pool does not use the padded zeros in
237
+ # its average calculation
238
+ branch_pool = F.avg_pool2d(
239
+ x, kernel_size=3, stride=1, padding=1, count_include_pad=False
240
+ )
241
+ branch_pool = self.branch_pool(branch_pool)
242
+
243
+ outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
244
+ return torch.cat(outputs, 1)
245
+
246
+
247
+ class FIDInceptionC(torchvision.models.inception.InceptionC):
248
+ """InceptionC block patched for FID computation"""
249
+
250
+ def __init__(self, in_channels, channels_7x7):
251
+ super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
252
+
253
+ def forward(self, x):
254
+ branch1x1 = self.branch1x1(x)
255
+
256
+ branch7x7 = self.branch7x7_1(x)
257
+ branch7x7 = self.branch7x7_2(branch7x7)
258
+ branch7x7 = self.branch7x7_3(branch7x7)
259
+
260
+ branch7x7dbl = self.branch7x7dbl_1(x)
261
+ branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
262
+ branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
263
+ branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
264
+ branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
265
+
266
+ # Patch: Tensorflow's average pool does not use the padded zeros in
267
+ # its average calculation
268
+ branch_pool = F.avg_pool2d(
269
+ x, kernel_size=3, stride=1, padding=1, count_include_pad=False
270
+ )
271
+ branch_pool = self.branch_pool(branch_pool)
272
+
273
+ outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
274
+ return torch.cat(outputs, 1)
275
+
276
+
277
+ class FIDInceptionE_1(torchvision.models.inception.InceptionE):
278
+ """First InceptionE block patched for FID computation"""
279
+
280
+ def __init__(self, in_channels):
281
+ super(FIDInceptionE_1, self).__init__(in_channels)
282
+
283
+ def forward(self, x):
284
+ branch1x1 = self.branch1x1(x)
285
+
286
+ branch3x3 = self.branch3x3_1(x)
287
+ branch3x3 = [
288
+ self.branch3x3_2a(branch3x3),
289
+ self.branch3x3_2b(branch3x3),
290
+ ]
291
+ branch3x3 = torch.cat(branch3x3, 1)
292
+
293
+ branch3x3dbl = self.branch3x3dbl_1(x)
294
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
295
+ branch3x3dbl = [
296
+ self.branch3x3dbl_3a(branch3x3dbl),
297
+ self.branch3x3dbl_3b(branch3x3dbl),
298
+ ]
299
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
300
+
301
+ # Patch: Tensorflow's average pool does not use the padded zeros in
302
+ # its average calculation
303
+ branch_pool = F.avg_pool2d(
304
+ x, kernel_size=3, stride=1, padding=1, count_include_pad=False
305
+ )
306
+ branch_pool = self.branch_pool(branch_pool)
307
+
308
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
309
+ return torch.cat(outputs, 1)
310
+
311
+
312
+ class FIDInceptionE_2(torchvision.models.inception.InceptionE):
313
+ """Second InceptionE block patched for FID computation"""
314
+
315
+ def __init__(self, in_channels):
316
+ super(FIDInceptionE_2, self).__init__(in_channels)
317
+
318
+ def forward(self, x):
319
+ branch1x1 = self.branch1x1(x)
320
+
321
+ branch3x3 = self.branch3x3_1(x)
322
+ branch3x3 = [
323
+ self.branch3x3_2a(branch3x3),
324
+ self.branch3x3_2b(branch3x3),
325
+ ]
326
+ branch3x3 = torch.cat(branch3x3, 1)
327
+
328
+ branch3x3dbl = self.branch3x3dbl_1(x)
329
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
330
+ branch3x3dbl = [
331
+ self.branch3x3dbl_3a(branch3x3dbl),
332
+ self.branch3x3dbl_3b(branch3x3dbl),
333
+ ]
334
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
335
+
336
+ # Patch: The FID Inception model uses max pooling instead of average
337
+ # pooling. This is likely an error in this specific Inception
338
+ # implementation, as other Inception models use average pooling here
339
+ # (which matches the description in the paper).
340
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
341
+ branch_pool = self.branch_pool(branch_pool)
342
+
343
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
344
+ return torch.cat(outputs, 1)
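Note: a minimal sketch of how FID is usually computed from the 2048-d pool features this wrapper returns; this is not the repo's FID script, and the random batches below are placeholders.

import numpy as np
import torch
from scipy import linalg

from metric.inception import InceptionV3

model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]]).eval()

def pool_features(images_01: torch.Tensor) -> np.ndarray:
    # images_01: (N, 3, H, W) with values in [0, 1]
    with torch.no_grad():
        feat = model(images_01)[0]  # (N, 2048, 1, 1)
    return feat.squeeze(-1).squeeze(-1).numpy()

def frechet_distance(feat_a: np.ndarray, feat_b: np.ndarray) -> float:
    mu1, mu2 = feat_a.mean(axis=0), feat_b.mean(axis=0)
    sigma1 = np.cov(feat_a, rowvar=False)
    sigma2 = np.cov(feat_b, rowvar=False)
    covmean = linalg.sqrtm(sigma1 @ sigma2).real  # matrix square root of the covariance product
    diff = mu1 - mu2
    return float(diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean))

fid = frechet_distance(pool_features(torch.rand(64, 3, 256, 256)),
                       pool_features(torch.rand(64, 3, 256, 256)))
print(fid)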
models/__init__.py ADDED
@@ -0,0 +1,40 @@
1
+ from typing import Tuple
2
+ import torch.nn as nn
3
+
4
+ from .quant import VectorQuantizer2
5
+ from .var import VAR
6
+ from .vqvae import VQVAE
7
+
8
+
9
+ def build_vae_var(
10
+ # Shared args
11
+ # device, patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16),
12
+ device, patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16), # 10 steps by default
13
+ # VQVAE args
14
+ V=4096, Cvae=32, ch=160, share_quant_resi=4,
15
+ # VAR args
16
+ depth=16, shared_aln=False, attn_l2_norm=True,
17
+ flash_if_available=True, fused_if_available=True,
18
+ init_adaln=0.5, init_adaln_gamma=1e-5, init_head=0.02, init_std=-1, # init_std < 0: automated
19
+ ) -> Tuple[VQVAE, VAR]:
20
+ heads = depth
21
+ width = depth * 64
22
+ dpr = 0.1 * depth/24
23
+
24
+ # disable built-in initialization for speed
25
+ for clz in (nn.Linear, nn.LayerNorm, nn.BatchNorm2d, nn.SyncBatchNorm, nn.Conv1d, nn.Conv2d, nn.ConvTranspose1d, nn.ConvTranspose2d):
26
+ setattr(clz, 'reset_parameters', lambda self: None)
27
+
28
+ # build models
29
+ vae_local = VQVAE(vocab_size=V, z_channels=Cvae, ch=ch, test_mode=True, share_quant_resi=share_quant_resi, v_patch_nums=patch_nums).to(device)
30
+ var_wo_ddp = VAR(
31
+ vae_local=vae_local,
32
+ depth=depth, embed_dim=width, num_heads=heads, drop_rate=0., attn_drop_rate=0., drop_path_rate=dpr,
33
+ norm_eps=1e-6, shared_aln=shared_aln, cond_drop_rate=0.1,
34
+ attn_l2_norm=attn_l2_norm,
35
+ patch_nums=patch_nums,
36
+ flash_if_available=flash_if_available, fused_if_available=fused_if_available,
37
+ ).to(device)
38
+ var_wo_ddp.init_weights(init_adaln=init_adaln, init_adaln_gamma=init_adaln_gamma, init_head=init_head, init_std=init_std)
39
+
40
+ return vae_local, var_wo_ddp
models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
models/__pycache__/basic_vae.cpython-310.pyc ADDED
Binary file (6.84 kB). View file
 
models/__pycache__/basic_var.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
models/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
models/__pycache__/quant.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
models/__pycache__/var.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
models/__pycache__/vqvae.cpython-310.pyc ADDED
Binary file (5 kB). View file
 
models/basic_vae.py ADDED
@@ -0,0 +1,226 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+
6
+ # this file only provides the 2 modules used in VQVAE
7
+ __all__ = ['Encoder', 'Decoder',]
8
+
9
+
10
+ """
11
+ References: https://github.com/CompVis/stable-diffusion/blob/21f890f9da3cfbeaba8e2ac3c425ee9e998d5229/ldm/modules/diffusionmodules/model.py
12
+ """
13
+ # swish
14
+ def nonlinearity(x):
15
+ return x * torch.sigmoid(x)
16
+
17
+
18
+ def Normalize(in_channels, num_groups=32):
19
+ return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
20
+
21
+
22
+ class Upsample2x(nn.Module):
23
+ def __init__(self, in_channels):
24
+ super().__init__()
25
+ self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
26
+
27
+ def forward(self, x):
28
+ return self.conv(F.interpolate(x, scale_factor=2, mode='nearest'))
29
+
30
+
31
+ class Downsample2x(nn.Module):
32
+ def __init__(self, in_channels):
33
+ super().__init__()
34
+ self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
35
+
36
+ def forward(self, x):
37
+ return self.conv(F.pad(x, pad=(0, 1, 0, 1), mode='constant', value=0))
38
+
39
+
40
+ class ResnetBlock(nn.Module):
41
+ def __init__(self, *, in_channels, out_channels=None, dropout): # conv_shortcut=False, # conv_shortcut: always False in VAE
42
+ super().__init__()
43
+ self.in_channels = in_channels
44
+ out_channels = in_channels if out_channels is None else out_channels
45
+ self.out_channels = out_channels
46
+
47
+ self.norm1 = Normalize(in_channels)
48
+ self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
49
+ self.norm2 = Normalize(out_channels)
50
+ self.dropout = torch.nn.Dropout(dropout) if dropout > 1e-6 else nn.Identity()
51
+ self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
52
+ if self.in_channels != self.out_channels:
53
+ self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
54
+ else:
55
+ self.nin_shortcut = nn.Identity()
56
+
57
+ def forward(self, x):
58
+ h = self.conv1(F.silu(self.norm1(x), inplace=True))
59
+ h = self.conv2(self.dropout(F.silu(self.norm2(h), inplace=True)))
60
+ return self.nin_shortcut(x) + h
61
+
62
+
63
+ class AttnBlock(nn.Module):
64
+ def __init__(self, in_channels):
65
+ super().__init__()
66
+ self.C = in_channels
67
+
68
+ self.norm = Normalize(in_channels)
69
+ self.qkv = torch.nn.Conv2d(in_channels, 3*in_channels, kernel_size=1, stride=1, padding=0)
70
+ self.w_ratio = int(in_channels) ** (-0.5)
71
+ self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
72
+
73
+ def forward(self, x):
74
+ qkv = self.qkv(self.norm(x))
75
+ B, _, H, W = qkv.shape # should be B,3C,H,W
76
+ C = self.C
77
+ q, k, v = qkv.reshape(B, 3, C, H, W).unbind(1)
78
+
79
+ # compute attention
80
+ q = q.view(B, C, H * W).contiguous()
81
+ q = q.permute(0, 2, 1).contiguous() # B,HW,C
82
+ k = k.view(B, C, H * W).contiguous() # B,C,HW
83
+ w = torch.bmm(q, k).mul_(self.w_ratio) # B,HW,HW w[B,i,j]=sum_c q[B,i,C]k[B,C,j]
84
+ w = F.softmax(w, dim=2)
85
+
86
+ # attend to values
87
+ v = v.view(B, C, H * W).contiguous()
88
+ w = w.permute(0, 2, 1).contiguous() # B,HW,HW (first HW of k, second of q)
89
+ h = torch.bmm(v, w) # B, C,HW (HW of q) h[B,C,j] = sum_i v[B,C,i] w[B,i,j]
90
+ h = h.view(B, C, H, W).contiguous()
91
+
92
+ return x + self.proj_out(h)
93
+
94
+
95
+ def make_attn(in_channels, using_sa=True):
96
+ return AttnBlock(in_channels) if using_sa else nn.Identity()
97
+
98
+
99
+ class Encoder(nn.Module):
100
+ def __init__(
101
+ self, *, ch=128, ch_mult=(1, 2, 4, 8), num_res_blocks=2,
102
+ dropout=0.0, in_channels=3,
103
+ z_channels, double_z=False, using_sa=True, using_mid_sa=True,
104
+ ):
105
+ super().__init__()
106
+ self.ch = ch
107
+ self.num_resolutions = len(ch_mult)
108
+ self.downsample_ratio = 2 ** (self.num_resolutions - 1)
109
+ self.num_res_blocks = num_res_blocks
110
+ self.in_channels = in_channels
111
+
112
+ # downsampling
113
+ self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
114
+
115
+ in_ch_mult = (1,) + tuple(ch_mult)
116
+ self.down = nn.ModuleList()
117
+ for i_level in range(self.num_resolutions):
118
+ block = nn.ModuleList()
119
+ attn = nn.ModuleList()
120
+ block_in = ch * in_ch_mult[i_level]
121
+ block_out = ch * ch_mult[i_level]
122
+ for i_block in range(self.num_res_blocks):
123
+ block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dropout=dropout))
124
+ block_in = block_out
125
+ if i_level == self.num_resolutions - 1 and using_sa:
126
+ attn.append(make_attn(block_in, using_sa=True))
127
+ down = nn.Module()
128
+ down.block = block
129
+ down.attn = attn
130
+ if i_level != self.num_resolutions - 1:
131
+ down.downsample = Downsample2x(block_in)
132
+ self.down.append(down)
133
+
134
+ # middle
135
+ self.mid = nn.Module()
136
+ self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout)
137
+ self.mid.attn_1 = make_attn(block_in, using_sa=using_mid_sa)
138
+ self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout)
139
+
140
+ # end
141
+ self.norm_out = Normalize(block_in)
142
+ self.conv_out = torch.nn.Conv2d(block_in, (2 * z_channels if double_z else z_channels), kernel_size=3, stride=1, padding=1)
143
+
144
+ def forward(self, x):
145
+ # downsampling
146
+ h = self.conv_in(x)
147
+ for i_level in range(self.num_resolutions):
148
+ for i_block in range(self.num_res_blocks):
149
+ h = self.down[i_level].block[i_block](h)
150
+ if len(self.down[i_level].attn) > 0:
151
+ h = self.down[i_level].attn[i_block](h)
152
+ if i_level != self.num_resolutions - 1:
153
+ h = self.down[i_level].downsample(h)
154
+
155
+ # middle
156
+ h = self.mid.block_2(self.mid.attn_1(self.mid.block_1(h)))
157
+
158
+ # end
159
+ h = self.conv_out(F.silu(self.norm_out(h), inplace=True))
160
+ return h
161
+
162
+
163
+ class Decoder(nn.Module):
164
+ def __init__(
165
+ self, *, ch=128, ch_mult=(1, 2, 4, 8), num_res_blocks=2,
166
+ dropout=0.0, in_channels=3, # in_channels: raw img channels
167
+ z_channels, using_sa=True, using_mid_sa=True,
168
+ ):
169
+ super().__init__()
170
+ self.ch = ch
171
+ self.num_resolutions = len(ch_mult)
172
+ self.num_res_blocks = num_res_blocks
173
+ self.in_channels = in_channels
174
+
175
+ # compute in_ch_mult, block_in and curr_res at lowest res
176
+ in_ch_mult = (1,) + tuple(ch_mult)
177
+ block_in = ch * ch_mult[self.num_resolutions - 1]
178
+
179
+ # z to block_in
180
+ self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
181
+
182
+ # middle
183
+ self.mid = nn.Module()
184
+ self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout)
185
+ self.mid.attn_1 = make_attn(block_in, using_sa=using_mid_sa)
186
+ self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout)
187
+
188
+ # upsampling
189
+ self.up = nn.ModuleList()
190
+ for i_level in reversed(range(self.num_resolutions)):
191
+ block = nn.ModuleList()
192
+ attn = nn.ModuleList()
193
+ block_out = ch * ch_mult[i_level]
194
+ for i_block in range(self.num_res_blocks + 1):
195
+ block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dropout=dropout))
196
+ block_in = block_out
197
+ if i_level == self.num_resolutions-1 and using_sa:
198
+ attn.append(make_attn(block_in, using_sa=True))
199
+ up = nn.Module()
200
+ up.block = block
201
+ up.attn = attn
202
+ if i_level != 0:
203
+ up.upsample = Upsample2x(block_in)
204
+ self.up.insert(0, up) # prepend to get consistent order
205
+
206
+ # end
207
+ self.norm_out = Normalize(block_in)
208
+ self.conv_out = torch.nn.Conv2d(block_in, in_channels, kernel_size=3, stride=1, padding=1)
209
+
210
+ def forward(self, z):
211
+ # z to block_in
212
+ # middle
213
+ h = self.mid.block_2(self.mid.attn_1(self.mid.block_1(self.conv_in(z))))
214
+
215
+ # upsampling
216
+ for i_level in reversed(range(self.num_resolutions)):
217
+ for i_block in range(self.num_res_blocks + 1):
218
+ h = self.up[i_level].block[i_block](h)
219
+ if len(self.up[i_level].attn) > 0:
220
+ h = self.up[i_level].attn[i_block](h)
221
+ if i_level != 0:
222
+ h = self.up[i_level].upsample(h)
223
+
224
+ # end
225
+ h = self.conv_out(F.silu(self.norm_out(h), inplace=True))
226
+ return h
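Note: a quick shape check for the two modules above; the channel multipliers are illustrative (the VQVAE's actual ch_mult may differ) and the 256x256 input is arbitrary.

import torch
from models.basic_vae import Encoder, Decoder

enc = Encoder(ch=160, ch_mult=(1, 1, 2, 2, 4), z_channels=32, double_z=False)
dec = Decoder(ch=160, ch_mult=(1, 1, 2, 2, 4), z_channels=32)

x = torch.randn(1, 3, 256, 256)
z = enc(x)   # (1, 32, 16, 16): spatial size divided by 2 ** (len(ch_mult) - 1)
y = dec(z)   # (1, 3, 256, 256)
print(z.shape, y.shape)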
models/basic_var.py ADDED
@@ -0,0 +1,406 @@
1
+ import math
2
+ from typing import Optional, List, Union, Tuple
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
+ from models.helpers import DropPath, drop_path
9
+
10
+ from utils.model_args import ModelArgs
11
+ from transformers import AutoImageProcessor, AutoModel
12
+
13
+ # this file only provides the 3 blocks used in VAR transformer
14
+ __all__ = ['FFN', 'AdaLNSelfAttn', 'AdaLNBeforeHead']
15
+
16
+
17
+ # automatically import fused operators
18
+ dropout_add_layer_norm = fused_mlp_func = memory_efficient_attention = flash_attn_func = None
19
+ try:
20
+ from flash_attn.ops.layer_norm import dropout_add_layer_norm
21
+ from flash_attn.ops.fused_dense import fused_mlp_func
22
+ except ImportError: pass
23
+ # automatically import faster attention implementations
24
+ try: from xformers.ops import memory_efficient_attention
25
+ except ImportError: pass
26
+ try: from flash_attn import flash_attn_func # qkv: BLHc, ret: BLHcq
27
+ except ImportError: pass
28
+ try: from torch.nn.functional import scaled_dot_product_attention as slow_attn # q, k, v: BHLc
29
+ except ImportError:
30
+ def slow_attn(query, key, value, scale: float, attn_mask=None, dropout_p=0.0):
31
+ attn = query.mul(scale) @ key.transpose(-2, -1) # BHLc @ BHcL => BHLL
32
+ if attn_mask is not None: attn.add_(attn_mask)
33
+ return (F.dropout(attn.softmax(dim=-1), p=dropout_p, inplace=True) if dropout_p > 0 else attn.softmax(dim=-1)) @ value
34
+
35
+
36
+
37
+
38
+ class ConditionEmbedder(nn.Module):
39
+ """
40
+ Embeds Condition into vector representations. Also handles label dropout for classifier-free guidance.
41
+ """
42
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, vocab_size=16384):
43
+ super().__init__()
44
+ self.cap_proj = MLP(in_features=hidden_size, hidden_features=hidden_size, out_features=hidden_size)
45
+ self.register_buffer("uncond_embedding", torch.zeros(token_num, hidden_size) / hidden_size ** 0.5)
46
+ self.uncond_prob = uncond_prob
47
+
48
+ def token_drop(self, caption, force_drop_ids=None, drop_ids=None):
49
+ """
50
+ Drops labels to enable classifier-free guidance.
51
+ """
52
+ if force_drop_ids is None:
53
+ if drop_ids is None:
54
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
55
+ else:
56
+ drop_ids = force_drop_ids == 1
57
+ if self.uncond_embedding.shape[0] < caption.shape[1]:
58
+ # dynamically tile the unconditional embedding to match the caption length
59
+ repeat_factor = int(caption.shape[1] / self.uncond_embedding.shape[0]) + 1
60
+ extended = self.uncond_embedding.repeat(repeat_factor, 1)[:caption.shape[1]]
61
+ else:
62
+ extended = self.uncond_embedding[:caption.shape[1]]
63
+
64
+ caption = torch.where(drop_ids[:, None, None], extended, caption)
65
+
66
+ # caption = torch.where(drop_ids[:, None, None], self.uncond_embedding[:caption.shape[1]], caption)
67
+ return caption
68
+
69
+ def forward(self, caption, train, force_drop_ids=None, drop_ids=None):
70
+ use_dropout = self.uncond_prob > 0
71
+ if (train and use_dropout) or (force_drop_ids is not None):
72
+ caption = self.token_drop(caption, force_drop_ids, drop_ids)
73
+ embeddings = self.cap_proj(caption)
74
+ return embeddings
75
+
76
+ class MLP(nn.Module):
77
+ def __init__(self, in_features, hidden_features, out_features):
78
+ super().__init__()
79
+ out_features = out_features or in_features
80
+ hidden_features = hidden_features or in_features
81
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
82
+ self.act = nn.GELU(approximate='tanh')
83
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
84
+
85
+ nn.init.zeros_(self.fc1.weight)
86
+ nn.init.zeros_(self.fc2.weight)
87
+
88
+ def forward(self, x):
89
+ x = self.fc1(x)
90
+ x = self.act(x)
91
+ x = self.fc2(x)
92
+ return x
93
+
94
+
95
+ class Dinov2_Adapter(nn.Module):
96
+ def __init__(self, input_dim=1, output_dim=768, attention=False, pool=False, nheads=8, dropout=0.1, adapter_size='small', condition_type='seg'):
97
+ super(Dinov2_Adapter, self).__init__()
98
+ # print(f"Choose adapter size: {adapter_size}")
99
+ # print(f"condition type: {condition_type}")
100
+
101
+ from transformers import logging
102
+ logging.set_verbosity_error()
103
+
104
+
105
+
106
+ self.model = AutoModel.from_pretrained(f'./dinov2_small',local_files_only=True, use_safetensors=False)
107
+ self.condition_type = condition_type
108
+
109
+ def to_patch14(self, input):
110
+ H, W = input.shape[2:]
111
+ new_H = (H // 16) * 14
112
+ new_W = (W // 16) * 14
113
+ if self.condition_type in ['canny', 'seg']:
114
+ output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='nearest') # nearest-neighbor for canny / seg conditions
115
+ else:
116
+ output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='bicubic', align_corners=True) # depth, lineart, hed
117
+ return output
118
+
119
+ def forward(self, x):
120
+ x = self.to_patch14(x)
121
+ x = self.model(x)
122
+ return x.last_hidden_state[:, 1:]
123
+
124
+
125
+ #################################################################################
126
+ # Cross-Attention Injection #
127
+ #################################################################################
128
+ class CrossAttentionInjection(nn.Module):
129
+ def __init__(self, embed_dim, num_heads):
130
+ super().__init__()
131
+ self.query_proj = nn.Linear(embed_dim, embed_dim)
132
+ self.key_proj = nn.Linear(embed_dim, embed_dim)
133
+ self.value_proj = nn.Linear(embed_dim, embed_dim)
134
+ self.out_proj = nn.Linear(embed_dim, embed_dim)
135
+ self.num_heads = num_heads
136
+ self.scale = (embed_dim // num_heads) ** -0.5
137
+
138
+ def forward(self, x, cond_feat):
139
+ """
140
+ x: [B, L, C],主特征序列
141
+ cond_feat: [B, L_cond, C],来自分割图的条件token序列
142
+ """
143
+ B, L, C = x.shape
144
+ H = self.num_heads
145
+ Q = self.query_proj(x).reshape(B, L, H, C // H).transpose(1, 2) # [B, H, L, d]
146
+ K = self.key_proj(cond_feat).reshape(B, -1, H, C // H).transpose(1, 2) # [B, H, Lc, d]
147
+ V = self.value_proj(cond_feat).reshape(B, -1, H, C // H).transpose(1, 2) # [B, H, Lc, d]
148
+
149
+ attn = (Q @ K.transpose(-2, -1)) * self.scale # [B, H, L, Lc]
150
+ attn = attn.softmax(dim=-1)
151
+ out = (attn @ V).transpose(1, 2).reshape(B, L, C) # [B, L, C]
152
+ return self.out_proj(out)
153
+
154
+
155
+
156
+ class FFN(nn.Module):
157
+ def __init__(self, in_features, hidden_features=None, out_features=None, drop=0., fused_if_available=True):
158
+ super().__init__()
159
+ self.fused_mlp_func = fused_mlp_func if fused_if_available else None
160
+ out_features = out_features or in_features
161
+ hidden_features = hidden_features or in_features
162
+ self.fc1 = nn.Linear(in_features, hidden_features)
163
+ self.act = nn.GELU(approximate='tanh')
164
+ self.fc2 = nn.Linear(hidden_features, out_features)
165
+ self.drop = nn.Dropout(drop, inplace=True) if drop > 0 else nn.Identity()
166
+
167
+ def forward(self, x):
168
+ if self.fused_mlp_func is not None:
169
+ return self.drop(self.fused_mlp_func(
170
+ x=x, weight1=self.fc1.weight, weight2=self.fc2.weight, bias1=self.fc1.bias, bias2=self.fc2.bias,
171
+ activation='gelu_approx', save_pre_act=self.training, return_residual=False, checkpoint_lvl=0,
172
+ heuristic=0, process_group=None,
173
+ ))
174
+ else:
175
+ return self.drop(self.fc2( self.act(self.fc1(x)) ))
176
+
177
+ def extra_repr(self) -> str:
178
+ return f'fused_mlp_func={self.fused_mlp_func is not None}'
179
+
180
+
181
+ class SelfAttention(nn.Module):
182
+ def __init__(
183
+ self, block_idx, embed_dim=768, num_heads=12,
184
+ attn_drop=0., proj_drop=0., attn_l2_norm=False, flash_if_available=False,
185
+ ):
186
+ super().__init__()
187
+ assert embed_dim % num_heads == 0
188
+ self.block_idx, self.num_heads, self.head_dim = block_idx, num_heads, embed_dim // num_heads # =64
189
+ self.attn_l2_norm = attn_l2_norm
190
+ if self.attn_l2_norm:
191
+ self.scale = 1
192
+ self.scale_mul_1H11 = nn.Parameter(torch.full(size=(1, self.num_heads, 1, 1), fill_value=4.0).log(), requires_grad=True)
193
+ self.max_scale_mul = torch.log(torch.tensor(100)).item()
194
+ else:
195
+ self.scale = 0.25 / math.sqrt(self.head_dim)
196
+
197
+ self.mat_qkv = nn.Linear(embed_dim, embed_dim * 3, bias=False)
198
+ self.q_bias, self.v_bias = nn.Parameter(torch.zeros(embed_dim)), nn.Parameter(torch.zeros(embed_dim))
199
+ self.register_buffer('zero_k_bias', torch.zeros(embed_dim))
200
+
201
+ self.proj = nn.Linear(embed_dim, embed_dim)
202
+ self.proj_drop = nn.Dropout(proj_drop, inplace=True) if proj_drop > 0 else nn.Identity()
203
+ self.attn_drop: float = attn_drop
204
+ self.using_flash = flash_if_available and flash_attn_func is not None
205
+ # self.using_xform = flash_if_available and memory_efficient_attention is not None
206
+ self.using_xform = False
207
+ # only used during inference
208
+ self.caching, self.cached_k, self.cached_v = False, None, None
209
+
210
+ def kv_caching(self, enable: bool): self.caching, self.cached_k, self.cached_v = enable, None, None
211
+
212
+ # NOTE: attn_bias is None during inference because kv cache is enabled
213
+ def forward(self, x, attn_bias):
214
+ B, L, C = x.shape
215
+
216
+ qkv = F.linear(input=x, weight=self.mat_qkv.weight, bias=torch.cat((self.q_bias, self.zero_k_bias, self.v_bias))).view(B, L, 3, self.num_heads, self.head_dim)
217
+ main_type = qkv.dtype
218
+ # qkv: BL3Hc
219
+
220
+ using_flash = self.using_flash and attn_bias is None and qkv.dtype != torch.float32
221
+ if using_flash or self.using_xform: q, k, v = qkv.unbind(dim=2); dim_cat = 1 # q or k or v: BLHc
222
+ else: q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(dim=0); dim_cat = 2 # q or k or v: BHLc
223
+
224
+ if self.attn_l2_norm:
225
+ scale_mul = self.scale_mul_1H11.clamp_max(self.max_scale_mul).exp()
226
+ if using_flash or self.using_xform: scale_mul = scale_mul.transpose(1, 2) # 1H11 to 11H1
227
+ q = F.normalize(q, dim=-1).mul(scale_mul)
228
+ k = F.normalize(k, dim=-1)
229
+
230
+ if self.caching:
231
+ if self.cached_k is None: self.cached_k = k; self.cached_v = v
232
+ else: k = self.cached_k = torch.cat((self.cached_k, k), dim=dim_cat); v = self.cached_v = torch.cat((self.cached_v, v), dim=dim_cat)
233
+
234
+ dropout_p = self.attn_drop if self.training else 0.0
235
+ if using_flash:
236
+ oup = flash_attn_func(q.to(dtype=main_type), k.to(dtype=main_type), v.to(dtype=main_type), dropout_p=dropout_p, softmax_scale=self.scale).view(B, L, C)
237
+ elif self.using_xform:
238
+ oup = memory_efficient_attention(q.to(dtype=main_type), k.to(dtype=main_type), v.to(dtype=main_type), attn_bias=None if attn_bias is None else attn_bias.to(dtype=main_type).expand(B, self.num_heads, -1, -1), p=dropout_p, scale=self.scale).view(B, L, C)
239
+ else:
240
+ oup = slow_attn(query=q, key=k, value=v, scale=self.scale, attn_mask=attn_bias, dropout_p=dropout_p).transpose(1, 2).reshape(B, L, C)
241
+
242
+ return self.proj_drop(self.proj(oup))
243
+ # attn = (q @ k.transpose(-2, -1)).add_(attn_bias + self.local_rpb()) # BHLc @ BHcL => BHLL
244
+ # attn = self.attn_drop(attn.softmax(dim=-1))
245
+ # oup = (attn @ v).transpose_(1, 2).reshape(B, L, -1) # BHLL @ BHLc = BHLc => BLHc => BLC
246
+
247
+ def extra_repr(self) -> str:
248
+ return f'using_flash={self.using_flash}, using_xform={self.using_xform}, attn_l2_norm={self.attn_l2_norm}'
249
+
250
+ config = ModelArgs()
251
+ class AdaLNSelfAttn(nn.Module):
252
+ def __init__(
253
+ self, block_idx, last_drop_p, embed_dim, cond_dim, shared_aln: bool, norm_layer,
254
+ num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., attn_l2_norm=False,
255
+ flash_if_available=False, fused_if_available=True,depth=16,
256
+ ):
257
+ super(AdaLNSelfAttn, self).__init__()
258
+ self.block_idx, self.last_drop_p, self.C = block_idx, last_drop_p, embed_dim
259
+ self.C, self.D = embed_dim, cond_dim
260
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
261
+ self.attn = SelfAttention(block_idx=block_idx, embed_dim=embed_dim, num_heads=num_heads, attn_drop=attn_drop, proj_drop=drop, attn_l2_norm=attn_l2_norm, flash_if_available=flash_if_available)
262
+ self.ffn = FFN(in_features=embed_dim, hidden_features=round(embed_dim * mlp_ratio), drop=drop, fused_if_available=fused_if_available)
263
+
264
+ self.ln_wo_grad = norm_layer(embed_dim, elementwise_affine=False)
265
+ self.shared_aln = shared_aln
266
+ if self.shared_aln:
267
+ self.ada_gss = nn.Parameter(torch.randn(1, 1, 6, embed_dim) / embed_dim**0.5)
268
+ else:
269
+ lin = nn.Linear(cond_dim, 6*embed_dim)
270
+ self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), lin)
271
+
272
+ self.fused_add_norm_fn = None
273
+
274
+ self.adapter = Dinov2_Adapter(adapter_size=config.adapter_size, condition_type=config.condition_type)
275
+ # freeze the Dinov2 backbone weights
276
+ for p in self.adapter.model.parameters():
277
+ p.requires_grad = False
278
+
279
+ # self.adapter = EVA_Adapter()
280
+ if config.adapter_size == "small":
281
+ self.adapter_mlp = MLP(384, config.dim, config.dim)
282
+ elif config.adapter_size == 'base':
283
+ self.adapter_mlp = MLP(768, config.dim, config.dim)
284
+
285
+ self.condition_embeddings = nn.Embedding(config.vocab_size, config.dim)
286
+ self.condition_mlp = ConditionEmbedder(config.block_size, config.dim, config.class_dropout_prob, config.block_size, config.vocab_size)
287
+ # condition injection layers
288
+ self.condition_layers = torch.nn.ModuleList()
289
+ for layer_id in range(3):
290
+ self.condition_layers.append(MLP(config.dim,config.dim,config.dim))
291
+
292
+ self.layer_internal = depth // 2 # condition-injection interval
293
+ self.control_strength = 1
294
+
295
+ # cross-attention injection module
296
+ self.cross_attn_inject = CrossAttentionInjection(embed_dim=config.dim, num_heads=num_heads)
297
+
298
+
299
+ # NOTE: attn_bias is None during inference because kv cache is enabled
300
+ def forward(self, x, cond_BD, condition, attn_bias, current_step: int, total_steps:int): # C: embed_dim, D: cond_dim
301
+ if self.shared_aln:
302
+ gamma1, gamma2, scale1, scale2, shift1, shift2 = (self.ada_gss + cond_BD).unbind(2) # 116C + B16C =unbind(2)=> 6 B1C
303
+ else:
304
+ gamma1, gamma2, scale1, scale2, shift1, shift2 = self.ada_lin(cond_BD).view(-1, 1, 6, self.C).unbind(2)
305
+
306
+ # --------- Injection strategy 1: additive condition injection (e.g. inject every N blocks) ---------
307
+ # if condition is not None:
308
+ # condition_embeddings = self.adapter(condition)
309
+ # condition_embeddings = self.adapter_mlp(condition_embeddings)
310
+ # self.condition_token = self.condition_mlp(condition_embeddings,train=self.training)
311
+
312
+ # # self.block_idx is the index of the current block
313
+ # if self.block_idx % self.layer_internal == 0:
314
+ # cond_feat = self.condition_layers[self.block_idx // self.layer_internal](self.condition_token) # [B, 1, C]
315
+ # # cond_feat: [B, Lc, C] → [B, Lx, C]
316
+ # cond_feat = cond_feat.mean(dim=1, keepdim=True).expand(-1, x.shape[1], -1)
317
+ # x = x + self.control_strength * cond_feat
318
+
319
+ # # --------- Injection strategy 2: cross-attention injection at selected blocks (e.g. every N blocks) ---------
320
+ # if condition is not None:
321
+ # condition_embeddings = self.adapter(condition)
322
+ # condition_embeddings = self.adapter_mlp(condition_embeddings)
323
+ # self.condition_token = self.condition_mlp(condition_embeddings,train=self.training)
324
+
325
+ # if self.block_idx % self.layer_internal == 0:
326
+ # cond_feat = self.condition_layers[self.block_idx // self.layer_internal](self.condition_token)
327
+ # cond_feat = cond_feat.mean(dim=1, keepdim=True).expand(-1, x.shape[1], -1)
328
+ # x = x + self.control_strength * cond_feat
329
+
330
+ # # cross-attention: x attends to condition token
331
+ # # query: x, key/value: condition_token
332
+ # cross_attn_out = self.cross_attn_inject(query=x, key=self.condition_token, value=self.condition_token)
333
+ # x = x + self.control_strength * cross_attn_out
334
+
335
+
336
+ # --------- Injection strategy 3: injection-strength control ---------
337
+ # scheduled by training step:
338
+ if condition is not None:
339
+ condition_embeddings = self.adapter(condition)
340
+ condition_embeddings = self.adapter_mlp(condition_embeddings)
341
+ self.condition_token = self.condition_mlp(condition_embeddings,train=self.training)
342
+
343
+ cond_feat = self.condition_layers[self.block_idx // self.layer_internal](self.condition_token)
344
+ cond_feat = cond_feat.mean(dim=1, keepdim=True).expand(-1, x.shape[1], -1)
345
+
346
+ # cross_attn_out = self.cross_attn_inject(x, key=self.condition_token, value=self.condition_token)
347
+ cross_attn_out = self.cross_attn_inject(x, self.condition_token)
348
+
349
+ if current_step is not None:
350
+ progress = min(current_step / total_steps, 1.0)
351
+ alpha = 0.5 * (1 + math.cos(math.pi * progress))
352
+ else:
353
+ alpha = 1.0
354
+
355
+ x = x + alpha * cross_attn_out
356
+
357
+ # --------- Injection strategy 4: region-mask control ---------
358
+ # early in generation, edge information could get a larger weight to help lay out the image contours
359
+ # later in generation, segmentation control could matter more, to keep region semantics consistent
360
+ # progress = current_step / total_steps
361
+
362
+ # alpha_edge = 1.0 - progress # edge control decays over time
363
+ # alpha_seg = progress # segmentation control grows over time
364
+
365
+ # edge_feat = self.edge_adapter(edge_map) # these two adapters still need to be implemented
366
+ # seg_feat = self.seg_adapter(seg_map) # needs an edge-extraction map plus a model to encode it (a ViT?)
367
+
368
+ # # inject both features into the model
369
+ # x = x + alpha_edge * self.cross_attn_edge(x, edge_feat)
370
+ # x = x + alpha_seg * self.cross_attn_seg(x, seg_feat)
371
+
372
+
373
+ # --------- Injection strategy 5: stage-wise control ---------
374
+
375
+ # gradually strengthen/weaken the injection across training epochs: warm-up, cosine decay, or stage-wise injection
376
+
377
+ # if self.training:
378
+ # if current_epoch < 10:
379
+ # alpha = 0.1
380
+ # elif current_epoch < 30:
381
+ # alpha = 0.5
382
+ # else:
383
+ # alpha = 1.0
384
+ # x = x + alpha * cross_attn_out
385
+
386
+
387
+ # --------- attention + FFN ---------
388
+ x = x + self.drop_path(self.attn( self.ln_wo_grad(x).mul(scale1.add(1)).add_(shift1), attn_bias=attn_bias ).mul_(gamma1))
389
+ x = x + self.drop_path(self.ffn( self.ln_wo_grad(x).mul(scale2.add(1)).add_(shift2) ).mul(gamma2)) # this mul(gamma2) cannot be in-placed when FusedMLP is used
390
+ return x
391
+
392
+
393
+ def extra_repr(self) -> str:
394
+ return f'shared_aln={self.shared_aln}'
395
+
396
+
397
+ class AdaLNBeforeHead(nn.Module):
398
+ def __init__(self, C, D, norm_layer): # C: embed_dim, D: cond_dim
399
+ super().__init__()
400
+ self.C, self.D = C, D
401
+ self.ln_wo_grad = norm_layer(C, elementwise_affine=False)
402
+ self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), nn.Linear(D, 2*C))
403
+
404
+ def forward(self, x_BLC: torch.Tensor, cond_BD: torch.Tensor):
405
+ scale, shift = self.ada_lin(cond_BD).view(-1, 1, 2, self.C).unbind(2)
406
+ return self.ln_wo_grad(x_BLC).mul(scale.add(1)).add_(shift)
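Note: the step-scheduled injection strength in strategy 3 above is a cosine decay from 1 to 0 over training; a standalone sketch of the same schedule:

import math

def injection_alpha(current_step, total_steps):
    # cosine decay of the condition-injection strength, mirroring AdaLNSelfAttn.forward
    if current_step is None:
        return 1.0
    progress = min(current_step / total_steps, 1.0)
    return 0.5 * (1 + math.cos(math.pi * progress))

print([round(injection_alpha(s, 100), 3) for s in (0, 25, 50, 75, 100)])
# [1.0, 0.854, 0.5, 0.146, 0.0]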
models/helpers.py ADDED
@@ -0,0 +1,59 @@
1
+ import torch
2
+ from torch import nn as nn
3
+ from torch.nn import functional as F
4
+
5
+
6
+ def sample_with_top_k_top_p_(logits_BlV: torch.Tensor, top_k: int = 0, top_p: float = 0.0, rng=None, num_samples=1) -> torch.Tensor: # return idx, shaped (B, l)
7
+ B, l, V = logits_BlV.shape
8
+ if top_k > 0:
9
+ idx_to_remove = logits_BlV < logits_BlV.topk(top_k, largest=True, sorted=False, dim=-1)[0].amin(dim=-1, keepdim=True)
10
+ logits_BlV.masked_fill_(idx_to_remove, -torch.inf)
11
+ if top_p > 0:
12
+ sorted_logits, sorted_idx = logits_BlV.sort(dim=-1, descending=False)
13
+ sorted_idx_to_remove = sorted_logits.softmax(dim=-1).cumsum_(dim=-1) <= (1 - top_p)
14
+ sorted_idx_to_remove[..., -1:] = False
15
+ logits_BlV.masked_fill_(sorted_idx_to_remove.scatter(sorted_idx.ndim - 1, sorted_idx, sorted_idx_to_remove), -torch.inf)
16
+ # sample (have to squeeze cuz torch.multinomial can only be used for 2D tensor)
17
+ replacement = num_samples >= 0
18
+ num_samples = abs(num_samples)
19
+ return torch.multinomial(logits_BlV.softmax(dim=-1).view(-1, V), num_samples=num_samples, replacement=replacement, generator=rng).view(B, l, num_samples)
20
+
21
+
22
+ def gumbel_softmax_with_rng(logits: torch.Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10, dim: int = -1, rng: torch.Generator = None) -> torch.Tensor:
23
+ if rng is None:
24
+ return F.gumbel_softmax(logits=logits, tau=tau, hard=hard, eps=eps, dim=dim)
25
+
26
+ gumbels = (-torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_(generator=rng).log())
27
+ gumbels = (logits + gumbels) / tau
28
+ y_soft = gumbels.softmax(dim)
29
+
30
+ if hard:
31
+ index = y_soft.max(dim, keepdim=True)[1]
32
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
33
+ ret = y_hard - y_soft.detach() + y_soft
34
+ else:
35
+ ret = y_soft
36
+ return ret
37
+
38
+
39
+ def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): # taken from timm
40
+ if drop_prob == 0. or not training: return x
41
+ keep_prob = 1 - drop_prob
42
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
43
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
44
+ if keep_prob > 0.0 and scale_by_keep:
45
+ random_tensor.div_(keep_prob)
46
+ return x * random_tensor
47
+
48
+
49
+ class DropPath(nn.Module): # taken from timm
50
+ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
51
+ super(DropPath, self).__init__()
52
+ self.drop_prob = drop_prob
53
+ self.scale_by_keep = scale_by_keep
54
+
55
+ def forward(self, x):
56
+ return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
57
+
58
+ def extra_repr(self):
59
+ return f'(drop_prob=...)'
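Note: a minimal usage sketch of the top-k / top-p sampler above; the shapes and the k/p values are illustrative.

import torch
from models.helpers import sample_with_top_k_top_p_

logits = torch.randn(2, 5, 4096)  # (B, l, V)
# the helper masks logits in place, hence the clone
idx = sample_with_top_k_top_p_(logits.clone(), top_k=600, top_p=0.96, num_samples=1)
print(idx.shape)  # torch.Size([2, 5, 1])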
models/new_gpt_t2i.py ADDED
@@ -0,0 +1,569 @@
1
+ # Modified from:
2
+ # VQGAN: https://github.com/CompVis/taming-transformers/blob/master/taming/modules/transformer/mingpt.py
3
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/models.py
4
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
5
+ # llama: https://github.com/facebookresearch/llama/blob/main/llama/model.py
6
+ # gpt-fast: https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
7
+ # PixArt: https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
8
+ from dataclasses import dataclass
9
+ from typing import Optional, List
10
+
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ from torch.nn import functional as F
15
+ from utils.drop_path import DropPath
16
+ from autoregressive.models.vit_adapter import ViT_Adapter
17
+ from autoregressive.models.dinov2_adapter import Dinov2_Adapter
18
+
19
+
20
+ def get_causal_mask(seq_length):
21
+ causal = torch.triu(torch.ones(seq_length, seq_length), diagonal=1).type(torch.bool)
22
+ mask = torch.zeros(seq_length, seq_length)
23
+ mask = mask.masked_fill(causal, float('-inf'))  # float mask: -inf above the diagonal, 0 elsewhere
24
+ return mask
25
+
26
+ def find_multiple(n: int, k: int):
27
+ if n % k == 0:
28
+ return n
29
+ return n + k - (n % k)
30
+
31
+ @dataclass
32
+ class ModelArgs:
33
+ dim: int = 4096
34
+ n_layer: int = 32
35
+ n_head: int = 32
36
+ n_kv_head: Optional[int] = None
37
+ multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
38
+ ffn_dim_multiplier: Optional[float] = None
39
+ rope_base: float = 10000
40
+ norm_eps: float = 1e-5
41
+ initializer_range: float = 0.02
42
+
43
+ token_dropout_p: float = 0.1
44
+ attn_dropout_p: float = 0.0
45
+ resid_dropout_p: float = 0.1
46
+ ffn_dropout_p: float = 0.1
47
+ drop_path_rate: float = 0.0
48
+
49
+ num_classes: int = 1000
50
+ caption_dim: int = 2048
51
+ class_dropout_prob: float = 0.1
52
+ model_type: str = 'c2i'
53
+
54
+ vocab_size: int = 16384
55
+ cls_token_num: int = 1
56
+ block_size: int = 256
57
+ max_batch_size: int = 32
58
+ max_seq_len: int = 2048
59
+ adapter_size: str = 'small'
60
+ condition_type: str = 'canny'
61
+
62
+
63
+
64
+ #################################################################################
65
+ # Embedding Layers for Class Labels #
66
+ #################################################################################
67
+ class LabelEmbedder(nn.Module):
68
+ """
69
+ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
70
+ """
71
+ def __init__(self, num_classes, hidden_size, dropout_prob):
72
+ super().__init__()
73
+ use_cfg_embedding = dropout_prob > 0
74
+ self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
75
+ self.num_classes = num_classes
76
+ self.dropout_prob = dropout_prob
77
+
78
+ def token_drop(self, labels, force_drop_ids=None):
79
+ """
80
+ Drops labels to enable classifier-free guidance.
81
+ """
82
+ if force_drop_ids is None:
83
+ drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
84
+ else:
85
+ drop_ids = force_drop_ids == 1
86
+ labels = torch.where(drop_ids, self.num_classes, labels)
87
+ return labels, drop_ids
88
+
89
+ def forward(self, labels, train, force_drop_ids=None):
90
+ use_dropout = self.dropout_prob > 0
91
+ if (train and use_dropout) or (force_drop_ids is not None):
92
+ labels,drop_ids = self.token_drop(labels, force_drop_ids)
93
+ embeddings = self.embedding_table(labels).unsqueeze(1)
94
+ if (train and use_dropout) or (force_drop_ids is not None):
95
+ return embeddings,drop_ids
96
+ else:
97
+ return embeddings
98
+
99
+
100
+ class ConditionEmbedder(nn.Module):
101
+ """
102
+ Embeds Condition into vector representations. Also handles label dropout for classifier-free guidance.
103
+ """
104
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, vocab_size=16384):
105
+ super().__init__()
106
+ self.cap_proj = MLP(in_features=hidden_size, hidden_features=hidden_size, out_features=hidden_size)
107
+ self.register_buffer("uncond_embedding", torch.zeros(token_num, hidden_size) / hidden_size ** 0.5)
108
+ self.uncond_prob = uncond_prob
109
+
110
+ def token_drop(self, caption, force_drop_ids=None, drop_ids=None):
111
+ """
112
+ Drops labels to enable classifier-free guidance.
113
+ """
114
+ if force_drop_ids is None:
115
+ if drop_ids is None:
116
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
117
+ else:
118
+ drop_ids = force_drop_ids == 1
119
+
120
+ caption = torch.where(drop_ids[:, None, None], self.uncond_embedding[:caption.shape[1]], caption)
121
+ return caption
122
+
123
+ def forward(self, caption, train, force_drop_ids=None, drop_ids=None):
124
+ use_dropout = self.uncond_prob > 0
125
+ if (train and use_dropout) or (force_drop_ids is not None):
126
+ caption = self.token_drop(caption, force_drop_ids, drop_ids)
127
+ embeddings = self.cap_proj(caption)
128
+ return embeddings
129
+
130
+ #################################################################################
131
+ # Embedding Layers for Text Feature #
132
+ #################################################################################
133
+ class CaptionEmbedder(nn.Module):
134
+ """
135
+ Embeds text caption into vector representations. Also handles label dropout for classifier-free guidance.
136
+ """
137
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120):
138
+ super().__init__()
139
+ self.cap_proj = MLP(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size)
140
+ self.register_buffer("uncond_embedding", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))
141
+ self.uncond_prob = uncond_prob
142
+
143
+ def token_drop(self, caption, force_drop_ids=None):
144
+ """
145
+ Drops labels to enable classifier-free guidance.
146
+ """
147
+ if force_drop_ids is None:
148
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
149
+ else:
150
+ drop_ids = force_drop_ids == 1
151
+ caption = torch.where(drop_ids[:, None, None], self.uncond_embedding, caption)
152
+ return caption, drop_ids
153
+
154
+ def forward(self, caption, train, force_drop_ids=None):
155
+ use_dropout = self.uncond_prob > 0
156
+ if (train and use_dropout) or (force_drop_ids is not None):
157
+ caption, drop_ids = self.token_drop(caption, force_drop_ids)
158
+ embeddings = self.cap_proj(caption)
159
+ if (train and use_dropout) or (force_drop_ids is not None):
160
+ return embeddings,drop_ids
161
+ else:
162
+ return embeddings
163
+
164
+
165
+ class MLP(nn.Module):
166
+ def __init__(self, in_features, hidden_features, out_features):
167
+ super().__init__()
168
+ out_features = out_features or in_features
169
+ hidden_features = hidden_features or in_features
170
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
171
+ self.act = nn.GELU(approximate='tanh')
172
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
173
+
174
+ nn.init.zeros_(self.fc1.weight)
175
+ nn.init.zeros_(self.fc2.weight)
176
+
177
+ def forward(self, x):
178
+ x = self.fc1(x)
179
+ x = self.act(x)
180
+ x = self.fc2(x)
181
+ return x
182
+
183
+
184
+ #################################################################################
185
+ # GPT Model #
186
+ #################################################################################
187
+ class RMSNorm(torch.nn.Module):
188
+ def __init__(self, dim: int, eps: float = 1e-5):
189
+ super().__init__()
190
+ self.eps = eps
191
+ self.weight = nn.Parameter(torch.ones(dim))
192
+
193
+ def _norm(self, x):
194
+ return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
195
+
196
+ def forward(self, x):
197
+ output = self._norm(x.float()).type_as(x)
198
+ return output * self.weight
199
+
200
+
201
+ class FeedForward(nn.Module):
202
+ def __init__(self, config: ModelArgs):
203
+ super().__init__()
204
+ hidden_dim = 4 * config.dim
205
+ hidden_dim = int(2 * hidden_dim / 3)
206
+ # custom dim factor multiplier
207
+ if config.ffn_dim_multiplier is not None:
208
+ hidden_dim = int(config.ffn_dim_multiplier * hidden_dim)
209
+ hidden_dim = find_multiple(hidden_dim, config.multiple_of)
210
+
211
+ self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
212
+ self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)
213
+ self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
214
+ self.ffn_dropout = nn.Dropout(config.ffn_dropout_p)
215
+
216
+ def forward(self, x):
217
+ return self.ffn_dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
218
+
219
+
220
+ class KVCache(nn.Module):
221
+ def __init__(self, max_batch_size, max_seq_length, n_head, head_dim, dtype):
222
+ super().__init__()
223
+ cache_shape = (max_batch_size, n_head, max_seq_length, head_dim)
224
+ self.register_buffer('k_cache', torch.zeros(cache_shape, dtype=dtype))
225
+ self.register_buffer('v_cache', torch.zeros(cache_shape, dtype=dtype))
226
+
227
+ def update(self, input_pos, k_val, v_val):
228
+ # input_pos: [S], k_val: [B, H, S, D]
229
+ assert input_pos.shape[0] == k_val.shape[2]
230
+ k_out = self.k_cache
231
+ v_out = self.v_cache
232
+ k_out[:, :, input_pos] = k_val
233
+ v_out[:, :, input_pos] = v_val
234
+
235
+ return k_out, v_out
236
+
237
+
238
+ class Attention(nn.Module):
239
+ def __init__(self, config: ModelArgs):
240
+ super().__init__()
241
+ assert config.dim % config.n_head == 0
242
+ self.dim = config.dim
243
+ self.head_dim = config.dim // config.n_head
244
+ self.n_head = config.n_head
245
+ self.n_kv_head = config.n_kv_head if config.n_kv_head is not None else config.n_head
246
+ total_kv_dim = (self.n_head + 2 * self.n_kv_head) * self.head_dim
247
+
248
+ # key, query, value projections for all heads, but in a batch
249
+ self.wqkv = nn.Linear(config.dim, total_kv_dim, bias=False)
250
+ self.wo = nn.Linear(config.dim, config.dim, bias=False)
251
+ self.kv_cache = None
252
+
253
+ # regularization
254
+ self.attn_dropout_p = config.attn_dropout_p
255
+ self.resid_dropout = nn.Dropout(config.resid_dropout_p)
256
+
257
+ def forward(
258
+ self, x: torch.Tensor, freqs_cis: torch.Tensor = None,
259
+ input_pos: Optional[torch.Tensor] = None,
260
+ mask: Optional[torch.Tensor] = None
261
+ ):
262
+ bsz, seqlen, _ = x.shape
263
+ kv_size = self.n_kv_head * self.head_dim
264
+ xq, xk, xv = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)
265
+
266
+ xq = xq.view(bsz, seqlen, self.n_head, self.head_dim)
267
+ xk = xk.view(bsz, seqlen, self.n_kv_head, self.head_dim)
268
+ xv = xv.view(bsz, seqlen, self.n_kv_head, self.head_dim)
269
+
270
+ xq = apply_rotary_emb(xq, freqs_cis)
271
+ xk = apply_rotary_emb(xk, freqs_cis)
272
+
273
+ xq, xk, xv = map(lambda x: x.transpose(1, 2), (xq, xk, xv))
274
+
275
+ if self.kv_cache is not None:
276
+ keys, values = self.kv_cache.update(input_pos, xk, xv)
277
+ else:
278
+ keys, values = xk, xv
279
+ keys = keys.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
280
+ values = values.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
281
+
282
+ output = F.scaled_dot_product_attention(
283
+ xq, keys, values,
284
+ attn_mask=mask,
285
+ is_causal=True if mask is None else False, # is_causal=False is for KV cache
286
+ dropout_p=self.attn_dropout_p if self.training else 0)
287
+
288
+ output = output.transpose(1, 2).contiguous().view(bsz, seqlen, self.dim)
289
+
290
+ output = self.resid_dropout(self.wo(output))
291
+ return output
292
+
293
+
294
+ class TransformerBlock(nn.Module):
295
+ def __init__(self, config: ModelArgs, drop_path: float):
296
+ super().__init__()
297
+ self.attention = Attention(config)
298
+ self.feed_forward = FeedForward(config)
299
+ self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
300
+ self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
301
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
302
+
303
+ def forward(
304
+ self, x: torch.Tensor, freqs_cis: torch.Tensor, start_pos: int, mask: Optional[torch.Tensor] = None):
305
+ h = x + self.drop_path(self.attention(self.attention_norm(x), freqs_cis, start_pos, mask))
306
+ out = h + self.drop_path(self.feed_forward(self.ffn_norm(h)))
307
+ return out
308
+
309
+
310
+ class Transformer(nn.Module):
311
+ def __init__(self, config: ModelArgs):
312
+ super().__init__()
313
+ self.config = config
314
+ self.vocab_size = config.vocab_size
315
+ self.n_layer = config.n_layer
316
+ self.block_size = config.block_size
317
+ self.num_classes = config.num_classes
318
+ self.model_type = config.model_type
319
+ self.cls_token_num = config.cls_token_num
320
+ self.layer_internal = config.n_layer // 3
321
+ # self.adapter = Adapter(output_dim=768)
322
+ # self.adapter = ViT_Adapter()
323
+ # self.adapter = DeiT_Adapter()
324
+ self.adapter = Dinov2_Adapter(adapter_size=config.adapter_size, condition_type=config.condition_type)
325
+ # self.adapter = EVA_Adapter()
326
+ if config.adapter_size == "small":
327
+ self.adapter_mlp = MLP(384, config.dim, config.dim)
328
+ elif config.adapter_size == 'base':
329
+ self.adapter_mlp = MLP(768, config.dim, config.dim)
330
+
331
+ if self.model_type == 'c2i':
332
+ self.cls_embedding = LabelEmbedder(config.num_classes, config.dim, config.class_dropout_prob)
333
+ elif self.model_type == 't2i':
334
+ self.cls_embedding = CaptionEmbedder(config.caption_dim, config.dim, config.class_dropout_prob)
335
+ else:
336
+ raise Exception("please check model type")
337
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
338
+ self.tok_dropout = nn.Dropout(config.token_dropout_p)
339
+
340
+ self.condition_embeddings = nn.Embedding(config.vocab_size, config.dim)
341
+ self.condition_mlp = ConditionEmbedder(self.block_size, config.dim, config.class_dropout_prob, self.block_size, config.vocab_size)
342
+ self.condition_layers = torch.nn.ModuleList()
343
+ for layer_id in range(3):
344
+ self.condition_layers.append(MLP(config.dim,config.dim,config.dim))
345
+
346
+ # transformer blocks
347
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.n_layer)]
348
+ self.layers = torch.nn.ModuleList()
349
+ for layer_id in range(config.n_layer):
350
+ self.layers.append(TransformerBlock(config, dpr[layer_id]))
351
+
352
+ # output layer
353
+ self.norm = RMSNorm(config.dim, eps=config.norm_eps)
354
+ self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
355
+
356
+ # 2d rotary pos embedding
357
+ grid_size = int(self.block_size ** 0.5)
358
+ assert grid_size * grid_size == self.block_size
359
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num)
360
+
361
+ # KVCache
362
+ self.max_batch_size = -1
363
+ self.max_seq_length = -1
364
+
365
+ self.initialize_weights()
366
+ self.condition_token = None
367
+ self.mask = get_causal_mask(256)
368
+ self.global_token = None
369
+
370
+ self.control_strength = 1
371
+
372
+ def initialize_weights(self):
373
+ # Initialize nn.Linear and nn.Embedding
374
+ self.apply(self._init_weights)
375
+
376
+ # Zero-out output layers:
377
+ nn.init.constant_(self.output.weight, 0)
378
+
379
+
380
+
381
+ def _init_weights(self, module):
382
+ std = self.config.initializer_range
383
+ if isinstance(module, nn.Linear):
384
+ module.weight.data.normal_(mean=0.0, std=std)
385
+ if module.bias is not None:
386
+ module.bias.data.zero_()
387
+ elif isinstance(module, nn.Embedding):
388
+ module.weight.data.normal_(mean=0.0, std=std)
389
+
390
+
391
+ def setup_caches(self, max_batch_size, max_seq_length, dtype):
392
+ # if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:
393
+ # return
394
+ head_dim = self.config.dim // self.config.n_head
395
+ max_seq_length = find_multiple(max_seq_length, 8) #
396
+ self.max_seq_length = max_seq_length
397
+ self.max_batch_size = max_batch_size
398
+ for b in self.layers:
399
+ b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_head, head_dim, dtype)
400
+
401
+ causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool))
402
+ self.causal_mask = causal_mask.unsqueeze(0).repeat(self.max_batch_size, 1, 1)
403
+ grid_size = int(self.config.block_size ** 0.5)
404
+ assert grid_size * grid_size == self.block_size
405
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num)
406
+
407
+
408
+
409
+ def forward(
410
+ self,
411
+ idx: torch.Tensor,
412
+ cond_idx: torch.Tensor, # cond_idx_or_embed
413
+ input_pos: Optional[torch.Tensor] = None,
414
+ targets: Optional[torch.Tensor] = None,
415
+ mask: Optional[torch.Tensor] = None,
416
+ valid: Optional[torch.Tensor] = None,
417
+ condition: Optional[torch.Tensor] = None,
418
+ control_strength: Optional[int] = 1
419
+ ):
420
+ if idx is not None and cond_idx is not None: # training or naive inference
421
+ cond_embeddings, drop_ids = self.cls_embedding(cond_idx, train=self.training) # build the conditioning embeddings
422
+ cond_embeddings = cond_embeddings[:,:self.cls_token_num]
423
+ token_embeddings = self.tok_embeddings(idx)
424
+ if condition is not None:
425
+ condition_embeddings = self.adapter(condition)
426
+ condition_embeddings = self.adapter_mlp(condition_embeddings)
427
+ self.condition_token = self.condition_mlp(condition_embeddings,train=self.training, drop_ids=drop_ids)
428
+ token_embeddings = torch.cat((cond_embeddings, token_embeddings), dim=1)
429
+
430
+ h = self.tok_dropout(token_embeddings)
431
+ self.freqs_cis = self.freqs_cis.to(h.device)
432
+ else:
433
+ if cond_idx is not None: # prefill in inference
434
+ self.control_strength = control_strength
435
+ token_embeddings = self.cls_embedding(cond_idx, train=self.training)
436
+ token_embeddings = token_embeddings[:,:self.cls_token_num]
437
+ if condition is not None:
438
+ condition_embeddings = self.condition_mlp(condition, train=self.training)#.to(torch.bfloat16),train=self.training)
439
+ self.condition_token = condition_embeddings
440
+ self.condition_token = [self.condition_layers[0](self.condition_token),
441
+ self.condition_layers[1](self.condition_token),
442
+ self.condition_layers[2](self.condition_token)]
443
+
444
+ else: # decode_n_tokens(kv cache) in inference
445
+ token_embeddings = self.tok_embeddings(idx)
446
+ bs = token_embeddings.shape[0]
447
+ mask = self.causal_mask[:bs, None, input_pos]
448
+ h = self.tok_dropout(token_embeddings)
449
+ self.freqs_cis = self.freqs_cis
450
+
451
+ if self.training:
452
+ freqs_cis = self.freqs_cis[:token_embeddings.shape[1]]
453
+ else:
454
+ freqs_cis = self.freqs_cis[input_pos]
455
+ # transformer blocks
456
+ for i, layer in enumerate(self.layers):
457
+ if i%self.layer_internal == 0:
458
+ if self.training:
459
+ h[:, self.cls_token_num-1:] = h[:, self.cls_token_num-1:] + self.condition_layers[i//self.layer_internal](self.condition_token)
460
+ else:
461
+ if len(input_pos)>1:
462
+ # h[:, -1:] = h[:, -1:] + self.condition_layers[i//self.layer_internal](self.condition_token[:,0:1])
463
+ h[:,-1:] = h[:, -1:] + self.control_strength*self.condition_token[i//self.layer_internal][:,0:1]
464
+ else:
465
+ # h = h + self.condition_layers[i//self.layer_internal](self.condition_token[:,input_pos-self.cls_token_num+1])
466
+ h = h + self.control_strength*self.condition_token[i//self.layer_internal][:,input_pos-self.cls_token_num+1]
467
+ h = layer(h, freqs_cis, input_pos, mask)
468
+ # output layers
469
+ h = self.norm(h)
470
+ logits = self.output(h).float()
471
+
472
+ if self.training:
473
+ logits = logits[:, self.cls_token_num - 1:].contiguous()
474
+ # if we are given some desired targets also calculate the loss
475
+ loss = None
476
+ if valid is not None:
477
+ loss_all = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), reduction='none')
478
+ valid_all = valid[:,None].repeat(1, targets.shape[1]).view(-1)
479
+ loss = (loss_all * valid_all).sum() / max(valid_all.sum(), 1)
480
+ elif targets is not None:
481
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
482
+
483
+
484
+ return logits, loss
485
+
486
+
487
+ def get_fsdp_wrap_module_list(self) -> List[nn.Module]:
488
+ return list(self.layers)
489
+
490
+
491
+
492
+ #################################################################################
493
+ # Rotary Positional Embedding Functions #
494
+ #################################################################################
495
+ # https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
496
+ def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000, cls_token_num=120):
497
+ freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
498
+ t = torch.arange(seq_len, device=freqs.device)
499
+ freqs = torch.outer(t, freqs) # (seq_len, head_dim // 2)
500
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
501
+ cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1) # (cls_token_num+seq_len, head_dim // 2, 2)
502
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+seq_len, head_dim // 2, 2)
503
+ return cond_cache
504
+
505
+
506
+ def precompute_freqs_cis_2d(grid_size: int, n_elem: int, base: int = 10000, cls_token_num=120):
507
+ # split the dimension into half, one for x and one for y
508
+ half_dim = n_elem // 2
509
+ freqs = 1.0 / (base ** (torch.arange(0, half_dim, 2)[: (half_dim // 2)].float() / half_dim))
510
+ t = torch.arange(grid_size, device=freqs.device)
511
+ freqs = torch.outer(t, freqs) # (grid_size, head_dim // 2)
512
+ freqs_grid = torch.concat([
513
+ freqs[:, None, :].expand(-1, grid_size, -1),
514
+ freqs[None, :, :].expand(grid_size, -1, -1),
515
+ ], dim=-1) # (grid_size, grid_size, head_dim // 2)
516
+ cache_grid = torch.stack([torch.cos(freqs_grid), torch.sin(freqs_grid)], dim=-1) # (grid_size, grid_size, head_dim // 2, 2)
517
+ cache = cache_grid.flatten(0, 1)
518
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+grid_size**2, head_dim // 2, 2)
519
+ return cond_cache
520
+
521
+
522
+ def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor):
523
+ # x: (bs, seq_len, n_head, head_dim)
524
+ # freqs_cis (seq_len, head_dim // 2, 2)
525
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2) # (bs, seq_len, n_head, head_dim//2, 2)
526
+ freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2) # (1, seq_len, 1, head_dim//2, 2)
527
+ x_out2 = torch.stack([
528
+ xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
529
+ xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
530
+ ], dim=-1)
531
+ x_out2 = x_out2.flatten(3)
532
+ return x_out2.type_as(x)
533
+
534
+
535
+
536
+ #################################################################################
537
+ # GPT Configs #
538
+ #################################################################################
539
+ ### text-conditional
540
+ def GPT_7B(**kwargs):
541
+ return Transformer(ModelArgs(n_layer=32, n_head=32, dim=4096, **kwargs)) # 6.6B
542
+
543
+ def GPT_3B(**kwargs):
544
+ return Transformer(ModelArgs(n_layer=24, n_head=32, dim=3200, **kwargs)) # 3.1B
545
+
546
+ def GPT_1B(**kwargs):
547
+ return Transformer(ModelArgs(n_layer=22, n_head=32, dim=2048, **kwargs)) # 1.2B
548
+
549
+ ### class-conditional
550
+ def GPT_XXXL(**kwargs):
551
+ return Transformer(ModelArgs(n_layer=48, n_head=40, dim=2560, **kwargs)) # 3.9B
552
+
553
+ def GPT_XXL(**kwargs):
554
+ return Transformer(ModelArgs(n_layer=48, n_head=24, dim=1536, **kwargs)) # 1.4B
555
+
556
+ def GPT_XL(**kwargs):
557
+ return Transformer(ModelArgs(n_layer=36, n_head=20, dim=1280, **kwargs)) # 775M
558
+
559
+ def GPT_L(**kwargs):
560
+ return Transformer(ModelArgs(n_layer=24, n_head=16, dim=1024, **kwargs)) # 343M
561
+
562
+ def GPT_B(**kwargs):
563
+ return Transformer(ModelArgs(n_layer=12, n_head=12, dim=768, **kwargs)) # 111M
564
+
565
+
566
+ GPT_models = {
567
+ 'GPT-B': GPT_B, 'GPT-L': GPT_L, 'GPT-XL': GPT_XL, 'GPT-XXL': GPT_XXL, 'GPT-XXXL': GPT_XXXL,
568
+ 'GPT-1B': GPT_1B, 'GPT-3B': GPT_3B, 'GPT-7B': GPT_7B,
569
+ }
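
Before the next file: a minimal, self-contained sketch of the KV-cache update pattern that the decode path above relies on. This is an illustration with hypothetical shapes and names, not the repository's `KVCache` class: positions listed in `input_pos` are overwritten in place, everything else in the cache is preserved.

```python
import torch

# Toy KV cache with the same update rule as KVCache.update above.
B, H, S_max, D = 2, 4, 8, 16  # batch, heads, max sequence length, head dim
k_cache = torch.zeros(B, H, S_max, D)
v_cache = torch.zeros(B, H, S_max, D)

def update(input_pos, k_val, v_val):
    # input_pos: [S] positions being written; k_val / v_val: [B, H, S, D]
    k_cache[:, :, input_pos] = k_val
    v_cache[:, :, input_pos] = v_val
    return k_cache, v_cache

# prefill three tokens, then decode a single token at position 3
update(torch.arange(3), torch.randn(B, H, 3, D), torch.randn(B, H, 3, D))
k, v = update(torch.tensor([3]), torch.randn(B, H, 1, D), torch.randn(B, H, 1, D))
print(k.shape)  # torch.Size([2, 4, 8, 16]); positions 0..3 are now filled
```

Attention then reads the full cache through an explicit boolean mask rather than `is_causal`, which is why the code above disables `is_causal` whenever a mask is passed.
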
models/quant.py ADDED
@@ -0,0 +1,243 @@
1
+ from typing import List, Optional, Sequence, Tuple, Union
2
+
3
+ import numpy as np
4
+ import torch
5
+ from torch import distributed as tdist, nn as nn
6
+ from torch.nn import functional as F
7
+
8
+ import dist
9
+
10
+
11
+ # this file only provides the VectorQuantizer2 used in VQVAE
12
+ __all__ = ['VectorQuantizer2',]
13
+
14
+
15
+ class VectorQuantizer2(nn.Module):
16
+ # VQGAN originally use beta=1.0, never tried 0.25; SD seems using 0.25
17
+ def __init__(
18
+ self, vocab_size, Cvae, using_znorm, beta: float = 0.25,
19
+ default_qresi_counts=0, v_patch_nums=None, quant_resi=0.5, share_quant_resi=4, # share_quant_resi: args.qsr
20
+ ):
21
+ super().__init__()
22
+ self.vocab_size: int = vocab_size
23
+ self.Cvae: int = Cvae
24
+ self.using_znorm: bool = using_znorm
25
+ self.v_patch_nums: Tuple[int] = v_patch_nums
26
+
27
+ self.quant_resi_ratio = quant_resi
28
+ if share_quant_resi == 0: # non-shared: \phi_{1 to K} for K scales
29
+ self.quant_resi = PhiNonShared([(Phi(Cvae, quant_resi) if abs(quant_resi) > 1e-6 else nn.Identity()) for _ in range(default_qresi_counts or len(self.v_patch_nums))])
30
+ elif share_quant_resi == 1: # fully shared: only a single \phi for K scales
31
+ self.quant_resi = PhiShared(Phi(Cvae, quant_resi) if abs(quant_resi) > 1e-6 else nn.Identity())
32
+ else: # partially shared: \phi_{1 to share_quant_resi} for K scales
33
+ self.quant_resi = PhiPartiallyShared(nn.ModuleList([(Phi(Cvae, quant_resi) if abs(quant_resi) > 1e-6 else nn.Identity()) for _ in range(share_quant_resi)]))
34
+
35
+ self.register_buffer('ema_vocab_hit_SV', torch.full((len(self.v_patch_nums), self.vocab_size), fill_value=0.0))
36
+ self.record_hit = 0
37
+
38
+ self.beta: float = beta
39
+ self.embedding = nn.Embedding(self.vocab_size, self.Cvae)
40
+
41
+ # only used for progressive training of VAR (not supported yet, will be tested and supported in the future)
42
+ self.prog_si = -1 # progressive training: not supported yet, prog_si always -1
43
+
44
+ def eini(self, eini):
45
+ if eini > 0: nn.init.trunc_normal_(self.embedding.weight.data, std=eini)
46
+ elif eini < 0: self.embedding.weight.data.uniform_(-abs(eini) / self.vocab_size, abs(eini) / self.vocab_size)
47
+
48
+ def extra_repr(self) -> str:
49
+ return f'{self.v_patch_nums}, znorm={self.using_znorm}, beta={self.beta} | S={len(self.v_patch_nums)}, quant_resi={self.quant_resi_ratio}'
50
+
51
+ # ===================== `forward` is only used in VAE training =====================
52
+ def forward(self, f_BChw: torch.Tensor, ret_usages=False) -> Tuple[torch.Tensor, List[float], torch.Tensor]:
53
+ dtype = f_BChw.dtype
54
+ if dtype != torch.float32: f_BChw = f_BChw.float()
55
+ B, C, H, W = f_BChw.shape
56
+ f_no_grad = f_BChw.detach()
57
+
58
+ f_rest = f_no_grad.clone()
59
+ f_hat = torch.zeros_like(f_rest)
60
+
61
+ with torch.cuda.amp.autocast(enabled=False):
62
+ mean_vq_loss: torch.Tensor = 0.0
63
+ vocab_hit_V = torch.zeros(self.vocab_size, dtype=torch.float, device=f_BChw.device)
64
+ SN = len(self.v_patch_nums)
65
+ for si, pn in enumerate(self.v_patch_nums): # from small to large
66
+ # find the nearest embedding
67
+ if self.using_znorm:
68
+ rest_NC = F.interpolate(f_rest, size=(pn, pn), mode='area').permute(0, 2, 3, 1).reshape(-1, C) if (si != SN-1) else f_rest.permute(0, 2, 3, 1).reshape(-1, C)
69
+ rest_NC = F.normalize(rest_NC, dim=-1)
70
+ idx_N = torch.argmax(rest_NC @ F.normalize(self.embedding.weight.data.T, dim=0), dim=1)
71
+ else:
72
+ rest_NC = F.interpolate(f_rest, size=(pn, pn), mode='area').permute(0, 2, 3, 1).reshape(-1, C) if (si != SN-1) else f_rest.permute(0, 2, 3, 1).reshape(-1, C)
73
+ d_no_grad = torch.sum(rest_NC.square(), dim=1, keepdim=True) + torch.sum(self.embedding.weight.data.square(), dim=1, keepdim=False)
74
+ d_no_grad.addmm_(rest_NC, self.embedding.weight.data.T, alpha=-2, beta=1) # (B*h*w, vocab_size)
75
+ idx_N = torch.argmin(d_no_grad, dim=1)
76
+
77
+ hit_V = idx_N.bincount(minlength=self.vocab_size).float()
78
+ if self.training:
79
+ if dist.initialized(): handler = tdist.all_reduce(hit_V, async_op=True)
80
+
81
+ # calc loss
82
+ idx_Bhw = idx_N.view(B, pn, pn)
83
+ h_BChw = F.interpolate(self.embedding(idx_Bhw).permute(0, 3, 1, 2), size=(H, W), mode='bicubic').contiguous() if (si != SN-1) else self.embedding(idx_Bhw).permute(0, 3, 1, 2).contiguous()
84
+ h_BChw = self.quant_resi[si/(SN-1)](h_BChw)
85
+ f_hat = f_hat + h_BChw
86
+ f_rest -= h_BChw
87
+
88
+ if self.training and dist.initialized():
89
+ handler.wait()
90
+ if self.record_hit == 0: self.ema_vocab_hit_SV[si].copy_(hit_V)
91
+ elif self.record_hit < 100: self.ema_vocab_hit_SV[si].mul_(0.9).add_(hit_V.mul(0.1))
92
+ else: self.ema_vocab_hit_SV[si].mul_(0.99).add_(hit_V.mul(0.01))
93
+ self.record_hit += 1
94
+ vocab_hit_V.add_(hit_V)
95
+ mean_vq_loss += F.mse_loss(f_hat.data, f_BChw).mul_(self.beta) + F.mse_loss(f_hat, f_no_grad)
96
+
97
+ mean_vq_loss *= 1. / SN
98
+ f_hat = (f_hat.data - f_no_grad).add_(f_BChw)
99
+
100
+ margin = tdist.get_world_size() * (f_BChw.numel() / f_BChw.shape[1]) / self.vocab_size * 0.08
101
+ # margin = pn*pn / 100
102
+ if ret_usages: usages = [(self.ema_vocab_hit_SV[si] >= margin).float().mean().item() * 100 for si, pn in enumerate(self.v_patch_nums)]
103
+ else: usages = None
104
+ return f_hat, usages, mean_vq_loss
105
+ # ===================== `forward` is only used in VAE training =====================
106
+
107
+ def embed_to_fhat(self, ms_h_BChw: List[torch.Tensor], all_to_max_scale=True, last_one=False) -> Union[List[torch.Tensor], torch.Tensor]:
108
+ ls_f_hat_BChw = []
109
+ B = ms_h_BChw[0].shape[0]
110
+ H = W = self.v_patch_nums[-1]
111
+ SN = len(self.v_patch_nums)
112
+ if all_to_max_scale:
113
+ f_hat = ms_h_BChw[0].new_zeros(B, self.Cvae, H, W, dtype=torch.float32)
114
+ for si, pn in enumerate(self.v_patch_nums): # from small to large
115
+ h_BChw = ms_h_BChw[si]
116
+ if si < len(self.v_patch_nums) - 1:
117
+ h_BChw = F.interpolate(h_BChw, size=(H, W), mode='bicubic')
118
+ h_BChw = self.quant_resi[si/(SN-1)](h_BChw)
119
+ f_hat.add_(h_BChw)
120
+ if last_one: ls_f_hat_BChw = f_hat
121
+ else: ls_f_hat_BChw.append(f_hat.clone())
122
+ else:
123
+ # WARNING: this is not the case in VQ-VAE training or inference (we'll interpolate every token map to the max H W, like above)
124
+ # WARNING: this should only be used for experimental purpose
125
+ f_hat = ms_h_BChw[0].new_zeros(B, self.Cvae, self.v_patch_nums[0], self.v_patch_nums[0], dtype=torch.float32)
126
+ for si, pn in enumerate(self.v_patch_nums): # from small to large
127
+ f_hat = F.interpolate(f_hat, size=(pn, pn), mode='bicubic')
128
+ h_BChw = self.quant_resi[si/(SN-1)](ms_h_BChw[si])
129
+ f_hat.add_(h_BChw)
130
+ if last_one: ls_f_hat_BChw = f_hat
131
+ else: ls_f_hat_BChw.append(f_hat)
132
+
133
+ return ls_f_hat_BChw
134
+
135
+ def f_to_idxBl_or_fhat(self, f_BChw: torch.Tensor, to_fhat: bool, v_patch_nums: Optional[Sequence[Union[int, Tuple[int, int]]]] = None) -> List[Union[torch.Tensor, torch.LongTensor]]: # z_BChw is the feature from inp_img_no_grad
136
+ B, C, H, W = f_BChw.shape
137
+ f_no_grad = f_BChw.detach()
138
+ f_rest = f_no_grad.clone()
139
+ f_hat = torch.zeros_like(f_rest)
140
+
141
+ f_hat_or_idx_Bl: List[torch.Tensor] = []
142
+
143
+ patch_hws = [(pn, pn) if isinstance(pn, int) else (pn[0], pn[1]) for pn in (v_patch_nums or self.v_patch_nums)] # from small to large
144
+ assert patch_hws[-1][0] == H and patch_hws[-1][1] == W, f'{patch_hws[-1]=} != ({H=}, {W=})'
145
+
146
+ SN = len(patch_hws)
147
+ for si, (ph, pw) in enumerate(patch_hws): # from small to large
148
+ if 0 <= self.prog_si < si: break # progressive training: not supported yet, prog_si always -1
149
+ # find the nearest embedding
150
+ z_NC = F.interpolate(f_rest, size=(ph, pw), mode='area').permute(0, 2, 3, 1).reshape(-1, C) if (si != SN-1) else f_rest.permute(0, 2, 3, 1).reshape(-1, C)
151
+ if self.using_znorm:
152
+ z_NC = F.normalize(z_NC, dim=-1)
153
+ idx_N = torch.argmax(z_NC @ F.normalize(self.embedding.weight.data.T, dim=0), dim=1)
154
+ else:
155
+ d_no_grad = torch.sum(z_NC.square(), dim=1, keepdim=True) + torch.sum(self.embedding.weight.data.square(), dim=1, keepdim=False)
156
+ d_no_grad.addmm_(z_NC, self.embedding.weight.data.T, alpha=-2, beta=1) # (B*h*w, vocab_size)
157
+ idx_N = torch.argmin(d_no_grad, dim=1)
158
+
159
+ idx_Bhw = idx_N.view(B, ph, pw)
160
+ h_BChw = F.interpolate(self.embedding(idx_Bhw).permute(0, 3, 1, 2), size=(H, W), mode='bicubic').contiguous() if (si != SN-1) else self.embedding(idx_Bhw).permute(0, 3, 1, 2).contiguous()
161
+ h_BChw = self.quant_resi[si/(SN-1)](h_BChw)
162
+ f_hat.add_(h_BChw)
163
+ f_rest.sub_(h_BChw)
164
+ f_hat_or_idx_Bl.append(f_hat.clone() if to_fhat else idx_N.reshape(B, ph*pw))
165
+
166
+ return f_hat_or_idx_Bl
167
+
168
+ # ===================== idxBl_to_var_input: only used in VAR training, for getting teacher-forcing input =====================
169
+ def idxBl_to_var_input(self, gt_ms_idx_Bl: List[torch.Tensor]) -> torch.Tensor:
170
+ next_scales = []
171
+ B = gt_ms_idx_Bl[0].shape[0]
172
+ C = self.Cvae
173
+ H = W = self.v_patch_nums[-1]
174
+ SN = len(self.v_patch_nums)
175
+
176
+ f_hat = gt_ms_idx_Bl[0].new_zeros(B, C, H, W, dtype=torch.float32)
177
+ pn_next: int = self.v_patch_nums[0]
178
+ for si in range(SN-1):
179
+ if self.prog_si == 0 or (0 <= self.prog_si-1 < si): break # progressive training: not supported yet, prog_si always -1
180
+ h_BChw = F.interpolate(self.embedding(gt_ms_idx_Bl[si]).transpose_(1, 2).view(B, C, pn_next, pn_next), size=(H, W), mode='bicubic')
181
+ f_hat.add_(self.quant_resi[si/(SN-1)](h_BChw))
182
+ pn_next = self.v_patch_nums[si+1]
183
+ next_scales.append(F.interpolate(f_hat, size=(pn_next, pn_next), mode='area').view(B, C, -1).transpose(1, 2))
184
+ return torch.cat(next_scales, dim=1) if len(next_scales) else None # cat BlCs to BLC, this should be float32
185
+
186
+ # ===================== get_next_autoregressive_input: only used in VAR inference, for getting next step's input =====================
187
+ def get_next_autoregressive_input(self, si: int, SN: int, f_hat: torch.Tensor, h_BChw: torch.Tensor) -> Tuple[Optional[torch.Tensor], torch.Tensor]: # only used in VAR inference
188
+ HW = self.v_patch_nums[-1]
189
+ if si != SN-1:
190
+ h = self.quant_resi[si/(SN-1)](F.interpolate(h_BChw, size=(HW, HW), mode='bicubic')) # conv after upsample
191
+ f_hat.add_(h)
192
+ return f_hat, F.interpolate(f_hat, size=(self.v_patch_nums[si+1], self.v_patch_nums[si+1]), mode='area')
193
+ else:
194
+ h = self.quant_resi[si/(SN-1)](h_BChw)
195
+ f_hat.add_(h)
196
+ return f_hat, f_hat
197
+
198
+
199
+ class Phi(nn.Conv2d):
200
+ def __init__(self, embed_dim, quant_resi):
201
+ ks = 3
202
+ super().__init__(in_channels=embed_dim, out_channels=embed_dim, kernel_size=ks, stride=1, padding=ks//2)
203
+ self.resi_ratio = abs(quant_resi)
204
+
205
+ def forward(self, h_BChw):
206
+ return h_BChw.mul(1-self.resi_ratio) + super().forward(h_BChw).mul_(self.resi_ratio)
207
+
208
+
209
+ class PhiShared(nn.Module):
210
+ def __init__(self, qresi: Phi):
211
+ super().__init__()
212
+ self.qresi: Phi = qresi
213
+
214
+ def __getitem__(self, _) -> Phi:
215
+ return self.qresi
216
+
217
+
218
+ class PhiPartiallyShared(nn.Module):
219
+ def __init__(self, qresi_ls: nn.ModuleList):
220
+ super().__init__()
221
+ self.qresi_ls = qresi_ls
222
+ K = len(qresi_ls)
223
+ self.ticks = np.linspace(1/3/K, 1-1/3/K, K) if K == 4 else np.linspace(1/2/K, 1-1/2/K, K)
224
+
225
+ def __getitem__(self, at_from_0_to_1: float) -> Phi:
226
+ return self.qresi_ls[np.argmin(np.abs(self.ticks - at_from_0_to_1)).item()]
227
+
228
+ def extra_repr(self) -> str:
229
+ return f'ticks={self.ticks}'
230
+
231
+
232
+ class PhiNonShared(nn.ModuleList):
233
+ def __init__(self, qresi: List):
234
+ super().__init__(qresi)
235
+ # self.qresi = qresi
236
+ K = len(qresi)
237
+ self.ticks = np.linspace(1/3/K, 1-1/3/K, K) if K == 4 else np.linspace(1/2/K, 1-1/2/K, K)
238
+
239
+ def __getitem__(self, at_from_0_to_1: float) -> Phi:
240
+ return super().__getitem__(np.argmin(np.abs(self.ticks - at_from_0_to_1)).item())
241
+
242
+ def extra_repr(self) -> str:
243
+ return f'ticks={self.ticks}'
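
Before moving on, a toy illustration (not the repository's API) of the residual multi-scale quantization loop in `VectorQuantizer2.forward`: at each scale the remaining residual is pooled to `(pn, pn)`, matched to the nearest codebook entry, upsampled back to full resolution, and subtracted before the next scale. The sketch omits the small `quant_resi`/`Phi` convolution the real code applies after upsampling; all names and sizes here are made up for the example.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
B, C, H = 1, 4, 16
codebook = torch.randn(64, C)                      # (vocab_size, Cvae)
f_rest = torch.randn(B, C, H, H)                   # residual to be quantized
f_hat = torch.zeros_like(f_rest)                   # running reconstruction

for pn in (1, 2, 4, 8, 16):                        # scales, small to large
    z = F.interpolate(f_rest, size=(pn, pn), mode='area') if pn != H else f_rest
    z_NC = z.permute(0, 2, 3, 1).reshape(-1, C)
    d = z_NC.pow(2).sum(1, keepdim=True) + codebook.pow(2).sum(1) - 2 * z_NC @ codebook.T
    idx = d.argmin(dim=1)                          # nearest codebook index per token
    h = codebook[idx].view(B, pn, pn, C).permute(0, 3, 1, 2)
    h = F.interpolate(h, size=(H, H), mode='bicubic') if pn != H else h
    f_hat = f_hat + h                              # accumulate the reconstruction
    f_rest = f_rest - h                            # quantize what is left
    print(pn, round(f_rest.pow(2).mean().item(), 4))  # residual energy after this scale
```
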
models/var.py ADDED
@@ -0,0 +1,429 @@
1
+ import math
2
+ from functools import partial
3
+ from typing import Optional, Tuple, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch import Tensor
8
+ from huggingface_hub import PyTorchModelHubMixin
9
+
10
+ import dist
11
+ from models.basic_var import AdaLNBeforeHead, AdaLNSelfAttn
12
+ from models.helpers import gumbel_softmax_with_rng, sample_with_top_k_top_p_
13
+ from models.vqvae import VQVAE, VectorQuantizer2
14
+ from utils.model_args import ModelArgs
15
+ from transformers import AutoImageProcessor, AutoModel
16
+
17
+
18
+ class SharedAdaLin(nn.Linear):
19
+ def forward(self, cond_BD):
20
+ C = self.weight.shape[0] // 6
21
+ return super().forward(cond_BD).view(-1, 1, 6, C) # B16C
22
+
23
+
24
+ #################################################################################
25
+ # Embedding Layers for Text Feature #
26
+ #################################################################################
27
+ class AttentionPooling(nn.Module):
28
+ def __init__(self, dim, num_heads=4):
29
+ super().__init__()
30
+ self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
31
+
32
+ def forward(self, x): # x: [B*N, T, C]
33
+ B_N, T, C = x.shape
34
+ query = torch.zeros(B_N, 1, C, device=x.device)
35
+ out, _ = self.attn(query, x, x)
36
+ return out.squeeze(1) # [B*N, C]
37
+
38
+ class CaptionEmbedder(nn.Module):
39
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, num_heads=4):
40
+ super().__init__()
41
+ self.cap_proj = nn.Sequential(
42
+ nn.LayerNorm(in_channels),
43
+ nn.Linear(in_channels, hidden_size),
44
+ nn.GELU(),
45
+ nn.Linear(hidden_size, hidden_size)
46
+ )
47
+ self.uncond_embedding = nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5)
48
+ self.attn_pool = AttentionPooling(dim=hidden_size, num_heads=num_heads)
49
+ self.uncond_prob = uncond_prob
50
+ def token_drop(self, caption: Tensor, force_drop_ids=None):
51
+ B, N, T, C = caption.shape
52
+ if force_drop_ids is None:
53
+ drop_ids = torch.rand(B, N, device=caption.device) < self.uncond_prob
54
+ else:
55
+ drop_ids = force_drop_ids == 1
56
+
57
+ uncond_embed = self.uncond_embedding.unsqueeze(0).unsqueeze(0).expand(B, N, -1, -1)
58
+ drop_mask = drop_ids.unsqueeze(-1).unsqueeze(-1) # [B, N, 1, 1]
59
+ caption = torch.where(drop_mask, uncond_embed, caption)
60
+ return caption, drop_ids
61
+ def forward(self, caption: Tensor, train: bool = True, force_drop_ids=None):
62
+ # caption: [B, N, T, C]
63
+ B, N, T, C = caption.shape
64
+ if (train and self.uncond_prob > 0) or (force_drop_ids is not None):
65
+ caption, drop_ids = self.token_drop(caption, force_drop_ids)
66
+ else:
67
+ drop_ids = None
68
+
69
+ caption = caption.view(B * N, T, C)
70
+ embeddings = self.cap_proj(caption) # [B*N, T, D]
71
+ pooled = self.attn_pool(embeddings) # [B*N, D]
72
+ pooled = pooled.view(B, N, -1) # [B, N, D]
73
+ cond_BD = pooled.mean(dim=1) # [B, D]
74
+
75
+ if drop_ids is not None:
76
+ return cond_BD, drop_ids
77
+ else:
78
+ return cond_BD
79
+
80
+
81
+ class MLP(nn.Module):
82
+ def __init__(self, in_features, hidden_features, out_features):
83
+ super().__init__()
84
+ out_features = out_features or in_features
85
+ hidden_features = hidden_features or in_features
86
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
87
+ self.act = nn.GELU(approximate='tanh')
88
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
89
+
90
+ nn.init.zeros_(self.fc1.weight)
91
+ nn.init.zeros_(self.fc2.weight)
92
+
93
+ def forward(self, x):
94
+ x = self.fc1(x)
95
+ x = self.act(x)
96
+ x = self.fc2(x)
97
+ return x
98
+
99
+
100
+
101
+ config=ModelArgs()
102
+
103
+ class VAR(nn.Module):
104
+ def __init__(
105
+ self, vae_local: VQVAE,
106
+ depth=12, embed_dim=1024, num_heads=16, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
107
+ norm_eps=1e-6, shared_aln=False, cond_drop_rate=0.1,
108
+ attn_l2_norm=False,
109
+ patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16), # 10 steps by default; each pn defines a pn x pn token map, so the image is tokenized at multiple scales (e.g. patch_nums=(1, 2, 3, 4) means 1x1, 2x2, 3x3 and 4x4 grids)
110
+ flash_if_available=True, fused_if_available=True,
111
+ ):
112
+ super().__init__()
113
+ # 0. hyperparameters
114
+ assert embed_dim % num_heads == 0
115
+ self.Cvae, self.V = vae_local.Cvae, vae_local.vocab_size
116
+ self.depth, self.C, self.D, self.num_heads = depth, embed_dim, embed_dim, num_heads
117
+ # self.C is the dimensionality of the target (shared) embedding space
118
+
119
+ self.cond_drop_rate = cond_drop_rate
120
+ self.prog_si = -1 # progressive training
121
+
122
+ self.patch_nums: Tuple[int] = patch_nums
123
+ self.L = sum(pn ** 2 for pn in self.patch_nums) # total number of tokens over all scales, i.e. 1^2 + 2^2 + 3^2 + 4^2 + ...
124
+ self.first_l = self.patch_nums[0] ** 2 # number of tokens at the first (smallest) scale, e.g. 1^2 = 1 for patch_nums = (1, 2, 3, 4)
125
+ self.begin_ends = []
126
+ cur = 0
127
+ for i, pn in enumerate(self.patch_nums):
128
+ self.begin_ends.append((cur, cur+pn ** 2))
129
+ cur += pn ** 2
130
+ # This loop walks over every scale pn in patch_nums and computes the start and end token indices of each scale. Specifically:
131
+ # enumerate(self.patch_nums) iterates over patch_nums, yielding the index i for each pn.
132
+ # For each pn, the scale starts at index cur and ends at cur + pn ** 2.
133
+ # After computing a scale's range, (cur, cur + pn ** 2) is appended to self.begin_ends.
134
+ # cur is then advanced to the start of the next scale.
135
+ # For example, with patch_nums = (1, 2, 3, 4), self.begin_ends becomes:
136
+
137
+ # scale 1 (1x1): start/end indices (0, 1)
138
+ # scale 2 (2x2): start/end indices (1, 5)
139
+ # scale 3 (3x3): start/end indices (5, 14)
140
+ # scale 4 (4x4): start/end indices (14, 30)
141
+ # so self.begin_ends ends up as [(0, 1), (1, 5), (5, 14), (14, 30)]
142
+ self.num_stages_minus_1 = len(self.patch_nums) - 1 # number of scales (stages) minus one
143
+ self.rng = torch.Generator(device=dist.get_device())
144
+
145
+
146
+
147
+
148
+ # 1. input (word) embedding
149
+ quant: VectorQuantizer2 = vae_local.quantize
150
+ self.vae_proxy: Tuple[VQVAE] = (vae_local,)
151
+ self.vae_quant_proxy: Tuple[VectorQuantizer2] = (quant,)
152
+ self.word_embed = nn.Linear(self.Cvae, self.C)
153
+
154
+
155
+ # 2. caption embedding
156
+ init_std = math.sqrt(1 / self.C / 3)
157
+ self.Caption_embedding = CaptionEmbedder(config.caption_dim, config.dim, config.class_dropout_prob)
158
+ nn.init.trunc_normal_(self.Caption_embedding.uncond_embedding, mean=0, std=init_std)
159
+ self.pos_start = nn.Parameter(torch.empty(1, self.first_l, self.C))
160
+ nn.init.trunc_normal_(self.pos_start.data, mean=0, std=init_std)
161
+
162
+
163
+ # 3. absolute position embedding
164
+ pos_1LC = []
165
+ for i, pn in enumerate(self.patch_nums):
166
+ pe = torch.empty(1, pn*pn, self.C)
167
+ nn.init.trunc_normal_(pe, mean=0, std=init_std)
168
+ pos_1LC.append(pe)
169
+ pos_1LC = torch.cat(pos_1LC, dim=1) # 1, L, C
170
+ assert tuple(pos_1LC.shape) == (1, self.L, self.C)
171
+ self.pos_1LC = nn.Parameter(pos_1LC)
172
+ # level embedding (similar to GPT's segment embedding, used to distinguish different levels of token pyramid)
173
+ self.lvl_embed = nn.Embedding(len(self.patch_nums), self.C)
174
+ nn.init.trunc_normal_(self.lvl_embed.weight.data, mean=0, std=init_std)
175
+
176
+ # 4. backbone blocks
177
+ self.shared_ada_lin = nn.Sequential(nn.SiLU(inplace=False), SharedAdaLin(self.D, 6*self.C)) if shared_aln else nn.Identity()
178
+
179
+ norm_layer = partial(nn.LayerNorm, eps=norm_eps)
180
+ self.drop_path_rate = drop_path_rate
181
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule (linearly increasing)
182
+ self.blocks = nn.ModuleList([
183
+ AdaLNSelfAttn(
184
+ cond_dim=self.D, shared_aln=shared_aln,
185
+ block_idx=block_idx, embed_dim=self.C, norm_layer=norm_layer, num_heads=num_heads, mlp_ratio=mlp_ratio,
186
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[block_idx], last_drop_p=0 if block_idx == 0 else dpr[block_idx-1],
187
+ attn_l2_norm=attn_l2_norm,
188
+ flash_if_available=flash_if_available, fused_if_available=fused_if_available,
189
+ )
190
+ for block_idx in range(depth)
191
+ ])
192
+
193
+ fused_add_norm_fns = [b.fused_add_norm_fn is not None for b in self.blocks]
194
+ self.using_fused_add_norm_fn = any(fused_add_norm_fns)
195
+ print(
196
+ f'\n[constructor] ==== flash_if_available={flash_if_available} ({sum(b.attn.using_flash for b in self.blocks)}/{self.depth}), fused_if_available={fused_if_available} (fusing_add_ln={sum(fused_add_norm_fns)}/{self.depth}, fusing_mlp={sum(b.ffn.fused_mlp_func is not None for b in self.blocks)}/{self.depth}) ==== \n'
197
+ f' [VAR config ] embed_dim={embed_dim}, num_heads={num_heads}, depth={depth}, mlp_ratio={mlp_ratio}\n'
198
+ f' [drop ratios ] drop_rate={drop_rate}, attn_drop_rate={attn_drop_rate}, drop_path_rate={drop_path_rate:g} ({torch.linspace(0, drop_path_rate, depth)})',
199
+ end='\n\n', flush=True
200
+ )
201
+
202
+ # 5. attention mask used in training (for masking out the future)
203
+ # it won't be used in inference, since kv cache is enabled
204
+ # d and dT tag every token with its scale index so tokens can be compared by level.
205
+ # torch.where then builds a mask that prevents tokens from attending to finer (future) scales.
206
+ # attn_bias_for_masking is applied as an additive attention bias to enforce this masking in self-attention.
207
+ d: torch.Tensor = torch.cat([torch.full((pn*pn,), i) for i, pn in enumerate(self.patch_nums)]).view(1, self.L, 1)
208
+ dT = d.transpose(1, 2) # dT: 11L
209
+ lvl_1L = dT[:, 0].contiguous()
210
+ self.register_buffer('lvl_1L', lvl_1L)
211
+ attn_bias_for_masking = torch.where(d >= dT, 0., -torch.inf).reshape(1, 1, self.L, self.L)
212
+ self.register_buffer('attn_bias_for_masking', attn_bias_for_masking.contiguous())
213
+
214
+ # 6. classifier head
215
+ self.head_nm = AdaLNBeforeHead(self.C, self.D, norm_layer=norm_layer)
216
+ self.head = nn.Linear(self.C, self.V)
217
+
218
+ # h_or_h_and_residual: the first input of this function;
219
+ # it is either a single tensor h,
220
+ # or a tuple of two tensors (h, resi), where h is the input feature and resi is an intermediate residual.
221
+
222
+ # Checking whether h_or_h_and_residual is a tensor:
223
+ # if it is not a tensor but a tuple (h, resi),
224
+ # fused add-norm (fused_add_norm) is in use and h and resi must be combined first.
225
+ # h = resi + self.blocks[-1].drop_path(h): adds the residual resi to h,
226
+ # regularizing h via drop_path (stochastic depth, which helps prevent overfitting).
227
+ # self.blocks[-1] refers to the last block of the model.
228
+ # If h_or_h_and_residual is a single tensor (no residual), it is used directly as the input.
229
+ def get_logits(self, h_or_h_and_residual: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], cond_BD: Optional[torch.Tensor]):
230
+ if not isinstance(h_or_h_and_residual, torch.Tensor):
231
+ h, resi = h_or_h_and_residual # fused_add_norm must be used
232
+ h = resi + self.blocks[-1].drop_path(h)
233
+ else: # fused_add_norm is not used
234
+ h = h_or_h_and_residual
235
+ return self.head(self.head_nm(h.float(), cond_BD).float()).float()
236
+
237
+ @torch.no_grad()
238
+ def autoregressive_infer_cfg(
239
+ self, B: int, caption: Optional[Union[int, torch.LongTensor]],
240
+ g_seed: Optional[int] = None, cfg=1.5, top_k=0, top_p=0.0,
241
+ more_smooth=False,
242
+ ) -> torch.Tensor: # returns reconstructed image (B, 3, H, W) in [0, 1]
243
+ """
244
+ only used for inference, on autoregressive mode
245
+ :param B: batch size
246
+ :param caption: if None, randomly sampled
247
+ :param g_seed: random seed
248
+ :param cfg: classifier-free guidance ratio
249
+ :param top_k: top-k sampling
250
+ :param top_p: top-p sampling
251
+ :param more_smooth: smoothing the pred using gumbel softmax; only used in visualization, not used in FID/IS benchmarking
252
+ :return: if returns_vemb: list of embedding h_BChw := vae_embed(idx_Bl), else: list of idx_Bl
253
+ """
254
+ if g_seed is None: rng = None
255
+ else: self.rng.manual_seed(g_seed); rng = self.rng
256
+
257
+ if caption is None:
258
+ caption = torch.multinomial(self.uniform_prob, num_samples=B, replacement=True, generator=rng).reshape(B)
259
+ elif isinstance(caption, int):
260
+ caption = torch.full((B,), fill_value=self.num_classes if caption < 0 else caption, device=self.lvl_1L.device)
261
+
262
+ sos = cond_BD = self.Caption_embedding(caption)
263
+
264
+
265
+ # lvl_pos: positional embeddings for tokens at every scale.
266
+ # next_token_map: initializes each sample's generation sequence with the start token sos plus its positional information.
267
+ lvl_pos = self.lvl_embed(self.lvl_1L) + self.pos_1LC
268
+ next_token_map = sos.unsqueeze(1).expand(2 * B, self.first_l, -1) + self.pos_start.expand(2 * B, self.first_l, -1) + lvl_pos[:, :self.first_l]
269
+
270
+ cur_L = 0
271
+ f_hat = sos.new_zeros(B, self.Cvae, self.patch_nums[-1], self.patch_nums[-1])
272
+
273
+ for b in self.blocks: b.attn.kv_caching(True)
274
+ for si, pn in enumerate(self.patch_nums): # si: i-th segment
275
+ ratio = si / self.num_stages_minus_1
276
+ # last_L = cur_L
277
+ cur_L += pn*pn
278
+ # assert self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L].sum() == 0, f'AR with {(self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L] != 0).sum()} / {self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L].numel()} mask item'
279
+ cond_BD_or_gss = self.shared_ada_lin(cond_BD)
280
+ x = next_token_map
281
+ AdaLNSelfAttn.forward
282
+ for b in self.blocks:
283
+ x = b(x=x, cond_BD=cond_BD_or_gss, attn_bias=None)
284
+ logits_BlV = self.get_logits(x, cond_BD)
285
+
286
+ t = cfg * ratio
287
+ logits_BlV = (1+t) * logits_BlV[:B] - t * logits_BlV[B:]
288
+
289
+ idx_Bl = sample_with_top_k_top_p_(logits_BlV, rng=rng, top_k=top_k, top_p=top_p, num_samples=1)[:, :, 0]
290
+ if not more_smooth: # this is the default case
291
+ h_BChw = self.vae_quant_proxy[0].embedding(idx_Bl) # B, l, Cvae
292
+ else: # not used when evaluating FID/IS/Precision/Recall
293
+ gum_t = max(0.27 * (1 - ratio * 0.95), 0.005) # refer to mask-git
294
+ h_BChw = gumbel_softmax_with_rng(logits_BlV.mul(1 + ratio), tau=gum_t, hard=False, dim=-1, rng=rng) @ self.vae_quant_proxy[0].embedding.weight.unsqueeze(0)
295
+
296
+ h_BChw = h_BChw.transpose_(1, 2).reshape(B, self.Cvae, pn, pn)
297
+ f_hat, next_token_map = self.vae_quant_proxy[0].get_next_autoregressive_input(si, len(self.patch_nums), f_hat, h_BChw)
298
+ if si != self.num_stages_minus_1: # prepare for next stage
299
+ next_token_map = next_token_map.view(B, self.Cvae, -1).transpose(1, 2)
300
+ next_token_map = self.word_embed(next_token_map) + lvl_pos[:, cur_L:cur_L + self.patch_nums[si+1] ** 2]
301
+ next_token_map = next_token_map.repeat(2, 1, 1) # double the batch sizes due to CFG
302
+
303
+ for b in self.blocks: b.attn.kv_caching(False)
304
+ return self.vae_proxy[0].fhat_to_img(f_hat).add_(1).mul_(0.5) # de-normalize, from [-1, 1] to [0, 1]
305
+
306
+
307
+
308
+
309
+ def forward(self, caption: torch.Tensor, x_BLCv_wo_first_l: torch.Tensor, condition: torch.Tensor, current_step=None, total_steps=None) -> torch.Tensor: # returns logits_BLV
310
+ """
311
+ :param caption: caption
312
+ :param x_BLCv_wo_first_l: teacher forcing input (B, self.L-self.first_l, self.Cvae)
313
+ :param condition:seg_image
314
+ :return: logits BLV, V is vocab_size
315
+ """
316
+ bg, ed = self.begin_ends[self.prog_si] if self.prog_si >= 0 else (0, self.L)
317
+ B = x_BLCv_wo_first_l.shape[0]
318
+ with torch.cuda.amp.autocast(enabled=False):
319
+ cond_BD, _ = self.Caption_embedding(caption, train=True) # text condition
320
+ sos = cond_BD.unsqueeze(1).expand(B, self.first_l, -1) + self.pos_start.expand(B, self.first_l, -1)
321
+
322
+ if self.prog_si == 0: x_BLC = sos
323
+ else: x_BLC = torch.cat((sos, self.word_embed(x_BLCv_wo_first_l.float())), dim=1)
324
+ x_BLC += self.lvl_embed(self.lvl_1L[:, :ed].expand(B, -1)) + self.pos_1LC[:, :ed] # lvl: BLC; pos: 1LC
325
+
326
+ attn_bias = self.attn_bias_for_masking[:, :, :ed, :ed]
327
+ cond_BD_or_gss = self.shared_ada_lin(cond_BD)
328
+
329
+ # hack: get the dtype if mixed precision is used
330
+ temp = x_BLC.new_ones(8, 8)
331
+ main_type = torch.matmul(temp, temp).dtype
332
+
333
+ x_BLC = x_BLC.to(dtype=main_type)
334
+ cond_BD_or_gss = cond_BD_or_gss.to(dtype=main_type)
335
+ attn_bias = attn_bias.to(dtype=main_type)
336
+
337
+ AdaLNSelfAttn.forward
338
+ for i, b in enumerate(self.blocks): # inject the condition image into selected layers according to the control strategy
339
+ x_BLC = b(x=x_BLC, cond_BD=cond_BD_or_gss, condition=condition, attn_bias=attn_bias, current_step=current_step, total_steps=total_steps) # the extra condition argument requires AdaLNSelfAttn.forward to accept it
340
+ x_BLC = self.get_logits(x_BLC.to(dtype=main_type), cond_BD.to(dtype=main_type))
341
+
342
+ if self.prog_si == 0:
343
+ if isinstance(self.word_embed, nn.Linear):
344
+ x_BLC[0, 0, 0] += self.word_embed.weight[0, 0] * 0 + self.word_embed.bias[0] * 0
345
+ else:
346
+ s = 0
347
+ for p in self.word_embed.parameters():
348
+ if p.requires_grad:
349
+ s += p.view(-1)[0] * 0
350
+ x_BLC[0, 0, 0] += s
351
+ return x_BLC # logits BLV, V is vocab_size
352
+
353
+ def init_weights(self, init_adaln=0.5, init_adaln_gamma=1e-5, init_head=0.02, init_std=0.02, conv_std_or_gain=0.02):
354
+ if init_std < 0: init_std = (1 / self.C / 3) ** 0.5 # init_std < 0: automated
355
+
356
+ print(f'[init_weights] {type(self).__name__} with {init_std=:g}')
357
+ for m in self.modules():
358
+ with_weight = hasattr(m, 'weight') and m.weight is not None
359
+ with_bias = hasattr(m, 'bias') and m.bias is not None
360
+ if isinstance(m, nn.Linear):
361
+ nn.init.trunc_normal_(m.weight.data, std=init_std)
362
+ if with_bias: m.bias.data.zero_()
363
+ elif isinstance(m, nn.Embedding):
364
+ nn.init.trunc_normal_(m.weight.data, std=init_std)
365
+ if m.padding_idx is not None: m.weight.data[m.padding_idx].zero_()
366
+ elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm, nn.GroupNorm, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d)):
367
+ if with_weight: m.weight.data.fill_(1.)
368
+ if with_bias: m.bias.data.zero_()
369
+ # conv: VAR has no conv, only VQVAE has conv
370
+ elif isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)):
371
+ if conv_std_or_gain > 0: nn.init.trunc_normal_(m.weight.data, std=conv_std_or_gain)
372
+ else: nn.init.xavier_normal_(m.weight.data, gain=-conv_std_or_gain)
373
+ if with_bias: m.bias.data.zero_()
374
+
375
+ if init_head >= 0:
376
+ if isinstance(self.head, nn.Linear):
377
+ self.head.weight.data.mul_(init_head)
378
+ self.head.bias.data.zero_()
379
+ elif isinstance(self.head, nn.Sequential):
380
+ self.head[-1].weight.data.mul_(init_head)
381
+ self.head[-1].bias.data.zero_()
382
+
383
+ if isinstance(self.head_nm, AdaLNBeforeHead):
384
+ self.head_nm.ada_lin[-1].weight.data.mul_(init_adaln)
385
+ if hasattr(self.head_nm.ada_lin[-1], 'bias') and self.head_nm.ada_lin[-1].bias is not None:
386
+ self.head_nm.ada_lin[-1].bias.data.zero_()
387
+
388
+ depth = len(self.blocks)
389
+ for block_idx, sab in enumerate(self.blocks):
390
+ sab: AdaLNSelfAttn
391
+ sab.attn.proj.weight.data.div_(math.sqrt(2 * depth))
392
+ sab.ffn.fc2.weight.data.div_(math.sqrt(2 * depth))
393
+ if hasattr(sab.ffn, 'fcg') and sab.ffn.fcg is not None:
394
+ nn.init.ones_(sab.ffn.fcg.bias)
395
+ nn.init.trunc_normal_(sab.ffn.fcg.weight, std=1e-5)
396
+ if hasattr(sab, 'ada_lin'):
397
+ sab.ada_lin[-1].weight.data[2*self.C:].mul_(init_adaln)
398
+ sab.ada_lin[-1].weight.data[:2*self.C].mul_(init_adaln_gamma)
399
+ if hasattr(sab.ada_lin[-1], 'bias') and sab.ada_lin[-1].bias is not None:
400
+ sab.ada_lin[-1].bias.data.zero_()
401
+ elif hasattr(sab, 'ada_gss'):
402
+ sab.ada_gss.data[:, :, 2:].mul_(init_adaln)
403
+ sab.ada_gss.data[:, :, :2].mul_(init_adaln_gamma)
404
+
405
+ def extra_repr(self):
406
+ return f'drop_path_rate={self.drop_path_rate:g}'
407
+
408
+
409
+ class VARHF(VAR, PyTorchModelHubMixin):
410
+ # repo_url="https://github.com/FoundationVision/VAR",
411
+ # tags=["image-generation"]):
412
+ def __init__(
413
+ self,
414
+ vae_kwargs,
415
+ num_classes=1000, depth=16, embed_dim=1024, num_heads=16, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
416
+ norm_eps=1e-6, shared_aln=False, cond_drop_rate=0.1,
417
+ attn_l2_norm=False,
418
+ patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16), # 10 steps by default
419
+ flash_if_available=True, fused_if_available=True,
420
+ ):
421
+ vae_local = VQVAE(**vae_kwargs)
422
+ super().__init__(
423
+ vae_local=vae_local,
424
+ num_classes=num_classes, depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate,
425
+ norm_eps=norm_eps, shared_aln=shared_aln, cond_drop_rate=cond_drop_rate,
426
+ attn_l2_norm=attn_l2_norm,
427
+ patch_nums=patch_nums,
428
+ flash_if_available=flash_if_available, fused_if_available=fused_if_available,
429
+ )
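
A standalone illustration of the guidance schedule used in `autoregressive_infer_cfg` above: the batch is doubled (conditional rows first, unconditional rows second) and the guidance weight `t = cfg * si / (num_stages - 1)` ramps from 0 at the coarsest scale to `cfg` at the finest scale before the two halves are mixed. Shapes and values below are toy stand-ins, not the model's real outputs.

```python
import torch

B, V = 2, 10                       # toy batch size and vocabulary size
cfg, num_stages = 1.5, 10
logits_2B = torch.randn(2 * B, V)  # stand-in for the model output at one scale
for si in range(num_stages):
    t = cfg * si / (num_stages - 1)
    guided = (1 + t) * logits_2B[:B] - t * logits_2B[B:]
    print(si, round(t, 3), tuple(guided.shape))
```
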
models/vqvae.py ADDED
@@ -0,0 +1,96 @@
1
+
2
+ """
3
+ References:
4
+ - VectorQuantizer2: https://github.com/CompVis/taming-transformers/blob/3ba01b241669f5ade541ce990f7650a3b8f65318/taming/modules/vqvae/quantize.py#L110
5
+ - GumbelQuantize: https://github.com/CompVis/taming-transformers/blob/3ba01b241669f5ade541ce990f7650a3b8f65318/taming/modules/vqvae/quantize.py#L213
6
+ - VQVAE (VQModel): https://github.com/CompVis/stable-diffusion/blob/21f890f9da3cfbeaba8e2ac3c425ee9e998d5229/ldm/models/autoencoder.py#L14
7
+ """
8
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+
13
+ from .basic_vae import Decoder, Encoder
14
+ from .quant import VectorQuantizer2
15
+
16
+
17
+ class VQVAE(nn.Module):
18
+ def __init__(
19
+ self, vocab_size=4096, z_channels=32, ch=128, dropout=0.0,
20
+ beta=0.25, # commitment loss weight
21
+ using_znorm=False, # whether to normalize when computing the nearest neighbors
22
+ quant_conv_ks=3, # quant conv kernel size
23
+ quant_resi=0.5, # 0.5 means \phi(x) = 0.5conv(x) + (1-0.5)x
24
+ share_quant_resi=4, # use 4 \phi layers for K scales: partially-shared \phi
25
+ default_qresi_counts=0, # if is 0: automatically set to len(v_patch_nums)
26
+ v_patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16), # number of patches for each scale, h_{1 to K} = w_{1 to K} = v_patch_nums[k]
27
+ test_mode=True,
28
+ ):
29
+ super().__init__()
30
+ self.test_mode = test_mode
31
+ self.V, self.Cvae = vocab_size, z_channels
32
+ # ddconfig is copied from https://github.com/CompVis/latent-diffusion/blob/e66308c7f2e64cb581c6d27ab6fbeb846828253b/models/first_stage_models/vq-f16/config.yaml
33
+ ddconfig = dict(
34
+ dropout=dropout, ch=ch, z_channels=z_channels,
35
+ in_channels=3, ch_mult=(1, 1, 2, 2, 4), num_res_blocks=2, # from vq-f16/config.yaml above
36
+ using_sa=True, using_mid_sa=True, # from vq-f16/config.yaml above
37
+ # resamp_with_conv=True, # always True, removed.
38
+ )
39
+ ddconfig.pop('double_z', None) # only KL-VAE should use double_z=True
40
+ self.encoder = Encoder(double_z=False, **ddconfig)
41
+ self.decoder = Decoder(**ddconfig)
42
+
43
+ self.vocab_size = vocab_size
44
+ self.downsample = 2 ** (len(ddconfig['ch_mult'])-1)
45
+ self.quantize: VectorQuantizer2 = VectorQuantizer2(
46
+ vocab_size=vocab_size, Cvae=self.Cvae, using_znorm=using_znorm, beta=beta,
47
+ default_qresi_counts=default_qresi_counts, v_patch_nums=v_patch_nums, quant_resi=quant_resi, share_quant_resi=share_quant_resi,
48
+ )
49
+ self.quant_conv = torch.nn.Conv2d(self.Cvae, self.Cvae, quant_conv_ks, stride=1, padding=quant_conv_ks//2)
50
+ self.post_quant_conv = torch.nn.Conv2d(self.Cvae, self.Cvae, quant_conv_ks, stride=1, padding=quant_conv_ks//2)
51
+
52
+ if self.test_mode:
53
+ self.eval()
54
+ [p.requires_grad_(False) for p in self.parameters()]
55
+
56
+ # ===================== `forward` is only used in VAE training =====================
57
+ def forward(self, inp, ret_usages=False): # -> rec_B3HW, idx_N, loss
58
+ VectorQuantizer2.forward
59
+ f_hat, usages, vq_loss = self.quantize(self.quant_conv(self.encoder(inp)), ret_usages=ret_usages)
60
+ return self.decoder(self.post_quant_conv(f_hat)), usages, vq_loss
61
+ # ===================== `forward` is only used in VAE training =====================
62
+
63
+ def fhat_to_img(self, f_hat: torch.Tensor):
64
+ return self.decoder(self.post_quant_conv(f_hat)).clamp_(-1, 1)
65
+
66
+ def img_to_idxBl(self, inp_img_no_grad: torch.Tensor, v_patch_nums: Optional[Sequence[Union[int, Tuple[int, int]]]] = None) -> List[torch.LongTensor]: # return List[Bl]
67
+ f = self.quant_conv(self.encoder(inp_img_no_grad))
68
+ return self.quantize.f_to_idxBl_or_fhat(f, to_fhat=False, v_patch_nums=v_patch_nums)
69
+
70
+ def idxBl_to_img(self, ms_idx_Bl: List[torch.Tensor], same_shape: bool, last_one=False) -> Union[List[torch.Tensor], torch.Tensor]:
71
+ B = ms_idx_Bl[0].shape[0]
72
+ ms_h_BChw = []
73
+ for idx_Bl in ms_idx_Bl:
74
+ l = idx_Bl.shape[1]
75
+ pn = round(l ** 0.5)
76
+ ms_h_BChw.append(self.quantize.embedding(idx_Bl).transpose(1, 2).view(B, self.Cvae, pn, pn))
77
+ return self.embed_to_img(ms_h_BChw=ms_h_BChw, all_to_max_scale=same_shape, last_one=last_one)
78
+
79
+ def embed_to_img(self, ms_h_BChw: List[torch.Tensor], all_to_max_scale: bool, last_one=False) -> Union[List[torch.Tensor], torch.Tensor]:
80
+ if last_one:
81
+ return self.decoder(self.post_quant_conv(self.quantize.embed_to_fhat(ms_h_BChw, all_to_max_scale=all_to_max_scale, last_one=True))).clamp_(-1, 1)
82
+ else:
83
+ return [self.decoder(self.post_quant_conv(f_hat)).clamp_(-1, 1) for f_hat in self.quantize.embed_to_fhat(ms_h_BChw, all_to_max_scale=all_to_max_scale, last_one=False)]
84
+
85
+ def img_to_reconstructed_img(self, x, v_patch_nums: Optional[Sequence[Union[int, Tuple[int, int]]]] = None, last_one=False) -> List[torch.Tensor]:
86
+ f = self.quant_conv(self.encoder(x))
87
+ ls_f_hat_BChw = self.quantize.f_to_idxBl_or_fhat(f, to_fhat=True, v_patch_nums=v_patch_nums)
88
+ if last_one:
89
+ return self.decoder(self.post_quant_conv(ls_f_hat_BChw[-1])).clamp_(-1, 1)
90
+ else:
91
+ return [self.decoder(self.post_quant_conv(f_hat)).clamp_(-1, 1) for f_hat in ls_f_hat_BChw]
92
+
93
+ def load_state_dict(self, state_dict: Dict[str, Any], strict=True, assign=False):
94
+ if 'quantize.ema_vocab_hit_SV' in state_dict and state_dict['quantize.ema_vocab_hit_SV'].shape[0] != self.quantize.ema_vocab_hit_SV.shape[0]:
95
+ state_dict['quantize.ema_vocab_hit_SV'] = self.quantize.ema_vocab_hit_SV
96
+ return super().load_state_dict(state_dict=state_dict, strict=strict, assign=assign)
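
A hedged usage sketch of the VQVAE round trip defined above (image to per-scale token ids and back). It assumes the repository root is on PYTHONPATH so that `models.vqvae` and the local `dist` module import cleanly, and it runs with randomly initialized weights, so the reconstruction is meaningless but the shapes check out.

```python
import torch
from models.vqvae import VQVAE  # assumes the repo root is on PYTHONPATH

vae = VQVAE(test_mode=True)               # defaults: vocab 4096, 32-ch latents, 16x downsample
img = torch.rand(1, 3, 256, 256) * 2 - 1  # inputs are expected in [-1, 1]
idx_Bl = vae.img_to_idxBl(img)            # one LongTensor of shape (1, pn*pn) per scale
print([tuple(t.shape) for t in idx_Bl])   # [(1, 1), (1, 4), ..., (1, 256)]
rec = vae.idxBl_to_img(idx_Bl, same_shape=True, last_one=True)
print(tuple(rec.shape))                   # (1, 3, 256, 256)
```
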
requirements.txt ADDED
@@ -0,0 +1,41 @@
1
+ # torch~=2.1.0
2
+
3
+ # Pillow
4
+ # huggingface_hub
5
+ # numpy
6
+ # pytz
7
+ # transformers
8
+ # typed-argument-parser
9
+ --find-links https://download.pytorch.org/whl/torch_stable.html
10
+
11
+ accelerate==0.28.0
12
+ audio-separator==0.17.2
13
+ av==12.1.0
14
+ bitsandbytes==0.43.1
15
+ decord==0.6.0
16
+ diffusers==0.27.2
17
+ einops==0.8.0
18
+ insightface==0.7.3
19
+ librosa==0.10.2.post1
20
+ mediapipe[vision]==0.10.14
21
+ mlflow==2.13.1
22
+ moviepy==1.0.3
23
+ numpy==1.26.4
24
+ omegaconf==2.3.0
25
+ onnx2torch==1.5.14
26
+ onnx==1.16.1
27
+ onnxruntime-gpu==1.18.0
28
+ opencv-contrib-python==4.9.0.80
29
+ opencv-python-headless==4.9.0.80
30
+ opencv-python==4.9.0.80
31
+ pillow==10.3.0
32
+ setuptools==70.0.0
33
+ torch==2.2.2+cu121
34
+ torchvision==0.17.2+cu121
35
+ tqdm==4.66.4
36
+ transformers==4.39.2
37
+ xformers==0.0.25.post1
38
+ isort==5.13.2
39
+ pylint==3.2.2
40
+ pre-commit==3.7.1
41
+ gradio==4.36.1
train.py ADDED
@@ -0,0 +1,575 @@
1
+ import gc
2
+ import os
3
+ import shutil
4
+ import sys
5
+ import time
6
+ import warnings
7
+ from functools import partial
8
+
9
+ import torch
10
+ from torch.utils.data import DataLoader
11
+
12
+ import dist
13
+ from utils import arg_util, misc
14
+
15
+ from utils.data import build_dataset
16
+ from utils.data_sampler import DistInfiniteBatchSampler, EvalDistributedSampler
17
+ from torch.utils.data import random_split
18
+ from utils.t2i_control import build_t2i_control_code
19
+ from torch.utils.data.distributed import DistributedSampler
20
+
21
+ from utils.misc import auto_resume
22
+ import torch.nn.functional as F
23
+
24
+
25
+
26
+ import warnings
27
+ from transformers import logging
28
+
29
+ warnings.filterwarnings("ignore")  # suppress all Python warnings
30
+ logging.set_verbosity_error()  # silence transformers logging
31
+
32
+ # suppress xformers warnings (these env vars can also be set in the shell launch command)
33
+ import os
34
+ os.environ["XFORMERS_DISABLE_TORCH_VERSION_CHECK"] = "1"
35
+ os.environ["XFORMERS_MORE_DETAILS"] = "0"
36
+
37
+
38
+
39
+ print(f"RANK={os.environ.get('RANK')}, WORLD_SIZE={os.environ.get('WORLD_SIZE')}, LOCAL_RANK={os.environ.get('LOCAL_RANK')}")
40
+
41
+
42
+ def build_everything(args: arg_util.Args):
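+ # builds the TensorBoard logger, dataloaders, VAE/VAR models, optimizer and trainer, and returns them to main_training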
43
+ # resume
44
+ auto_resume_info, start_ep, start_it, trainer_state, args_state = auto_resume(args, 'ar-ckpt*.pth')
45
+ # create tensorboard logger
46
+ tb_lg: misc.TensorboardLogger
47
+ with_tb_lg = dist.is_master()
48
+ if with_tb_lg:
49
+ os.makedirs(args.tb_log_dir_path, exist_ok=True)
50
+ # noinspection PyTypeChecker
51
+ tb_lg = misc.DistLogger(misc.TensorboardLogger(log_dir=args.tb_log_dir_path, filename_suffix=f'__{misc.time_str("%m%d_%H%M")}'), verbose=True)
52
+ tb_lg.flush()
53
+ else:
54
+ # noinspection PyTypeChecker
55
+ tb_lg = misc.DistLogger(None, verbose=False)
56
+ dist.barrier()
57
+
58
+ # log args
59
+ print(f'global bs={args.glb_batch_size}, local bs={args.batch_size}')
60
+ print(f'initial args:\n{str(args)}')
61
+
62
+ # build data
63
+ # if not args.local_debug:
64
+ # print(f'[build PT data] ...\n')
65
+ # num_classes, dataset_train, dataset_val = build_dataset(
66
+ # args.data_path, final_reso=args.data_load_reso, hflip=args.hflip, mid_reso=args.mid_reso,
67
+ # )
68
+ # types = str((type(dataset_train).__name__, type(dataset_val).__name__))
69
+
70
+ # ld_val = DataLoader(
71
+ # dataset_val, num_workers=0, pin_memory=True,
72
+ # batch_size=round(args.batch_size*1.5), sampler=EvalDistributedSampler(dataset_val, num_replicas=dist.get_world_size(), rank=dist.get_rank()),
73
+ # shuffle=False, drop_last=False,
74
+ # )
75
+ # del dataset_val
76
+
77
+ # ld_train = DataLoader(
78
+ # dataset=dataset_train, num_workers=args.workers, pin_memory=True,
79
+ # generator=args.get_different_generator_for_each_rank(), # worker_init_fn=worker_init_fn,
80
+ # batch_sampler=DistInfiniteBatchSampler(
81
+ # dataset_len=len(dataset_train), glb_batch_size=args.glb_batch_size, same_seed_for_all_ranks=args.same_seed_for_all_ranks,
82
+ # shuffle=True, fill_last=True, rank=dist.get_rank(), world_size=dist.get_world_size(), start_ep=start_ep, start_it=start_it,
83
+ # ),
84
+ # )
85
+ # del dataset_train
86
+
87
+ # [print(line) for line in auto_resume_info]
88
+ # print(f'[dataloader multi processing] ...', end='', flush=True)
89
+ # stt = time.time()
90
+ # iters_train = len(ld_train)
91
+ # ld_train = iter(ld_train)
92
+ # # noinspection PyArgumentList
93
+ # print(f' [dataloader multi processing](*) finished! ({time.time()-stt:.2f}s)', flush=True, clean=True)
94
+ # print(f'[dataloader] gbs={args.glb_batch_size}, lbs={args.batch_size}, iters_train={iters_train}, types(tr, va)={types}')
95
+
96
+ # else:
97
+ # num_classes = 1000
98
+ # ld_val = ld_train = None
99
+ # iters_train = 10
100
+
101
+
102
+ # load the datasets
103
+ if not args.local_debug:
104
+ print(f'[build PT data] ...\n')
105
+ print('running in training mode')
106
+
107
+ args.code_path = "dataset/Captioned_ADE20K/train"
108
+ train_dataset = build_t2i_control_code(args)
109
+ args.code_path = "dataset/Captioned_ADE20K/val"
110
+ val_dataset = build_t2i_control_code(args)
111
+ # dataset = build_t2i_control_code(args)  # requires the dataset path to be added in arg_util
112
+ # get the total dataset length
113
+ # total_len = len(dataset)
114
+ # val_len = int(total_len * 0.3)
115
+ # train_len = total_len - val_len
116
+
117
+ # split with random_split (a fixed random seed can be specified)
118
+ #train_dataset, val_dataset = random_split(dataset, [train_len, val_len], generator=torch.Generator().manual_seed(42))
119
+
120
+ sampler = DistributedSampler(
121
+ train_dataset,
122
+ num_replicas=dist.get_world_size(),
123
+ rank=dist.get_rank(),
124
+ shuffle=True,
125
+ seed=args.same_seed_for_all_ranks
126
+ )
127
+
128
+ ld_train = DataLoader(
129
+ train_dataset,
130
+ shuffle=False,
131
+ collate_fn=train_dataset.collate_fn,
132
+ batch_size=int(args.glb_batch_size // dist.get_world_size()),
133
+ num_workers=args.workers,
134
+ pin_memory=True,
135
+ sampler=sampler,
136
+ drop_last=True
137
+ )
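+ # per-rank batch size is glb_batch_size // world_size; shuffling is delegated to the DistributedSampler, hence shuffle=False here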
138
+ ld_val = DataLoader(
139
+ val_dataset, num_workers=0, pin_memory=True,
140
+ batch_size=round(args.batch_size*1.5), sampler=EvalDistributedSampler(val_dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank()),
141
+ shuffle=False, drop_last=True,
142
+ )
143
+
144
+ [print(line) for line in auto_resume_info]
145
+ print(f'[dataloader multi processing] ...', end='', flush=True)
146
+ stt = time.time()
147
+ iters_train = len(ld_train)
148
+ #ld_train = iter(ld_train)
149
+ # noinspection PyArgumentList
150
+ print(f'[dataloader multi processing](*) finished! ({time.time()-stt:.2f}s)', flush=True, clean=True)
151
+ print(f'[dataloader] gbs={args.glb_batch_size}, lbs={args.batch_size}, iters_train={iters_train}')
152
+
153
+ else:
154
+ print('running in local-debug mode')
155
+ ld_val = ld_train = None
156
+ iters_train = 10
157
+
158
+
159
+ # build models
160
+ from torch.nn.parallel import DistributedDataParallel as DDP
161
+ from models import VAR, VQVAE, build_vae_var
162
+ from trainer import VARTrainer
163
+ from utils.amp_sc import AmpOptimizer
164
+ from utils.lr_control import filter_params
165
+ from utils.freeze_utils import freeze_model
166
+
167
+ vae_local, var_wo_ddp = build_vae_var(
168
+ V=4096, Cvae=32, ch=160, share_quant_resi=4, # hard-coded VQVAE hyperparameters
169
+ device=dist.get_device(), patch_nums=args.patch_nums,
170
+ depth=args.depth, shared_aln=args.saln, attn_l2_norm=args.anorm,
171
+ flash_if_available=args.fuse, fused_if_available=args.fuse,
172
+ init_adaln=args.aln, init_adaln_gamma=args.alng, init_head=args.hd, init_std=args.ini,
173
+ )
174
+
175
+ # freeze the specified modules
176
+ # load the var_d12.pth weights and freeze the corresponding modules
177
+ var_ckpt_path = 'var_d12.pth'
178
+ if dist.is_local_master():
179
+ if not os.path.exists(var_ckpt_path):
180
+ os.system(f'wget https://huggingface.co/FoundationVision/var/resolve/main/{var_ckpt_path}')
181
+ dist.barrier()
182
+
183
+ var_ckpt = torch.load(var_ckpt_path, map_location='cuda')
184
+ var_wo_ddp.load_state_dict(var_ckpt, strict=False)
185
+
186
+ # freeze only the parameters present in the checkpoint; keep the listed modules trainable
187
+ from utils.freeze_utils import freeze_model
188
+ freeze_model(
189
+ var_wo_ddp,
190
+ pretrained_state_dict=var_ckpt,
191
+ keep_keywords=('adapter', 'cross_attn', 'condition_layers', 'condition_mlp', 'Caption_embedding', 'word_embed')
192
+ )
193
+ # for n, p in var_wo_ddp.named_parameters():
194
+ # print(f"{n:60} requires_grad={p.requires_grad}")
195
+
196
+
197
+ vae_ckpt = 'vae_ch160v4096z32.pth'
198
+ if dist.is_local_master():
199
+ if not os.path.exists(vae_ckpt):
200
+ os.system(f'wget https://huggingface.co/FoundationVision/var/resolve/main/{vae_ckpt}')
201
+ dist.barrier()
202
+ vae_local.load_state_dict(torch.load(vae_ckpt, map_location='cuda'), strict=True)
203
+ for p in vae_local.parameters():
204
+ p.requires_grad = False
205
+
206
+ vae_local: VQVAE = args.compile_model(vae_local, args.vfast)
207
+ var_wo_ddp: VAR = args.compile_model(var_wo_ddp, args.tfast)
208
+ var: DDP = (DDP if dist.initialized() else NullDDP)(var_wo_ddp, device_ids=[dist.get_local_rank()], find_unused_parameters=True, broadcast_buffers=False)
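+ # find_unused_parameters=True lets DDP tolerate parameters that receive no gradient in a given step; NullDDP (defined at the bottom of this file) keeps the same interface for single-GPU runs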
209
+
210
+ # print(f'[INIT] VAR model = {var_wo_ddp}\n\n')
211
+ count_p = lambda m: f'{sum(p.numel() for p in m.parameters())/1e6:.2f}'
212
+ # print(f'[INIT][#para] ' + ', '.join([f'{k}={count_p(m)}' for k, m in (('VAE', vae_local), ('VAE.enc', vae_local.encoder), ('VAE.dec', vae_local.decoder), ('VAE.quant', vae_local.quantize))]))
213
+ # print(f'[INIT][#para] ' + ', '.join([f'{k}={count_p(m)}' for k, m in (('VAR', var_wo_ddp),)]) + '\n\n')
214
+
215
+ # build optimizer
216
+ names, paras, para_groups = filter_params(var_wo_ddp, nowd_keys={
217
+ 'cls_token', 'start_token', 'task_token', 'cfg_uncond',
218
+ 'pos_embed', 'pos_1LC', 'pos_start', 'start_pos', 'lvl_embed',
219
+ 'gamma', 'beta',
220
+ 'ada_gss', 'moe_bias',
221
+ 'scale_mul',
222
+ })
223
+ opt_clz = {
224
+ 'adam': partial(torch.optim.AdamW, betas=(0.9, 0.95), fused=False),
225
+ 'adamw': partial(torch.optim.AdamW, betas=(0.9, 0.95), fused=False),
226
+ }[args.opt.lower().strip()]
227
+ opt_kw = dict(lr=args.tlr, weight_decay=0)
228
+ print(f'[INIT] optim={opt_clz}, opt_kw={opt_kw}\n')
229
+
230
+ var_optim = AmpOptimizer(
231
+ mixed_precision=args.fp16, optimizer=opt_clz(params=para_groups, **opt_kw), names=names, paras=paras,
232
+ grad_clip=args.tclip, n_gradient_accumulation=args.ac
233
+ )
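+ # AmpOptimizer (utils/amp_sc.py) wraps the optimizer with autocast + GradScaler, gradient clipping and gradient accumulation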
234
+ del names, paras, para_groups
235
+
236
+ # build trainer
237
+ trainer = VARTrainer(
238
+ device=args.device, patch_nums=args.patch_nums, resos=args.resos,
239
+ vae_local=vae_local, var_wo_ddp=var_wo_ddp, var=var,
240
+ var_opt=var_optim, label_smooth=args.ls, args=args
241
+ )
242
+ if trainer_state is not None and len(trainer_state):
243
+ trainer.load_state_dict(trainer_state, strict=False, skip_vae=True) # don't load vae again
244
+ del vae_local, var_wo_ddp, var, var_optim
245
+
246
+ # local debug run
247
+ if args.local_debug:
248
+ rng = torch.Generator('cuda')
249
+ rng.manual_seed(0)
250
+ B = 4
251
+ image = torch.rand(B, 3, args.data_load_reso, args.data_load_reso)
252
+ caption_emb = torch.rand(B, 3, 120, 2048)
253
+ condition_img = torch.rand(B, 3, args.data_load_reso, args.data_load_reso)
254
+
255
+ image = image.to(args.device, non_blocking=True)
256
+ caption_emb = caption_emb.to(args.device, non_blocking=True)
257
+ condition_img = condition_img.to(args.device, non_blocking=True)
258
+
259
+
260
+
261
+ me = misc.MetricLogger(delimiter=' ')
262
+ trainer.train_step(
263
+ it=0, g_it=0,max_it=1, stepping=True, metric_lg=me, tb_lg=tb_lg,
264
+ inp_B3HW=image, caption=caption_emb, condition=condition_img, prog_si=args.pg0, prog_wp_it=20,
265
+ )
266
+ trainer.load_state_dict(trainer.state_dict())
267
+ trainer.train_step(
268
+ it=99, g_it=599,max_it=1999, stepping=True, metric_lg=me, tb_lg=tb_lg,
269
+ inp_B3HW=image, caption=caption_emb, condition=condition_img, prog_si=-1, prog_wp_it=20,
270
+ )
271
+ print({k: meter.global_avg for k, meter in me.meters.items()})
272
+
273
+ args.dump_log(); tb_lg.flush(); tb_lg.close()
274
+ if isinstance(sys.stdout, misc.SyncPrint) and isinstance(sys.stderr, misc.SyncPrint):
275
+ sys.stdout.close(), sys.stderr.close()
276
+ exit(0)
277
+
278
+ dist.barrier()
279
+ return (
280
+ tb_lg, trainer, start_ep, start_it,
281
+ iters_train, ld_train, ld_val
282
+ )
283
+
284
+
285
+ def main_training():
286
+ args: arg_util.Args = arg_util.init_dist_and_get_args()
287
+ if args.local_debug:
288
+ torch.autograd.set_detect_anomaly(True)
289
+
290
+ (
291
+ tb_lg, trainer,
292
+ start_ep, start_it,
293
+ iters_train, ld_train, ld_val
294
+ ) = build_everything(args)
295
+
296
+ # train
297
+ start_time = time.time()
298
+ best_L_mean, best_L_tail, best_acc_mean, best_acc_tail = 999., 999., -1., -1.
299
+ best_val_loss_mean, best_val_loss_tail, best_val_acc_mean, best_val_acc_tail = 999, 999, -1, -1
300
+
301
+ L_mean, L_tail = -1, -1
302
+
303
+ # main training loop
304
+ for ep in range(start_ep, args.ep):
305
+ print(f"Epoch {ep} start...")
306
+ if hasattr(ld_train, 'sampler') and hasattr(ld_train.sampler, 'set_epoch'):
307
+ ld_train.sampler.set_epoch(ep)
308
+ if ep < 3:
309
+ # noinspection PyArgumentList
310
+ print(f'[{type(ld_train).__name__}] [ld_train.sampler.set_epoch({ep})]', flush=True, force=True)
311
+ tb_lg.set_step(ep * iters_train)
312
+
313
+ # # rebuild the dataloader iterator before each epoch
314
+ # ld_or_itrt = iter(ld_train)
315
+
316
+ # stats, (sec, remain_time, finish_time) = train_one_ep(
317
+ # ep, ep == start_ep, start_it if ep == start_ep else 0,
318
+ # args, tb_lg, ld_or_itrt, iters_train, trainer
319
+ # )
320
+
321
+ # train for one epoch
322
+ stats, (sec, remain_time, finish_time) = train_one_ep(
323
+ ep, ep == start_ep, start_it if ep == start_ep else 0, args, tb_lg, ld_train, iters_train, trainer
324
+ )
325
+
326
+ L_mean, L_tail, acc_mean, acc_tail, grad_norm = stats['Lm'], stats['Lt'], stats['Accm'], stats['Acct'], stats['tnm']
327
+ best_L_mean, best_acc_mean = min(best_L_mean, L_mean), max(best_acc_mean, acc_mean)
328
+ if L_tail != -1: best_L_tail, best_acc_tail = min(best_L_tail, L_tail), max(best_acc_tail, acc_tail)
329
+ args.L_mean, args.L_tail, args.acc_mean, args.acc_tail, args.grad_norm = L_mean, L_tail, acc_mean, acc_tail, grad_norm
330
+ args.cur_ep = f'{ep+1}/{args.ep}'
331
+ args.remain_time, args.finish_time = remain_time, finish_time
332
+
333
+ AR_ep_loss = dict(L_mean=L_mean, L_tail=L_tail, acc_mean=acc_mean, acc_tail=acc_tail)
334
+
335
+ # evaluate via trainer.eval_ep (originally every 10 epochs or at the last epoch; here forced on every epoch)
336
+ #is_val_and_also_saving = (ep + 1) % 10 == 0 or (ep + 1) == args.ep
337
+ is_val_and_also_saving = 1
338
+ if is_val_and_also_saving:
339
+ val_loss_mean, val_loss_tail, val_acc_mean, val_acc_tail, tot, cost = trainer.eval_ep(ld_val, args)
340
+ best_updated = best_val_loss_tail > val_loss_tail
341
+ best_val_loss_mean, best_val_loss_tail = min(best_val_loss_mean, val_loss_mean), min(best_val_loss_tail, val_loss_tail)
342
+ best_val_acc_mean, best_val_acc_tail = max(best_val_acc_mean, val_acc_mean), max(best_val_acc_tail, val_acc_tail)
343
+ AR_ep_loss.update(vL_mean=val_loss_mean, vL_tail=val_loss_tail, vacc_mean=val_acc_mean, vacc_tail=val_acc_tail)
344
+ args.vL_mean, args.vL_tail, args.vacc_mean, args.vacc_tail = val_loss_mean, val_loss_tail, val_acc_mean, val_acc_tail
345
+ print(f' [*] [ep{ep}] (val {tot}) Lm: {L_mean:.4f}, Lt: {L_tail:.4f}, Acc m&t: {acc_mean:.2f} {acc_tail:.2f}, Val cost: {cost:.2f}s')
346
+
347
+ # save checkpoints (latest and best)
348
+ if dist.is_local_master():
349
+ local_out_ckpt = os.path.join(args.local_out_dir_path, 'ar-ckpt-last.pth')
350
+ local_out_ckpt_best = os.path.join(args.local_out_dir_path, 'ar-ckpt-best.pth')
351
+ print(f'[saving ckpt] ...', end='', flush=True)
352
+ torch.save({
353
+ 'epoch': ep+1,
354
+ 'iter': 0,
355
+ 'trainer': trainer.state_dict(),
356
+ 'args': args.state_dict(),
357
+ }, local_out_ckpt)
358
+ if best_updated:
359
+ shutil.copy(local_out_ckpt, local_out_ckpt_best)
360
+ print(f' [saving ckpt](*) finished! @ {local_out_ckpt}', flush=True, clean=True)
361
+ dist.barrier()
362
+
363
+ # print and log
364
+ print( f' [ep{ep}] (training ) Lm: {best_L_mean:.3f} ({L_mean:.3f}), Lt: {best_L_tail:.3f} ({L_tail:.3f}), Acc m&t: {best_acc_mean:.2f} {best_acc_tail:.2f}, Remain: {remain_time}, Finish: {finish_time}', flush=True)
365
+ tb_lg.update(head='AR_ep_loss', step=ep+1, **AR_ep_loss)
366
+ tb_lg.update(head='AR_z_burnout', step=ep+1, rest_hours=round(sec / 60 / 60, 2))
367
+ args.dump_log(); tb_lg.flush()
368
+
369
+ # finish training
370
+ total_time = f'{(time.time() - start_time) / 60 / 60:.1f}h'
371
+ print('\n\n')
372
+ print(f' [*] [PT finished] Total cost: {total_time}, Lm: {best_L_mean:.3f} ({L_mean}), Lt: {best_L_tail:.3f} ({L_tail})')
373
+ print('\n\n')
374
+
375
+ if 'stats' in locals():
376
+ del stats
377
+
378
+ del iters_train, ld_train
379
+ time.sleep(3), gc.collect(), torch.cuda.empty_cache(), time.sleep(3)
380
+
381
+ args.remain_time, args.finish_time = '-', time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time() - 60))
382
+ print(f'final args:\n\n{str(args)}')
383
+ args.dump_log(); tb_lg.flush(); tb_lg.close()
384
+ dist.barrier()
385
+
386
+
387
+ def train_one_ep(ep: int, is_first_ep: bool, start_it: int, args: arg_util.Args, tb_lg: misc.TensorboardLogger, ld_or_itrt, iters_train: int, trainer):
388
+ # import heavy packages after Dataloader object creation
389
+ from trainer import VARTrainer
390
+ from utils.lr_control import lr_wd_annealing
391
+ trainer: VARTrainer
392
+
393
+ step_cnt = 0
394
+ me = misc.MetricLogger(delimiter=' ')
395
+ me.add_meter('tlr', misc.SmoothedValue(window_size=1, fmt='{value:.2g}'))
396
+ me.add_meter('tnm', misc.SmoothedValue(window_size=1, fmt='{value:.2f}'))
397
+ [me.add_meter(x, misc.SmoothedValue(fmt='{median:.3f} ({global_avg:.3f})')) for x in ['Lm', 'Lt']]
398
+ [me.add_meter(x, misc.SmoothedValue(fmt='{median:.2f} ({global_avg:.2f})')) for x in ['Accm', 'Acct']]
399
+ header = f'[Ep]: [{ep:4d}/{args.ep}]'
400
+
401
+ if is_first_ep:
402
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
403
+ warnings.filterwarnings('ignore', category=UserWarning)
404
+ g_it, max_it = ep * iters_train, args.ep * iters_train
405
+
406
+ if isinstance(ld_or_itrt, DataLoader):
407
+ ld_or_itrt = iter(ld_or_itrt)
408
+
409
+
410
+ # read the inputs
411
+ for it, batch in me.log_every(start_it, iters_train, ld_or_itrt, 30 if iters_train > 8000 else 5, header):
412
+ g_it = ep * iters_train + it
413
+ if it < start_it: continue
414
+ if is_first_ep and it == start_it: warnings.resetwarnings()
415
+
416
+ print(f" running iter {it}")
417
+
418
+ image = batch['image']  # original image
419
+
420
+ image = F.interpolate(image, size=(args.data_load_reso, args.data_load_reso), mode='bicubic', align_corners=False)
421
+
422
+
423
+ # print("image.shape:", image.shape)
424
+ # print("args.data_load_reso:", args.data_load_reso)
425
+
426
+ # code = batch['code']  # VQ codes of the generated image (npy)
427
+ caption_emb = batch['caption_emb']  # text T5 embedding (npz)
428
+ condition_img = batch['control']  # condition image (e.g. Canny edges)
429
+
430
+ # code = code.to(args.device, non_blocking=True)
431
+ image = image.to(args.device, non_blocking=True)
432
+ caption_emb = caption_emb.to(args.device, non_blocking=True)
433
+ condition_img = condition_img.to(args.device, non_blocking=True)
434
+
435
+
436
+ args.cur_it = f'{it+1}/{iters_train}'
437
+
438
+ wp_it = args.wp * iters_train
439
+ min_tlr, max_tlr, min_twd, max_twd = lr_wd_annealing(args.sche, trainer.var_opt.optimizer, args.tlr, args.twd, args.twde, g_it, wp_it, max_it, wp0=args.wp0, wpe=args.wpe)
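+ # lr_wd_annealing updates the optimizer's param-group lr/wd in place and returns their min/max values for logging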
440
+ args.cur_lr, args.cur_wd = max_tlr, max_twd
441
+
442
+ if args.pg: # default: args.pg == 0.0, means no progressive training, won't get into this
443
+ if g_it <= wp_it: prog_si = args.pg0
444
+ elif g_it >= max_it*args.pg: prog_si = len(args.patch_nums) - 1
445
+ else:
446
+ delta = len(args.patch_nums) - 1 - args.pg0
447
+ progress = min(max((g_it - wp_it) / (max_it*args.pg - wp_it), 0), 1) # from 0 to 1
448
+ prog_si = args.pg0 + round(progress * delta) # from args.pg0 to len(args.patch_nums)-1
449
+ else:
450
+ prog_si = -1
451
+
452
+ stepping = (g_it + 1) % args.ac == 0
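+ # only step the optimizer every args.ac iterations (gradient accumulation)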
453
+ step_cnt += int(stepping)
454
+
455
+ grad_norm, scale_log2 = trainer.train_step(
456
+ it=it, g_it=g_it,max_it=max_it, stepping=stepping, metric_lg=me, tb_lg=tb_lg,
457
+ inp_B3HW=image, caption=caption_emb, condition=condition_img, prog_si=prog_si, prog_wp_it=args.pgwp * iters_train,
458
+ )
459
+
460
+ me.update(tlr=max_tlr)
461
+ tb_lg.set_step(step=g_it)
462
+ tb_lg.update(head='AR_opt_lr/lr_min', sche_tlr=min_tlr)
463
+ tb_lg.update(head='AR_opt_lr/lr_max', sche_tlr=max_tlr)
464
+ tb_lg.update(head='AR_opt_wd/wd_max', sche_twd=max_twd)
465
+ tb_lg.update(head='AR_opt_wd/wd_min', sche_twd=min_twd)
466
+ tb_lg.update(head='AR_opt_grad/fp16', scale_log2=scale_log2)
467
+
468
+ if args.tclip > 0:
469
+ tb_lg.update(head='AR_opt_grad/grad', grad_norm=grad_norm)
470
+ tb_lg.update(head='AR_opt_grad/grad', grad_clip=args.tclip)
471
+
472
+ me.synchronize_between_processes()
473
+ return {k: meter.global_avg for k, meter in me.meters.items()}, me.iter_time.time_preds(max_it - (g_it + 1) + (args.ep - ep) * 15) # +15: other cost
474
+
475
+ # a no-op DDP stand-in for single-GPU runs
476
+ class NullDDP(torch.nn.Module):
477
+ def __init__(self, module, *args, **kwargs):
478
+ super(NullDDP, self).__init__()
479
+ self.module = module
480
+ self.require_backward_grad_sync = False
481
+
482
+ def forward(self, *args, **kwargs):
483
+ return self.module(*args, **kwargs)
484
+
485
+
486
+ if __name__ == '__main__':
487
+ try: main_training()
488
+ finally:
489
+ dist.finalize()
490
+ if isinstance(sys.stdout, misc.SyncPrint) and isinstance(sys.stderr, misc.SyncPrint):
491
+ sys.stdout.close(), sys.stderr.close()
492
+
493
+
494
+
495
+ # def main_training():
496
+ # args: arg_util.Args = arg_util.init_dist_and_get_args()
497
+ # if args.local_debug:
498
+ # torch.autograd.set_detect_anomaly(True)
499
+
500
+ # (
501
+ # tb_lg, trainer,
502
+ # start_ep, start_it,
503
+ # iters_train, ld_train, ld_val
504
+ # ) = build_everything(args)
505
+
506
+ # # train
507
+ # start_time = time.time()
508
+ # best_L_mean, best_L_tail = 999., 999.
509
+ # best_val_loss_mean, best_val_loss_tail, best_val_fid = 999, 999, 999
510
+
511
+ # L_mean, L_tail = -1, -1
512
+ # for ep in range(start_ep, args.ep):
513
+ # if hasattr(ld_train, 'sampler') and hasattr(ld_train.sampler, 'set_epoch'):
514
+ # ld_train.sampler.set_epoch(ep)
515
+ # if ep < 3:
516
+ # print(f'[{type(ld_train).__name__}] [ld_train.sampler.set_epoch({ep})]', flush=True, force=True)
517
+ # tb_lg.set_step(ep * iters_train)
518
+
519
+ # stats, (sec, remain_time, finish_time) = train_one_ep(
520
+ # ep, ep == start_ep, start_it if ep == start_ep else 0, args, tb_lg, ld_train, iters_train, trainer
521
+ # )
522
+
523
+ # L_mean, L_tail, grad_norm = stats['Lm'], stats['Lt'], stats['tnm']
524
+ # best_L_mean = min(best_L_mean, L_mean)
525
+ # if L_tail != -1:
526
+ # best_L_tail = min(best_L_tail, L_tail)
527
+ # args.L_mean, args.L_tail, args.grad_norm = L_mean, L_tail, grad_norm
528
+ # args.cur_ep = f'{ep+1}/{args.ep}'
529
+ # args.remain_time, args.finish_time = remain_time, finish_time
530
+
531
+ # AR_ep_loss = dict(L_mean=L_mean, L_tail=L_tail)
532
+ # is_val_and_also_saving = (ep + 1) % 10 == 0 or (ep + 1) == args.ep
533
+ # if is_val_and_also_saving:
534
+ # val_loss_mean, val_loss_tail, fid_score, tot, cost = trainer.eval_ep(ld_val)
535
+ # best_updated = best_val_loss_tail > val_loss_tail
536
+ # best_val_loss_mean = min(best_val_loss_mean, val_loss_mean)
537
+ # best_val_loss_tail = min(best_val_loss_tail, val_loss_tail)
538
+ # best_val_fid = min(best_val_fid, fid_score)
539
+ # AR_ep_loss.update(vL_mean=val_loss_mean, vL_tail=val_loss_tail, FID=fid_score)
540
+ # args.vL_mean, args.vL_tail, args.fid_score = val_loss_mean, val_loss_tail, fid_score
541
+ # print(f' [*] [ep{ep}] (val {tot}) Lm: {L_mean:.4f}, Lt: {L_tail:.4f}, FID: {fid_score:.2f}, Val cost: {cost:.2f}s')
542
+
543
+ # if dist.is_local_master():
544
+ # local_out_ckpt = os.path.join(args.local_out_dir_path, 'ar-ckpt-last.pth')
545
+ # local_out_ckpt_best = os.path.join(args.local_out_dir_path, 'ar-ckpt-best.pth')
546
+ # print(f'[saving ckpt] ...', end='', flush=True)
547
+ # torch.save({
548
+ # 'epoch': ep+1,
549
+ # 'iter': 0,
550
+ # 'trainer': trainer.state_dict(),
551
+ # 'args': args.state_dict(),
552
+ # }, local_out_ckpt)
553
+ # if best_updated:
554
+ # shutil.copy(local_out_ckpt, local_out_ckpt_best)
555
+ # print(f' [saving ckpt](*) finished! @ {local_out_ckpt}', flush=True, clean=True)
556
+ # dist.barrier()
557
+
558
+ # print(f' [ep{ep}] (training ) Lm: {best_L_mean:.3f} ({L_mean:.3f}), Lt: {best_L_tail:.3f} ({L_tail:.3f}), Remain: {remain_time}, Finish: {finish_time}', flush=True)
559
+ # tb_lg.update(head='AR_ep_loss', step=ep+1, **AR_ep_loss)
560
+ # tb_lg.update(head='AR_z_burnout', step=ep+1, rest_hours=round(sec / 60 / 60, 2))
561
+ # args.dump_log(); tb_lg.flush()
562
+
563
+ # total_time = f'{(time.time() - start_time) / 60 / 60:.1f}h'
564
+ # print('\n\n')
565
+ # print(f' [*] [PT finished] Total cost: {total_time}, Lm: {best_L_mean:.3f} ({L_mean}), Lt: {best_L_tail:.3f} ({L_tail}), Best FID: {best_val_fid:.2f}')
566
+ # print('\n\n')
567
+
568
+ # del stats
569
+ # del iters_train, ld_train
570
+ # time.sleep(3), gc.collect(), torch.cuda.empty_cache(), time.sleep(3)
571
+
572
+ # args.remain_time, args.finish_time = '-', time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time() - 60))
573
+ # print(f'final args:\n\n{str(args)}')
574
+ # args.dump_log(); tb_lg.flush(); tb_lg.close()
575
+ # dist.barrier()
trainer.py ADDED
@@ -0,0 +1,262 @@
1
+ import time
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.nn.parallel import DistributedDataParallel as DDP
7
+ from torch.utils.data import DataLoader
8
+ import torch.nn.functional as F
9
+
10
+ import dist
11
+ from models import VAR, VQVAE, VectorQuantizer2
12
+ from utils.amp_sc import AmpOptimizer
13
+ from utils.misc import MetricLogger, TensorboardLogger
14
+
15
+ from torchvision.utils import save_image
16
+ # from torchmetrics.image.fid import FrechetInceptionDistance
17
+ # from torchmetrics.image.inception import InceptionScore
18
+ from torchvision.utils import save_image
19
+ from torchvision.transforms.functional import to_pil_image
20
+ import tempfile
21
+ import os
22
+ import shutil
23
+ # from torch_fidelity import calculate_metrics  # or your own FID implementation
24
+
25
+ Ten = torch.Tensor
26
+ FTen = torch.Tensor
27
+ ITen = torch.LongTensor
28
+ BTen = torch.BoolTensor
29
+
30
+
31
+ class VARTrainer(object):
32
+ def __init__(
33
+ self, device, patch_nums: Tuple[int, ...], resos: Tuple[int, ...],
34
+ vae_local: VQVAE, var_wo_ddp: VAR, var: DDP,
35
+ var_opt: AmpOptimizer, label_smooth: float, args,
36
+ ):
37
+ super(VARTrainer, self).__init__()
38
+
39
+ self.args= args
40
+ self.var, self.vae_local, self.quantize_local = var, vae_local, vae_local.quantize
41
+ self.quantize_local: VectorQuantizer2
42
+ self.var_wo_ddp: VAR = var_wo_ddp # after torch.compile
43
+ self.var_opt = var_opt
44
+
45
+ del self.var_wo_ddp.rng
46
+ self.var_wo_ddp.rng = torch.Generator(device=device)
47
+
48
+ self.label_smooth = label_smooth
49
+ self.train_loss = nn.CrossEntropyLoss(label_smoothing=label_smooth, reduction='none')
50
+ self.val_loss = nn.CrossEntropyLoss(label_smoothing=0.0, reduction='mean')
51
+ self.L = sum(pn * pn for pn in patch_nums)
52
+ self.last_l = patch_nums[-1] * patch_nums[-1]
53
+ self.loss_weight = torch.ones(1, self.L, device=device) / self.L
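+ # uniform per-token loss weight (1/L) over all token positions across scales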
54
+
55
+ self.patch_nums, self.resos = patch_nums, resos
56
+ self.begin_ends = []
57
+ cur = 0
58
+ for i, pn in enumerate(patch_nums):
59
+ self.begin_ends.append((cur, cur + pn * pn))
60
+ cur += pn*pn
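+ # begin_ends[si] = (start, end) indices of scale si's tokens within the flattened length-L sequence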
61
+
62
+ self.prog_it = 0
63
+ self.last_prog_si = -1
64
+ self.first_prog = True
65
+
66
+ @torch.no_grad()
67
+ # def eval_ep(self, ld_val: DataLoader):
68
+ # tot = 0
69
+ # L_mean, L_tail, acc_mean, acc_tail = 0, 0, 0, 0
70
+ # stt = time.time()
71
+ # training = self.var_wo_ddp.training
72
+ # self.var_wo_ddp.eval()
73
+ # generated_images = []
74
+ # real_images = []
75
+ # all_captions = []
76
+
77
+
78
+ # for inp_B3HW, label_B in ld_val:
79
+ # B, V = label_B.shape[0], self.vae_local.vocab_size
80
+ # inp_B3HW = inp_B3HW.to(dist.get_device(), non_blocking=True)
81
+ # label_B = label_B.to(dist.get_device(), non_blocking=True)
82
+
83
+ # gt_idx_Bl: List[ITen] = self.vae_local.img_to_idxBl(inp_B3HW)
84
+ # gt_BL = torch.cat(gt_idx_Bl, dim=1)
85
+ # x_BLCv_wo_first_l: Ten = self.quantize_local.idxBl_to_var_input(gt_idx_Bl)
86
+
87
+ # self.var_wo_ddp.forward
88
+ # logits_BLV = self.var_wo_ddp(label_B, x_BLCv_wo_first_l)
89
+ # L_mean += self.val_loss(logits_BLV.data.view(-1, V), gt_BL.view(-1)) * B
90
+ # L_tail += self.val_loss(logits_BLV.data[:, -self.last_l:].reshape(-1, V), gt_BL[:, -self.last_l:].reshape(-1)) * B
91
+ # acc_mean += (logits_BLV.data.argmax(dim=-1) == gt_BL).sum() * (100/gt_BL.shape[1])
92
+ # acc_tail += (logits_BLV.data[:, -self.last_l:].argmax(dim=-1) == gt_BL[:, -self.last_l:]).sum() * (100 / self.last_l)
93
+ # tot += B
94
+ # self.var_wo_ddp.train(training)
95
+
96
+ # stats = L_mean.new_tensor([L_mean.item(), L_tail.item(), acc_mean.item(), acc_tail.item(), tot])
97
+ # dist.allreduce(stats)
98
+ # tot = round(stats[-1].item())
99
+ # stats /= tot
100
+ # L_mean, L_tail, acc_mean, acc_tail, _ = stats.tolist()
101
+ # return L_mean, L_tail, acc_mean, acc_tail, tot, time.time()-stt
102
+ def eval_ep(self, ld_val: DataLoader, args):
103
+ tot = 0
104
+ L_mean, L_tail, acc_mean, acc_tail = 0, 0, 0, 0
105
+ stt = time.time()
106
+ training = self.var_wo_ddp.training
107
+ self.var_wo_ddp.eval()
108
+
109
+ for batch in ld_val:
110
+ inp_B3HW = batch['image'].to(dist.get_device(), non_blocking=True)
111
+ caption_emb = batch['caption_emb']  # text T5 embedding (npz)
112
+ condition_img = batch['control']  # condition image (e.g. Canny edges)
113
+
114
+ caption_emb = caption_emb.to(args.device, non_blocking=True)
115
+ condition_img = condition_img.to(args.device, non_blocking=True)
116
+
117
+ B, V = inp_B3HW.shape[0], self.vae_local.vocab_size
118
+
119
+ # inp_B3HW = batch['image']  # original image
120
+
121
+ # inp_B3HW = F.interpolate(inp_B3HW, size=(args.data_load_reso, args.data_load_reso), mode='bicubic', align_corners=False)
122
+ inp_B3HW = F.interpolate(inp_B3HW, size=(self.args.data_load_reso, self.args.data_load_reso), mode='bicubic', align_corners=False)
123
+
124
+
125
+ gt_idx_Bl: List[ITen] = self.vae_local.img_to_idxBl(inp_B3HW)
126
+ gt_BL = torch.cat(gt_idx_Bl, dim=1)
127
+ x_BLCv_wo_first_l: Ten = self.quantize_local.idxBl_to_var_input(gt_idx_Bl)
128
+
129
+ logits_BLV = self.var_wo_ddp(caption_emb, x_BLCv_wo_first_l, condition_img)
130
+
131
+ L_mean += self.val_loss(logits_BLV.data.view(-1, V), gt_BL.view(-1)) * B
132
+ L_tail += self.val_loss(logits_BLV.data[:, -self.last_l:].reshape(-1, V), gt_BL[:, -self.last_l:].reshape(-1)) * B
133
+ acc_mean += (logits_BLV.data.argmax(dim=-1) == gt_BL).sum() * (100 / gt_BL.shape[1])
134
+ acc_tail += (logits_BLV.data[:, -self.last_l:].argmax(dim=-1) == gt_BL[:, -self.last_l:]).sum() * (100 / self.last_l)
135
+ tot += B
136
+
137
+ self.var_wo_ddp.train(training)
138
+
139
+ stats = L_mean.new_tensor([L_mean.item(), L_tail.item(), acc_mean.item(), acc_tail.item(), tot])
140
+ dist.allreduce(stats)
141
+ tot = round(stats[-1].item())
142
+ stats /= tot
143
+ L_mean, L_tail, acc_mean, acc_tail, _ = stats.tolist()
144
+ return L_mean, L_tail, acc_mean, acc_tail, tot, time.time() - stt
145
+
146
+ def train_step(
147
+ self, it: int, g_it: int, max_it: int, stepping: bool, metric_lg: MetricLogger, tb_lg: TensorboardLogger,
148
+ inp_B3HW: FTen, caption: Union[ITen, FTen], condition, prog_si: int, prog_wp_it: float,
149
+ ) -> Tuple[Optional[Union[Ten, float]], Optional[float]]:
150
+ # if progressive training
151
+ self.var_wo_ddp.prog_si = self.vae_local.quantize.prog_si = prog_si
152
+ if self.last_prog_si != prog_si:
153
+ if self.last_prog_si != -1: self.first_prog = False
154
+ self.last_prog_si = prog_si
155
+ self.prog_it = 0
156
+ self.prog_it += 1
157
+ prog_wp = max(min(self.prog_it / prog_wp_it, 1), 0.01)
158
+ if self.first_prog: prog_wp = 1 # no prog warmup at first prog stage, as it's already solved in wp
159
+ if prog_si == len(self.patch_nums) - 1: prog_si = -1 # max prog, as if no prog
160
+
161
+ # forward
162
+ B, V = inp_B3HW.shape[0], self.vae_local.vocab_size
163
+ self.var.require_backward_grad_sync = stepping
164
+
165
+ # convert the raw image into multi-scale VQ token indices and the VAR teacher-forcing input
166
+ gt_idx_Bl: List[ITen] = self.vae_local.img_to_idxBl(inp_B3HW)
167
+ gt_BL = torch.cat(gt_idx_Bl, dim=1)
168
+ x_BLCv_wo_first_l: Ten = self.quantize_local.idxBl_to_var_input(gt_idx_Bl)
169
+
170
+ with self.var_opt.amp_ctx:
171
+ self.var_wo_ddp.forward
172
+ logits_BLV = self.var(caption, x_BLCv_wo_first_l, condition, g_it, max_it)  # feed the image token inputs plus caption and condition into the VAR model
173
+ loss = self.train_loss(logits_BLV.view(-1, V), gt_BL.view(-1)).view(B, -1)
174
+ if prog_si >= 0: # in progressive training
175
+ bg, ed = self.begin_ends[prog_si]
176
+ assert logits_BLV.shape[1] == gt_BL.shape[1] == ed
177
+ lw = self.loss_weight[:, :ed].clone()
178
+ lw[:, bg:ed] *= min(max(prog_wp, 0), 1)
179
+ else: # not in progressive training
180
+ lw = self.loss_weight
181
+ loss = loss.mul(lw).sum(dim=-1).mean()
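+ # per-token cross-entropy weighted by lw (uniform 1/L by default), summed over positions and averaged over the batch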
182
+
183
+ # backward
184
+ grad_norm, scale_log2 = self.var_opt.backward_clip_step(loss=loss, stepping=stepping)
185
+
186
+ # log
187
+ pred_BL = logits_BLV.data.argmax(dim=-1)
188
+ if it == 0 or it in metric_lg.log_iters:
189
+ Lmean = self.val_loss(logits_BLV.data.view(-1, V), gt_BL.view(-1)).item()
190
+ acc_mean = (pred_BL == gt_BL).float().mean().item() * 100
191
+ if prog_si >= 0: # in progressive training
192
+ Ltail = acc_tail = -1
193
+ else: # not in progressive training
194
+ Ltail = self.val_loss(logits_BLV.data[:, -self.last_l:].reshape(-1, V), gt_BL[:, -self.last_l:].reshape(-1)).item()
195
+ acc_tail = (pred_BL[:, -self.last_l:] == gt_BL[:, -self.last_l:]).float().mean().item() * 100
196
+ grad_norm = grad_norm.item()
197
+ metric_lg.update(Lm=Lmean, Lt=Ltail, Accm=acc_mean, Acct=acc_tail, tnm=grad_norm)
198
+
199
+ # log to tensorboard
200
+ if g_it == 0 or (g_it + 1) % 500 == 0:
201
+ prob_per_class_is_chosen = pred_BL.view(-1).bincount(minlength=V).float()
202
+ dist.allreduce(prob_per_class_is_chosen)
203
+ prob_per_class_is_chosen /= prob_per_class_is_chosen.sum()
204
+ cluster_usage = (prob_per_class_is_chosen > 0.001 / V).float().mean().item() * 100
205
+ if dist.is_master():
206
+ if g_it == 0:
207
+ tb_lg.update(head='AR_iter_loss', z_voc_usage=cluster_usage, step=-10000)
208
+ tb_lg.update(head='AR_iter_loss', z_voc_usage=cluster_usage, step=-1000)
209
+ kw = dict(z_voc_usage=cluster_usage)
210
+ for si, (bg, ed) in enumerate(self.begin_ends):
211
+ if 0 <= prog_si < si: break
212
+ pred, tar = logits_BLV.data[:, bg:ed].reshape(-1, V), gt_BL[:, bg:ed].reshape(-1)
213
+ acc = (pred.argmax(dim=-1) == tar).float().mean().item() * 100
214
+ ce = self.val_loss(pred, tar).item()
215
+ kw[f'acc_{self.resos[si]}'] = acc
216
+ kw[f'L_{self.resos[si]}'] = ce
217
+ tb_lg.update(head='AR_iter_loss', **kw, step=g_it)
218
+ tb_lg.update(head='AR_iter_schedule', prog_a_reso=self.resos[prog_si], prog_si=prog_si, prog_wp=prog_wp, step=g_it)
219
+
220
+ self.var_wo_ddp.prog_si = self.vae_local.quantize.prog_si = -1
221
+ return grad_norm, scale_log2
222
+
223
+ def get_config(self):
224
+ return {
225
+ 'patch_nums': self.patch_nums, 'resos': self.resos,
226
+ 'label_smooth': self.label_smooth,
227
+ 'prog_it': self.prog_it, 'last_prog_si': self.last_prog_si, 'first_prog': self.first_prog,
228
+ }
229
+
230
+ def state_dict(self):
231
+ state = {'config': self.get_config()}
232
+ for k in ('var_wo_ddp', 'vae_local', 'var_opt'):
233
+ m = getattr(self, k)
234
+ if m is not None:
235
+ if hasattr(m, '_orig_mod'):
236
+ m = m._orig_mod
237
+ state[k] = m.state_dict()
238
+ return state
239
+
240
+ def load_state_dict(self, state, strict=True, skip_vae=False):
241
+ for k in ('var_wo_ddp', 'vae_local', 'var_opt'):
242
+ if skip_vae and 'vae' in k: continue
243
+ m = getattr(self, k)
244
+ if m is not None:
245
+ if hasattr(m, '_orig_mod'):
246
+ m = m._orig_mod
247
+ ret = m.load_state_dict(state[k], strict=strict)
248
+ if ret is not None:
249
+ missing, unexpected = ret
250
+ print(f'[VARTrainer.load_state_dict] {k} missing: {missing}')
251
+ print(f'[VARTrainer.load_state_dict] {k} unexpected: {unexpected}')
252
+
253
+ config: dict = state.pop('config', None)
254
+ self.prog_it = config.get('prog_it', 0)
255
+ self.last_prog_si = config.get('last_prog_si', -1)
256
+ self.first_prog = config.get('first_prog', True)
257
+ if config is not None:
258
+ for k, v in self.get_config().items():
259
+ if config.get(k, None) != v:
260
+ err = f'[VAR.load_state_dict] config mismatch: this.{k}={v} (ckpt.{k}={config.get(k, None)})'
261
+ if strict: raise AttributeError(err)
262
+ else: print(err)
utils/amp_sc.py ADDED
@@ -0,0 +1,94 @@
1
+ import math
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+
6
+
7
+ class NullCtx:
8
+ def __enter__(self):
9
+ pass
10
+
11
+ def __exit__(self, exc_type, exc_val, exc_tb):
12
+ pass
13
+
14
+
15
+ class AmpOptimizer:
16
+ def __init__(
17
+ self,
18
+ mixed_precision: int,
19
+ optimizer: torch.optim.Optimizer, names: List[str], paras: List[torch.nn.Parameter],
20
+ grad_clip: float, n_gradient_accumulation: int = 1,
21
+ ):
22
+ self.enable_amp = mixed_precision > 0
23
+ self.using_fp16_rather_bf16 = mixed_precision == 1
24
+
25
+ if self.enable_amp:
26
+ self.amp_ctx = torch.autocast('cuda', enabled=True, dtype=torch.float16 if self.using_fp16_rather_bf16 else torch.bfloat16, cache_enabled=True)
27
+ self.scaler = torch.cuda.amp.GradScaler(init_scale=2. ** 11, growth_interval=1000) if self.using_fp16_rather_bf16 else None # only fp16 needs a scaler
28
+ else:
29
+ self.amp_ctx = NullCtx()
30
+ self.scaler = None
31
+
32
+ self.optimizer, self.names, self.paras = optimizer, names, paras # paras have been filtered so everyone requires grad
33
+ self.grad_clip = grad_clip
34
+ self.early_clipping = self.grad_clip > 0 and not hasattr(optimizer, 'global_grad_norm')
35
+ self.late_clipping = self.grad_clip > 0 and hasattr(optimizer, 'global_grad_norm')
36
+
37
+ self.r_accu = 1 / n_gradient_accumulation # r_accu == 1.0 / n_gradient_accumulation
38
+
39
+ def backward_clip_step(
40
+ self, stepping: bool, loss: torch.Tensor,
41
+ ) -> Tuple[Optional[Union[torch.Tensor, float]], Optional[float]]:
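+ # scales/accumulates the loss and backprops; when stepping, unscales, clips and steps the optimizer; returns (grad norm, log2 of the fp16 loss scale), either possibly None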
42
+ # backward
43
+ loss = loss.mul(self.r_accu) # r_accu == 1.0 / n_gradient_accumulation
44
+ orig_norm = scaler_sc = None
45
+ if self.scaler is not None:
46
+ self.scaler.scale(loss).backward(retain_graph=False, create_graph=False)
47
+ else:
48
+ loss.backward(retain_graph=False, create_graph=False)
49
+
50
+ if stepping:
51
+ if self.scaler is not None: self.scaler.unscale_(self.optimizer)
52
+ if self.early_clipping:
53
+ orig_norm = torch.nn.utils.clip_grad_norm_(self.paras, self.grad_clip)
54
+
55
+ if self.scaler is not None:
56
+ self.scaler.step(self.optimizer)
57
+ scaler_sc: float = self.scaler.get_scale()
58
+ if scaler_sc > 32768.: # fp16 will overflow when >65536, so multiply 32768 could be dangerous
59
+ self.scaler.update(new_scale=32768.)
60
+ else:
61
+ self.scaler.update()
62
+ try:
63
+ # scaler_sc = float(math.log2(scaler_sc))
64
+ if scaler_sc is None or scaler_sc <= 0.0:
65
+ print(f"[Warning] Invalid scaler_sc = {scaler_sc}, using log2 fallback = -inf")
66
+ scaler_sc = float("-inf")  # or 0.0 / -1.0 / None, depending on your training needs
67
+ else:
68
+ scaler_sc = float(math.log2(scaler_sc))
69
+ except Exception as e:
70
+ print(f'[scaler_sc = {scaler_sc}]\n' * 15, flush=True)
71
+ raise e
72
+ else:
73
+ self.optimizer.step()
74
+
75
+ if self.late_clipping:
76
+ orig_norm = self.optimizer.global_grad_norm
77
+
78
+ self.optimizer.zero_grad(set_to_none=True)
79
+
80
+ return orig_norm, scaler_sc
81
+
82
+ def state_dict(self):
83
+ return {
84
+ 'optimizer': self.optimizer.state_dict()
85
+ } if self.scaler is None else {
86
+ 'scaler': self.scaler.state_dict(),
87
+ 'optimizer': self.optimizer.state_dict()
88
+ }
89
+
90
+ def load_state_dict(self, state, strict=True):
91
+ if self.scaler is not None:
92
+ try: self.scaler.load_state_dict(state['scaler'])
93
+ except Exception as e: print(f'[fp16 load_state_dict err] {e}')
94
+ self.optimizer.load_state_dict(state['optimizer'])
utils/arg_util.py ADDED
@@ -0,0 +1,296 @@
1
+ import json
2
+ import os
3
+ import random
4
+ import re
5
+ import subprocess
6
+ import sys
7
+ import time
8
+ from collections import OrderedDict
9
+ from typing import Optional, Union
10
+
11
+ import numpy as np
12
+ import torch
13
+
14
+ try:
15
+ from tap import Tap
16
+ except ImportError as e:
17
+ print(f'`>>>>>>>> from tap import Tap` failed, please run: pip3 install typed-argument-parser <<<<<<<<', file=sys.stderr, flush=True)
18
+ print(f'`>>>>>>>> from tap import Tap` failed, please run: pip3 install typed-argument-parser <<<<<<<<', file=sys.stderr, flush=True)
19
+ time.sleep(5)
20
+ raise e
21
+
22
+ import dist
23
+
24
+
25
+ class Args(Tap):
26
+ data_path: str = 'dataset/Captioned_ADE20K/train'
27
+ exp_name: str = 'text'
28
+
29
+ # build_t2i_control_code
30
+ code_path: str = 'dataset/Captioned_ADE20K'
31
+ image_size: int = 256
32
+ downsample_size: int = 32
33
+ get_image: bool = True
34
+ get_prompt: bool = False
35
+ get_label: bool = True
36
+ condition_type: str = 'seg'
37
+
38
+
39
+ # VAE
40
+ vfast: int = 0 # torch.compile VAE; =0: not compile; 1: compile with 'reduce-overhead'; 2: compile with 'max-autotune'
41
+ # VAR
42
+ tfast: int = 0 # torch.compile VAR; =0: not compile; 1: compile with 'reduce-overhead'; 2: compile with 'max-autotune'
43
+ depth: int = 16 # VAR depth
44
+ # VAR initialization
45
+ ini: float = -1 # -1: automated model parameter initialization
46
+ hd: float = 0.02 # head.w *= hd
47
+ aln: float = 0.5 # the multiplier of ada_lin.w's initialization
48
+ alng: float = 1e-5 # the multiplier of ada_lin.w[gamma channels]'s initialization
49
+ # VAR optimization
50
+ fp16: int = 0 # 1: using fp16, 2: bf16
51
+ tblr: float = 1e-4 # base lr
52
+ tlr: float = None # lr = base lr * (bs / 256)
53
+ twd: float = 0.05 # initial wd
54
+ twde: float = 0 # final wd, =twde or twd
55
+ tclip: float = 2. # <=0 for not using grad clip
56
+ ls: float = 0.0 # label smooth
57
+
58
+ bs: int = 768 # global batch size
59
+ batch_size: int = 0 # [automatically set; don't specify this] batch size per GPU = round(args.bs / args.ac / dist.get_world_size() / 8) * 8
60
+ glb_batch_size: int = 0 # [automatically set; don't specify this] global batch size = args.batch_size * dist.get_world_size()
61
+ ac: int = 1 # gradient accumulation
62
+
63
+ ep: int = 50
64
+ wp: float = 0
65
+ wp0: float = 0.005 # initial lr ratio at the beginning of lr warm up
66
+ wpe: float = 0.01 # final lr ratio at the end of training
67
+ sche: str = 'lin0' # lr schedule
68
+
69
+ opt: str = 'adamw' # lion: https://cloud.tencent.com/developer/article/2336657?areaId=106001 lr=5e-5 (0.25x) wd=0.8 (8x); Lion needs a large bs to work
70
+ afuse: bool = True # fused adamw
71
+
72
+ # other hps
73
+ saln: bool = False # whether to use shared adaln
74
+ anorm: bool = True # whether to use L2 normalized attention
75
+ fuse: bool = True # whether to use fused op like flash attn, xformers, fused MLP, fused LayerNorm, etc.
76
+
77
+ # data
78
+ pn: str = '1_2_3_4_5_6_8_10_13_16'
79
+ patch_size: int = 16
80
+ patch_nums: tuple = None # [automatically set; don't specify this] = tuple(map(int, args.pn.replace('-', '_').split('_')))
81
+ resos: tuple = None # [automatically set; don't specify this] = tuple(pn * args.patch_size for pn in args.patch_nums)
82
+
83
+ data_load_reso: int = None # [automatically set; don't specify this] would be max(patch_nums) * patch_size
84
+ mid_reso: float = 1.125 # aug: first resize to mid_reso = 1.125 * data_load_reso, then crop to data_load_reso
85
+ hflip: bool = False # augmentation: horizontal flip
86
+ workers: int = 0 # num workers; 0: auto, -1: don't use multiprocessing in DataLoader
87
+
88
+ # progressive training
89
+ pg: float = 0.0 # >0 for use progressive training during [0%, this] of training
90
+ pg0: int = 4 # progressive initial stage, 0: from the 1st token map, 1: from the 2nd token map, etc
91
+ pgwp: float = 0 # num of warmup epochs at each progressive stage
92
+
93
+ # would be automatically set in runtime
94
+ cmd: str = ' '.join(sys.argv[1:]) # [automatically set; don't specify this]
95
+ branch: str = subprocess.check_output(f'git symbolic-ref --short HEAD 2>/dev/null || git rev-parse HEAD', shell=True).decode('utf-8').strip() or '[unknown]' # [automatically set; don't specify this]
96
+ commit_id: str = subprocess.check_output(f'git rev-parse HEAD', shell=True).decode('utf-8').strip() or '[unknown]' # [automatically set; don't specify this]
97
+ commit_msg: str = (subprocess.check_output(f'git log -1', shell=True).decode('utf-8').strip().splitlines() or ['[unknown]'])[-1].strip() # [automatically set; don't specify this]
98
+ acc_mean: float = None # [automatically set; don't specify this]
99
+ acc_tail: float = None # [automatically set; don't specify this]
100
+ L_mean: float = None # [automatically set; don't specify this]
101
+ L_tail: float = None # [automatically set; don't specify this]
102
+ vacc_mean: float = None # [automatically set; don't specify this]
103
+ vacc_tail: float = None # [automatically set; don't specify this]
104
+ vL_mean: float = None # [automatically set; don't specify this]
105
+ vL_tail: float = None # [automatically set; don't specify this]
106
+ grad_norm: float = None # [automatically set; don't specify this]
107
+ cur_lr: float = None # [automatically set; don't specify this]
108
+ cur_wd: float = None # [automatically set; don't specify this]
109
+ cur_it: str = '' # [automatically set; don't specify this]
110
+ cur_ep: str = '' # [automatically set; don't specify this]
111
+ remain_time: str = '' # [automatically set; don't specify this]
112
+ finish_time: str = '' # [automatically set; don't specify this]
113
+
114
+ # environment
115
+ local_out_dir_path: str = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'local_output') # [automatically set; don't specify this]
116
+ tb_log_dir_path: str = '...tb-...' # [automatically set; don't specify this]
117
+ log_txt_path: str = '...' # [automatically set; don't specify this]
118
+ last_ckpt_path: str = '...' # [automatically set; don't specify this]
119
+
120
+ tf32: bool = True # whether to use TensorFloat32
121
+ device: str = 'cuda' # [automatically set; don't specify this]
122
+ seed: int = None # seed
123
+ def seed_everything(self, benchmark: bool):
124
+ torch.backends.cudnn.enabled = True
125
+ torch.backends.cudnn.benchmark = benchmark
126
+ if self.seed is None:
127
+ torch.backends.cudnn.deterministic = False
128
+ else:
129
+ torch.backends.cudnn.deterministic = True
130
+ seed = self.seed * dist.get_world_size() + dist.get_rank()
131
+ os.environ['PYTHONHASHSEED'] = str(seed)
132
+ random.seed(seed)
133
+ np.random.seed(seed)
134
+ torch.manual_seed(seed)
135
+ if torch.cuda.is_available():
136
+ torch.cuda.manual_seed(seed)
137
+ torch.cuda.manual_seed_all(seed)
138
+ same_seed_for_all_ranks: int = 0 # this is only for distributed sampler
139
+ def get_different_generator_for_each_rank(self) -> Optional[torch.Generator]: # for random augmentation
140
+ if self.seed is None:
141
+ return None
142
+ g = torch.Generator()
143
+ g.manual_seed(self.seed * dist.get_world_size() + dist.get_rank())
144
+ return g
145
+
146
+ # local_debug: bool = 'KEVIN_LOCAL' in os.environ
147
+ local_debug: bool = False
148
+ dbg_nan: bool = False # 'KEVIN_LOCAL' in os.environ
149
+
150
+ def compile_model(self, m, fast):
151
+ if fast == 0 or self.local_debug:
152
+ return m
153
+ return torch.compile(m, mode={
154
+ 1: 'reduce-overhead',
155
+ 2: 'max-autotune',
156
+ 3: 'default',
157
+ }[fast]) if hasattr(torch, 'compile') else m
158
+
159
+ def state_dict(self, key_ordered=True) -> Union[OrderedDict, dict]:
160
+ d = (OrderedDict if key_ordered else dict)()
161
+ # self.as_dict() would contain methods, but we only need variables
162
+ for k in self.class_variables.keys():
163
+ if k not in {'device'}: # these are not serializable
164
+ d[k] = getattr(self, k)
165
+ return d
166
+
167
+ def load_state_dict(self, d: Union[OrderedDict, dict, str]):
168
+ if isinstance(d, str): # for compatibility with old version
169
+ d: dict = eval('\n'.join([l for l in d.splitlines() if '<bound' not in l and 'device(' not in l]))
170
+ for k in d.keys():
171
+ try:
172
+ setattr(self, k, d[k])
173
+ except Exception as e:
174
+ print(f'k={k}, v={d[k]}')
175
+ raise e
176
+
177
+ @staticmethod
178
+ def set_tf32(tf32: bool):
179
+ if torch.cuda.is_available():
180
+ torch.backends.cudnn.allow_tf32 = bool(tf32)
181
+ torch.backends.cuda.matmul.allow_tf32 = bool(tf32)
182
+ if hasattr(torch, 'set_float32_matmul_precision'):
183
+ torch.set_float32_matmul_precision('high' if tf32 else 'highest')
184
+ print(f'[tf32] [precis] torch.get_float32_matmul_precision(): {torch.get_float32_matmul_precision()}')
185
+ print(f'[tf32] [ conv ] torch.backends.cudnn.allow_tf32: {torch.backends.cudnn.allow_tf32}')
186
+ print(f'[tf32] [matmul] torch.backends.cuda.matmul.allow_tf32: {torch.backends.cuda.matmul.allow_tf32}')
187
+
188
+ def dump_log(self):
189
+ if not dist.is_local_master():
190
+ return
191
+ if '1/' in self.cur_ep: # first time to dump log
192
+ with open(self.log_txt_path, 'w') as fp:
193
+ json.dump({'is_master': dist.is_master(), 'name': self.exp_name, 'cmd': self.cmd, 'commit': self.commit_id, 'branch': self.branch, 'tb_log_dir_path': self.tb_log_dir_path}, fp, indent=0)
194
+ fp.write('\n')
195
+
196
+ log_dict = {}
197
+ for k, v in {
198
+ 'it': self.cur_it, 'ep': self.cur_ep,
199
+ 'lr': self.cur_lr, 'wd': self.cur_wd, 'grad_norm': self.grad_norm,
200
+ 'L_mean': self.L_mean, 'L_tail': self.L_tail, 'acc_mean': self.acc_mean, 'acc_tail': self.acc_tail,
201
+ 'vL_mean': self.vL_mean, 'vL_tail': self.vL_tail, 'vacc_mean': self.vacc_mean, 'vacc_tail': self.vacc_tail,
202
+ 'remain_time': self.remain_time, 'finish_time': self.finish_time,
203
+ }.items():
204
+ if hasattr(v, 'item'): v = v.item()
205
+ log_dict[k] = v
206
+ with open(self.log_txt_path, 'a') as fp:
207
+ fp.write(f'{log_dict}\n')
208
+
209
+ def __str__(self):
210
+ s = []
211
+ for k in self.class_variables.keys():
212
+ if k not in {'device', 'dbg_ks_fp'}: # these are not serializable
213
+ s.append(f' {k:20s}: {getattr(self, k)}')
214
+ s = '\n'.join(s)
215
+ return f'{{\n{s}\n}}\n'
216
+
217
+
218
+ def init_dist_and_get_args():
219
+ for i in range(len(sys.argv)):
220
+ if sys.argv[i].startswith('--local-rank=') or sys.argv[i].startswith('--local_rank='):
221
+ del sys.argv[i]
222
+ break
223
+
224
+ args = Args(explicit_bool=True).parse_args(known_only=True)
225
+ if args.local_debug:
226
+ args.pn = '1_2_3_4_5_6_8_10_13_16'
227
+ args.seed = 1
228
+ args.aln = 1e-2
229
+ args.alng = 1e-5
230
+ args.saln = False
231
+ args.afuse = False
232
+ args.pg = 0.8
233
+ args.pg0 = 1
234
+ else:
235
+ if args.data_path == '/path/to/imagenet':
236
+ raise ValueError(f'{"*"*40} please specify --data_path=/path/to/imagenet {"*"*40}')
237
+
238
+ # warn args.extra_args
239
+ if len(args.extra_args) > 0:
240
+ print(f'======================================================================================')
241
+ print(f'=========================== WARNING: UNEXPECTED EXTRA ARGS ===========================\n{args.extra_args}')
242
+ print(f'=========================== WARNING: UNEXPECTED EXTRA ARGS ===========================')
243
+ print(f'======================================================================================\n\n')
244
+
245
+ # init torch distributed
246
+ from utils import misc
247
+ os.makedirs(args.local_out_dir_path, exist_ok=True)
248
+ misc.init_distributed_mode(local_out_path=args.local_out_dir_path, timeout=30)
249
+
250
+ # set env
251
+ args.set_tf32(args.tf32)
252
+ args.seed_everything(benchmark=args.pg == 0)
253
+
254
+ # update args: data loading
255
+ args.device = dist.get_device()
256
+ if args.pn == '256':
257
+ args.pn = '1_2_3_4_5_6_8_10_13_16'
258
+ elif args.pn == '512':
259
+ args.pn = '1_2_3_4_6_9_13_18_24_32'
260
+ elif args.pn == '1024':
261
+ args.pn = '1_2_3_4_5_7_9_12_16_21_27_36_48_64'
262
+ args.patch_nums = tuple(map(int, args.pn.replace('-', '_').split('_')))
263
+ args.resos = tuple(pn * args.patch_size for pn in args.patch_nums)
264
+ args.data_load_reso = max(args.resos)
265
+
266
+ # update args: bs and lr
267
+ bs_per_gpu = round(args.bs / args.ac / dist.get_world_size())
268
+ args.batch_size = bs_per_gpu
269
+ args.bs = args.glb_batch_size = args.batch_size * dist.get_world_size()
270
+ args.workers = min(max(0, args.workers), args.batch_size)
271
+
272
+ args.tlr = args.ac * args.tblr * args.glb_batch_size / 256
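+ # linear scaling rule: actual lr = base lr (tblr) * (global batch size / 256), multiplied by the gradient-accumulation factor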
273
+ args.twde = args.twde or args.twd
274
+
275
+ if args.wp == 0:
276
+ args.wp = args.ep * 1/50
277
+
278
+ # update args: progressive training
279
+ if args.pgwp == 0:
280
+ args.pgwp = args.ep * 1/300
281
+ if args.pg > 0:
282
+ args.sche = f'lin{args.pg:g}'
283
+
284
+ # update args: paths
285
+ args.log_txt_path = os.path.join(args.local_out_dir_path, 'log.txt')
286
+ args.last_ckpt_path = os.path.join(args.local_out_dir_path, f'ar-ckpt-last.pth')
287
+ _reg_valid_name = re.compile(r'[^\w\-+,.]')
288
+ tb_name = _reg_valid_name.sub(
289
+ '_',
290
+ f'tb-VARd{args.depth}'
291
+ f'__pn{args.pn}'
292
+ f'__b{args.bs}ep{args.ep}{args.opt[:4]}lr{args.tblr:g}wd{args.twd:g}'
293
+ )
294
+ args.tb_log_dir_path = os.path.join(args.local_out_dir_path, tb_name)
295
+
296
+ return args
utils/canny.py ADDED
@@ -0,0 +1,16 @@
1
+ import cv2
2
+ import torch
3
+ import numpy as np
4
+
5
+
6
+ class CannyDetector:
7
+ def __call__(self, img, low_threshold=100, high_threshold=200):
8
+ """
9
+ input: array or tensor (H,W,3)
10
+ output: array (H,W)
11
+ """
12
+ if torch.is_tensor(img):
13
+ img = img.cpu().detach().numpy().astype(np.uint8)
14
+ return cv2.Canny(img, low_threshold, high_threshold)
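+ # example usage (hypothetical values): CannyDetector()(np.zeros((256, 256, 3), dtype=np.uint8), 100, 200) returns a uint8 (256, 256) edge map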
15
+
16
+
utils/data.py ADDED
@@ -0,0 +1,55 @@
1
+ import os.path as osp
2
+
3
+ import PIL.Image as PImage
4
+ from torchvision.datasets.folder import DatasetFolder, IMG_EXTENSIONS
5
+ from torchvision.transforms import InterpolationMode, transforms
6
+
7
+
8
+ def normalize_01_into_pm1(x): # normalize x from [0, 1] to [-1, 1] by (x*2) - 1
9
+ return x.add(x).add_(-1)
10
+
11
+
12
+ def build_dataset(
13
+ data_path: str, final_reso: int,
14
+ hflip=False, mid_reso=1.125,
15
+ ):
16
+ # build augmentations
17
+ mid_reso = round(mid_reso * final_reso) # first resize to mid_reso, then crop to final_reso
18
+ train_aug, val_aug = [
19
+ transforms.Resize(mid_reso, interpolation=InterpolationMode.LANCZOS), # transforms.Resize: resize the shorter edge to mid_reso
20
+ transforms.RandomCrop((final_reso, final_reso)),
21
+ transforms.ToTensor(), normalize_01_into_pm1,
22
+ ], [
23
+ transforms.Resize(mid_reso, interpolation=InterpolationMode.LANCZOS), # transforms.Resize: resize the shorter edge to mid_reso
24
+ transforms.CenterCrop((final_reso, final_reso)),
25
+ transforms.ToTensor(), normalize_01_into_pm1,
26
+ ]
27
+ if hflip: train_aug.insert(0, transforms.RandomHorizontalFlip())
28
+ train_aug, val_aug = transforms.Compose(train_aug), transforms.Compose(val_aug)
29
+
30
+ # build datasets
31
+ train_set = DatasetFolder(root=osp.join(data_path, 'train'), loader=pil_loader, extensions=IMG_EXTENSIONS, transform=train_aug)
32
+ val_set = DatasetFolder(root=osp.join(data_path, 'val'), loader=pil_loader, extensions=IMG_EXTENSIONS, transform=val_aug)
33
+ num_classes = 1000
34
+ print(f'[Dataset] {len(train_set)=}, {len(val_set)=}, {num_classes=}')
35
+ print_aug(train_aug, '[train]')
36
+ print_aug(val_aug, '[val]')
37
+
38
+ # return the dataset objects
39
+ return num_classes, train_set, val_set
40
+
41
+
42
+ def pil_loader(path):
43
+ with open(path, 'rb') as f:
44
+ img: PImage.Image = PImage.open(f).convert('RGB')
45
+ return img
46
+
47
+
48
+ def print_aug(transform, label):
49
+ print(f'Transform {label} = ')
50
+ if hasattr(transform, 'transforms'):
51
+ for t in transform.transforms:
52
+ print(t)
53
+ else:
54
+ print(transform)
55
+ print('---------------------------\n')
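A sketch of how build_dataset might be wired into loaders, assuming data_path points at an ImageNet-style layout with train/ and val/ class sub-folders (the path below is a placeholder):

    from torch.utils.data import DataLoader
    from utils.data import build_dataset

    num_classes, train_set, val_set = build_dataset(
        '/path/to/imagenet', final_reso=256, hflip=True,
    )
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)
    imgs, labels = next(iter(train_loader))   # imgs are in [-1, 1], shape (64, 3, 256, 256)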
utils/data_sampler.py ADDED
@@ -0,0 +1,103 @@
1
+ import numpy as np
2
+ import torch
3
+ from torch.utils.data.sampler import Sampler
4
+
5
+
6
+ class EvalDistributedSampler(Sampler):
7
+ def __init__(self, dataset, num_replicas, rank):
8
+ seps = np.linspace(0, len(dataset), num_replicas+1, dtype=int)
9
+ beg, end = seps[:-1], seps[1:]
10
+ beg, end = beg[rank], end[rank]
11
+ self.indices = tuple(range(beg, end))
12
+
13
+ def __iter__(self):
14
+ return iter(self.indices)
15
+
16
+ def __len__(self) -> int:
17
+ return len(self.indices)
18
+
19
+
20
+ class InfiniteBatchSampler(Sampler):
21
+ def __init__(self, dataset_len, batch_size, seed_for_all_rank=0, fill_last=False, shuffle=True, drop_last=False, start_ep=0, start_it=0):
22
+ self.dataset_len = dataset_len
23
+ self.batch_size = batch_size
24
+ self.iters_per_ep = dataset_len // batch_size if drop_last else (dataset_len + batch_size - 1) // batch_size
25
+ self.max_p = self.iters_per_ep * batch_size
26
+ self.fill_last = fill_last
27
+ self.shuffle = shuffle
28
+ self.epoch = start_ep
29
+ self.same_seed_for_all_ranks = seed_for_all_rank
30
+ self.indices = self.gener_indices()
31
+ self.start_ep, self.start_it = start_ep, start_it
32
+
33
+ def gener_indices(self):
34
+ if self.shuffle:
35
+ g = torch.Generator()
36
+ g.manual_seed(self.epoch + self.same_seed_for_all_ranks)
37
+ indices = torch.randperm(self.dataset_len, generator=g).numpy()
38
+ else:
39
+ indices = torch.arange(self.dataset_len).numpy()
40
+
41
+ tails = self.batch_size - (self.dataset_len % self.batch_size)
42
+ if tails != self.batch_size and self.fill_last:
43
+ tails = indices[:tails]
44
+ np.random.shuffle(indices)
45
+ indices = np.concatenate((indices, tails))
46
+
47
+ # built-in list/tuple is faster than np.ndarray (when collating the data via a for-loop)
48
+ # noinspection PyTypeChecker
49
+ return tuple(indices.tolist())
50
+
51
+ def __iter__(self):
52
+ self.epoch = self.start_ep
53
+ while True:
54
+ self.epoch += 1
55
+ p = (self.start_it * self.batch_size) if self.epoch == self.start_ep else 0
56
+ while p < self.max_p:
57
+ q = p + self.batch_size
58
+ yield self.indices[p:q]
59
+ p = q
60
+ if self.shuffle:
61
+ self.indices = self.gener_indices()
62
+
63
+ def __len__(self):
64
+ return self.iters_per_ep
65
+
66
+
67
+ class DistInfiniteBatchSampler(InfiniteBatchSampler):
68
+ def __init__(self, world_size, rank, dataset_len, glb_batch_size, same_seed_for_all_ranks=0, repeated_aug=0, fill_last=False, shuffle=True, start_ep=0, start_it=0):
69
+ assert glb_batch_size % world_size == 0
70
+ self.world_size, self.rank = world_size, rank
71
+ self.dataset_len = dataset_len
72
+ self.glb_batch_size = glb_batch_size
73
+ self.batch_size = glb_batch_size // world_size
74
+
75
+ self.iters_per_ep = (dataset_len + glb_batch_size - 1) // glb_batch_size
76
+ self.fill_last = fill_last
77
+ self.shuffle = shuffle
78
+ self.repeated_aug = repeated_aug
79
+ self.epoch = start_ep
80
+ self.same_seed_for_all_ranks = same_seed_for_all_ranks
81
+ self.indices = self.gener_indices()
82
+ self.start_ep, self.start_it = start_ep, start_it
83
+
84
+ def gener_indices(self):
85
+ global_max_p = self.iters_per_ep * self.glb_batch_size # global_max_p % world_size must be 0 cuz glb_batch_size % world_size == 0
86
+ # print(f'global_max_p = iters_per_ep({self.iters_per_ep}) * glb_batch_size({self.glb_batch_size}) = {global_max_p}')
87
+ if self.shuffle:
88
+ g = torch.Generator()
89
+ g.manual_seed(self.epoch + self.same_seed_for_all_ranks)
90
+ global_indices = torch.randperm(self.dataset_len, generator=g)
91
+ if self.repeated_aug > 1:
92
+ global_indices = global_indices[:(self.dataset_len + self.repeated_aug - 1) // self.repeated_aug].repeat_interleave(self.repeated_aug, dim=0)[:global_max_p]
93
+ else:
94
+ global_indices = torch.arange(self.dataset_len)
95
+ filling = global_max_p - global_indices.shape[0]
96
+ if filling > 0 and self.fill_last:
97
+ global_indices = torch.cat((global_indices, global_indices[:filling]))
98
+ # global_indices = tuple(global_indices.numpy().tolist())
99
+
100
+ seps = torch.linspace(0, global_indices.shape[0], self.world_size + 1, dtype=torch.int)
101
+ local_indices = global_indices[seps[self.rank].item():seps[self.rank + 1].item()].tolist()
102
+ self.max_p = len(local_indices)
103
+ return local_indices
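A sketch of pairing DistInfiniteBatchSampler with a DataLoader; world_size and rank would normally come from the distributed helpers, and the dataset length here is arbitrary:

    from utils.data_sampler import DistInfiniteBatchSampler

    sampler = DistInfiniteBatchSampler(
        world_size=1, rank=0, dataset_len=10_000, glb_batch_size=64,
        shuffle=True, fill_last=True, start_ep=0, start_it=0,
    )
    print(len(sampler))   # iters_per_ep = ceil(10_000 / 64) = 157
    # loader = DataLoader(train_set, batch_sampler=sampler, num_workers=4)
    # The sampler never stops on its own; iterate len(sampler) batches per epoch.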
utils/freeze_utils.py ADDED
@@ -0,0 +1,38 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ def freeze_model(model, pretrained_state_dict=None, freeze_keywords=('blocks',), keep_keywords=('Caption_embedding', 'condition_layers', 'cross_attn_inject')):
5
+ """
6
+ Freeze the parameters that already appear in the pretrained weights (taken from the state_dict), while keeping modules whose names contain a keyword in keep_keywords trainable.
7
+
8
+ :param model: the PyTorch model to process
9
+ :param pretrained_state_dict: a state_dict from torch.load(); when not provided, fall back to keyword matching
10
+ :param freeze_keywords: keywords used to freeze parameters when no state_dict is given
11
+ :param keep_keywords: keywords whose parameters always remain trainable
12
+ """
13
+ frozen, trainable = [], []
14
+
15
+ if pretrained_state_dict is not None:
16
+ pretrained_keys = set(pretrained_state_dict.keys())
17
+ for name, param in model.named_parameters():
18
+ if name in pretrained_keys and not any(k in name for k in keep_keywords):
19
+ param.requires_grad = False
20
+ frozen.append(name)
21
+ else:
22
+ param.requires_grad = True
23
+ trainable.append(name)
24
+ else:
25
+ # fallback: keyword-based freezing
26
+ for name, param in model.named_parameters():
27
+ if any(k in name for k in freeze_keywords) and not any(k in name for k in keep_keywords):
28
+ param.requires_grad = False
29
+ frozen.append(name)
30
+ else:
31
+ param.requires_grad = True
32
+ trainable.append(name)
33
+
34
+ # print(f"[Freeze Summary] Frozen: {len(frozen)}, Trainable: {len(trainable)}")
35
+ # for name in frozen:
36
+ # print(f" [Frozen] {name}")
37
+ # for name in trainable:
38
+ # print(f" [Trainable] {name}")
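A self-contained sketch of freeze_model with an explicit state_dict; the tiny two-layer model is a stand-in so the example runs on its own:

    import torch.nn as nn
    from utils.freeze_utils import freeze_model

    model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
    # pretend only layer 0 was pretrained
    pretrained = {k: v.clone() for k, v in model.state_dict().items() if k.startswith('0.')}
    freeze_model(model, pretrained_state_dict=pretrained)   # layer 0 frozen, layer 1 kept trainable
    print([n for n, p in model.named_parameters() if p.requires_grad])   # ['1.weight', '1.bias']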
utils/lr_control.py ADDED
@@ -0,0 +1,108 @@
1
+ import math
2
+ from pprint import pformat
3
+ from typing import Tuple, List, Dict, Union
4
+
5
+ import torch.nn
6
+
7
+ import dist
8
+
9
+
10
+ def lr_wd_annealing(sche_type: str, optimizer, peak_lr, wd, wd_end, cur_it, wp_it, max_it, wp0=0.005, wpe=0.001):
11
+ """Decay the learning rate with half-cycle cosine after warmup"""
12
+ wp_it = round(wp_it)
13
+
14
+ if cur_it < wp_it:
15
+ cur_lr = wp0 + (1-wp0) * cur_it / wp_it
16
+ else:
17
+ pasd = (cur_it - wp_it) / (max_it-1 - wp_it) # [0, 1]
18
+ rest = 1 - pasd # [1, 0]
19
+ if sche_type == 'cos':
20
+ cur_lr = wpe + (1-wpe) * (0.5 + 0.5 * math.cos(math.pi * pasd))
21
+ elif sche_type == 'lin':
22
+ T = 0.15; max_rest = 1-T
23
+ if pasd < T: cur_lr = 1
24
+ else: cur_lr = wpe + (1-wpe) * rest / max_rest # 1 to wpe
25
+ elif sche_type == 'lin0':
26
+ T = 0.05; max_rest = 1-T
27
+ if pasd < T: cur_lr = 1
28
+ else: cur_lr = wpe + (1-wpe) * rest / max_rest
29
+ elif sche_type == 'lin00':
30
+ cur_lr = wpe + (1-wpe) * rest
31
+ elif sche_type.startswith('lin'):
32
+ T = float(sche_type[3:]); max_rest = 1-T
33
+ wpe_mid = wpe + (1-wpe) * max_rest
34
+ wpe_mid = (1 + wpe_mid) / 2
35
+ if pasd < T: cur_lr = 1 + (wpe_mid-1) * pasd / T
36
+ else: cur_lr = wpe + (wpe_mid-wpe) * rest / max_rest
37
+ elif sche_type == 'exp':
38
+ T = 0.15; max_rest = 1-T
39
+ if pasd < T: cur_lr = 1
40
+ else:
41
+ expo = (pasd-T) / max_rest * math.log(wpe)
42
+ cur_lr = math.exp(expo)
43
+ else:
44
+ raise NotImplementedError(f'unknown sche_type {sche_type}')
45
+
46
+ cur_lr *= peak_lr
47
+ pasd = cur_it / (max_it-1)
48
+ cur_wd = wd_end + (wd - wd_end) * (0.5 + 0.5 * math.cos(math.pi * pasd))
49
+
50
+ inf = 1e6
51
+ min_lr, max_lr = inf, -1
52
+ min_wd, max_wd = inf, -1
53
+ for param_group in optimizer.param_groups:
54
+ param_group['lr'] = cur_lr * param_group.get('lr_sc', 1) # 'lr_sc' could be assigned
55
+ max_lr = max(max_lr, param_group['lr'])
56
+ min_lr = min(min_lr, param_group['lr'])
57
+
58
+ param_group['weight_decay'] = cur_wd * param_group.get('wd_sc', 1)
59
+ max_wd = max(max_wd, param_group['weight_decay'])
60
+ if param_group['weight_decay'] > 0:
61
+ min_wd = min(min_wd, param_group['weight_decay'])
62
+
63
+ if min_lr == inf: min_lr = -1
64
+ if min_wd == inf: min_wd = -1
65
+ return min_lr, max_lr, min_wd, max_wd
66
+
67
+
68
+ def filter_params(model, nowd_keys=()) -> Tuple[
69
+ List[str], List[torch.nn.Parameter], List[Dict[str, Union[torch.nn.Parameter, float]]]
70
+ ]:
71
+ para_groups, para_groups_dbg = {}, {}
72
+ names, paras = [], []
73
+ names_no_grad = []
74
+ count, numel = 0, 0
75
+ for name, para in model.named_parameters():
76
+ name = name.replace('_fsdp_wrapped_module.', '')
77
+ if not para.requires_grad:
78
+ names_no_grad.append(name)
79
+ continue # frozen weights
80
+ count += 1
81
+ numel += para.numel()
82
+ names.append(name)
83
+ paras.append(para)
84
+
85
+ if para.ndim == 1 or name.endswith('bias') or any(k in name for k in nowd_keys):
86
+ cur_wd_sc, group_name = 0., 'ND'
87
+ else:
88
+ cur_wd_sc, group_name = 1., 'D'
89
+ cur_lr_sc = 1.
90
+ if group_name not in para_groups:
91
+ para_groups[group_name] = {'params': [], 'wd_sc': cur_wd_sc, 'lr_sc': cur_lr_sc}
92
+ para_groups_dbg[group_name] = {'params': [], 'wd_sc': cur_wd_sc, 'lr_sc': cur_lr_sc}
93
+ para_groups[group_name]['params'].append(para)
94
+ para_groups_dbg[group_name]['params'].append(name)
95
+
96
+ for g in para_groups_dbg.values():
97
+ g['params'] = pformat(', '.join(g['params']), width=200)
98
+
99
+ # print(f'[get_param_groups] param_groups = \n{pformat(para_groups_dbg, indent=2, width=240)}\n')
100
+
101
+ for rk in range(dist.get_world_size()):
102
+ dist.barrier()
103
+ if dist.get_rank() == rk:
104
+ # print(f'[get_param_groups][rank{dist.get_rank()}] {type(model).__name__=} {count=}, {numel=}', flush=True, force=True)
105
+ print('')
106
+
107
+ # assert len(names_no_grad) == 0, f'[get_param_groups] names_no_grad = \n{pformat(names_no_grad, indent=2, width=240)}\n'
108
+ return names, paras, list(para_groups.values())
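A sketch of driving an optimizer with lr_wd_annealing inside a training loop; the toy model, the AdamW settings, and the 'lin0' schedule are assumptions, not the repo's defaults:

    import torch
    from utils.lr_control import lr_wd_annealing

    model = torch.nn.Linear(8, 8)                      # stand-in model
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.05)
    max_it, wp_it = 1_000, 50
    for it in range(max_it):
        min_lr, max_lr, min_wd, max_wd = lr_wd_annealing(
            'lin0', opt, peak_lr=1e-4, wd=0.05, wd_end=0.0,
            cur_it=it, wp_it=wp_it, max_it=max_it,
        )
        # forward / backward / opt.step() would go here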
utils/misc.py ADDED
@@ -0,0 +1,385 @@
1
+ import datetime
2
+ import functools
3
+ import glob
4
+ import os
5
+ import subprocess
6
+ import sys
7
+ import time
8
+ from collections import defaultdict, deque
9
+ from typing import Iterator, List, Tuple
10
+
11
+ import numpy as np
12
+ import pytz
13
+ import torch
14
+ import torch.distributed as tdist
15
+
16
+ import dist
17
+ from utils import arg_util
18
+
19
+ os_system = functools.partial(subprocess.call, shell=True)
20
+ def echo(info):
21
+ os_system(f'echo "[$(date "+%m-%d-%H:%M:%S")] ({os.path.basename(sys._getframe().f_back.f_code.co_filename)}, line{sys._getframe().f_back.f_lineno})=> {info}"')
22
+ def os_system_get_stdout(cmd):
23
+ return subprocess.run(cmd, shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')
24
+ def os_system_get_stdout_stderr(cmd):
25
+ cnt = 0
26
+ while True:
27
+ try:
28
+ sp = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=30)
29
+ except subprocess.TimeoutExpired:
30
+ cnt += 1
31
+ print(f'[fetch free_port file] timeout cnt={cnt}')
32
+ else:
33
+ return sp.stdout.decode('utf-8'), sp.stderr.decode('utf-8')
34
+
35
+
36
+ def time_str(fmt='[%m-%d %H:%M:%S]'):
37
+ return datetime.datetime.now(tz=pytz.timezone('Asia/Shanghai')).strftime(fmt)
38
+
39
+
40
+ def init_distributed_mode(local_out_path, only_sync_master=False, timeout=30):
41
+ try:
42
+ dist.initialize(fork=False, timeout=timeout)
43
+ dist.barrier()
44
+ except RuntimeError:
45
+ print(f'{">"*75} NCCL Error {"<"*75}', flush=True)
46
+ time.sleep(10)
47
+
48
+ if local_out_path is not None: os.makedirs(local_out_path, exist_ok=True)
49
+ _change_builtin_print(dist.is_local_master())
50
+ if (dist.is_master() if only_sync_master else dist.is_local_master()) and local_out_path is not None and len(local_out_path):
51
+ sys.stdout, sys.stderr = SyncPrint(local_out_path, sync_stdout=True), SyncPrint(local_out_path, sync_stdout=False)
52
+
53
+
54
+ def _change_builtin_print(is_master):
55
+ import builtins as __builtin__
56
+
57
+ builtin_print = __builtin__.print
58
+ if type(builtin_print) != type(open):
59
+ return
60
+
61
+ def prt(*args, **kwargs):
62
+ force = kwargs.pop('force', False)
63
+ clean = kwargs.pop('clean', False)
64
+ deeper = kwargs.pop('deeper', False)
65
+ if is_master or force:
66
+ if not clean:
67
+ f_back = sys._getframe().f_back
68
+ if deeper and f_back.f_back is not None:
69
+ f_back = f_back.f_back
70
+ file_desc = f'{f_back.f_code.co_filename:24s}'[-24:]
71
+ builtin_print(f'{time_str()} ({file_desc}, line{f_back.f_lineno:-4d})=>', *args, **kwargs)
72
+ else:
73
+ builtin_print(*args, **kwargs)
74
+
75
+ __builtin__.print = prt
76
+
77
+
78
+ class SyncPrint(object):
79
+ def __init__(self, local_output_dir, sync_stdout=True):
80
+ self.sync_stdout = sync_stdout
81
+ self.terminal_stream = sys.stdout if sync_stdout else sys.stderr
82
+ fname = os.path.join(local_output_dir, 'stdout.txt' if sync_stdout else 'stderr.txt')
83
+ existing = os.path.exists(fname)
84
+ self.file_stream = open(fname, 'a')
85
+ if existing:
86
+ self.file_stream.write('\n'*7 + '='*55 + f' RESTART {time_str()} ' + '='*55 + '\n')
87
+ self.file_stream.flush()
88
+ self.enabled = True
89
+
90
+ def write(self, message):
91
+ self.terminal_stream.write(message)
92
+ self.file_stream.write(message)
93
+
94
+ def flush(self):
95
+ self.terminal_stream.flush()
96
+ self.file_stream.flush()
97
+
98
+ def close(self):
99
+ if not self.enabled:
100
+ return
101
+ self.enabled = False
102
+ self.file_stream.flush()
103
+ self.file_stream.close()
104
+ if self.sync_stdout:
105
+ sys.stdout = self.terminal_stream
106
+ sys.stdout.flush()
107
+ else:
108
+ sys.stderr = self.terminal_stream
109
+ sys.stderr.flush()
110
+
111
+ def __del__(self):
112
+ self.close()
113
+
114
+
115
+ class DistLogger(object):
116
+ def __init__(self, lg, verbose):
117
+ self._lg, self._verbose = lg, verbose
118
+
119
+ @staticmethod
120
+ def do_nothing(*args, **kwargs):
121
+ pass
122
+
123
+ def __getattr__(self, attr: str):
124
+ return getattr(self._lg, attr) if self._verbose else DistLogger.do_nothing
125
+
126
+
127
+ class TensorboardLogger(object):
128
+ def __init__(self, log_dir, filename_suffix):
129
+ try: import tensorflow_io as tfio
130
+ except: pass
131
+ from torch.utils.tensorboard import SummaryWriter
132
+ self.writer = SummaryWriter(log_dir=log_dir, filename_suffix=filename_suffix)
133
+ self.step = 0
134
+
135
+ def set_step(self, step=None):
136
+ if step is not None:
137
+ self.step = step
138
+ else:
139
+ self.step += 1
140
+
141
+ def update(self, head='scalar', step=None, **kwargs):
142
+ for k, v in kwargs.items():
143
+ if v is None:
144
+ continue
145
+ # assert isinstance(v, (float, int)), type(v)
146
+ if step is None: # iter wise
147
+ it = self.step
148
+ if it == 0 or (it + 1) % 500 == 0:
149
+ if hasattr(v, 'item'): v = v.item()
150
+ self.writer.add_scalar(f'{head}/{k}', v, it)
151
+ else: # epoch wise
152
+ if hasattr(v, 'item'): v = v.item()
153
+ self.writer.add_scalar(f'{head}/{k}', v, step)
154
+
155
+ def log_tensor_as_distri(self, tag, tensor1d, step=None):
156
+ if step is None: # iter wise
157
+ step = self.step
158
+ loggable = step == 0 or (step + 1) % 500 == 0
159
+ else: # epoch wise
160
+ loggable = True
161
+ if loggable:
162
+ try:
163
+ self.writer.add_histogram(tag=tag, values=tensor1d, global_step=step)
164
+ except Exception as e:
165
+ print(f'[log_tensor_as_distri writer.add_histogram failed]: {e}')
166
+
167
+ def log_image(self, tag, img_chw, step=None):
168
+ if step is None: # iter wise
169
+ step = self.step
170
+ loggable = step == 0 or (step + 1) % 500 == 0
171
+ else: # epoch wise
172
+ loggable = True
173
+ if loggable:
174
+ self.writer.add_image(tag, img_chw, step, dataformats='CHW')
175
+
176
+ def flush(self):
177
+ self.writer.flush()
178
+
179
+ def close(self):
180
+ self.writer.close()
181
+
182
+
183
+ class SmoothedValue(object):
184
+ """Track a series of values and provide access to smoothed values over a
185
+ window or the global series average.
186
+ """
187
+
188
+ def __init__(self, window_size=30, fmt=None):
189
+ if fmt is None:
190
+ fmt = "{median:.4f} ({global_avg:.4f})"
191
+ self.deque = deque(maxlen=window_size)
192
+ self.total = 0.0
193
+ self.count = 0
194
+ self.fmt = fmt
195
+
196
+ def update(self, value, n=1):
197
+ self.deque.append(value)
198
+ self.count += n
199
+ self.total += value * n
200
+
201
+ def synchronize_between_processes(self):
202
+ """
203
+ Warning: does not synchronize the deque!
204
+ """
205
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
206
+ tdist.barrier()
207
+ tdist.all_reduce(t)
208
+ t = t.tolist()
209
+ self.count = int(t[0])
210
+ self.total = t[1]
211
+
212
+ @property
213
+ def median(self):
214
+ return np.median(self.deque) if len(self.deque) else 0
215
+
216
+ @property
217
+ def avg(self):
218
+ return sum(self.deque) / (len(self.deque) or 1)
219
+
220
+ @property
221
+ def global_avg(self):
222
+ return self.total / (self.count or 1)
223
+
224
+ @property
225
+ def max(self):
226
+ return max(self.deque)
227
+
228
+ @property
229
+ def value(self):
230
+ return self.deque[-1] if len(self.deque) else 0
231
+
232
+ def time_preds(self, counts) -> Tuple[float, str, str]:
233
+ remain_secs = counts * self.median
234
+ return remain_secs, str(datetime.timedelta(seconds=round(remain_secs))), time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time() + remain_secs))
235
+
236
+ def __str__(self):
237
+ return self.fmt.format(
238
+ median=self.median,
239
+ avg=self.avg,
240
+ global_avg=self.global_avg,
241
+ max=self.max,
242
+ value=self.value)
243
+
244
+
245
+ class MetricLogger(object):
246
+ def __init__(self, delimiter=' '):
247
+ self.meters = defaultdict(SmoothedValue)
248
+ self.delimiter = delimiter
249
+ self.iter_end_t = time.time()
250
+ self.log_iters = []
251
+
252
+ def update(self, **kwargs):
253
+ for k, v in kwargs.items():
254
+ if v is None:
255
+ continue
256
+ if hasattr(v, 'item'): v = v.item()
257
+ # assert isinstance(v, (float, int)), type(v)
258
+ assert isinstance(v, (float, int))
259
+ self.meters[k].update(v)
260
+
261
+ def __getattr__(self, attr):
262
+ if attr in self.meters:
263
+ return self.meters[attr]
264
+ if attr in self.__dict__:
265
+ return self.__dict__[attr]
266
+ raise AttributeError("'{}' object has no attribute '{}'".format(
267
+ type(self).__name__, attr))
268
+
269
+ def __str__(self):
270
+ loss_str = []
271
+ for name, meter in self.meters.items():
272
+ if len(meter.deque):
273
+ loss_str.append(
274
+ "{}: {}".format(name, str(meter))
275
+ )
276
+ return self.delimiter.join(loss_str)
277
+
278
+ def synchronize_between_processes(self):
279
+ for meter in self.meters.values():
280
+ meter.synchronize_between_processes()
281
+
282
+ def add_meter(self, name, meter):
283
+ self.meters[name] = meter
284
+
285
+ def log_every(self, start_it, max_iters, itrt, print_freq, header=None):
286
+ self.log_iters = set(np.linspace(0, max_iters-1, print_freq, dtype=int).tolist())
287
+ self.log_iters.add(start_it)
288
+ if not header:
289
+ header = ''
290
+ start_time = time.time()
291
+ self.iter_end_t = time.time()
292
+ self.iter_time = SmoothedValue(fmt='{avg:.4f}')
293
+ self.data_time = SmoothedValue(fmt='{avg:.4f}')
294
+ space_fmt = ':' + str(len(str(max_iters))) + 'd'
295
+ log_msg = [
296
+ header,
297
+ '[{0' + space_fmt + '}/{1}]',
298
+ 'eta: {eta}',
299
+ '{meters}',
300
+ 'time: {time}',
301
+ 'data: {data}'
302
+ ]
303
+ log_msg = self.delimiter.join(log_msg)
304
+
305
+ if isinstance(itrt, Iterator) and not hasattr(itrt, 'preload') and not hasattr(itrt, 'set_epoch'):
306
+ for i in range(start_it, max_iters):
307
+ # obj = next(itrt)
308
+ try:
309
+ obj = next(itrt)
310
+ except StopIteration:
311
+ break  # or return; guards against an exhausted iterator
312
+ self.data_time.update(time.time() - self.iter_end_t)
313
+ yield i, obj
314
+ self.iter_time.update(time.time() - self.iter_end_t)
315
+ if i in self.log_iters:
316
+ eta_seconds = self.iter_time.global_avg * (max_iters - i)
317
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
318
+ print(log_msg.format(
319
+ i, max_iters, eta=eta_string,
320
+ meters=str(self),
321
+ time=str(self.iter_time), data=str(self.data_time)), flush=True)
322
+ self.iter_end_t = time.time()
323
+ else:
324
+ if isinstance(itrt, int): itrt = range(itrt)
325
+ for i, obj in enumerate(itrt):
326
+ self.data_time.update(time.time() - self.iter_end_t)
327
+ yield i, obj
328
+ self.iter_time.update(time.time() - self.iter_end_t)
329
+ if i in self.log_iters:
330
+ eta_seconds = self.iter_time.global_avg * (max_iters - i)
331
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
332
+ print(log_msg.format(
333
+ i, max_iters, eta=eta_string,
334
+ meters=str(self),
335
+ time=str(self.iter_time), data=str(self.data_time)), flush=True)
336
+ self.iter_end_t = time.time()
337
+
338
+ total_time = time.time() - start_time
339
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
340
+ print('{} Total time: {} ({:.3f} s / it)'.format(
341
+ header, total_time_str, total_time / max_iters), flush=True)
342
+
343
+
344
+ def glob_with_latest_modified_first(pattern, recursive=False):
345
+ return sorted(glob.glob(pattern, recursive=recursive), key=os.path.getmtime, reverse=True)
346
+
347
+
348
+ def auto_resume(args: arg_util.Args, pattern='ckpt*.pth') -> Tuple[List[str], int, int, dict, dict]:
349
+ info = []
350
+ file = os.path.join(args.local_out_dir_path, pattern)
351
+ all_ckpt = glob_with_latest_modified_first(file)
352
+ if len(all_ckpt) == 0:
353
+ info.append(f'[auto_resume] no ckpt found @ {file}')
354
+ info.append(f'[auto_resume quit]')
355
+ return info, 0, 0, {}, {}
356
+ else:
357
+ info.append(f'[auto_resume] load ckpt from @ {all_ckpt[0]} ...')
358
+ ckpt = torch.load(all_ckpt[0], map_location='cpu')
359
+ ep, it = ckpt['epoch'], ckpt['iter']
360
+ info.append(f'[auto_resume success] resume from ep{ep}, it{it}')
361
+ return info, ep, it, ckpt['trainer'], ckpt['args']
362
+
363
+
364
+ def create_npz_from_sample_folder(sample_folder: str):
365
+ """
366
+ Builds a single .npz file from a folder of .png samples. Refer to DiT.
367
+ """
368
+ import os, glob
369
+ import numpy as np
370
+ from tqdm import tqdm
371
+ from PIL import Image
372
+
373
+ samples = []
374
+ pngs = glob.glob(os.path.join(sample_folder, '*.png')) + glob.glob(os.path.join(sample_folder, '*.PNG'))
375
+ assert len(pngs) == 50_000, f'{len(pngs)} png files found in {sample_folder}, but expected 50,000'
376
+ for png in tqdm(pngs, desc='Building .npz file from samples (png only)'):
377
+ with Image.open(png) as sample_pil:
378
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
379
+ samples.append(sample_np)
380
+ samples = np.stack(samples)
381
+ assert samples.shape == (50_000, samples.shape[1], samples.shape[2], 3)
382
+ npz_path = f'{sample_folder}.npz'
383
+ np.savez(npz_path, arr_0=samples)
384
+ print(f'Saved .npz file to {npz_path} [shape={samples.shape}].')
385
+ return npz_path
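A sketch of MetricLogger / SmoothedValue tracking smoothed statistics in a single-process run (synchronize_between_processes is skipped, and importing utils.misc assumes the repo's dist and arg_util modules are importable):

    from utils.misc import MetricLogger, SmoothedValue

    logger = MetricLogger(delimiter='  ')
    logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.2e}'))
    for it in range(100):
        logger.update(loss=1.0 / (it + 1), lr=1e-4)   # illustrative values
    print(str(logger))   # "loss: <median> (<global_avg>)  lr: 1.00e-04"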
utils/model_args.py ADDED
@@ -0,0 +1,36 @@
1
+ from typing import Optional
2
+ from dataclasses import dataclass
3
+
4
+ @dataclass
5
+ class ModelArgs:
6
+ dim: int = 768  # previously 1024
7
+ n_layer: int = 32
8
+ n_head: int = 32
9
+ n_kv_head: Optional[int] = None
10
+ multiple_of: int = 256
11
+ ffn_dim_multiplier: Optional[float] = None
12
+ rope_base: float = 10000
13
+ norm_eps: float = 1e-5
14
+ initializer_range: float = 0.02
15
+
16
+ token_dropout_p: float = 0.1
17
+ attn_dropout_p: float = 0.0
18
+ resid_dropout_p: float = 0.1
19
+ ffn_dropout_p: float = 0.1
20
+ drop_path_rate: float = 0.0
21
+
22
+ num_classes: int = 1000
23
+ caption_dim: int = 2048
24
+ class_dropout_prob: float = 0.1
25
+ model_type: str = 'c2i'
26
+
27
+ vocab_size: int = 16384
28
+ cls_token_num: int = 1
29
+ block_size: int = 256
30
+ max_batch_size: int = 32
31
+ max_seq_len: int = 2048
32
+ adapter_size: str = 'small'
33
+ condition_type: str = 'seg'
34
+
35
+ def get_model_args() -> ModelArgs:
36
+ return ModelArgs()
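A sketch of taking the ModelArgs defaults and overriding a couple of fields with dataclasses.replace; the chosen override values are purely illustrative:

    from dataclasses import replace
    from utils.model_args import get_model_args

    cfg = get_model_args()                               # dim=768, n_layer=32, vocab_size=16384, ...
    cfg = replace(cfg, condition_type='canny', model_type='t2i')
    print(cfg.dim, cfg.vocab_size, cfg.condition_type)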
utils/t2i_control.py ADDED
@@ -0,0 +1,200 @@
1
+ from PIL import PngImagePlugin
2
+ MaximumDecompressedSize = 1024
3
+ MegaByte = 2**20
4
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
5
+ import torch
6
+ # from datasets import load_dataset, load_from_disk
7
+ # import random
8
+ # import pickle
9
+ # import logging
10
+ from accelerate import Accelerator
11
+ from accelerate.logging import get_logger
12
+ from accelerate.utils import ProjectConfiguration, set_seed
13
+ from datasets import load_dataset, load_from_disk, concatenate_datasets
14
+ from huggingface_hub import create_repo, upload_folder
15
+ from transformers import AutoTokenizer, PretrainedConfig
16
+ import argparse
17
+ from PIL import Image
18
+ from pathlib import Path
19
+ from tqdm.auto import tqdm
20
+ from packaging import version
21
+ from torchvision import transforms
22
+ from torch.cuda.amp import autocast
23
+ from torchvision.transforms.functional import normalize
24
+
25
+ # from util import group_random_crop
26
+ import numpy as np
27
+ import os
28
+ from torch.utils.data import Dataset
29
+ from utils.canny import CannyDetector
30
+ # from condition.hed import HEDdetector
31
+
32
+
33
+ logger = get_logger(__name__)
34
+
35
+ class T2IControlCode(Dataset):
36
+ def __init__(self, args):
37
+ self.get_image = args.get_image
38
+ self.get_prompt = args.get_prompt
39
+ self.get_label = args.get_label
40
+ self.control_type = args.condition_type
41
+ if self.control_type == 'canny':
42
+ self.get_control = CannyDetector()
43
+
44
+ self.code_path = args.code_path
45
+ code_file_path = os.path.join(self.code_path, 'code')
46
+ file_num = len(os.listdir(code_file_path))
47
+ self.code_files = [os.path.join(code_file_path, f"{i}.npy") for i in range(file_num)]
48
+
49
+ # if args.code_path2 is not None:
50
+ # self.code_path2 = args.code_path2
51
+ # code_file_path2 = os.path.join(self.code_path2, 'code')
52
+ # file_num2 = len(os.listdir(code_file_path2))
53
+ # self.code_files2 = [os.path.join(code_file_path2, f"{i}.npy") for i in range(file_num2)]
54
+ # self.code_files = self.code_files + self.code_files2
55
+
56
+ self.image_size = args.image_size
57
+ latent_size = args.image_size // args.downsample_size
58
+ self.code_len = latent_size ** 2
59
+ self.t5_feature_max_len = 120
60
+ self.t5_feature_dim = 2048
61
+ self.max_seq_length = self.t5_feature_max_len + self.code_len
62
+
63
+ def __len__(self):
64
+ return len(self.code_files)
65
+
66
+ def dummy_data(self):
67
+ img = torch.zeros((3, self.image_size, self.image_size), dtype=torch.float32)
68
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
69
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool)).unsqueeze(0)
70
+ valid = 0
71
+ return img, t5_feat_padding, attn_mask, valid
72
+
73
+ def collate_fn(self, examples):
74
+
75
+ code = torch.stack([example["code"] for example in examples])
76
+ control = torch.stack([example["control"] for example in examples])
77
+ if self.control_type == 'canny':
78
+ control = control.unsqueeze(1).repeat(1,3,1,1)
79
+ caption_emb = torch.stack([example["caption_emb"] for example in examples])
80
+ attn_mask = torch.stack([example["attn_mask"] for example in examples])
81
+ valid = torch.stack([example["valid"] for example in examples])
82
+ if self.get_image:
83
+ image = torch.stack([example["image"] for example in examples])
84
+ if self.get_prompt:
85
+ prompt = [example["prompt"][0] for example in examples]
86
+ if self.control_type == "seg":
87
+ label = torch.stack([example["label"] for example in examples])
88
+
89
+ output = {}
90
+ output['code'] = code
91
+ output['control'] = control
92
+ output['caption_emb'] = caption_emb
93
+ output['attn_mask'] = attn_mask
94
+ output['valid'] = valid
95
+ if self.get_image:
96
+ output['image'] = image
97
+ if self.get_prompt:
98
+ output['prompt'] = prompt
99
+ if self.control_type == "seg":
100
+ output['label'] = label
101
+ return output
102
+
103
+ def __getitem__(self, index):
104
+
105
+
106
+ code_path = self.code_files[index]
107
+ if self.control_type == 'seg':
108
+ control_path = code_path.replace('code', 'control').replace('npy', 'png')
109
+ control = np.array(Image.open(control_path))/255
110
+ control = 2*(control - 0.5)
111
+ elif self.control_type == 'depth':
112
+ control_path = code_path.replace('code', 'control_depth').replace('npy', 'png')
113
+ control = np.array(Image.open(control_path))/255
114
+ control = 2*(control - 0.5)
115
+ caption_path = code_path.replace('code', 'caption_emb').replace('npy', 'npz')
116
+ image_path = code_path.replace('code', 'image').replace('npy', 'png')
117
+ label_path = code_path.replace('code', 'label').replace('npy', 'png')
118
+
119
+ code = np.load(code_path)
120
+ image = np.array(Image.open(image_path)).astype(np.float32) / 255.0
121
+
122
+
123
+
124
+
125
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
126
+ caption = np.load(caption_path)
127
+
128
+ t5_feat = torch.from_numpy(caption['caption_emb'])
129
+ prompt = caption['prompt']
130
+ t5_feat_len = t5_feat.shape[1]
131
+ feat_len = min(self.t5_feature_max_len, t5_feat_len)
132
+ t5_feat_padding[:, -feat_len:] = t5_feat[:, :feat_len]
133
+
134
+
135
+ emb_mask = torch.zeros((self.t5_feature_max_len,))
136
+ emb_mask[-feat_len:] = 1
137
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length))
138
+ T = self.t5_feature_max_len
139
+ attn_mask[:, :T] = attn_mask[:, :T] * emb_mask.unsqueeze(0)
140
+ eye_matrix = torch.eye(self.max_seq_length, self.max_seq_length)
141
+ attn_mask = attn_mask * (1 - eye_matrix) + eye_matrix
142
+ attn_mask = attn_mask.unsqueeze(0).to(torch.bool)
143
+ valid = 1
144
+
145
+ output = {}
146
+ output['code'] = torch.from_numpy(code)
147
+ if self.control_type == 'canny':
148
+ output['control'] = torch.from_numpy(2*(self.get_control(image)/255 - 0.5))
149
+ elif self.control_type == "seg":
150
+ output['control'] = torch.from_numpy(control.transpose(2,0,1))
151
+ elif self.control_type == "depth":
152
+ output['control'] = torch.from_numpy(control.transpose(2,0,1))
153
+ elif self.control_type == 'hed':
154
+ output['control'] = torch.from_numpy(image.transpose(2,0,1))
155
+ elif self.control_type == 'lineart':
156
+ output['control'] = torch.from_numpy(image.transpose(2,0,1))
157
+ output['caption_emb'] = t5_feat_padding
158
+ output['attn_mask'] = attn_mask
159
+ output['valid'] = torch.tensor(valid)
160
+ output['image'] = torch.from_numpy(image.transpose(2,0,1))
161
+ if self.get_prompt:
162
+ output['prompt'] = prompt
163
+ if self.control_type == "seg":
164
+ output['label'] = torch.from_numpy(np.array(Image.open(label_path)))
165
+ return output
166
+
167
+
168
+ def build_t2i_control_code(args):
169
+ dataset = T2IControlCode(args)
170
+ return dataset
171
+
172
+ if __name__ == '__main__':
173
+
174
+ args = parse_args()
175
+
176
+ logging_dir = Path(args.output_dir, args.logging_dir)
177
+
178
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
179
+
180
+ accelerator = Accelerator(
181
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
182
+ mixed_precision=args.mixed_precision,
183
+ log_with=args.report_to,
184
+ project_config=accelerator_project_config,
185
+ )
186
+
187
+ train_dataset, val_dataset = make_train_dataset(args, None, accelerator)
188
+
189
+
190
+ train_dataloader = torch.utils.data.DataLoader(
191
+ train_dataset,
192
+ shuffle=True,
193
+ collate_fn=train_dataset.collate_fn,
194
+ batch_size=8,
195
+ num_workers=0,
196
+ )
197
+
198
+ from tqdm import tqdm
199
+ for step, batch in tqdm(enumerate(train_dataloader)):
200
+ continue
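A sketch of wiring T2IControlCode into a DataLoader; the argparse.Namespace mirrors the attributes the dataset reads, and code_path must point at a folder of pre-extracted codes (the path below is a placeholder):

    import argparse
    import torch
    from utils.t2i_control import build_t2i_control_code

    args = argparse.Namespace(
        get_image=True, get_prompt=True, get_label=False,
        condition_type='canny', code_path='/path/to/precomputed_codes',
        image_size=256, downsample_size=16,
    )
    dataset = build_t2i_control_code(args)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=8, shuffle=True, num_workers=0,
        collate_fn=dataset.collate_fn,
    )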
vae_ch160v4096z32.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c3ec27ae28a3f87055e83211ea8cc8558bd1985d7b51742d074fb4c2fcf186c
3
+ size 436075834