algohunt committed
Commit c295391 · 1 Parent(s): 484e623

initial_commit

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. Stable3DGen/__init__.py +0 -0
  2. Stable3DGen/__pycache__/__init__.cpython-310.pyc +0 -0
  3. Stable3DGen/hi3dgen/__init__.py +36 -0
  4. Stable3DGen/hi3dgen/__pycache__/__init__.cpython-310.pyc +0 -0
  5. Stable3DGen/hi3dgen/models/__init__.py +98 -0
  6. Stable3DGen/hi3dgen/models/__pycache__/__init__.cpython-310.pyc +0 -0
  7. Stable3DGen/hi3dgen/models/__pycache__/sparse_structure_flow.cpython-310.pyc +0 -0
  8. Stable3DGen/hi3dgen/models/__pycache__/sparse_structure_vae.cpython-310.pyc +0 -0
  9. Stable3DGen/hi3dgen/models/__pycache__/structured_latent_flow.cpython-310.pyc +0 -0
  10. Stable3DGen/hi3dgen/models/sparse_structure_flow.py +228 -0
  11. Stable3DGen/hi3dgen/models/sparse_structure_vae.py +334 -0
  12. Stable3DGen/hi3dgen/models/structured_latent_flow.py +290 -0
  13. Stable3DGen/hi3dgen/models/structured_latent_vae/__init__.py +30 -0
  14. Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/__init__.cpython-310.pyc +0 -0
  15. Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/base.cpython-310.pyc +0 -0
  16. Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/decoder_mesh.cpython-310.pyc +0 -0
  17. Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/encoder.cpython-310.pyc +0 -0
  18. Stable3DGen/hi3dgen/models/structured_latent_vae/base.py +145 -0
  19. Stable3DGen/hi3dgen/models/structured_latent_vae/decoder_mesh.py +195 -0
  20. Stable3DGen/hi3dgen/models/structured_latent_vae/encoder.py +100 -0
  21. Stable3DGen/hi3dgen/modules/__pycache__/norm.cpython-310.pyc +0 -0
  22. Stable3DGen/hi3dgen/modules/__pycache__/spatial.cpython-310.pyc +0 -0
  23. Stable3DGen/hi3dgen/modules/__pycache__/utils.cpython-310.pyc +0 -0
  24. Stable3DGen/hi3dgen/modules/attention/__init__.py +60 -0
  25. Stable3DGen/hi3dgen/modules/attention/__pycache__/__init__.cpython-310.pyc +0 -0
  26. Stable3DGen/hi3dgen/modules/attention/__pycache__/full_attn.cpython-310.pyc +0 -0
  27. Stable3DGen/hi3dgen/modules/attention/__pycache__/modules.cpython-310.pyc +0 -0
  28. Stable3DGen/hi3dgen/modules/attention/full_attn.py +164 -0
  29. Stable3DGen/hi3dgen/modules/attention/modules.py +170 -0
  30. Stable3DGen/hi3dgen/modules/norm.py +49 -0
  31. Stable3DGen/hi3dgen/modules/sparse/__init__.py +126 -0
  32. Stable3DGen/hi3dgen/modules/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  33. Stable3DGen/hi3dgen/modules/sparse/__pycache__/basic.cpython-310.pyc +0 -0
  34. Stable3DGen/hi3dgen/modules/sparse/__pycache__/linear.cpython-310.pyc +0 -0
  35. Stable3DGen/hi3dgen/modules/sparse/__pycache__/nonlinearity.cpython-310.pyc +0 -0
  36. Stable3DGen/hi3dgen/modules/sparse/__pycache__/norm.cpython-310.pyc +0 -0
  37. Stable3DGen/hi3dgen/modules/sparse/__pycache__/spatial.cpython-310.pyc +0 -0
  38. Stable3DGen/hi3dgen/modules/sparse/attention/__init__.py +28 -0
  39. Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/__init__.cpython-310.pyc +0 -0
  40. Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/full_attn.cpython-310.pyc +0 -0
  41. Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/modules.cpython-310.pyc +0 -0
  42. Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/serialized_attn.cpython-310.pyc +0 -0
  43. Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/windowed_attn.cpython-310.pyc +0 -0
  44. Stable3DGen/hi3dgen/modules/sparse/attention/full_attn.py +239 -0
  45. Stable3DGen/hi3dgen/modules/sparse/attention/modules.py +163 -0
  46. Stable3DGen/hi3dgen/modules/sparse/attention/serialized_attn.py +217 -0
  47. Stable3DGen/hi3dgen/modules/sparse/attention/windowed_attn.py +159 -0
  48. Stable3DGen/hi3dgen/modules/sparse/basic.py +483 -0
  49. Stable3DGen/hi3dgen/modules/sparse/conv/__init__.py +45 -0
  50. Stable3DGen/hi3dgen/modules/sparse/conv/__pycache__/__init__.cpython-310.pyc +0 -0
Stable3DGen/__init__.py ADDED
File without changes
Stable3DGen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (161 Bytes).
 
Stable3DGen/hi3dgen/__init__.py ADDED
@@ -0,0 +1,36 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ #
+ # Original file was released under MIT, with the full license text
+ # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ #
+ # This modified file is released under the same license.
+ from . import models
+ from . import modules
+ from . import pipelines
+ from . import representations
Stable3DGen/hi3dgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (300 Bytes).
 
Stable3DGen/hi3dgen/models/__init__.py ADDED
@@ -0,0 +1,98 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ import importlib
+
+ __attributes = {
+     'SparseStructureEncoder': 'sparse_structure_vae',
+     'SparseStructureDecoder': 'sparse_structure_vae',
+     'SparseStructureFlowModel': 'sparse_structure_flow',
+     'SLatEncoder': 'structured_latent_vae',
+     'SLatGaussianDecoder': 'structured_latent_vae',
+     'SLatRadianceFieldDecoder': 'structured_latent_vae',
+     'SLatMeshDecoder': 'structured_latent_vae',
+     'SLatFlowModel': 'structured_latent_flow',
+ }
+
+ __submodules = []
+
+ __all__ = list(__attributes.keys()) + __submodules
+
+ def __getattr__(name):
+     if name not in globals():
+         if name in __attributes:
+             module_name = __attributes[name]
+             module = importlib.import_module(f".{module_name}", __name__)
+             globals()[name] = getattr(module, name)
+         elif name in __submodules:
+             module = importlib.import_module(f".{name}", __name__)
+             globals()[name] = module
+         else:
+             raise AttributeError(f"module {__name__} has no attribute {name}")
+     return globals()[name]
+
+
+ def from_pretrained(path: str, **kwargs):
+     """
+     Load a model from a pretrained checkpoint.
+
+     Args:
+         path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
+             NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
+         **kwargs: Additional arguments for the model constructor.
+     """
+     import os
+     import json
+     from safetensors.torch import load_file
+     is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
+
+     if is_local:
+         config_file = f"{path}.json"
+         model_file = f"{path}.safetensors"
+     else:
+         from huggingface_hub import hf_hub_download
+         path_parts = path.split('/')
+         repo_id = f'{path_parts[0]}/{path_parts[1]}'
+         model_name = '/'.join(path_parts[2:])
+         config_file = hf_hub_download(repo_id, f"{model_name}.json")
+         model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
+
+     with open(config_file, 'r') as f:
+         config = json.load(f)
+     model = __getattr__(config['name'])(**config['args'], **kwargs)
+     model.load_state_dict(load_file(model_file))
+
+     return model
+
+
+ # For Pylance
+ if __name__ == '__main__':
+     from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
+     from .sparse_structure_flow import SparseStructureFlowModel
+     from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
+     from .structured_latent_flow import SLatFlowModel
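
Editor's note: with the lazy __getattr__ loader above, from_pretrained can instantiate any model listed in __attributes from a {path}.json / {path}.safetensors pair. A minimal usage sketch (the import path mirrors the repo layout, and the checkpoint names below are placeholders, not confirmed release paths):

from Stable3DGen.hi3dgen import models

# Local checkpoint: expects "ckpts/slat_flow.json" and "ckpts/slat_flow.safetensors".
model = models.from_pretrained("ckpts/slat_flow")

# Hub checkpoint: "<user>/<repo>/<model_name>" fetches the same two files via
# hf_hub_download; extra kwargs are forwarded to the model constructor.
model = models.from_pretrained("some-user/some-repo/slat_flow", use_fp16=True)
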
Stable3DGen/hi3dgen/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.28 kB).
 
Stable3DGen/hi3dgen/models/__pycache__/sparse_structure_flow.cpython-310.pyc ADDED
Binary file (6.75 kB).
 
Stable3DGen/hi3dgen/models/__pycache__/sparse_structure_vae.cpython-310.pyc ADDED
Binary file (9.2 kB).
 
Stable3DGen/hi3dgen/models/__pycache__/structured_latent_flow.cpython-310.pyc ADDED
Binary file (7.65 kB).
 
Stable3DGen/hi3dgen/models/sparse_structure_flow.py ADDED
@@ -0,0 +1,228 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ from typing import *
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
+ from ..modules.spatial import patchify, unpatchify
+
+
+ class TimestepEmbedder(nn.Module):
+     """
+     Embeds scalar timesteps into vector representations.
+     """
+     def __init__(self, hidden_size, frequency_embedding_size=256):
+         super().__init__()
+         self.mlp = nn.Sequential(
+             nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+             nn.SiLU(),
+             nn.Linear(hidden_size, hidden_size, bias=True),
+         )
+         self.frequency_embedding_size = frequency_embedding_size
+
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10000):
+         """
+         Create sinusoidal timestep embeddings.
+
+         Args:
+             t: a 1-D Tensor of N indices, one per batch element.
+                 These may be fractional.
+             dim: the dimension of the output.
+             max_period: controls the minimum frequency of the embeddings.
+
+         Returns:
+             an (N, D) Tensor of positional embeddings.
+         """
+         # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+         half = dim // 2
+         freqs = torch.exp(
+             -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+         ).to(device=t.device)
+         args = t[:, None].float() * freqs[None]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         t_emb = self.mlp(t_freq)
+         return t_emb
+
+
+ class SparseStructureFlowModel(nn.Module):
+     def __init__(
+         self,
+         resolution: int,
+         in_channels: int,
+         model_channels: int,
+         cond_channels: int,
+         out_channels: int,
+         num_blocks: int,
+         num_heads: Optional[int] = None,
+         num_head_channels: Optional[int] = 64,
+         mlp_ratio: float = 4,
+         patch_size: int = 2,
+         pe_mode: Literal["ape", "rope"] = "ape",
+         use_fp16: bool = False,
+         use_checkpoint: bool = False,
+         share_mod: bool = False,
+         qk_rms_norm: bool = False,
+         qk_rms_norm_cross: bool = False,
+     ):
+         super().__init__()
+         self.resolution = resolution
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.cond_channels = cond_channels
+         self.out_channels = out_channels
+         self.num_blocks = num_blocks
+         self.num_heads = num_heads or model_channels // num_head_channels
+         self.mlp_ratio = mlp_ratio
+         self.patch_size = patch_size
+         self.pe_mode = pe_mode
+         self.use_fp16 = use_fp16
+         self.use_checkpoint = use_checkpoint
+         self.share_mod = share_mod
+         self.qk_rms_norm = qk_rms_norm
+         self.qk_rms_norm_cross = qk_rms_norm_cross
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         self.t_embedder = TimestepEmbedder(model_channels)
+         if share_mod:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(model_channels, 6 * model_channels, bias=True)
+             )
+
+         if pe_mode == "ape":
+             pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
+             coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
+             coords = torch.stack(coords, dim=-1).reshape(-1, 3)
+             pos_emb = pos_embedder(coords)
+             self.register_buffer("pos_emb", pos_emb)
+
+         self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
+
+         self.blocks = nn.ModuleList([
+             ModulatedTransformerCrossBlock(
+                 model_channels,
+                 cond_channels,
+                 num_heads=self.num_heads,
+                 mlp_ratio=self.mlp_ratio,
+                 attn_mode='full',
+                 use_checkpoint=self.use_checkpoint,
+                 use_rope=(pe_mode == "rope"),
+                 share_mod=share_mod,
+                 qk_rms_norm=self.qk_rms_norm,
+                 qk_rms_norm_cross=self.qk_rms_norm_cross,
+             )
+             for _ in range(num_blocks)
+         ])
+
+         self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
+
+         self.initialize_weights()
+         if use_fp16:
+             self.convert_to_fp16()
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.blocks.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.blocks.apply(convert_module_to_f32)
+
+     def initialize_weights(self) -> None:
+         # Initialize transformer layers:
+         def _basic_init(module):
+             if isinstance(module, nn.Linear):
+                 torch.nn.init.xavier_uniform_(module.weight)
+                 if module.bias is not None:
+                     nn.init.constant_(module.bias, 0)
+         self.apply(_basic_init)
+
+         # Initialize timestep embedding MLP:
+         nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+         nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         # Zero-out adaLN modulation layers in DiT blocks:
+         if self.share_mod:
+             nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
+             nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
+         else:
+             for block in self.blocks:
+                 nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
+                 nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
+
+         # Zero-out output layers:
+         nn.init.constant_(self.out_layer.weight, 0)
+         nn.init.constant_(self.out_layer.bias, 0)
+
+     def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
+         assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
+             f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
+
+         h = patchify(x, self.patch_size)
+         h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
+
+         h = self.input_layer(h)
+         h = h + self.pos_emb[None]
+         t_emb = self.t_embedder(t)
+         if self.share_mod:
+             t_emb = self.adaLN_modulation(t_emb)
+         t_emb = t_emb.type(self.dtype)
+         h = h.type(self.dtype)
+         cond = cond.type(self.dtype)
+         for block in self.blocks:
+             h = block(h, t_emb, cond)
+         h = h.type(x.dtype)
+         h = F.layer_norm(h, h.shape[-1:])
+         h = self.out_layer(h)
+
+         h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
+         h = unpatchify(h, self.patch_size).contiguous()
+
+         return h
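
Editor's note: TimestepEmbedder.timestep_embedding can be exercised standalone as a sanity check. This sketch repeats the same sinusoidal formula for an even dim (no model weights involved):

import numpy as np
import torch

def timestep_embedding(t, dim, max_period=10000):
    # Same math as TimestepEmbedder.timestep_embedding for even dim:
    # geometric frequencies from 1 down to 1/max_period, then cos/sin pairs.
    half = dim // 2
    freqs = torch.exp(-np.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = t[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

emb = timestep_embedding(torch.tensor([0.0, 250.0, 999.0]), 256)
print(emb.shape)  # torch.Size([3, 256])
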
Stable3DGen/hi3dgen/models/sparse_structure_vae.py ADDED
@@ -0,0 +1,334 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ from typing import *
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from ..modules.norm import GroupNorm32, ChannelLayerNorm32
+ from ..modules.spatial import pixel_shuffle_3d
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
+
+
+ def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
+     """
+     Return a normalization layer.
+     """
+     if norm_type == "group":
+         return GroupNorm32(32, *args, **kwargs)
+     elif norm_type == "layer":
+         return ChannelLayerNorm32(*args, **kwargs)
+     else:
+         raise ValueError(f"Invalid norm type {norm_type}")
+
+
+ class ResBlock3d(nn.Module):
+     def __init__(
+         self,
+         channels: int,
+         out_channels: Optional[int] = None,
+         norm_type: Literal["group", "layer"] = "layer",
+     ):
+         super().__init__()
+         self.channels = channels
+         self.out_channels = out_channels or channels
+
+         self.norm1 = norm_layer(norm_type, channels)
+         self.norm2 = norm_layer(norm_type, self.out_channels)
+         self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
+         self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
+         self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         h = self.norm1(x)
+         h = F.silu(h)
+         h = self.conv1(h)
+         h = self.norm2(h)
+         h = F.silu(h)
+         h = self.conv2(h)
+         h = h + self.skip_connection(x)
+         return h
+
+
+ class DownsampleBlock3d(nn.Module):
+     def __init__(
+         self,
+         in_channels: int,
+         out_channels: int,
+         mode: Literal["conv", "avgpool"] = "conv",
+     ):
+         assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
+
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+
+         if mode == "conv":
+             self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
+         elif mode == "avgpool":
+             assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         if hasattr(self, "conv"):
+             return self.conv(x)
+         else:
+             return F.avg_pool3d(x, 2)
+
+
+ class UpsampleBlock3d(nn.Module):
+     def __init__(
+         self,
+         in_channels: int,
+         out_channels: int,
+         mode: Literal["conv", "nearest"] = "conv",
+     ):
+         assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
+
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+
+         if mode == "conv":
+             self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
+         elif mode == "nearest":
+             assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         if hasattr(self, "conv"):
+             x = self.conv(x)
+             return pixel_shuffle_3d(x, 2)
+         else:
+             return F.interpolate(x, scale_factor=2, mode="nearest")
+
+
+ class SparseStructureEncoder(nn.Module):
+     """
+     Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
+
+     Args:
+         in_channels (int): Channels of the input.
+         latent_channels (int): Channels of the latent representation.
+         num_res_blocks (int): Number of residual blocks at each resolution.
+         channels (List[int]): Channels of the encoder blocks.
+         num_res_blocks_middle (int): Number of residual blocks in the middle.
+         norm_type (Literal["group", "layer"]): Type of normalization layer.
+         use_fp16 (bool): Whether to use FP16.
+     """
+     def __init__(
+         self,
+         in_channels: int,
+         latent_channels: int,
+         num_res_blocks: int,
+         channels: List[int],
+         num_res_blocks_middle: int = 2,
+         norm_type: Literal["group", "layer"] = "layer",
+         use_fp16: bool = False,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.latent_channels = latent_channels
+         self.num_res_blocks = num_res_blocks
+         self.channels = channels
+         self.num_res_blocks_middle = num_res_blocks_middle
+         self.norm_type = norm_type
+         self.use_fp16 = use_fp16
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
+
+         self.blocks = nn.ModuleList([])
+         for i, ch in enumerate(channels):
+             self.blocks.extend([
+                 ResBlock3d(ch, ch)
+                 for _ in range(num_res_blocks)
+             ])
+             if i < len(channels) - 1:
+                 self.blocks.append(
+                     DownsampleBlock3d(ch, channels[i+1])
+                 )
+
+         self.middle_block = nn.Sequential(*[
+             ResBlock3d(channels[-1], channels[-1])
+             for _ in range(num_res_blocks_middle)
+         ])
+
+         self.out_layer = nn.Sequential(
+             norm_layer(norm_type, channels[-1]),
+             nn.SiLU(),
+             nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
+         )
+
+         if use_fp16:
+             self.convert_to_fp16()
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.use_fp16 = True
+         self.dtype = torch.float16
+         self.blocks.apply(convert_module_to_f16)
+         self.middle_block.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.use_fp16 = False
+         self.dtype = torch.float32
+         self.blocks.apply(convert_module_to_f32)
+         self.middle_block.apply(convert_module_to_f32)
+
+     def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
+         h = self.input_layer(x)
+         h = h.type(self.dtype)
+
+         for block in self.blocks:
+             h = block(h)
+         h = self.middle_block(h)
+
+         h = h.type(x.dtype)
+         h = self.out_layer(h)
+
+         mean, logvar = h.chunk(2, dim=1)
+
+         if sample_posterior:
+             std = torch.exp(0.5 * logvar)
+             z = mean + std * torch.randn_like(std)
+         else:
+             z = mean
+
+         if return_raw:
+             return z, mean, logvar
+         return z
+
+
+ class SparseStructureDecoder(nn.Module):
+     """
+     Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).
+
+     Args:
+         out_channels (int): Channels of the output.
+         latent_channels (int): Channels of the latent representation.
+         num_res_blocks (int): Number of residual blocks at each resolution.
+         channels (List[int]): Channels of the decoder blocks.
+         num_res_blocks_middle (int): Number of residual blocks in the middle.
+         norm_type (Literal["group", "layer"]): Type of normalization layer.
+         use_fp16 (bool): Whether to use FP16.
+     """
+     def __init__(
+         self,
+         out_channels: int,
+         latent_channels: int,
+         num_res_blocks: int,
+         channels: List[int],
+         num_res_blocks_middle: int = 2,
+         norm_type: Literal["group", "layer"] = "layer",
+         use_fp16: bool = False,
+     ):
+         super().__init__()
+         self.out_channels = out_channels
+         self.latent_channels = latent_channels
+         self.num_res_blocks = num_res_blocks
+         self.channels = channels
+         self.num_res_blocks_middle = num_res_blocks_middle
+         self.norm_type = norm_type
+         self.use_fp16 = use_fp16
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
+
+         self.middle_block = nn.Sequential(*[
+             ResBlock3d(channels[0], channels[0])
+             for _ in range(num_res_blocks_middle)
+         ])
+
+         self.blocks = nn.ModuleList([])
+         for i, ch in enumerate(channels):
+             self.blocks.extend([
+                 ResBlock3d(ch, ch)
+                 for _ in range(num_res_blocks)
+             ])
+             if i < len(channels) - 1:
+                 self.blocks.append(
+                     UpsampleBlock3d(ch, channels[i+1])
+                 )
+
+         self.out_layer = nn.Sequential(
+             norm_layer(norm_type, channels[-1]),
+             nn.SiLU(),
+             nn.Conv3d(channels[-1], out_channels, 3, padding=1)
+         )
+
+         if use_fp16:
+             self.convert_to_fp16()
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.use_fp16 = True
+         self.dtype = torch.float16
+         self.blocks.apply(convert_module_to_f16)
+         self.middle_block.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.use_fp16 = False
+         self.dtype = torch.float32
+         self.blocks.apply(convert_module_to_f32)
+         self.middle_block.apply(convert_module_to_f32)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         h = self.input_layer(x)
+
+         h = h.type(self.dtype)
+
+         h = self.middle_block(h)
+         for block in self.blocks:
+             h = block(h)
+
+         h = h.type(x.dtype)
+         h = self.out_layer(h)
+         return h
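
Editor's note: the encoder's forward pass ends with a standard VAE reparameterization, splitting the output channels into mean and log-variance and sampling z = mean + std * eps. A minimal dense-tensor sketch of that step (shapes are illustrative only):

import torch

latent_channels = 8
h = torch.randn(2, latent_channels * 2, 4, 4, 4)  # stand-in for out_layer output
mean, logvar = h.chunk(2, dim=1)                  # each (2, 8, 4, 4, 4)
std = torch.exp(0.5 * logvar)
z = mean + std * torch.randn_like(std)            # the sample_posterior=True branch
print(z.shape)  # torch.Size([2, 8, 4, 4, 4])
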
Stable3DGen/hi3dgen/models/structured_latent_flow.py ADDED
@@ -0,0 +1,290 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ from typing import *
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
+ from ..modules.transformer import AbsolutePositionEmbedder
+ from ..modules.norm import LayerNorm32
+ from ..modules import sparse as sp
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
+ from .sparse_structure_flow import TimestepEmbedder
+
+
+ class SparseResBlock3d(nn.Module):
+     def __init__(
+         self,
+         channels: int,
+         emb_channels: int,
+         out_channels: Optional[int] = None,
+         downsample: bool = False,
+         upsample: bool = False,
+     ):
+         super().__init__()
+         self.channels = channels
+         self.emb_channels = emb_channels
+         self.out_channels = out_channels or channels
+         self.downsample = downsample
+         self.upsample = upsample
+
+         assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
+
+         self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
+         self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
+         self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
+         self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
+         self.emb_layers = nn.Sequential(
+             nn.SiLU(),
+             nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
+         )
+         self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
+         self.updown = None
+         if self.downsample:
+             self.updown = sp.SparseDownsample(2)
+         elif self.upsample:
+             self.updown = sp.SparseUpsample(2)
+
+     def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
+         if self.updown is not None:
+             x = self.updown(x)
+         return x
+
+     def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
+         emb_out = self.emb_layers(emb).type(x.dtype)
+         scale, shift = torch.chunk(emb_out, 2, dim=1)
+
+         x = self._updown(x)
+         h = x.replace(self.norm1(x.feats))
+         h = h.replace(F.silu(h.feats))
+         h = self.conv1(h)
+         h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
+         h = h.replace(F.silu(h.feats))
+         h = self.conv2(h)
+         h = h + self.skip_connection(x)
+
+         return h
+
+
+ class SLatFlowModel(nn.Module):
+     def __init__(
+         self,
+         resolution: int,
+         in_channels: int,
+         model_channels: int,
+         cond_channels: int,
+         out_channels: int,
+         num_blocks: int,
+         num_heads: Optional[int] = None,
+         num_head_channels: Optional[int] = 64,
+         mlp_ratio: float = 4,
+         patch_size: int = 2,
+         num_io_res_blocks: int = 2,
+         io_block_channels: List[int] = None,
+         pe_mode: Literal["ape", "rope"] = "ape",
+         use_fp16: bool = False,
+         use_checkpoint: bool = False,
+         use_skip_connection: bool = True,
+         share_mod: bool = False,
+         qk_rms_norm: bool = False,
+         qk_rms_norm_cross: bool = False,
+     ):
+         super().__init__()
+         self.resolution = resolution
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.cond_channels = cond_channels
+         self.out_channels = out_channels
+         self.num_blocks = num_blocks
+         self.num_heads = num_heads or model_channels // num_head_channels
+         self.mlp_ratio = mlp_ratio
+         self.patch_size = patch_size
+         self.num_io_res_blocks = num_io_res_blocks
+         self.io_block_channels = io_block_channels
+         self.pe_mode = pe_mode
+         self.use_fp16 = use_fp16
+         self.use_checkpoint = use_checkpoint
+         self.use_skip_connection = use_skip_connection
+         self.share_mod = share_mod
+         self.qk_rms_norm = qk_rms_norm
+         self.qk_rms_norm_cross = qk_rms_norm_cross
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
+         assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"
+
+         self.t_embedder = TimestepEmbedder(model_channels)
+         if share_mod:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(model_channels, 6 * model_channels, bias=True)
+             )
+
+         if pe_mode == "ape":
+             self.pos_embedder = AbsolutePositionEmbedder(model_channels)
+
+         self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
+         self.input_blocks = nn.ModuleList([])
+         for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
+             self.input_blocks.extend([
+                 SparseResBlock3d(
+                     chs,
+                     model_channels,
+                     out_channels=chs,
+                 )
+                 for _ in range(num_io_res_blocks-1)
+             ])
+             self.input_blocks.append(
+                 SparseResBlock3d(
+                     chs,
+                     model_channels,
+                     out_channels=next_chs,
+                     downsample=True,
+                 )
+             )
+
+         self.blocks = nn.ModuleList([
+             ModulatedSparseTransformerCrossBlock(
+                 model_channels,
+                 cond_channels,
+                 num_heads=self.num_heads,
+                 mlp_ratio=self.mlp_ratio,
+                 attn_mode='full',
+                 use_checkpoint=self.use_checkpoint,
+                 use_rope=(pe_mode == "rope"),
+                 share_mod=self.share_mod,
+                 qk_rms_norm=self.qk_rms_norm,
+                 qk_rms_norm_cross=self.qk_rms_norm_cross,
+             )
+             for _ in range(num_blocks)
+         ])
+
+         self.out_blocks = nn.ModuleList([])
+         for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
+             self.out_blocks.append(
+                 SparseResBlock3d(
+                     prev_chs * 2 if self.use_skip_connection else prev_chs,
+                     model_channels,
+                     out_channels=chs,
+                     upsample=True,
+                 )
+             )
+             self.out_blocks.extend([
+                 SparseResBlock3d(
+                     chs * 2 if self.use_skip_connection else chs,
+                     model_channels,
+                     out_channels=chs,
+                 )
+                 for _ in range(num_io_res_blocks-1)
+             ])
+         self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
+
+         self.initialize_weights()
+         if use_fp16:
+             self.convert_to_fp16()
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.input_blocks.apply(convert_module_to_f16)
+         self.blocks.apply(convert_module_to_f16)
+         self.out_blocks.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.input_blocks.apply(convert_module_to_f32)
+         self.blocks.apply(convert_module_to_f32)
+         self.out_blocks.apply(convert_module_to_f32)
+
+     def initialize_weights(self) -> None:
+         # Initialize transformer layers:
+         def _basic_init(module):
+             if isinstance(module, nn.Linear):
+                 torch.nn.init.xavier_uniform_(module.weight)
+                 if module.bias is not None:
+                     nn.init.constant_(module.bias, 0)
+         self.apply(_basic_init)
+
+         # Initialize timestep embedding MLP:
+         nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+         nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         # Zero-out adaLN modulation layers in DiT blocks:
+         if self.share_mod:
+             nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
+             nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
+         else:
+             for block in self.blocks:
+                 nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
+                 nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
+
+         # Zero-out output layers:
+         nn.init.constant_(self.out_layer.weight, 0)
+         nn.init.constant_(self.out_layer.bias, 0)
+
+     def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
+         h = self.input_layer(x).type(self.dtype)
+         t_emb = self.t_embedder(t)
+         if self.share_mod:
+             t_emb = self.adaLN_modulation(t_emb)
+         t_emb = t_emb.type(self.dtype)
+         cond = cond.type(self.dtype)
+
+         skips = []
+         # pack with input blocks
+         for block in self.input_blocks:
+             h = block(h, t_emb)
+             skips.append(h.feats)
+
+         if self.pe_mode == "ape":
+             h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
+         for block in self.blocks:
+             h = block(h, t_emb, cond)
+
+         # unpack with output blocks
+         for block, skip in zip(self.out_blocks, reversed(skips)):
+             if self.use_skip_connection:
+                 h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
+             else:
+                 h = block(h, t_emb)
+
+         h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
+         h = self.out_layer(h.type(x.dtype))
+         return h
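
Editor's note: SLatFlowModel.forward wires its IO blocks as a U-Net: input blocks push features onto a stack ("pack"), and output blocks pop them in reverse and concatenate along channels ("unpack"). A toy dense analogue of that wiring (the real model uses sparse tensors and timestep conditioning):

import torch
import torch.nn as nn

class ToySkipNet(nn.Module):
    def __init__(self, ch=8):
        super().__init__()
        self.input_blocks = nn.ModuleList([nn.Conv1d(ch, ch, 3, padding=1) for _ in range(2)])
        # Each output block sees its own features concatenated with a popped skip.
        self.out_blocks = nn.ModuleList([nn.Conv1d(ch * 2, ch, 3, padding=1) for _ in range(2)])

    def forward(self, h):
        skips = []
        for block in self.input_blocks:   # pack with input blocks
            h = block(h)
            skips.append(h)
        for block, skip in zip(self.out_blocks, reversed(skips)):  # unpack in LIFO order
            h = block(torch.cat([h, skip], dim=1))
        return h

print(ToySkipNet()(torch.randn(1, 8, 16)).shape)  # torch.Size([1, 8, 16])
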
Stable3DGen/hi3dgen/models/structured_latent_vae/__init__.py ADDED
@@ -0,0 +1,30 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ from .encoder import SLatEncoder
+ from .decoder_mesh import SLatMeshDecoder
Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (293 Bytes).
 
Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/base.cpython-310.pyc ADDED
Binary file (4.37 kB).
 
Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/decoder_mesh.cpython-310.pyc ADDED
Binary file (5.33 kB).
 
Stable3DGen/hi3dgen/models/structured_latent_vae/__pycache__/encoder.cpython-310.pyc ADDED
Binary file (2.36 kB).
 
Stable3DGen/hi3dgen/models/structured_latent_vae/base.py ADDED
@@ -0,0 +1,145 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # Copyright (c) [2025] [Chongjie Ye]
+ # SPDX-License-Identifier: MIT
+ # This file has been modified by Chongjie Ye on 2025/04/10
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
+ # This modified file is released under the same license.
+ from typing import *
+ import torch
+ import torch.nn as nn
+ from ...modules.utils import convert_module_to_f16, convert_module_to_f32
+ from ...modules import sparse as sp
+ from ...modules.transformer import AbsolutePositionEmbedder
+ from ...modules.sparse.transformer import SparseTransformerBlock
+
+
+ def block_attn_config(self):
+     """
+     Return the attention configuration of the model.
+     """
+     for i in range(self.num_blocks):
+         if self.attn_mode == "shift_window":
+             yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
+         elif self.attn_mode == "shift_sequence":
+             yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
+         elif self.attn_mode == "shift_order":
+             yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
+         elif self.attn_mode == "full":
+             yield "full", None, None, None, None
+         elif self.attn_mode == "swin":
+             yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None
+
+
+ class SparseTransformerBase(nn.Module):
+     """
+     Sparse Transformer without output layers.
+     Serve as the base class for encoder and decoder.
+     """
+     def __init__(
+         self,
+         in_channels: int,
+         model_channels: int,
+         num_blocks: int,
+         num_heads: Optional[int] = None,
+         num_head_channels: Optional[int] = 64,
+         mlp_ratio: float = 4.0,
+         attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
+         window_size: Optional[int] = None,
+         pe_mode: Literal["ape", "rope"] = "ape",
+         use_fp16: bool = False,
+         use_checkpoint: bool = False,
+         qk_rms_norm: bool = False,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         self.num_blocks = num_blocks
+         self.window_size = window_size
+         self.num_heads = num_heads or model_channels // num_head_channels
+         self.mlp_ratio = mlp_ratio
+         self.attn_mode = attn_mode
+         self.pe_mode = pe_mode
+         self.use_fp16 = use_fp16
+         self.use_checkpoint = use_checkpoint
+         self.qk_rms_norm = qk_rms_norm
+         self.dtype = torch.float16 if use_fp16 else torch.float32
+
+         if pe_mode == "ape":
+             self.pos_embedder = AbsolutePositionEmbedder(model_channels)
+
+         self.input_layer = sp.SparseLinear(in_channels, model_channels)
+         self.blocks = nn.ModuleList([
+             SparseTransformerBlock(
+                 model_channels,
+                 num_heads=self.num_heads,
+                 mlp_ratio=self.mlp_ratio,
+                 attn_mode=attn_mode,
+                 window_size=window_size,
+                 shift_sequence=shift_sequence,
+                 shift_window=shift_window,
+                 serialize_mode=serialize_mode,
+                 use_checkpoint=self.use_checkpoint,
+                 use_rope=(pe_mode == "rope"),
+                 qk_rms_norm=self.qk_rms_norm,
+             )
+             for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
+         ])
+
+     @property
+     def device(self) -> torch.device:
+         """
+         Return the device of the model.
+         """
+         return next(self.parameters()).device
+
+     def convert_to_fp16(self) -> None:
+         """
+         Convert the torso of the model to float16.
+         """
+         self.blocks.apply(convert_module_to_f16)
+
+     def convert_to_fp32(self) -> None:
+         """
+         Convert the torso of the model to float32.
+         """
+         self.blocks.apply(convert_module_to_f32)
+
+     def initialize_weights(self) -> None:
+         # Initialize transformer layers:
+         def _basic_init(module):
+             if isinstance(module, nn.Linear):
+                 torch.nn.init.xavier_uniform_(module.weight)
+                 if module.bias is not None:
+                     nn.init.constant_(module.bias, 0)
+         self.apply(_basic_init)
+
+     def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
+         h = self.input_layer(x)
+         if self.pe_mode == "ape":
+             h = h + self.pos_embedder(x.coords[:, 1:])
+         h = h.type(self.dtype)
+         for block in self.blocks:
+             h = block(h)
+         return h
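
Editor's note: block_attn_config decides each block's attention flavor from attn_mode; in "swin" mode, for example, it alternates the window shift between 0 and window_size // 2, as in shifted-window attention. A standalone sketch of what the generator yields (only the "swin" branch is reproduced):

class Cfg:
    num_blocks, attn_mode, window_size = 4, "swin", 8

def block_attn_config(self):
    # "swin" branch only, copied from the generator above.
    for i in range(self.num_blocks):
        if self.attn_mode == "swin":
            yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None

for cfg in block_attn_config(Cfg()):
    print(cfg)
# ('windowed', 8, None, 0, None)
# ('windowed', 8, None, 4, None)  <- shift alternates block by block
# ('windowed', 8, None, 0, None)
# ('windowed', 8, None, 4, None)
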
Stable3DGen/hi3dgen/models/structured_latent_vae/decoder_mesh.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # Copyright (c) [2025] [Chongjie Ye]
25
+ # SPDX-License-Identifier: MIT
26
+ # This file has been modified by Chongjie Ye on 2025/04/10
27
+ # Original file was released under MIT, with the full license text # available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
28
+ # This modified file is released under the same license.
29
+ from typing import *
30
+ import torch
31
+ import torch.nn as nn
32
+ import torch.nn.functional as F
33
+ import numpy as np
34
+ from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
35
+ from ...modules import sparse as sp
36
+ from .base import SparseTransformerBase
37
+ from ...representations import MeshExtractResult
38
+ from ...representations.mesh import SparseFeatures2Mesh
39
+
40
+
41
+ class SparseSubdivideBlock3d(nn.Module):
42
+ """
43
+ A 3D subdivide block that can subdivide the sparse tensor.
44
+
45
+ Args:
46
+ channels: channels in the inputs and outputs.
47
+ out_channels: if specified, the number of output channels.
48
+ num_groups: the number of groups for the group norm.
49
+ """
50
+ def __init__(
51
+ self,
52
+ channels: int,
53
+ resolution: int,
54
+ out_channels: Optional[int] = None,
55
+ num_groups: int = 32
56
+ ):
57
+ super().__init__()
58
+ self.channels = channels
59
+ self.resolution = resolution
60
+ self.out_resolution = resolution * 2
61
+ self.out_channels = out_channels or channels
62
+
63
+ self.act_layers = nn.Sequential(
64
+ sp.SparseGroupNorm32(num_groups, channels),
65
+ sp.SparseSiLU()
66
+ )
67
+
68
+ self.sub = sp.SparseSubdivide()
69
+
70
+ self.out_layers = nn.Sequential(
71
+ sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
72
+ sp.SparseGroupNorm32(num_groups, self.out_channels),
73
+ sp.SparseSiLU(),
74
+ zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
75
+ )
76
+
77
+ if self.out_channels == channels:
78
+ self.skip_connection = nn.Identity()
79
+ else:
80
+ self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
81
+
82
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
83
+ """
84
+ Apply the block to a Tensor, conditioned on a timestep embedding.
85
+
86
+ Args:
87
+ x: an [N x C x ...] Tensor of features.
88
+ Returns:
89
+ an [N x C x ...] Tensor of outputs.
90
+ """
91
+ h = self.act_layers(x)
92
+ h = self.sub(h)
93
+ x = self.sub(x)
94
+ h = self.out_layers(h)
95
+ h = h + self.skip_connection(x)
96
+ return h
97
+
98
+
99
+ class SLatMeshDecoder(SparseTransformerBase):
100
+ def __init__(
101
+ self,
102
+ resolution: int,
103
+ model_channels: int,
104
+ latent_channels: int,
105
+ num_blocks: int,
106
+ num_heads: Optional[int] = None,
107
+ num_head_channels: Optional[int] = 64,
108
+ mlp_ratio: float = 4,
109
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
110
+ window_size: int = 8,
111
+ pe_mode: Literal["ape", "rope"] = "ape",
112
+ use_fp16: bool = False,
113
+ use_checkpoint: bool = False,
114
+ qk_rms_norm: bool = False,
115
+ representation_config: dict = None,
116
+ ):
117
+ super().__init__(
118
+ in_channels=latent_channels,
119
+ model_channels=model_channels,
120
+ num_blocks=num_blocks,
121
+ num_heads=num_heads,
122
+ num_head_channels=num_head_channels,
123
+ mlp_ratio=mlp_ratio,
124
+ attn_mode=attn_mode,
125
+ window_size=window_size,
126
+ pe_mode=pe_mode,
127
+ use_fp16=use_fp16,
128
+ use_checkpoint=use_checkpoint,
129
+ qk_rms_norm=qk_rms_norm,
130
+ )
131
+ self.resolution = resolution
132
+ self.rep_config = representation_config
133
+ self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
134
+ self.out_channels = self.mesh_extractor.feats_channels
135
+ self.upsample = nn.ModuleList([
136
+ SparseSubdivideBlock3d(
137
+ channels=model_channels,
138
+ resolution=resolution,
139
+ out_channels=model_channels // 4
140
+ ),
141
+ SparseSubdivideBlock3d(
142
+ channels=model_channels // 4,
143
+ resolution=resolution * 2,
144
+ out_channels=model_channels // 8
145
+ )
146
+ ])
147
+ self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
148
+
149
+ self.initialize_weights()
150
+ if use_fp16:
151
+ self.convert_to_fp16()
152
+
153
+ def initialize_weights(self) -> None:
154
+ super().initialize_weights()
155
+ # Zero-out output layers:
156
+ nn.init.constant_(self.out_layer.weight, 0)
157
+ nn.init.constant_(self.out_layer.bias, 0)
158
+
159
+ def convert_to_fp16(self) -> None:
160
+ """
161
+ Convert the torso of the model to float16.
162
+ """
163
+ super().convert_to_fp16()
164
+ self.upsample.apply(convert_module_to_f16)
165
+
166
+ def convert_to_fp32(self) -> None:
167
+ """
168
+ Convert the torso of the model to float32.
169
+ """
170
+ super().convert_to_fp32()
171
+ self.upsample.apply(convert_module_to_f32)
172
+
173
+ def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
174
+ """
175
+ Convert a batch of network outputs to 3D representations.
176
+
177
+ Args:
178
+ x: The [N x * x C] sparse tensor output by the network.
179
+
180
+ Returns:
181
+ list of representations
182
+ """
183
+ ret = []
184
+ for i in range(x.shape[0]):
185
+ mesh = self.mesh_extractor(x[i], training=self.training)
186
+ ret.append(mesh)
187
+ return ret
188
+
189
+ def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
190
+ h = super().forward(x)
191
+ for block in self.upsample:
192
+ h = block(h)
193
+ h = h.type(x.dtype)
194
+ h = self.out_layer(h)
195
+ return self.to_representation(h)
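A minimal, dependency-free sketch of the upsampling arithmetic above (the channel and resolution values are hypothetical; the real ones come from the model config): each SparseSubdivideBlock3d doubles the grid resolution while the channel count shrinks, and the mesh extractor runs at four times the latent resolution.

    model_channels, resolution = 768, 64              # hypothetical config values
    stages = [(model_channels, resolution)]
    for div in (4, 8):                                # channel divisors used by self.upsample
        resolution *= 2                               # each subdivide block doubles the grid
        stages.append((model_channels // div, resolution))
    print(stages)                                     # [(768, 64), (192, 128), (96, 256)]
    # SparseFeatures2Mesh is constructed with res = input_resolution * 4,
    # i.e. exactly the final grid reached after the two subdivide stages.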
Stable3DGen/hi3dgen/models/structured_latent_vae/encoder.py ADDED
@@ -0,0 +1,100 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # Copyright (c) [2025] [Chongjie Ye]
25
+ # SPDX-License-Identifier: MIT
26
+ # This file has been modified by Chongjie Ye on 2025/04/10
27
+ # Original file was released under MIT, with the full license text available at https://github.com/atong01/conditional-flow-matching/blob/1.0.7/LICENSE.
28
+ # This modified file is released under the same license.
29
+ from typing import *
30
+ import torch
31
+ import torch.nn as nn
32
+ import torch.nn.functional as F
33
+ from ...modules import sparse as sp
34
+ from .base import SparseTransformerBase
35
+
36
+
37
+ class SLatEncoder(SparseTransformerBase):
38
+ def __init__(
39
+ self,
40
+ resolution: int,
41
+ in_channels: int,
42
+ model_channels: int,
43
+ latent_channels: int,
44
+ num_blocks: int,
45
+ num_heads: Optional[int] = None,
46
+ num_head_channels: Optional[int] = 64,
47
+ mlp_ratio: float = 4,
48
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
49
+ window_size: int = 8,
50
+ pe_mode: Literal["ape", "rope"] = "ape",
51
+ use_fp16: bool = False,
52
+ use_checkpoint: bool = False,
53
+ qk_rms_norm: bool = False,
54
+ ):
55
+ super().__init__(
56
+ in_channels=in_channels,
57
+ model_channels=model_channels,
58
+ num_blocks=num_blocks,
59
+ num_heads=num_heads,
60
+ num_head_channels=num_head_channels,
61
+ mlp_ratio=mlp_ratio,
62
+ attn_mode=attn_mode,
63
+ window_size=window_size,
64
+ pe_mode=pe_mode,
65
+ use_fp16=use_fp16,
66
+ use_checkpoint=use_checkpoint,
67
+ qk_rms_norm=qk_rms_norm,
68
+ )
69
+ self.resolution = resolution
70
+ self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
71
+
72
+ self.initialize_weights()
73
+ if use_fp16:
74
+ self.convert_to_fp16()
75
+
76
+ def initialize_weights(self) -> None:
77
+ super().initialize_weights()
78
+ # Zero-out output layers:
79
+ nn.init.constant_(self.out_layer.weight, 0)
80
+ nn.init.constant_(self.out_layer.bias, 0)
81
+
82
+ def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
83
+ h = super().forward(x)
84
+ h = h.type(x.dtype)
85
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
86
+ h = self.out_layer(h)
87
+
88
+ # Sample from the posterior distribution
89
+ mean, logvar = h.feats.chunk(2, dim=-1)
90
+ if sample_posterior:
91
+ std = torch.exp(0.5 * logvar)
92
+ z = mean + std * torch.randn_like(std)
93
+ else:
94
+ z = mean
95
+ z = h.replace(z)
96
+
97
+ if return_raw:
98
+ return z, mean, logvar
99
+ else:
100
+ return z
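The sampling above is the standard VAE reparameterization trick. A self-contained sketch of the same computation on dense tensors:

    import torch

    mean = torch.zeros(5, 8)                     # per-token posterior mean
    logvar = torch.zeros(5, 8)                   # per-token posterior log-variance
    std = torch.exp(0.5 * logvar)                # logvar -> standard deviation
    z = mean + std * torch.randn_like(std)       # sample_posterior=True path
    z_det = mean                                 # sample_posterior=False path (posterior mean)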
Stable3DGen/hi3dgen/modules/__pycache__/norm.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
Stable3DGen/hi3dgen/modules/__pycache__/spatial.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
Stable3DGen/hi3dgen/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
Stable3DGen/hi3dgen/modules/attention/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+
27
+ BACKEND = 'xformers'
28
+ DEBUG = False
29
+
30
+ def __from_env():
31
+ import os
32
+
33
+ global BACKEND
34
+ global DEBUG
35
+
36
+ env_attn_backend = os.environ.get('ATTN_BACKEND')
37
+ env_attn_debug = os.environ.get('ATTN_DEBUG')
38
+
39
+ if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
40
+ BACKEND = env_attn_backend
41
+ if env_attn_debug is not None:
42
+ DEBUG = env_attn_debug == '1'
43
+
44
+ print(f"[ATTENTION] Using backend: {BACKEND}")
45
+
46
+
47
+ __from_env()
48
+
49
+
50
+ def set_backend(backend: Literal['xformers', 'flash_attn', 'sdpa', 'naive']):
51
+ global BACKEND
52
+ BACKEND = backend
53
+
54
+ def set_debug(debug: bool):
55
+ global DEBUG
56
+ DEBUG = debug
57
+
58
+
59
+ from .full_attn import *
60
+ from .modules import *
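Usage sketch (import path assumed from this repository's layout). The backend is read from the environment exactly once, at import time, so ATTN_BACKEND must be set before the first import; note that full_attn.py binds BACKEND when it is imported, so a later set_backend() call does not retroactively switch submodules that were already loaded.

    import os
    os.environ['ATTN_BACKEND'] = 'sdpa'   # must be set before the package is imported
    from Stable3DGen.hi3dgen.modules import attention   # prints "[ATTENTION] Using backend: sdpa"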
Stable3DGen/hi3dgen/modules/attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (943 Bytes). View file
 
Stable3DGen/hi3dgen/modules/attention/__pycache__/full_attn.cpython-310.pyc ADDED
Binary file (4.16 kB). View file
 
Stable3DGen/hi3dgen/modules/attention/__pycache__/modules.cpython-310.pyc ADDED
Binary file (5.54 kB). View file
 
Stable3DGen/hi3dgen/modules/attention/full_attn.py ADDED
@@ -0,0 +1,164 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+ import torch
27
+ import math
28
+ from . import DEBUG, BACKEND
29
+
30
+ if BACKEND == 'xformers':
31
+ import xformers.ops as xops
32
+ elif BACKEND == 'flash_attn':
33
+ import flash_attn
34
+ elif BACKEND == 'sdpa':
35
+ from torch.nn.functional import scaled_dot_product_attention as sdpa
36
+ elif BACKEND == 'naive':
37
+ pass
38
+ else:
39
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
40
+
41
+
42
+ __all__ = [
43
+ 'scaled_dot_product_attention',
44
+ ]
45
+
46
+
47
+ def _naive_sdpa(q, k, v):
48
+ """
49
+ Naive implementation of scaled dot product attention.
50
+ """
51
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
52
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
53
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
54
+ scale_factor = 1 / math.sqrt(q.size(-1))
55
+ attn_weight = q @ k.transpose(-2, -1) * scale_factor
56
+ attn_weight = torch.softmax(attn_weight, dim=-1)
57
+ out = attn_weight @ v
58
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
59
+ return out
60
+
61
+
62
+ @overload
63
+ def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
64
+ """
65
+ Apply scaled dot product attention.
66
+
67
+ Args:
68
+ qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
69
+ """
70
+ ...
71
+
72
+ @overload
73
+ def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
74
+ """
75
+ Apply scaled dot product attention.
76
+
77
+ Args:
78
+ q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
79
+ kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
80
+ """
81
+ ...
82
+
83
+ @overload
84
+ def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
85
+ """
86
+ Apply scaled dot product attention.
87
+
88
+ Args:
89
+ q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
90
+ k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
91
+ v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
92
+
93
+ Note:
94
+ k and v are assumed to have the same sequence length.
95
+ """
96
+ ...
97
+
98
+ def scaled_dot_product_attention(*args, **kwargs):
99
+ arg_names_dict = {
100
+ 1: ['qkv'],
101
+ 2: ['q', 'kv'],
102
+ 3: ['q', 'k', 'v']
103
+ }
104
+ num_all_args = len(args) + len(kwargs)
105
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
106
+ for key in arg_names_dict[num_all_args][len(args):]:
107
+ assert key in kwargs, f"Missing argument {key}"
108
+
109
+ if num_all_args == 1:
110
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
111
+ assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
112
+ device = qkv.device
113
+
114
+ elif num_all_args == 2:
115
+ q = args[0] if len(args) > 0 else kwargs['q']
116
+ kv = args[1] if len(args) > 1 else kwargs['kv']
117
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
118
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
119
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
120
+ device = q.device
121
+
122
+ elif num_all_args == 3:
123
+ q = args[0] if len(args) > 0 else kwargs['q']
124
+ k = args[1] if len(args) > 1 else kwargs['k']
125
+ v = args[2] if len(args) > 2 else kwargs['v']
126
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
127
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
128
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
129
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
130
+ device = q.device
131
+
132
+ if BACKEND == 'xformers':
133
+ if num_all_args == 1:
134
+ q, k, v = qkv.unbind(dim=2)
135
+ elif num_all_args == 2:
136
+ k, v = kv.unbind(dim=2)
137
+ out = xops.memory_efficient_attention(q, k, v)
138
+ elif BACKEND == 'flash_attn':
139
+ if num_all_args == 1:
140
+ out = flash_attn.flash_attn_qkvpacked_func(qkv)
141
+ elif num_all_args == 2:
142
+ out = flash_attn.flash_attn_kvpacked_func(q, kv)
143
+ elif num_all_args == 3:
144
+ out = flash_attn.flash_attn_func(q, k, v)
145
+ elif BACKEND == 'sdpa':
146
+ if num_all_args == 1:
147
+ q, k, v = qkv.unbind(dim=2)
148
+ elif num_all_args == 2:
149
+ k, v = kv.unbind(dim=2)
150
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
151
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
152
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
153
+ out = sdpa(q, k, v) # [N, H, L, C]
154
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
155
+ elif BACKEND == 'naive':
156
+ if num_all_args == 1:
157
+ q, k, v = qkv.unbind(dim=2)
158
+ elif num_all_args == 2:
159
+ k, v = kv.unbind(dim=2)
160
+ out = _naive_sdpa(q, k, v)
161
+ else:
162
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
163
+
164
+ return out
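A smoke test for the dispatcher above, assuming the 'naive' backend so that no optional dependency (xformers/flash_attn) is needed; it checks that the packed and unpacked calling conventions agree:

    import os
    os.environ['ATTN_BACKEND'] = 'naive'   # select the dependency-free path before import
    import torch
    from Stable3DGen.hi3dgen.modules.attention import scaled_dot_product_attention

    N, L, H, C = 2, 16, 4, 32
    qkv = torch.randn(N, L, 3, H, C)
    out_packed = scaled_dot_product_attention(qkv)        # [N, L, 3, H, C] packed form
    q, k, v = qkv.unbind(dim=2)
    out_split = scaled_dot_product_attention(q, k, v)     # separate q, k, v form
    assert torch.allclose(out_packed, out_split, atol=1e-5)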
Stable3DGen/hi3dgen/modules/attention/modules.py ADDED
@@ -0,0 +1,170 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+ import torch
27
+ import torch.nn as nn
28
+ import torch.nn.functional as F
29
+ from .full_attn import scaled_dot_product_attention
30
+
31
+
32
+ class MultiHeadRMSNorm(nn.Module):
33
+ def __init__(self, dim: int, heads: int):
34
+ super().__init__()
35
+ self.scale = dim ** 0.5
36
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
37
+
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
40
+
41
+
42
+ class RotaryPositionEmbedder(nn.Module):
43
+ def __init__(self, hidden_size: int, in_channels: int = 3):
44
+ super().__init__()
45
+ assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
46
+ self.hidden_size = hidden_size
47
+ self.in_channels = in_channels
48
+ self.freq_dim = hidden_size // in_channels // 2
49
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
50
+ self.freqs = 1.0 / (10000 ** self.freqs)
51
+
52
+ def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
53
+ self.freqs = self.freqs.to(indices.device)
54
+ phases = torch.outer(indices, self.freqs)
55
+ phases = torch.polar(torch.ones_like(phases), phases)
56
+ return phases
57
+
58
+ def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
59
+ x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
60
+ x_rotated = x_complex * phases
61
+ x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
62
+ return x_embed
63
+
64
+ def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
65
+ """
66
+ Args:
67
+ q (torch.Tensor): [..., N, D] tensor of queries
68
+ k (torch.Tensor): [..., N, D] tensor of keys
69
+ indices (torch.Tensor): [..., N, C] tensor of spatial positions
70
+ """
71
+ if indices is None:
72
+ indices = torch.arange(q.shape[-2], device=q.device)
73
+ if len(q.shape) > 2:
74
+ indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
75
+
76
+ phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
77
+ if phases.shape[1] < self.hidden_size // 2:
78
+ phases = torch.cat([phases, torch.polar(
79
+ torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
80
+ torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
81
+ )], dim=-1)
82
+ q_embed = self._rotary_embedding(q, phases)
83
+ k_embed = self._rotary_embedding(k, phases)
84
+ return q_embed, k_embed
85
+
86
+
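A self-contained sketch of the complex-phase rotation that _get_phases and _rotary_embedding implement: channels are paired into complex numbers and multiplied by unit-magnitude phasors, which encodes position while preserving the norm of every pair.

    import torch

    x = torch.randn(10, 8)                                  # 8 channels = 4 complex pairs
    xc = torch.view_as_complex(x.reshape(10, 4, 2))
    freqs = 1.0 / (10000 ** (torch.arange(4) / 4))          # same schedule as self.freqs
    angles = torch.outer(torch.arange(10, dtype=torch.float32), freqs)
    phases = torch.polar(torch.ones_like(angles), angles)   # unit phasors, one per (position, freq)
    x_rot = torch.view_as_real(xc * phases).reshape(10, 8)
    assert torch.allclose(x.norm(dim=-1), x_rot.norm(dim=-1), atol=1e-5)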
87
+ class MultiHeadAttention(nn.Module):
88
+ def __init__(
89
+ self,
90
+ channels: int,
91
+ num_heads: int,
92
+ ctx_channels: Optional[int]=None,
93
+ type: Literal["self", "cross"] = "self",
94
+ attn_mode: Literal["full", "windowed"] = "full",
95
+ window_size: Optional[int] = None,
96
+ shift_window: Optional[Tuple[int, int, int]] = None,
97
+ qkv_bias: bool = True,
98
+ use_rope: bool = False,
99
+ qk_rms_norm: bool = False,
100
+ ):
101
+ super().__init__()
102
+ assert channels % num_heads == 0
103
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
104
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
105
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
106
+
107
+ if attn_mode == "windowed":
108
+ raise NotImplementedError("Windowed attention is not yet implemented")
109
+
110
+ self.channels = channels
111
+ self.head_dim = channels // num_heads
112
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
113
+ self.num_heads = num_heads
114
+ self._type = type
115
+ self.attn_mode = attn_mode
116
+ self.window_size = window_size
117
+ self.shift_window = shift_window
118
+ self.use_rope = use_rope
119
+ self.qk_rms_norm = qk_rms_norm
120
+
121
+ if self._type == "self":
122
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
123
+ else:
124
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
125
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
126
+
127
+ if self.qk_rms_norm:
128
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
129
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
130
+
131
+ self.to_out = nn.Linear(channels, channels)
132
+
133
+ if use_rope:
134
+ self.rope = RotaryPositionEmbedder(channels)
135
+
136
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
137
+ B, L, C = x.shape
138
+ if self._type == "self":
139
+ qkv = self.to_qkv(x)
140
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
141
+ if self.use_rope:
142
+ q, k, v = qkv.unbind(dim=2)
143
+ q, k = self.rope(q, k, indices)
144
+ qkv = torch.stack([q, k, v], dim=2)
145
+ if self.attn_mode == "full":
146
+ if self.qk_rms_norm:
147
+ q, k, v = qkv.unbind(dim=2)
148
+ q = self.q_rms_norm(q)
149
+ k = self.k_rms_norm(k)
150
+ h = scaled_dot_product_attention(q, k, v)
151
+ else:
152
+ h = scaled_dot_product_attention(qkv)
153
+ elif self.attn_mode == "windowed":
154
+ raise NotImplementedError("Windowed attention is not yet implemented")
155
+ else:
156
+ Lkv = context.shape[1]
157
+ q = self.to_q(x)
158
+ kv = self.to_kv(context)
159
+ q = q.reshape(B, L, self.num_heads, -1)
160
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
161
+ if self.qk_rms_norm:
162
+ q = self.q_rms_norm(q)
163
+ k, v = kv.unbind(dim=2)
164
+ k = self.k_rms_norm(k)
165
+ h = scaled_dot_product_attention(q, k, v)
166
+ else:
167
+ h = scaled_dot_product_attention(q, kv)
168
+ h = h.reshape(B, L, -1)
169
+ h = self.to_out(h)
170
+ return h
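Usage sketch for the module above (shapes only; assumes a backend with no extra dependencies, e.g. ATTN_BACKEND=sdpa exported before import):

    import torch

    self_attn = MultiHeadAttention(channels=128, num_heads=8)
    x = torch.randn(2, 64, 128)
    y = self_attn(x)                                        # [2, 64, 128]

    cross_attn = MultiHeadAttention(channels=128, num_heads=8,
                                    ctx_channels=256, type="cross")
    ctx = torch.randn(2, 32, 256)
    y2 = cross_attn(x, context=ctx)                         # attend x over ctx -> [2, 64, 128]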
Stable3DGen/hi3dgen/modules/norm.py ADDED
@@ -0,0 +1,49 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ import torch
26
+ import torch.nn as nn
27
+
28
+
29
+ class LayerNorm32(nn.LayerNorm):
30
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
31
+ return super().forward(x.float()).type(x.dtype)
32
+
33
+
34
+ class GroupNorm32(nn.GroupNorm):
35
+ """
36
+ A GroupNorm layer that converts to float32 before the forward pass.
37
+ """
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ return super().forward(x.float()).type(x.dtype)
40
+
41
+
42
+ class ChannelLayerNorm32(LayerNorm32):
43
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
44
+ DIM = x.dim()
45
+ x = x.permute(0, *range(2, DIM), 1).contiguous()
46
+ x = super().forward(x)
47
+ x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
48
+ return x
49
+
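The point of these wrappers is numerical stability under mixed precision: statistics are computed in float32 and the result is cast back to the input dtype. A minimal check:

    import torch

    norm = GroupNorm32(num_groups=8, num_channels=32)
    x = torch.randn(2, 32, 16, dtype=torch.float16)
    y = norm(x)                        # normalization runs in fp32 internally
    assert y.dtype == torch.float16    # but the output keeps the input dtype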
Stable3DGen/hi3dgen/modules/sparse/__init__.py ADDED
@@ -0,0 +1,126 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+
27
+ BACKEND = 'spconv'
28
+ DEBUG = False
29
+ ATTN = 'xformers'
30
+
31
+ def __from_env():
32
+ import os
33
+
34
+ global BACKEND
35
+ global DEBUG
36
+ global ATTN
37
+
38
+ env_sparse_backend = os.environ.get('SPARSE_BACKEND')
39
+ env_sparse_debug = os.environ.get('SPARSE_DEBUG')
40
+ env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
41
+ if env_sparse_attn is None:
42
+ env_sparse_attn = os.environ.get('ATTN_BACKEND')
43
+
44
+ if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
45
+ BACKEND = env_sparse_backend
46
+ if env_sparse_debug is not None:
47
+ DEBUG = env_sparse_debug == '1'
48
+ if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
49
+ ATTN = env_sparse_attn
50
+
51
+ print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
52
+
53
+
54
+ __from_env()
55
+
56
+
57
+ def set_backend(backend: Literal['spconv', 'torchsparse']):
58
+ global BACKEND
59
+ BACKEND = backend
60
+
61
+ def set_debug(debug: bool):
62
+ global DEBUG
63
+ DEBUG = debug
64
+
65
+ def set_attn(attn: Literal['xformers', 'flash_attn']):
66
+ global ATTN
67
+ ATTN = attn
68
+
69
+
70
+ import importlib
71
+
72
+ __attributes = {
73
+ 'SparseTensor': 'basic',
74
+ 'sparse_batch_broadcast': 'basic',
75
+ 'sparse_batch_op': 'basic',
76
+ 'sparse_cat': 'basic',
77
+ 'sparse_unbind': 'basic',
78
+ 'SparseGroupNorm': 'norm',
79
+ 'SparseLayerNorm': 'norm',
80
+ 'SparseGroupNorm32': 'norm',
81
+ 'SparseLayerNorm32': 'norm',
82
+ 'SparseReLU': 'nonlinearity',
83
+ 'SparseSiLU': 'nonlinearity',
84
+ 'SparseGELU': 'nonlinearity',
85
+ 'SparseActivation': 'nonlinearity',
86
+ 'SparseLinear': 'linear',
87
+ 'sparse_scaled_dot_product_attention': 'attention',
88
+ 'SerializeMode': 'attention',
89
+ 'sparse_serialized_scaled_dot_product_self_attention': 'attention',
90
+ 'sparse_windowed_scaled_dot_product_self_attention': 'attention',
91
+ 'SparseMultiHeadAttention': 'attention',
92
+ 'SparseConv3d': 'conv',
93
+ 'SparseInverseConv3d': 'conv',
94
+ 'SparseDownsample': 'spatial',
95
+ 'SparseUpsample': 'spatial',
96
+ 'SparseSubdivide' : 'spatial'
97
+ }
98
+
99
+ __submodules = ['transformer']
100
+
101
+ __all__ = list(__attributes.keys()) + __submodules
102
+
103
+ def __getattr__(name):
104
+ if name not in globals():
105
+ if name in __attributes:
106
+ module_name = __attributes[name]
107
+ module = importlib.import_module(f".{module_name}", __name__)
108
+ globals()[name] = getattr(module, name)
109
+ elif name in __submodules:
110
+ module = importlib.import_module(f".{name}", __name__)
111
+ globals()[name] = module
112
+ else:
113
+ raise AttributeError(f"module {__name__} has no attribute {name}")
114
+ return globals()[name]
115
+
116
+
117
+ # For Pylance
118
+ if __name__ == '__main__':
119
+ from .basic import *
120
+ from .norm import *
121
+ from .nonlinearity import *
122
+ from .linear import *
123
+ from .attention import *
124
+ from .conv import *
125
+ from .spatial import *
126
+ from . import transformer
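Usage sketch (import path assumed from this repository's layout): the backends are frozen from the environment at import time, and every name in __attributes is resolved lazily through __getattr__ on first access.

    import os
    os.environ['SPARSE_BACKEND'] = 'spconv'          # read once when the package is imported
    os.environ['SPARSE_ATTN_BACKEND'] = 'xformers'
    from Stable3DGen.hi3dgen.modules import sparse as sp
    lin = sp.SparseLinear(16, 32)   # first access triggers importlib.import_module('.linear', ...)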
Stable3DGen/hi3dgen/modules/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/__pycache__/basic.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/__pycache__/linear.cpython-310.pyc ADDED
Binary file (887 Bytes). View file
 
Stable3DGen/hi3dgen/modules/sparse/__pycache__/nonlinearity.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/__pycache__/norm.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/__pycache__/spatial.cpython-310.pyc ADDED
Binary file (4.97 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/__init__.py ADDED
@@ -0,0 +1,28 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from .full_attn import *
26
+ from .serialized_attn import *
27
+ from .windowed_attn import *
28
+ from .modules import *
Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (291 Bytes). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/full_attn.cpython-310.pyc ADDED
Binary file (7.3 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/modules.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/serialized_attn.cpython-310.pyc ADDED
Binary file (5.98 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/__pycache__/windowed_attn.cpython-310.pyc ADDED
Binary file (4.71 kB). View file
 
Stable3DGen/hi3dgen/modules/sparse/attention/full_attn.py ADDED
@@ -0,0 +1,239 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+ import torch
27
+ from .. import SparseTensor
28
+ from .. import DEBUG, ATTN
29
+
30
+ if ATTN == 'xformers':
31
+ import xformers.ops as xops
32
+ elif ATTN == 'flash_attn':
33
+ import flash_attn
34
+ else:
35
+ raise ValueError(f"Unknown attention module: {ATTN}")
36
+
37
+
38
+ __all__ = [
39
+ 'sparse_scaled_dot_product_attention',
40
+ ]
41
+
42
+
43
+ @overload
44
+ def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
45
+ """
46
+ Apply scaled dot product attention to a sparse tensor.
47
+
48
+ Args:
49
+ qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
50
+ """
51
+ ...
52
+
53
+ @overload
54
+ def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
55
+ """
56
+ Apply scaled dot product attention to a sparse tensor.
57
+
58
+ Args:
59
+ q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
60
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
61
+ """
62
+ ...
63
+
64
+ @overload
65
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
66
+ """
67
+ Apply scaled dot product attention to a sparse tensor.
68
+
69
+ Args:
70
+ q (torch.Tensor): A [N, L, H, C] dense tensor containing Qs.
71
+ kv (SparseTensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
72
+ """
73
+ ...
74
+
75
+ @overload
76
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
77
+ """
78
+ Apply scaled dot product attention to a sparse tensor.
79
+
80
+ Args:
81
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
82
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
83
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
84
+
85
+ Note:
86
+ k and v are assumed to have the same coordinate map.
87
+ """
88
+ ...
89
+
90
+ @overload
91
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
92
+ """
93
+ Apply scaled dot product attention to a sparse tensor.
94
+
95
+ Args:
96
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
97
+ k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
98
+ v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
99
+ """
100
+ ...
101
+
102
+ @overload
103
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
104
+ """
105
+ Apply scaled dot product attention to a sparse tensor.
106
+
107
+ Args:
108
+ q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
109
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
110
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
111
+ """
112
+ ...
113
+
114
+ def sparse_scaled_dot_product_attention(*args, **kwargs):
115
+ arg_names_dict = {
116
+ 1: ['qkv'],
117
+ 2: ['q', 'kv'],
118
+ 3: ['q', 'k', 'v']
119
+ }
120
+ num_all_args = len(args) + len(kwargs)
121
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
122
+ for key in arg_names_dict[num_all_args][len(args):]:
123
+ assert key in kwargs, f"Missing argument {key}"
124
+
125
+ if num_all_args == 1:
126
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
127
+ assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
128
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
129
+ device = qkv.device
130
+
131
+ s = qkv
132
+ q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
133
+ kv_seqlen = q_seqlen
134
+ qkv = qkv.feats # [T, 3, H, C]
135
+
136
+ elif num_all_args == 2:
137
+ q = args[0] if len(args) > 0 else kwargs['q']
138
+ kv = args[1] if len(args) > 1 else kwargs['kv']
139
+ assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
140
+ isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
141
+ f"Invalid types, got {type(q)} and {type(kv)}"
142
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
143
+ device = q.device
144
+
145
+ if isinstance(q, SparseTensor):
146
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
147
+ s = q
148
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
149
+ q = q.feats # [T_Q, H, C]
150
+ else:
151
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
152
+ s = None
153
+ N, L, H, C = q.shape
154
+ q_seqlen = [L] * N
155
+ q = q.reshape(N * L, H, C) # [T_Q, H, C]
156
+
157
+ if isinstance(kv, SparseTensor):
158
+ assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
159
+ kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
160
+ kv = kv.feats # [T_KV, 2, H, C]
161
+ else:
162
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
163
+ N, L, _, H, C = kv.shape
164
+ kv_seqlen = [L] * N
165
+ kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
166
+
167
+ elif num_all_args == 3:
168
+ q = args[0] if len(args) > 0 else kwargs['q']
169
+ k = args[1] if len(args) > 1 else kwargs['k']
170
+ v = args[2] if len(args) > 2 else kwargs['v']
171
+ assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
172
+ isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
173
+ f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
174
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
175
+ device = q.device
176
+
177
+ if isinstance(q, SparseTensor):
178
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
179
+ s = q
180
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
181
+ q = q.feats # [T_Q, H, Ci]
182
+ else:
183
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
184
+ s = None
185
+ N, L, H, CI = q.shape
186
+ q_seqlen = [L] * N
187
+ q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
188
+
189
+ if isinstance(k, SparseTensor):
190
+ assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
191
+ assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
192
+ kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
193
+ k = k.feats # [T_KV, H, Ci]
194
+ v = v.feats # [T_KV, H, Co]
195
+ else:
196
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
197
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
198
+ N, L, H, CI, CO = *k.shape, v.shape[-1]
199
+ kv_seqlen = [L] * N
200
+ k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
201
+ v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
202
+
203
+ if DEBUG:
204
+ if s is not None:
205
+ for i in range(s.shape[0]):
206
+ assert (s.coords[s.layout[i], 0] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
207
+ if num_all_args in [2, 3]:
208
+ assert q.shape[0] == sum(q_seqlen), f"SparseScaledDotProductSelfAttention: q shape mismatch"
209
+ if num_all_args == 3:
210
+ assert k.shape[0] == sum(kv_seqlen), f"SparseScaledDotProductSelfAttention: k shape mismatch"
211
+ assert v.shape[0] == sum(kv_seqlen), f"SparseScaledDotProductSelfAttention: v shape mismatch"
212
+
213
+ if ATTN == 'xformers':
214
+ if num_all_args == 1:
215
+ q, k, v = qkv.unbind(dim=1)
216
+ elif num_all_args == 2:
217
+ k, v = kv.unbind(dim=1)
218
+ q = q.unsqueeze(0)
219
+ k = k.unsqueeze(0)
220
+ v = v.unsqueeze(0)
221
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
222
+ out = xops.memory_efficient_attention(q, k, v, mask)[0]
223
+ elif ATTN == 'flash_attn':
224
+ cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
225
+ if num_all_args in [2, 3]:
226
+ cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
227
+ if num_all_args == 1:
228
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
229
+ elif num_all_args == 2:
230
+ out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
231
+ elif num_all_args == 3:
232
+ out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
233
+ else:
234
+ raise ValueError(f"Unknown attention module: {ATTN}")
235
+
236
+ if s is not None:
237
+ return s.replace(out)
238
+ else:
239
+ return out.reshape(N, L, H, -1)
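A standalone sketch of the variable-length bookkeeping above: per-sample token counts taken from the sparse layout become the prefix-sum offsets consumed by flash_attn's varlen kernels (xformers gets the same information via BlockDiagonalMask.from_seqlens).

    import torch

    q_seqlen = [5, 3, 7]                   # tokens per batch element, read from the layout
    cu_seqlens = torch.cat([torch.tensor([0]),
                            torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int()
    print(cu_seqlens.tolist())             # [0, 5, 8, 15]: row offsets into the packed [T, H, C] feats
    max_seqlen = max(q_seqlen)             # paired with cu_seqlens in the varlen calls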
Stable3DGen/hi3dgen/modules/sparse/attention/modules.py ADDED
@@ -0,0 +1,163 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+ import torch
27
+ import torch.nn as nn
28
+ import torch.nn.functional as F
29
+ from .. import SparseTensor
30
+ from .full_attn import sparse_scaled_dot_product_attention
31
+ from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
32
+ from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
33
+ from ...attention import RotaryPositionEmbedder
34
+
35
+
36
+ class SparseMultiHeadRMSNorm(nn.Module):
37
+ def __init__(self, dim: int, heads: int):
38
+ super().__init__()
39
+ self.scale = dim ** 0.5
40
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
41
+
42
+ def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
43
+ x_type = x.dtype
44
+ x = x.float()
45
+ if isinstance(x, SparseTensor):
46
+ x = x.replace(F.normalize(x.feats, dim=-1))
47
+ else:
48
+ x = F.normalize(x, dim=-1)
49
+ return (x * self.gamma * self.scale).to(x_type)
50
+
51
+
52
+ class SparseMultiHeadAttention(nn.Module):
53
+ def __init__(
54
+ self,
55
+ channels: int,
56
+ num_heads: int,
57
+ ctx_channels: Optional[int] = None,
58
+ type: Literal["self", "cross"] = "self",
59
+ attn_mode: Literal["full", "serialized", "windowed"] = "full",
60
+ window_size: Optional[int] = None,
61
+ shift_sequence: Optional[int] = None,
62
+ shift_window: Optional[Tuple[int, int, int]] = None,
63
+ serialize_mode: Optional[SerializeMode] = None,
64
+ qkv_bias: bool = True,
65
+ use_rope: bool = False,
66
+ qk_rms_norm: bool = False,
67
+ ):
68
+ super().__init__()
69
+ assert channels % num_heads == 0
70
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
71
+ assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
72
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
73
+ assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
74
+ self.channels = channels
75
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
76
+ self.num_heads = num_heads
77
+ self._type = type
78
+ self.attn_mode = attn_mode
79
+ self.window_size = window_size
80
+ self.shift_sequence = shift_sequence
81
+ self.shift_window = shift_window
82
+ self.serialize_mode = serialize_mode
83
+ self.use_rope = use_rope
84
+ self.qk_rms_norm = qk_rms_norm
85
+
86
+ if self._type == "self":
87
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
88
+ else:
89
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
90
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
91
+
92
+ if self.qk_rms_norm:
93
+ self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
94
+ self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
95
+
96
+ self.to_out = nn.Linear(channels, channels)
97
+
98
+ if use_rope:
99
+ self.rope = RotaryPositionEmbedder(channels)
100
+
101
+ @staticmethod
102
+ def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
103
+ if isinstance(x, SparseTensor):
104
+ return x.replace(module(x.feats))
105
+ else:
106
+ return module(x)
107
+
108
+ @staticmethod
109
+ def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
110
+ if isinstance(x, SparseTensor):
111
+ return x.reshape(*shape)
112
+ else:
113
+ return x.reshape(*x.shape[:2], *shape)
114
+
115
+ def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
116
+ if isinstance(x, SparseTensor):
117
+ x_feats = x.feats.unsqueeze(0)
118
+ else:
119
+ x_feats = x
120
+ x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
121
+ return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
122
+
123
+ def _rope(self, qkv: SparseTensor) -> SparseTensor:
124
+ q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
125
+ q, k = self.rope(q, k, qkv.coords[:, 1:])
126
+ qkv = qkv.replace(torch.stack([q, k, v], dim=1))
127
+ return qkv
128
+
129
+ def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
130
+ if self._type == "self":
131
+ qkv = self._linear(self.to_qkv, x)
132
+ qkv = self._fused_pre(qkv, num_fused=3)
133
+ if self.use_rope:
134
+ qkv = self._rope(qkv)
135
+ if self.qk_rms_norm:
136
+ q, k, v = qkv.unbind(dim=1)
137
+ q = self.q_rms_norm(q)
138
+ k = self.k_rms_norm(k)
139
+ qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
140
+ if self.attn_mode == "full":
141
+ h = sparse_scaled_dot_product_attention(qkv)
142
+ elif self.attn_mode == "serialized":
143
+ h = sparse_serialized_scaled_dot_product_self_attention(
144
+ qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
145
+ )
146
+ elif self.attn_mode == "windowed":
147
+ h = sparse_windowed_scaled_dot_product_self_attention(
148
+ qkv, self.window_size, shift_window=self.shift_window
149
+ )
150
+ else:
151
+ q = self._linear(self.to_q, x)
152
+ q = self._reshape_chs(q, (self.num_heads, -1))
153
+ kv = self._linear(self.to_kv, context)
154
+ kv = self._fused_pre(kv, num_fused=2)
155
+ if self.qk_rms_norm:
156
+ q = self.q_rms_norm(q)
157
+ k, v = kv.unbind(dim=1)
158
+ k = self.k_rms_norm(k)
159
+ kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
160
+ h = sparse_scaled_dot_product_attention(q, kv)
161
+ h = self._reshape_chs(h, (-1,))
162
+ h = self._linear(self.to_out, h)
163
+ return h
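A dense sketch of what _fused_pre does on the self-attention path: the to_qkv output is folded into explicit (fused, head, channel) axes before the q/k/v unbind, mirroring the [T, 3, H, C] layout stored in the SparseTensor's feats.

    import torch

    channels, num_heads, num_fused = 64, 8, 3
    feats = torch.randn(100, channels * num_fused)        # to_qkv output for T=100 sparse tokens
    qkv = feats.reshape(100, num_fused, num_heads, -1)    # [T, 3, H, C]
    q, k, v = qkv.unbind(dim=1)                           # [T, H, C] each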
Stable3DGen/hi3dgen/modules/sparse/attention/serialized_attn.py ADDED
@@ -0,0 +1,217 @@
1
+ # MIT License
2
+
3
+ # Copyright (c) Microsoft
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # Copyright (c) [2025] [Microsoft]
24
+ # SPDX-License-Identifier: MIT
25
+ from typing import *
26
+ from enum import Enum
27
+ import torch
28
+ import math
29
+ from .. import SparseTensor
30
+ from .. import DEBUG, ATTN
31
+
32
+ if ATTN == 'xformers':
33
+ import xformers.ops as xops
34
+ elif ATTN == 'flash_attn':
35
+ import flash_attn
36
+ else:
37
+ raise ValueError(f"Unknown attention module: {ATTN}")
38
+
39
+
40
+ __all__ = [
41
+ 'sparse_serialized_scaled_dot_product_self_attention',
42
+ ]
43
+
44
+
45
+ class SerializeMode(Enum):
46
+ Z_ORDER = 0
47
+ Z_ORDER_TRANSPOSED = 1
48
+ HILBERT = 2
49
+ HILBERT_TRANSPOSED = 3
50
+
51
+
52
+ SerializeModes = [
53
+ SerializeMode.Z_ORDER,
54
+ SerializeMode.Z_ORDER_TRANSPOSED,
55
+ SerializeMode.HILBERT,
56
+ SerializeMode.HILBERT_TRANSPOSED
57
+ ]
58
+
59
+
60
+ def calc_serialization(
61
+ tensor: SparseTensor,
62
+ window_size: int,
63
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
64
+ shift_sequence: int = 0,
65
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
66
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
67
+ """
68
+ Calculate serialization and partitioning for a set of coordinates.
69
+
70
+ Args:
71
+ tensor (SparseTensor): The input tensor.
72
+ window_size (int): The window size to use.
73
+ serialize_mode (SerializeMode): The serialization mode to use.
74
+ shift_sequence (int): The shift of serialized sequence.
75
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
76
+
77
+ Returns:
78
+ (torch.Tensor, torch.Tensor, List[int], List[int]): Forward indices, backward indices, sequence lengths, and sequence batch indices.
79
+ """
80
+ fwd_indices = []
81
+ bwd_indices = []
82
+ seq_lens = []
83
+ seq_batch_indices = []
84
+ offsets = [0]
85
+
86
+ if 'vox2seq' not in globals():
87
+ import vox2seq
88
+
89
+ # Serialize the input
90
+ serialize_coords = tensor.coords[:, 1:].clone()
91
+ serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
92
+ if serialize_mode == SerializeMode.Z_ORDER:
93
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
94
+ elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
95
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
96
+ elif serialize_mode == SerializeMode.HILBERT:
97
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
98
+ elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
99
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
100
+ else:
101
+ raise ValueError(f"Unknown serialize mode: {serialize_mode}")
102
+
103
+ for bi, s in enumerate(tensor.layout):
104
+ num_points = s.stop - s.start
105
+ num_windows = (num_points + window_size - 1) // window_size
106
+ valid_window_size = num_points / num_windows
107
+ to_ordered = torch.argsort(code[s.start:s.stop])
108
+ if num_windows == 1:
109
+ fwd_indices.append(to_ordered)
110
+ bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
111
+ fwd_indices[-1] += s.start
112
+ bwd_indices[-1] += offsets[-1]
113
+ seq_lens.append(num_points)
114
+ seq_batch_indices.append(bi)
115
+ offsets.append(offsets[-1] + seq_lens[-1])
116
+ else:
117
+ # Partition the input
118
+ offset = 0
119
+ mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
120
+ split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
121
+ bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
122
+ for i in range(num_windows):
123
+ mid = mids[i]
124
+ valid_start = split[i]
125
+ valid_end = split[i + 1]
126
+ padded_start = math.floor(mid - 0.5 * window_size)
127
+ padded_end = padded_start + window_size
128
+ fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
129
+ offset += valid_start - padded_start
130
+ bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
131
+ offset += padded_end - valid_start
132
+ fwd_indices[-1] += s.start
133
+ seq_lens.extend([window_size] * num_windows)
134
+ seq_batch_indices.extend([bi] * num_windows)
135
+ bwd_indices.append(bwd_index + offsets[-1])
136
+ offsets.append(offsets[-1] + num_windows * window_size)
137
+
138
+ fwd_indices = torch.cat(fwd_indices)
139
+ bwd_indices = torch.cat(bwd_indices)
140
+
141
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
142
+
143
+
144
+ def sparse_serialized_scaled_dot_product_self_attention(
145
+ qkv: SparseTensor,
146
+ window_size: int,
147
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
148
+ shift_sequence: int = 0,
149
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
150
+ ) -> SparseTensor:
151
+ """
152
+ Apply serialized scaled dot product self attention to a sparse tensor.
153
+
154
+ Args:
155
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
156
+ window_size (int): The window size to use.
157
+ serialize_mode (SerializeMode): The serialization mode to use.
158
+ shift_sequence (int): The shift of serialized sequence.
159
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
160
+ shift (int): The shift to use.
161
+ """
162
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
163
+
164
+ serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
165
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
166
+ if serialization_spatial_cache is None:
167
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
168
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
169
+ else:
170
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
171
+
172
+ M = fwd_indices.shape[0]
173
+ T = qkv.feats.shape[0]
174
+ H = qkv.feats.shape[2]
175
+ C = qkv.feats.shape[3]
176
+
177
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
178
+
179
+ if DEBUG:
180
+ start = 0
181
+ qkv_coords = qkv.coords[fwd_indices]
182
+ for i in range(len(seq_lens)):
183
+ assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
184
+ start += seq_lens[i]
185
+
186
+ if all([seq_len == window_size for seq_len in seq_lens]):
187
+ B = len(seq_lens)
188
+ N = window_size
189
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
190
+ if ATTN == 'xformers':
191
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
192
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
193
+ elif ATTN == 'flash_attn':
194
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
195
+ else:
196
+ raise ValueError(f"Unknown attention module: {ATTN}")
197
+ out = out.reshape(B * N, H, C) # [M, H, C]
198
+ else:
199
+ if ATTN == 'xformers':
200
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
201
+ q = q.unsqueeze(0) # [1, M, H, C]
202
+ k = k.unsqueeze(0) # [1, M, H, C]
203
+ v = v.unsqueeze(0) # [1, M, H, C]
204
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
205
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
206
+ elif ATTN == 'flash_attn':
207
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
208
+ .to(qkv.device).int()
209
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
210
+
211
+ out = out[bwd_indices] # [T, H, C]
212
+
213
+ if DEBUG:
214
+ qkv_coords = qkv_coords[bwd_indices]
215
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
216
+
217
+ return qkv.replace(out)
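A hypothetical usage sketch for the serialized attention entry point above (illustrative only, not part of this commit; assumes the hi3dgen import paths below, a CUDA device, and a configured xformers or flash_attn backend):

    # Build a single-batch sparse QKV tensor over 1024 random voxels and run
    # Hilbert-serialized windowed self-attention on it.
    import torch
    from Stable3DGen.hi3dgen.modules.sparse import SparseTensor
    from Stable3DGen.hi3dgen.modules.sparse.attention.serialized_attn import (
        SerializeMode,
        sparse_serialized_scaled_dot_product_self_attention,
    )

    coords = torch.randint(0, 64, (1024, 3), dtype=torch.int32)
    coords = torch.cat([torch.zeros(1024, 1, dtype=torch.int32), coords], dim=1)  # prepend batch id 0
    qkv = SparseTensor(feats=torch.randn(1024, 3, 8, 64).cuda(), coords=coords.cuda())  # [N, 3, H, C]

    out = sparse_serialized_scaled_dot_product_self_attention(
        qkv,
        window_size=512,                       # 1024 points -> 2 full windows
        serialize_mode=SerializeMode.HILBERT,
        shift_sequence=256,                    # shifted-window variant
        shift_window=(8, 8, 8),
    )                                          # SparseTensor; out.feats has shape [1024, 8, 64]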
Stable3DGen/hi3dgen/modules/sparse/attention/windowed_attn.py ADDED
@@ -0,0 +1,159 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # SPDX-License-Identifier: MIT
+ from typing import *
+ import torch
+ import math
+ from .. import SparseTensor
+ from .. import DEBUG, ATTN
+
+ if ATTN == 'xformers':
+     import xformers.ops as xops
+ elif ATTN == 'flash_attn':
+     import flash_attn
+ else:
+     raise ValueError(f"Unknown attention module: {ATTN}")
+
+
+ __all__ = [
+     'sparse_windowed_scaled_dot_product_self_attention',
+ ]
+
+
+ def calc_window_partition(
+     tensor: SparseTensor,
+     window_size: Union[int, Tuple[int, ...]],
+     shift_window: Union[int, Tuple[int, ...]] = 0
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
+     """
+     Calculate window partitioning for a set of coordinates.
+
+     Args:
+         tensor (SparseTensor): The input tensor.
+         window_size (Union[int, Tuple[int, ...]]): The window size to use.
+         shift_window (Union[int, Tuple[int, ...]]): The shift applied to the coordinates before windowing.
+
+     Returns:
+         (torch.Tensor): Forwards indices.
+         (torch.Tensor): Backwards indices.
+         (List[int]): Sequence lengths.
+         (List[int]): Sequence batch indices.
+     """
+     DIM = tensor.coords.shape[1] - 1
+     shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
+     window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
+     shifted_coords = tensor.coords.clone().detach()
+     shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
+
+     MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
+     NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
+     OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
+
+     shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
+     shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
+     fwd_indices = torch.argsort(shifted_indices)
+     bwd_indices = torch.empty_like(fwd_indices)
+     bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
+     seq_lens = torch.bincount(shifted_indices)
+     seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
+     mask = seq_lens != 0
+     seq_lens = seq_lens[mask].tolist()
+     seq_batch_indices = seq_batch_indices[mask].tolist()
+
+     return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
+
+
+ def sparse_windowed_scaled_dot_product_self_attention(
+     qkv: SparseTensor,
+     window_size: int,
+     shift_window: Tuple[int, int, int] = (0, 0, 0)
+ ) -> SparseTensor:
+     """
+     Apply windowed scaled dot product self attention to a sparse tensor.
+
+     Args:
+         qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
+         window_size (int): The window size to use.
+         shift_window (Tuple[int, int, int]): The shift applied to the coordinates before windowing.
+     """
+     assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
+
+     serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}'
+     serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
+     if serialization_spatial_cache is None:
+         fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window)
+         qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
+     else:
+         fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
+
+     M = fwd_indices.shape[0]
+     T = qkv.feats.shape[0]
+     H = qkv.feats.shape[2]
+     C = qkv.feats.shape[3]
+
+     qkv_feats = qkv.feats[fwd_indices]  # [M, 3, H, C]
+
+     if DEBUG:
+         start = 0
+         qkv_coords = qkv.coords[fwd_indices]
+         for i in range(len(seq_lens)):
+             seq_coords = qkv_coords[start:start+seq_lens[i]]
+             assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
+             assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \
+                 f"SparseWindowedScaledDotProductSelfAttention: window size exceeded"
+             start += seq_lens[i]
+
+     if all([seq_len == window_size for seq_len in seq_lens]):
+         B = len(seq_lens)
+         N = window_size
+         qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
+         if ATTN == 'xformers':
+             q, k, v = qkv_feats.unbind(dim=2)  # [B, N, H, C]
+             out = xops.memory_efficient_attention(q, k, v)  # [B, N, H, C]
+         elif ATTN == 'flash_attn':
+             out = flash_attn.flash_attn_qkvpacked_func(qkv_feats)  # [B, N, H, C]
+         else:
+             raise ValueError(f"Unknown attention module: {ATTN}")
+         out = out.reshape(B * N, H, C)  # [M, H, C]
+     else:
+         if ATTN == 'xformers':
+             q, k, v = qkv_feats.unbind(dim=1)  # [M, H, C]
+             q = q.unsqueeze(0)  # [1, M, H, C]
+             k = k.unsqueeze(0)  # [1, M, H, C]
+             v = v.unsqueeze(0)  # [1, M, H, C]
+             mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
+             out = xops.memory_efficient_attention(q, k, v, mask)[0]  # [M, H, C]
+         elif ATTN == 'flash_attn':
+             cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
+                 .to(qkv.device).int()
+             out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens))  # [M, H, C]
+
+     out = out[bwd_indices]  # [T, H, C]
+
+     if DEBUG:
+         qkv_coords = qkv_coords[bwd_indices]
+         assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
+
+     return qkv.replace(out)
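The flat window id computed above is batch-major, then row-major over the window-grid axes. A self-contained sketch (hypothetical coordinates; it mirrors, rather than imports, the arithmetic of calc_window_partition) that can be checked by hand:

    import math
    import torch

    coords = torch.tensor([[0, 0, 1, 2],    # [batch, x, y, z]
                           [0, 7, 7, 7],
                           [0, 8, 0, 0]], dtype=torch.int32)
    window_size = (8, 8, 8)

    max_coords = coords[:, 1:].max(dim=0).values.tolist()                                # [8, 7, 7]
    num_windows = [math.ceil((mc + 1) / ws) for mc, ws in zip(max_coords, window_size)]  # [2, 1, 1]
    offset = torch.cumprod(torch.tensor([1] + num_windows[::-1]), dim=0).tolist()[::-1]  # [2, 1, 1, 1]

    win = coords.clone()
    win[:, 1:] //= torch.tensor(window_size, dtype=torch.int32)   # window-grid cell per voxel
    flat = (win * torch.tensor(offset, dtype=torch.int32)).sum(dim=1)
    print(flat.tolist())  # [0, 0, 1] -- the first two voxels share a window, the third starts a new one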
Stable3DGen/hi3dgen/modules/sparse/basic.py ADDED
@@ -0,0 +1,483 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # SPDX-License-Identifier: MIT
+ from typing import *
+ import torch
+ import torch.nn as nn
+ from . import BACKEND, DEBUG
+ SparseTensorData = None  # Lazy import
+
+
+ __all__ = [
+     'SparseTensor',
+     'sparse_batch_broadcast',
+     'sparse_batch_op',
+     'sparse_cat',
+     'sparse_unbind',
+ ]
+
+
+ class SparseTensor:
+     """
+     Sparse tensor with support for both torchsparse and spconv backends.
+
+     Parameters:
+     - feats (torch.Tensor): Features of the sparse tensor.
+     - coords (torch.Tensor): Coordinates of the sparse tensor.
+     - shape (torch.Size): Shape of the sparse tensor.
+     - layout (List[slice]): Layout of the sparse tensor for each batch.
+     - data (SparseTensorData): Backend sparse tensor data used for convolution.
+
+     NOTE:
+     - Data corresponding to the same batch must be contiguous.
+     - Coords must be in [0, 1023].
+     """
+     @overload
+     def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
+
+     @overload
+     def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
+
+     def __init__(self, *args, **kwargs):
+         # Lazy import of sparse tensor backend
+         global SparseTensorData
+         if SparseTensorData is None:
+             import importlib
+             if BACKEND == 'torchsparse':
+                 SparseTensorData = importlib.import_module('torchsparse').SparseTensor
+             elif BACKEND == 'spconv':
+                 SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor
+
+         method_id = 0
+         if len(args) != 0:
+             method_id = 0 if isinstance(args[0], torch.Tensor) else 1
+         else:
+             method_id = 1 if 'data' in kwargs else 0
+
+         if method_id == 0:
+             feats, coords, shape, layout = args + (None,) * (4 - len(args))
+             if 'feats' in kwargs:
+                 feats = kwargs['feats']
+                 del kwargs['feats']
+             if 'coords' in kwargs:
+                 coords = kwargs['coords']
+                 del kwargs['coords']
+             if 'shape' in kwargs:
+                 shape = kwargs['shape']
+                 del kwargs['shape']
+             if 'layout' in kwargs:
+                 layout = kwargs['layout']
+                 del kwargs['layout']
+
+             if shape is None:
+                 shape = self.__cal_shape(feats, coords)
+             if layout is None:
+                 layout = self.__cal_layout(coords, shape[0])
+             if BACKEND == 'torchsparse':
+                 self.data = SparseTensorData(feats, coords, **kwargs)
+             elif BACKEND == 'spconv':
+                 spatial_shape = list(coords.max(0)[0] + 1)[1:]
+                 self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs)
+                 self.data._features = feats
+         elif method_id == 1:
+             data, shape, layout = args + (None,) * (3 - len(args))
+             if 'data' in kwargs:
+                 data = kwargs['data']
+                 del kwargs['data']
+             if 'shape' in kwargs:
+                 shape = kwargs['shape']
+                 del kwargs['shape']
+             if 'layout' in kwargs:
+                 layout = kwargs['layout']
+                 del kwargs['layout']
+
+             self.data = data
+             if shape is None:
+                 shape = self.__cal_shape(self.feats, self.coords)
+             if layout is None:
+                 layout = self.__cal_layout(self.coords, shape[0])
+
+         self._shape = shape
+         self._layout = layout
+         self._scale = kwargs.get('scale', (1, 1, 1))
+         self._spatial_cache = kwargs.get('spatial_cache', {})
+
+         if DEBUG:
+             try:
+                 assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}"
+                 assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}"
+                 assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}"
+                 for i in range(self.shape[0]):
+                     assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous"
+             except Exception as e:
+                 print('Debugging information:')
+                 print(f"- Shape: {self.shape}")
+                 print(f"- Layout: {self.layout}")
+                 print(f"- Scale: {self._scale}")
+                 print(f"- Coords: {self.coords}")
+                 raise e
+
+     def __cal_shape(self, feats, coords):
+         shape = []
+         shape.append(coords[:, 0].max().item() + 1)
+         shape.extend([*feats.shape[1:]])
+         return torch.Size(shape)
+
+     def __cal_layout(self, coords, batch_size):
+         seq_len = torch.bincount(coords[:, 0], minlength=batch_size)
+         offset = torch.cumsum(seq_len, dim=0)
+         layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
+         return layout
+
+     @property
+     def shape(self) -> torch.Size:
+         return self._shape
+
+     def dim(self) -> int:
+         return len(self.shape)
+
+     @property
+     def layout(self) -> List[slice]:
+         return self._layout
+
+     @property
+     def feats(self) -> torch.Tensor:
+         if BACKEND == 'torchsparse':
+             return self.data.F
+         elif BACKEND == 'spconv':
+             return self.data.features
+
+     @feats.setter
+     def feats(self, value: torch.Tensor):
+         if BACKEND == 'torchsparse':
+             self.data.F = value
+         elif BACKEND == 'spconv':
+             self.data.features = value
+
+     @property
+     def coords(self) -> torch.Tensor:
+         if BACKEND == 'torchsparse':
+             return self.data.C
+         elif BACKEND == 'spconv':
+             return self.data.indices
+
+     @coords.setter
+     def coords(self, value: torch.Tensor):
+         if BACKEND == 'torchsparse':
+             self.data.C = value
+         elif BACKEND == 'spconv':
+             self.data.indices = value
+
+     @property
+     def dtype(self):
+         return self.feats.dtype
+
+     @property
+     def device(self):
+         return self.feats.device
+
+     @overload
+     def to(self, dtype: torch.dtype) -> 'SparseTensor': ...
+
+     @overload
+     def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ...
+
+     def to(self, *args, **kwargs) -> 'SparseTensor':
+         device = None
+         dtype = None
+         if len(args) == 2:
+             device, dtype = args
+         elif len(args) == 1:
+             if isinstance(args[0], torch.dtype):
+                 dtype = args[0]
+             else:
+                 device = args[0]
+         if 'dtype' in kwargs:
+             assert dtype is None, "to() received multiple values for argument 'dtype'"
+             dtype = kwargs['dtype']
+         if 'device' in kwargs:
+             assert device is None, "to() received multiple values for argument 'device'"
+             device = kwargs['device']
+
+         new_feats = self.feats.to(device=device, dtype=dtype)
+         new_coords = self.coords.to(device=device)
+         return self.replace(new_feats, new_coords)
+
+     def type(self, dtype):
+         new_feats = self.feats.type(dtype)
+         return self.replace(new_feats)
+
+     def cpu(self) -> 'SparseTensor':
+         new_feats = self.feats.cpu()
+         new_coords = self.coords.cpu()
+         return self.replace(new_feats, new_coords)
+
+     def cuda(self) -> 'SparseTensor':
+         new_feats = self.feats.cuda()
+         new_coords = self.coords.cuda()
+         return self.replace(new_feats, new_coords)
+
+     def half(self) -> 'SparseTensor':
+         new_feats = self.feats.half()
+         return self.replace(new_feats)
+
+     def float(self) -> 'SparseTensor':
+         new_feats = self.feats.float()
+         return self.replace(new_feats)
+
+     def detach(self) -> 'SparseTensor':
+         new_coords = self.coords.detach()
+         new_feats = self.feats.detach()
+         return self.replace(new_feats, new_coords)
+
+     def dense(self) -> torch.Tensor:
+         if BACKEND == 'torchsparse':
+             return self.data.dense()
+         elif BACKEND == 'spconv':
+             return self.data.dense()
+
+     def reshape(self, *shape) -> 'SparseTensor':
+         new_feats = self.feats.reshape(self.feats.shape[0], *shape)
+         return self.replace(new_feats)
+
+     def unbind(self, dim: int) -> List['SparseTensor']:
+         return sparse_unbind(self, dim)
+
+     def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor':
+         new_shape = [self.shape[0]]
+         new_shape.extend(feats.shape[1:])
+         if BACKEND == 'torchsparse':
+             new_data = SparseTensorData(
+                 feats=feats,
+                 coords=self.data.coords if coords is None else coords,
+                 stride=self.data.stride,
+                 spatial_range=self.data.spatial_range,
+             )
+             new_data._caches = self.data._caches
+         elif BACKEND == 'spconv':
+             new_data = SparseTensorData(
+                 self.data.features.reshape(self.data.features.shape[0], -1),
+                 self.data.indices,
+                 self.data.spatial_shape,
+                 self.data.batch_size,
+                 self.data.grid,
+                 self.data.voxel_num,
+                 self.data.indice_dict
+             )
+             new_data._features = feats
+             new_data.benchmark = self.data.benchmark
+             new_data.benchmark_record = self.data.benchmark_record
+             new_data.thrust_allocator = self.data.thrust_allocator
+             new_data._timer = self.data._timer
+             new_data.force_algo = self.data.force_algo
+             new_data.int8_scale = self.data.int8_scale
+             if coords is not None:
+                 new_data.indices = coords
+         new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache)
+         return new_tensor
+
+     @staticmethod
+     def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor':
+         N, C = dim
+         x = torch.arange(aabb[0], aabb[3] + 1)
+         y = torch.arange(aabb[1], aabb[4] + 1)
+         z = torch.arange(aabb[2], aabb[5] + 1)
+         coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3)
+         coords = torch.cat([
+             torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1),
+             coords.repeat(N, 1),
+         ], dim=1).to(dtype=torch.int32, device=device)
+         feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device)
+         return SparseTensor(feats=feats, coords=coords)
+
+     def __merge_sparse_cache(self, other: 'SparseTensor') -> dict:
+         new_cache = {}
+         for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())):
+             if k in self._spatial_cache:
+                 new_cache[k] = self._spatial_cache[k]
+             if k in other._spatial_cache:
+                 if k not in new_cache:
+                     new_cache[k] = other._spatial_cache[k]
+                 else:
+                     new_cache[k].update(other._spatial_cache[k])
+         return new_cache
+
+     def __neg__(self) -> 'SparseTensor':
+         return self.replace(-self.feats)
+
+     def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor':
+         if isinstance(other, torch.Tensor):
+             try:
+                 other = torch.broadcast_to(other, self.shape)
+                 other = sparse_batch_broadcast(self, other)
+             except Exception:
+                 pass
+         # Keep a handle on a SparseTensor operand so its spatial cache can be merged
+         # after `other` is rebound to its raw features below.
+         other_tensor = other if isinstance(other, SparseTensor) else None
+         if other_tensor is not None:
+             other = other_tensor.feats
+         new_feats = op(self.feats, other)
+         new_tensor = self.replace(new_feats)
+         if other_tensor is not None:
+             new_tensor._spatial_cache = self.__merge_sparse_cache(other_tensor)
+         return new_tensor
+
+     def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.add)
+
+     def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.add)
+
+     def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.sub)
+
+     def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, lambda x, y: torch.sub(y, x))
+
+     def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.mul)
+
+     def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.mul)
+
+     def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, torch.div)
+
+     def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
+         return self.__elemwise__(other, lambda x, y: torch.div(y, x))
+
+     def __getitem__(self, idx):
+         if isinstance(idx, int):
+             idx = [idx]
+         elif isinstance(idx, slice):
+             idx = range(*idx.indices(self.shape[0]))
+         elif isinstance(idx, torch.Tensor):
+             if idx.dtype == torch.bool:
+                 assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
+                 idx = idx.nonzero().squeeze(1)
+             elif idx.dtype in [torch.int32, torch.int64]:
+                 assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
+             else:
+                 raise ValueError(f"Unknown index type: {idx.dtype}")
+         else:
+             raise ValueError(f"Unknown index type: {type(idx)}")
+
+         coords = []
+         feats = []
+         for new_idx, old_idx in enumerate(idx):
+             coords.append(self.coords[self.layout[old_idx]].clone())
+             coords[-1][:, 0] = new_idx
+             feats.append(self.feats[self.layout[old_idx]])
+         coords = torch.cat(coords, dim=0).contiguous()
+         feats = torch.cat(feats, dim=0).contiguous()
+         return SparseTensor(feats=feats, coords=coords)
+
+     def register_spatial_cache(self, key, value) -> None:
+         """
+         Register a spatial cache.
+         The spatial cache can be anything you want to cache.
+         The registry and retrieval of the cache are based on the current scale.
+         """
+         scale_key = str(self._scale)
+         if scale_key not in self._spatial_cache:
+             self._spatial_cache[scale_key] = {}
+         self._spatial_cache[scale_key][key] = value
+
+     def get_spatial_cache(self, key=None):
+         """
+         Get a spatial cache.
+         """
+         scale_key = str(self._scale)
+         cur_scale_cache = self._spatial_cache.get(scale_key, {})
+         if key is None:
+             return cur_scale_cache
+         return cur_scale_cache.get(key, None)
+
+
+ def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
+     """
+     Broadcast a tensor to a sparse tensor along the batch dimension.
+
+     Args:
+         input (SparseTensor): Sparse tensor to broadcast to.
+         other (torch.Tensor): Tensor with a leading batch dimension to broadcast.
+     """
+     coords, feats = input.coords, input.feats
+     broadcasted = torch.zeros_like(feats)
+     for k in range(input.shape[0]):
+         broadcasted[input.layout[k]] = other[k]
+     return broadcasted
+
+
+ def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
+     """
+     Broadcast a tensor to a sparse tensor along the batch dimension, then perform an operation.
+
+     Args:
+         input (SparseTensor): Sparse tensor to operate on.
+         other (torch.Tensor): Tensor with a leading batch dimension to broadcast.
+         op (callable): Operation to perform after broadcasting. Defaults to torch.add.
+     """
+     return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
+
+
+ def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
+     """
+     Concatenate a list of sparse tensors.
+
+     Args:
+         inputs (List[SparseTensor]): List of sparse tensors to concatenate.
+         dim (int): Dimension along which to concatenate; 0 stacks batches.
+     """
+     if dim == 0:
+         start = 0
+         coords = []
+         for input in inputs:
+             coords.append(input.coords.clone())
+             coords[-1][:, 0] += start
+             start += input.shape[0]
+         coords = torch.cat(coords, dim=0)
+         feats = torch.cat([input.feats for input in inputs], dim=0)
+         output = SparseTensor(
+             coords=coords,
+             feats=feats,
+         )
+     else:
+         feats = torch.cat([input.feats for input in inputs], dim=dim)
+         output = inputs[0].replace(feats)
+
+     return output
+
+
+ def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
+     """
+     Unbind a sparse tensor along a dimension.
+
+     Args:
+         input (SparseTensor): Sparse tensor to unbind.
+         dim (int): Dimension to unbind.
+     """
+     if dim == 0:
+         return [input[i] for i in range(input.shape[0])]
+     else:
+         feats = input.feats.unbind(dim)
+         return [input.replace(f) for f in feats]
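A hypothetical end-to-end sketch of the wrapper above (illustrative, not from the diff; assumes one of the spconv/torchsparse backends is importable and BACKEND is configured by the sparse package):

    import torch
    from Stable3DGen.hi3dgen.modules.sparse import SparseTensor, sparse_cat

    coords = torch.tensor([[0, 0, 0, 0],
                           [0, 1, 2, 3],
                           [1, 4, 5, 6]], dtype=torch.int32)  # [batch, x, y, z]; batch-contiguous
    feats = torch.randn(3, 16)
    a = SparseTensor(feats=feats, coords=coords)   # a.shape == torch.Size([2, 16])

    b = a * 2.0 + 1.0                              # element-wise ops reuse coords and layout
    merged = sparse_cat([a, b], dim=0)             # batches stack: merged.shape[0] == 4
    first = merged[0]                              # batch 0 re-indexed as a new tensor
    print(a.shape, merged.shape, first.feats.shape)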
Stable3DGen/hi3dgen/modules/sparse/conv/__init__.py ADDED
@@ -0,0 +1,45 @@
+ # MIT License
+
+ # Copyright (c) Microsoft
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Copyright (c) [2025] [Microsoft]
+ # SPDX-License-Identifier: MIT
+ from .. import BACKEND
+
+
+ SPCONV_ALGO = 'auto'  # 'auto', 'implicit_gemm', 'native'
+
+ def __from_env():
+     import os
+
+     global SPCONV_ALGO
+     env_spconv_algo = os.environ.get('SPCONV_ALGO')
+     if env_spconv_algo is not None and env_spconv_algo in ['auto', 'implicit_gemm', 'native']:
+         SPCONV_ALGO = env_spconv_algo
+     print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}")
+
+
+ __from_env()
+
+ if BACKEND == 'torchsparse':
+     from .conv_torchsparse import *
+ elif BACKEND == 'spconv':
+     from .conv_spconv import *
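SPCONV_ALGO is read from the environment once at import time, so the override must be set before the package is imported; a minimal sketch (hypothetical, assuming the hi3dgen import path below):

    import os
    os.environ['SPCONV_ALGO'] = 'native'  # must be one of 'auto', 'implicit_gemm', 'native'

    # Importing the conv subpackage afterwards picks the override up and logs it:
    # [SPARSE][CONV] spconv algo: native
    import Stable3DGen.hi3dgen.modules.sparse.conv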
Stable3DGen/hi3dgen/modules/sparse/conv/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (649 Bytes).