/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_allegro.py

        ff_output = self.ff(norm_hidden_states)
        ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states

        # TODO(aryan): maybe following line is not required
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states

class AllegroTransformer3DModel(ModelMixin, ConfigMixin):
    r"""
    A 3D Transformer model for video-like data.

    Args:
        patch_size (`int`, defaults to `2`):
            The size of spatial patches to use in the patch embedding layer.
        patch_size_t (`int`, defaults to `1`):
            The size of temporal patches to use in the patch embedding layer.
        num_attention_heads (`int`, defaults to `24`):
            The number of heads to use for multi-head attention.
        attention_head_dim (`int`, defaults to `96`):
            The number of channels in each head.
        in_channels (`int`, defaults to `4`):
            The number of channels in the input.
        out_channels (`int`, *optional*, defaults to `4`):
            The number of channels in the output.
        num_layers (`int`, defaults to `32`):
            The number of layers of Transformer blocks to use.
        dropout (`float`, defaults to `0.0`):
            The dropout probability to use.
        cross_attention_dim (`int`, defaults to `2304`):
            The dimension of the cross attention features.
        attention_bias (`bool`, defaults to `True`):
            Whether or not to use bias in the attention projection layers.
        sample_height (`int`, defaults to `90`):
            The height of the input latents.
        sample_width (`int`, defaults to `160`):
            The width of the input latents.
        sample_frames (`int`, defaults to `22`):
            The number of frames in the input latents.
        activation_fn (`str`, defaults to `"gelu-approximate"`):
            Activation function to use in feed-forward.
        norm_elementwise_affine (`bool`, defaults to `False`):
            Whether or not to use elementwise affine in normalization layers.
        norm_eps (`float`, defaults to `1e-6`):
            The epsilon value to use in normalization layers.
        caption_channels (`int`, defaults to `4096`):
            Number of channels to use for projecting the caption embeddings.
        interpolation_scale_h (`float`, defaults to `2.0`):
            Scaling factor to apply in 3D positional embeddings across height dimension.
        interpolation_scale_w (`float`, defaults to `2.0`):
            Scaling factor to apply in 3D positional embeddings across width dimension.
        interpolation_scale_t (`float`, defaults to `2.2`):
            Scaling factor to apply in 3D positional embeddings across time dimension.
    """
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        patch_size: int = 2,
        patch_size_t: int = 1,
        num_attention_heads: int = 24,
        attention_head_dim: int = 96,
        in_channels: int = 4,
        out_channels: int = 4,
        num_layers: int = 32,
        dropout: float = 0.0,
        cross_attention_dim: int = 2304,
        attention_bias: bool = True,
        sample_height: int = 90,
        sample_width: int = 160,
        sample_frames: int = 22,
        activation_fn: str = "gelu-approximate",
        norm_elementwise_affine: bool = False,
        norm_eps: float = 1e-6,
        caption_channels: int = 4096,
        interpolation_scale_h: float = 2.0,
        interpolation_scale_w: float = 2.0,
        interpolation_scale_t: float = 2.2,
    ):
        super().__init__()

        self.inner_dim = num_attention_heads * attention_head_dim

        interpolation_scale_t = (
            interpolation_scale_t
            if interpolation_scale_t is not None
            else ((sample_frames - 1) // 16 + 1) if sample_frames % 2 == 1 else sample_frames // 16
        )
        interpolation_scale_h = interpolation_scale_h if interpolation_scale_h is not None else sample_height / 30
        interpolation_scale_w = interpolation_scale_w if interpolation_scale_w is not None else sample_width / 40

        # 1. Patch embedding
        self.pos_embed = PatchEmbed(
            height=sample_height,
            width=sample_width,
            patch_size=patch_size,
            in_channels=in_channels,
            embed_dim=self.inner_dim,
            pos_embed_type=None,
        )

        # 2. Transformer blocks
        self.transformer_blocks = nn.ModuleList(
            [
                AllegroTransformerBlock(
                    self.inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    norm_elementwise_affine=norm_elementwise_affine,
                    norm_eps=norm_eps,
                )
                for _ in range(num_layers)
            ]
        )

        # 3. Output projection & norm
        self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * out_channels)
        # 4. Timestep embeddings
        self.adaln_single = AdaLayerNormSingle(self.inner_dim, use_additional_conditions=False)

        # 5. Caption projection
        self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=self.inner_dim)

        self.gradient_checkpointing = False

    def _set_gradient_checkpointing(self, module, value=False):
        self.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        return_dict: bool = True,
    ):
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t = self.config.patch_size_t
        p = self.config.patch_size

        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p
        post_patch_width = width // p
        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
        #   we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
        #   we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch, 1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        attention_mask_vid, attention_mask_img = None, None
        if attention_mask is not None and attention_mask.ndim == 4:
            # assume that mask is expressed as:
            #   (1 = keep, 0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0, discard = -10000.0)
            # b, frame+use_image_num, h, w -> a video with images
            # b, 1, h, w -> only images
            attention_mask = attention_mask.to(hidden_states.dtype)
            attention_mask = attention_mask[:, :num_frames]  # [batch_size, num_frames, height, width]

            if attention_mask.numel() > 0:
                attention_mask = attention_mask.unsqueeze(1)  # [batch_size, 1, num_frames, height, width]
                attention_mask = F.max_pool3d(attention_mask, kernel_size=(p_t, p, p), stride=(p_t, p, p))
                attention_mask = attention_mask.flatten(1).view(batch_size, 1, -1)

            attention_mask = (
                (1 - attention_mask.bool().to(hidden_states.dtype)) * -10000.0 if attention_mask.numel() > 0 else None
            )

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
            encoder_attention_mask = (1 - encoder_attention_mask.to(self.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)

        # 1. Timestep embeddings
        timestep, embedded_timestep = self.adaln_single(
            timestep, batch_size=batch_size, hidden_dtype=hidden_states.dtype
        )
        # 2. Patch embeddings
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1)
        hidden_states = self.pos_embed(hidden_states)
        hidden_states = hidden_states.unflatten(0, (batch_size, -1)).flatten(1, 2)

        encoder_hidden_states = self.caption_projection(encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, encoder_hidden_states.shape[-1])

        # 3. Transformer blocks
        for i, block in enumerate(self.transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    timestep,
                    attention_mask,
                    encoder_attention_mask,
                    image_rotary_emb,
                    **ckpt_kwargs,
                )
            else:
                hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    temb=timestep,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    image_rotary_emb=image_rotary_emb,
                )
        # 4. Output normalization & projection
        shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
        hidden_states = self.norm_out(hidden_states)

        # Modulation
        hidden_states = hidden_states * (1 + scale) + shift
        hidden_states = self.proj_out(hidden_states)
        hidden_states = hidden_states.squeeze(1)

        # 5. Unpatchify
        hidden_states = hidden_states.reshape(
            batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p, p, -1
        )
        hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
        output = hidden_states.reshape(batch_size, -1, num_frames, height, width)

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)
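
# For orientation, a construction-only sketch of the model above. The tiny config values are
# illustrative assumptions, not the released Allegro checkpoint's configuration; note that
# cross_attention_dim must equal inner_dim because captions are projected to inner_dim.
from diffusers.models.transformers.transformer_allegro import AllegroTransformer3DModel

model = AllegroTransformer3DModel(
    num_attention_heads=2,
    attention_head_dim=8,    # inner_dim = 2 * 8 = 16
    cross_attention_dim=16,
    caption_channels=24,
    num_layers=1,
    sample_height=16,
    sample_width=16,
    sample_frames=8,
)
# forward contract, from the code above:
#   hidden_states:         (batch, in_channels=4, num_frames, height, width) video latents
#   encoder_hidden_states: caption embeddings whose last dimension equals caption_channels
#   timestep:              (batch,) tensor; the returned `.sample` matches the input latent shape
print(sum(p.numel() for p in model.parameters()))
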

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/hunyuan_transformer_2d.py

class AdaLayerNormShift(nn.Module):
    r"""
    Norm layer modified to incorporate timestep embeddings.

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        elementwise_affine (`bool`, defaults to `True`): Whether to use learnable elementwise affine parameters.
        eps (`float`, defaults to `1e-6`): The epsilon value to use in the normalization layer.
    """

    def __init__(self, embedding_dim: int, elementwise_affine=True, eps=1e-6):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim)
        self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps)

    def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
        shift = self.linear(self.silu(emb.to(torch.float32)).to(emb.dtype))
        x = self.norm(x) + shift.unsqueeze(dim=1)
        return x
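
# A quick shape check for this helper, assuming only that it is importable from the module above.
import torch

from diffusers.models.transformers.hunyuan_transformer_2d import AdaLayerNormShift

norm = AdaLayerNormShift(embedding_dim=32)
x = torch.randn(2, 10, 32)   # (batch, seq_len, dim) hidden states
emb = torch.randn(2, 32)     # (batch, dim) timestep embedding
out = norm(x, emb)           # FP32 LayerNorm of x plus a learned, timestep-dependent shift
print(out.shape)             # torch.Size([2, 10, 32])
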
class HunyuanDiTBlock(nn.Module):
    r"""
    Transformer block used in the Hunyuan-DiT model (https://github.com/Tencent/HunyuanDiT). Allows skip connection
    and QKNorm.

    Parameters:
        dim (`int`):
            The number of channels in the input and output.
        num_attention_heads (`int`):
            The number of heads to use for multi-head attention.
        cross_attention_dim (`int`, *optional*):
            The size of the encoder_hidden_states vector for cross attention.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`):
            Activation function to be used in feed-forward.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_eps (`float`, *optional*, defaults to 1e-6):
            A small constant added to the denominator in normalization layers to prevent division by zero.
        final_dropout (`bool`, *optional*, defaults to False):
            Whether to apply a final dropout after the last feed-forward layer.
        ff_inner_dim (`int`, *optional*):
            The size of the hidden layer in the feed-forward block. Defaults to `None`.
        ff_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the feed-forward block.
        skip (`bool`, *optional*, defaults to `False`):
            Whether to use skip connection. Defaults to `False` for down-blocks and mid-blocks.
        qk_norm (`bool`, *optional*, defaults to `True`):
            Whether to use normalization in QK calculation. Defaults to `True`.
    """
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        cross_attention_dim: int = 1024,
        dropout=0.0,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        norm_eps: float = 1e-6,
        final_dropout: bool = False,
        ff_inner_dim: Optional[int] = None,
        ff_bias: bool = True,
        skip: bool = False,
        qk_norm: bool = True,
    ):
        super().__init__()

        # Define 3 blocks. Each block has its own normalization layer.
        # NOTE: when new version comes, check norm2 and norm 3
        # 1. Self-Attn
        self.norm1 = AdaLayerNormShift(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.attn1 = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=dim // num_attention_heads,
            heads=num_attention_heads,
            qk_norm="layer_norm" if qk_norm else None,
            eps=1e-6,
            bias=True,
            processor=HunyuanAttnProcessor2_0(),
        )

        # 2. Cross-Attn
        self.norm2 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine)

        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            dim_head=dim // num_attention_heads,
            heads=num_attention_heads,
            qk_norm="layer_norm" if qk_norm else None,
            eps=1e-6,
            bias=True,
            processor=HunyuanAttnProcessor2_0(),
        )

        # 3. Feed-forward
        self.norm3 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine)

        self.ff = FeedForward(
            dim,
            dropout=dropout,  ### 0.0
            activation_fn=activation_fn,  ### approx GeLU
            final_dropout=final_dropout,  ### 0.0
            inner_dim=ff_inner_dim,  ### int(dim * mlp_ratio)
            bias=ff_bias,
        )

        # 4. Skip Connection
        if skip:
            self.skip_norm = FP32LayerNorm(2 * dim, norm_eps, elementwise_affine=True)
            self.skip_linear = nn.Linear(2 * dim, dim)
        else:
            self.skip_linear = None

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        temb: Optional[torch.Tensor] = None,
        image_rotary_emb=None,
        skip=None,
    ) -> torch.Tensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Long Skip Connection
        if self.skip_linear is not None:
            cat = torch.cat([hidden_states, skip], dim=-1)
            cat = self.skip_norm(cat)
            hidden_states = self.skip_linear(cat)

        # 1. Self-Attention
        norm_hidden_states = self.norm1(hidden_states, temb)  ### checked: self.norm1 is correct
        attn_output = self.attn1(
            norm_hidden_states,
            image_rotary_emb=image_rotary_emb,
        )
        hidden_states = hidden_states + attn_output

        # 2. Cross-Attention
        hidden_states = hidden_states + self.attn2(
            self.norm2(hidden_states),
            encoder_hidden_states=encoder_hidden_states,
            image_rotary_emb=image_rotary_emb,
        )

        # FFN Layer ### TODO: switch norm2 and norm3 in the state dict
        mlp_inputs = self.norm3(hidden_states)
        hidden_states = hidden_states + self.ff(mlp_inputs)

        return hidden_states
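
# A minimal forward sketch for the block above with small, assumed sizes. Rotary embeddings are
# omitted (image_rotary_emb=None), which the Hunyuan attention processor treats as "no rotary".
import torch

from diffusers.models.transformers.hunyuan_transformer_2d import HunyuanDiTBlock

block = HunyuanDiTBlock(dim=32, num_attention_heads=2, cross_attention_dim=16)

hidden_states = torch.randn(2, 10, 32)         # (batch, image tokens, dim)
encoder_hidden_states = torch.randn(2, 7, 16)  # (batch, text tokens, cross_attention_dim)
temb = torch.randn(2, 32)                      # timestep embedding consumed by AdaLayerNormShift
out = block(hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb)
print(out.shape)  # torch.Size([2, 10, 32])
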
class HunyuanDiT2DModel(ModelMixin, ConfigMixin):
    """
    HunYuanDiT: Diffusion model with a Transformer backbone.

    Inherits ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16):
            The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88):
            The number of channels in each head.
        in_channels (`int`, *optional*):
            The number of channels in the input and output (specify if the input is **continuous**).
        patch_size (`int`, *optional*):
            The size of the patch to use for the input.
        activation_fn (`str`, *optional*, defaults to `"gelu-approximate"`):
            Activation function to use in feed-forward.
        sample_size (`int`, *optional*):
            The width of the latent images. This is fixed during training since it is used to learn a number of
            position embeddings.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability to use.
        cross_attention_dim (`int`, *optional*):
            The number of dimensions in the CLIP text embedding.
        hidden_size (`int`, *optional*):
            The size of the hidden layer in the conditioning embedding layers.
        num_layers (`int`, *optional*, defaults to 1):
            The number of layers of Transformer blocks to use.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            The ratio of the hidden layer size to the input size.
        learn_sigma (`bool`, *optional*, defaults to `True`):
            Whether to predict variance.
        cross_attention_dim_t5 (`int`, *optional*):
            The number of dimensions in the T5 text embedding.
        pooled_projection_dim (`int`, *optional*):
            The size of the pooled projection.
        text_len (`int`, *optional*):
            The length of the CLIP text embedding.
        text_len_t5 (`int`, *optional*):
            The length of the T5 text embedding.
        use_style_cond_and_image_meta_size (`bool`, *optional*):
            Whether or not to use style condition and image meta size. True for version <= 1.1, False for version
            >= 1.2
    """
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        patch_size: Optional[int] = None,
        activation_fn: str = "gelu-approximate",
        sample_size=32,
        hidden_size=1152,
        num_layers: int = 28,
        mlp_ratio: float = 4.0,
        learn_sigma: bool = True,
        cross_attention_dim: int = 1024,
        norm_type: str = "layer_norm",
        cross_attention_dim_t5: int = 2048,
        pooled_projection_dim: int = 1024,
        text_len: int = 77,
        text_len_t5: int = 256,
        use_style_cond_and_image_meta_size: bool = True,
    ):
        super().__init__()
        self.out_channels = in_channels * 2 if learn_sigma else in_channels
        self.num_heads = num_attention_heads
        self.inner_dim = num_attention_heads * attention_head_dim

        self.text_embedder = PixArtAlphaTextProjection(
            in_features=cross_attention_dim_t5,
            hidden_size=cross_attention_dim_t5 * 4,
            out_features=cross_attention_dim,
            act_fn="silu_fp32",
        )

        self.text_embedding_padding = nn.Parameter(
            torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)
        )

        self.pos_embed = PatchEmbed(
            height=sample_size,
            width=sample_size,
            in_channels=in_channels,
            embed_dim=hidden_size,
            patch_size=patch_size,
            pos_embed_type=None,
        )

        self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(
            hidden_size,
            pooled_projection_dim=pooled_projection_dim,
            seq_len=text_len_t5,
            cross_attention_dim=cross_attention_dim_t5,
            use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size,
        )

        # HunyuanDiT Blocks
        self.blocks = nn.ModuleList(
            [
                HunyuanDiTBlock(
                    dim=self.inner_dim,
                    num_attention_heads=self.config.num_attention_heads,
                    activation_fn=activation_fn,
                    ff_inner_dim=int(self.inner_dim * mlp_ratio),
                    cross_attention_dim=cross_attention_dim,
                    qk_norm=True,  # See http://arxiv.org/abs/2302.05442 for details.
                    skip=layer > num_layers // 2,
                )
                for layer in range(num_layers)
            ]
        )

        self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedHunyuanAttnProcessor2_0
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

        self.set_attn_processor(FusedHunyuanAttnProcessor2_0())

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path
                to the corresponding cross attention processor. This is strongly recommended when setting trainable
                attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(HunyuanAttnProcessor2_0())
    def forward(
        self,
        hidden_states,
        timestep,
        encoder_hidden_states=None,
        text_embedding_mask=None,
        encoder_hidden_states_t5=None,
        text_embedding_mask_t5=None,
        image_meta_size=None,
        style=None,
        image_rotary_emb=None,
        controlnet_block_samples=None,
        return_dict=True,
    ):
        """
        The [`HunyuanDiT2DModel`] forward method.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
                The input tensor.
            timestep (`torch.LongTensor`, *optional*):
                Used to indicate denoising step.
            encoder_hidden_states (`torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
                Conditional embeddings for cross attention layer. This is the output of `BertModel`.
            text_embedding_mask (`torch.Tensor`):
                An attention mask of shape `(batch, key_tokens)` applied to `encoder_hidden_states`. This is the
                output of `BertModel`.
            encoder_hidden_states_t5 (`torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
                Conditional embeddings for cross attention layer. This is the output of the T5 text encoder.
            text_embedding_mask_t5 (`torch.Tensor`):
                An attention mask of shape `(batch, key_tokens)` applied to `encoder_hidden_states`. This is the
                output of the T5 text encoder.
            image_meta_size (`torch.Tensor`):
                Conditional embedding indicating the image sizes.
            style (`torch.Tensor`):
                Conditional embedding indicating the style.
            image_rotary_emb (`torch.Tensor`):
                The image rotary embeddings to apply on query and key tensors during attention calculation.
            return_dict (`bool`):
                Whether to return a dictionary.
        """
        height, width = hidden_states.shape[-2:]

        hidden_states = self.pos_embed(hidden_states)

        temb = self.time_extra_emb(
            timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype
        )  # [B, D]

        # text projection
        batch_size, sequence_length, _ = encoder_hidden_states_t5.shape
        encoder_hidden_states_t5 = self.text_embedder(
            encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])
        )
        encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1)

        encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1)
        text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1)
        text_embedding_mask = text_embedding_mask.unsqueeze(2).bool()

        encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding)

        skips = []
        for layer, block in enumerate(self.blocks):
            if layer > self.config.num_layers // 2:
                if controlnet_block_samples is not None:
                    skip = skips.pop() + controlnet_block_samples.pop()
                else:
                    skip = skips.pop()
                hidden_states = block(
                    hidden_states,
                    temb=temb,
                    encoder_hidden_states=encoder_hidden_states,
                    image_rotary_emb=image_rotary_emb,
                    skip=skip,
                )  # (N, L, D)
            else:
                hidden_states = block(
                    hidden_states,
                    temb=temb,
                    encoder_hidden_states=encoder_hidden_states,
                    image_rotary_emb=image_rotary_emb,
                )  # (N, L, D)

            if layer < (self.config.num_layers // 2 - 1):
                skips.append(hidden_states)

        if controlnet_block_samples is not None and len(controlnet_block_samples) != 0:
            raise ValueError("The number of controls is not equal to the number of skip connections.")

        # final layer
        hidden_states = self.norm_out(hidden_states, temb.to(torch.float32))
        hidden_states = self.proj_out(hidden_states)
        # (N, L, patch_size ** 2 * out_channels)

        # unpatchify: (N, out_channels, H, W)
        patch_size = self.pos_embed.patch_size
        height = height // patch_size
        width = width // patch_size
        hidden_states = hidden_states.reshape(
            shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
        )
        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)

    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
    def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
        """
        Sets the attention processor to use [feed forward
        chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).

        Parameters:
            chunk_size (`int`, *optional*):
                The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
                over each tensor of dim=`dim`.
            dim (`int`, *optional*, defaults to `0`):
                The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
                or dim=1 (sequence length).
        """
        if dim not in [0, 1]:
            raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")

        # By default chunk size is 1
        chunk_size = chunk_size or 1

        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
            if hasattr(module, "set_chunk_feed_forward"):
                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)

            for child in module.children():
                fn_recursive_feed_forward(child, chunk_size, dim)

        for module in self.children():
            fn_recursive_feed_forward(module, chunk_size, dim)

    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking
    def disable_forward_chunking(self):
        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
            if hasattr(module, "set_chunk_feed_forward"):
                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)

            for child in module.children():
                fn_recursive_feed_forward(child, chunk_size, dim)

        for module in self.children():
            fn_recursive_feed_forward(module, None, 0)
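
# A construction-level sketch of the processor utilities above. All sizes are assumptions chosen so
# the module builds quickly; hidden_size is kept equal to num_attention_heads * attention_head_dim,
# which the blocks and output layers require. This is not the released checkpoint configuration.
from diffusers.models.transformers.hunyuan_transformer_2d import HunyuanDiT2DModel

model = HunyuanDiT2DModel(
    num_attention_heads=2,
    attention_head_dim=8,
    in_channels=4,
    patch_size=2,
    sample_size=8,
    hidden_size=16,
    num_layers=2,
    cross_attention_dim=16,
    cross_attention_dim_t5=16,
    pooled_projection_dim=16,
    text_len=4,
    text_len_t5=4,
)

print(len(model.attn_processors))  # one entry per Attention module: self- plus cross-attention per block
model.fuse_qkv_projections()       # swaps in FusedHunyuanAttnProcessor2_0
model.unfuse_qkv_projections()     # restores the previous processors
model.enable_forward_chunking(chunk_size=1, dim=0)  # chunked feed-forward in every HunyuanDiTBlock
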

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/lumina_nextdit2d.py

class LuminaNextDiTBlock(nn.Module):
    """
    A LuminaNextDiTBlock for LuminaNextDiT2DModel.

    Parameters:
        dim (`int`): Embedding dimension of the input features.
        num_attention_heads (`int`): Number of attention heads.
        num_kv_heads (`int`):
            Number of attention heads in key and value features (if using GQA), or set to None for the same as query.
        multiple_of (`int`): The hidden dimension of the feed-forward layer is rounded up to a multiple of this value.
        ffn_dim_multiplier (`float`): The multiplier factor of the feed-forward layer dimension.
        norm_eps (`float`): The eps for norm layer.
        qk_norm (`bool`): Whether to apply normalization to queries and keys.
        cross_attention_dim (`int`): Cross attention embedding dimension of the input text prompt hidden_states.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters in the normalization layers.
    """
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        num_kv_heads: int,
        multiple_of: int,
        ffn_dim_multiplier: float,
        norm_eps: float,
        qk_norm: bool,
        cross_attention_dim: int,
        norm_elementwise_affine: bool = True,
    ) -> None:
        super().__init__()
        self.head_dim = dim // num_attention_heads

        self.gate = nn.Parameter(torch.zeros([num_attention_heads]))

        # Self-attention
        self.attn1 = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=dim // num_attention_heads,
            qk_norm="layer_norm_across_heads" if qk_norm else None,
            heads=num_attention_heads,
            kv_heads=num_kv_heads,
            eps=1e-5,
            bias=False,
            out_bias=False,
            processor=LuminaAttnProcessor2_0(),
        )
        self.attn1.to_out = nn.Identity()

        # Cross-attention
        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            dim_head=dim // num_attention_heads,
            qk_norm="layer_norm_across_heads" if qk_norm else None,
            heads=num_attention_heads,
            kv_heads=num_kv_heads,
            eps=1e-5,
            bias=False,
            out_bias=False,
            processor=LuminaAttnProcessor2_0(),
        )

        self.feed_forward = LuminaFeedForward(
            dim=dim,
            inner_dim=4 * dim,
            multiple_of=multiple_of,
            ffn_dim_multiplier=ffn_dim_multiplier,
        )

        self.norm1 = LuminaRMSNormZero(
            embedding_dim=dim,
            norm_eps=norm_eps,
            norm_elementwise_affine=norm_elementwise_affine,
        )
        self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
        self.norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
        self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)

        self.norm1_context = RMSNorm(cross_attention_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        image_rotary_emb: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_mask: torch.Tensor,
        temb: torch.Tensor,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Perform a forward pass through the LuminaNextDiTBlock.

        Parameters:
            hidden_states (`torch.Tensor`): The input hidden_states for the LuminaNextDiTBlock.
            attention_mask (`torch.Tensor`): The attention mask corresponding to hidden_states.
            image_rotary_emb (`torch.Tensor`): Precomputed cosine and sine frequencies.
            encoder_hidden_states (`torch.Tensor`): The text prompt hidden_states produced by the Gemma encoder.
            encoder_mask (`torch.Tensor`): The attention mask for the text prompt hidden_states.
            temb (`torch.Tensor`): Timestep embedding with text prompt embedding.
            cross_attention_kwargs (`Dict[str, Any]`): kwargs for cross attention.
        """
        residual = hidden_states
        # Self-attention
        norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb)
        self_attn_output = self.attn1(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_hidden_states,
            attention_mask=attention_mask,
            query_rotary_emb=image_rotary_emb,
            key_rotary_emb=image_rotary_emb,
            **cross_attention_kwargs,
        )

        # Cross-attention
        norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states)
        cross_attn_output = self.attn2(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            attention_mask=encoder_mask,
            query_rotary_emb=image_rotary_emb,
            key_rotary_emb=None,
            **cross_attention_kwargs,
        )
        cross_attn_output = cross_attn_output * self.gate.tanh().view(1, 1, -1, 1)
        mixed_attn_output = self_attn_output + cross_attn_output
        mixed_attn_output = mixed_attn_output.flatten(-2)
        # linear proj
        hidden_states = self.attn2.to_out[0](mixed_attn_output)

        hidden_states = residual + gate_msa.unsqueeze(1).tanh() * self.norm2(hidden_states)

        mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1)))

        hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output)

        return hidden_states
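
# A construction-only sketch of the block above with small, assumed sizes; it presumes a diffusers
# version whose `Attention` supports `kv_heads` and the "layer_norm_across_heads" qk_norm mode used
# in __init__. A real forward pass additionally needs precomputed rotary frequencies and masks.
from diffusers.models.transformers.lumina_nextdit2d import LuminaNextDiTBlock

block = LuminaNextDiTBlock(
    dim=32,                  # head_dim = 32 // 2 = 16
    num_attention_heads=2,
    num_kv_heads=2,
    multiple_of=32,
    ffn_dim_multiplier=None,
    norm_eps=1e-5,
    qk_norm=True,
    cross_attention_dim=16,
)
# forward(hidden_states, attention_mask, image_rotary_emb, encoder_hidden_states, encoder_mask, temb):
# both attention branches share the image rotary embedding for queries, and the cross-attention
# output is scaled by the learned per-head `gate` before the two branches are summed.
print(sum(p.numel() for p in block.parameters()))
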
class LuminaNextDiT2DModel(ModelMixin, ConfigMixin):
    """
    LuminaNextDiT: Diffusion model with a Transformer backbone.

    Inherits ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers.

    Parameters:
        sample_size (`int`):
            The width of the latent images. This is fixed during training since it is used to learn a number of
            position embeddings.
        patch_size (`int`, *optional*, defaults to 2):
            The size of each patch in the image. This parameter defines the resolution of patches fed into the model.
        in_channels (`int`, *optional*, defaults to 4):
            The number of input channels for the model. Typically, this matches the number of channels in the input
            images.
        hidden_size (`int`, *optional*, defaults to 2304):
            The dimensionality of the hidden layers in the model. This parameter determines the width of the model's
            hidden representations.
        num_layers (`int`, *optional*, defaults to 32):
            The number of layers in the model. This defines the depth of the neural network.
        num_attention_heads (`int`, *optional*, defaults to 32):
            The number of attention heads in each attention layer. This parameter specifies how many separate
            attention mechanisms are used.
        num_kv_heads (`int`, *optional*, defaults to 8):
            The number of key-value heads in the attention mechanism, if different from the number of attention
            heads. If None, it defaults to num_attention_heads.
        multiple_of (`int`, *optional*, defaults to 256):
            A factor that the hidden size should be a multiple of. This can help optimize certain hardware
            configurations.
        ffn_dim_multiplier (`float`, *optional*):
            A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based
            on the model configuration.
        norm_eps (`float`, *optional*, defaults to 1e-5):
            A small value added to the denominator for numerical stability in normalization layers.
        learn_sigma (`bool`, *optional*, defaults to True):
            Whether the model should learn the sigma parameter, which might be related to uncertainty or variance in
            predictions.
        qk_norm (`bool`, *optional*, defaults to True):
            Indicates if the queries and keys in the attention mechanism should be normalized.
        cross_attention_dim (`int`, *optional*, defaults to 2048):
            The dimensionality of the text embeddings. This parameter defines the size of the text representations
            used in the model.
        scaling_factor (`float`, *optional*, defaults to 1.0):
            A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the
            overall scale of the model's operations.
    """
    @register_to_config
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: Optional[int] = 2,
        in_channels: Optional[int] = 4,
        hidden_size: Optional[int] = 2304,
        num_layers: Optional[int] = 32,
        num_attention_heads: Optional[int] = 32,
        num_kv_heads: Optional[int] = None,
        multiple_of: Optional[int] = 256,
        ffn_dim_multiplier: Optional[float] = None,
        norm_eps: Optional[float] = 1e-5,
        learn_sigma: Optional[bool] = True,
        qk_norm: Optional[bool] = True,
        cross_attention_dim: Optional[int] = 2048,
        scaling_factor: Optional[float] = 1.0,
    ) -> None:
        super().__init__()
        self.sample_size = sample_size
        self.patch_size = patch_size
        self.in_channels = in_channels
        self.out_channels = in_channels * 2 if learn_sigma else in_channels
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.head_dim = hidden_size // num_attention_heads
        self.scaling_factor = scaling_factor

        self.patch_embedder = LuminaPatchEmbed(
            patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, bias=True
        )

        self.pad_token = nn.Parameter(torch.empty(hidden_size))

        self.time_caption_embed = LuminaCombinedTimestepCaptionEmbedding(
            hidden_size=min(hidden_size, 1024), cross_attention_dim=cross_attention_dim
        )

        self.layers = nn.ModuleList(
            [
                LuminaNextDiTBlock(
                    hidden_size,
                    num_attention_heads,
                    num_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    qk_norm,
                    cross_attention_dim,
                )
                for _ in range(num_layers)
            ]
        )
        self.norm_out = LuminaLayerNormContinuous(
            embedding_dim=hidden_size,
            conditioning_embedding_dim=min(hidden_size, 1024),
            elementwise_affine=False,
            eps=1e-6,
            bias=True,
            out_dim=patch_size * patch_size * self.out_channels,
        )
        # self.final_layer = LuminaFinalLayer(hidden_size, patch_size, self.out_channels)

        assert (hidden_size // num_attention_heads) % 4 == 0, "2d rope needs head dim to be divisible by 4"
    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_mask: torch.Tensor,
        image_rotary_emb: torch.Tensor,
        cross_attention_kwargs: Dict[str, Any] = None,
        return_dict=True,
    ) -> torch.Tensor:
        """
        Forward pass of LuminaNextDiT.

        Parameters:
            hidden_states (torch.Tensor): Input tensor of shape (N, C, H, W).
            timestep (torch.Tensor): Tensor of diffusion timesteps of shape (N,).
            encoder_hidden_states (torch.Tensor): Tensor of caption features of shape (N, L, D).
            encoder_mask (torch.Tensor): Tensor of caption masks of shape (N, L).
        """
        hidden_states, mask, img_size, image_rotary_emb = self.patch_embedder(hidden_states, image_rotary_emb)
        image_rotary_emb = image_rotary_emb.to(hidden_states.device)

        temb = self.time_caption_embed(timestep, encoder_hidden_states, encoder_mask)

        encoder_mask = encoder_mask.bool()
        for layer in self.layers:
            hidden_states = layer(
                hidden_states,
                mask,
                image_rotary_emb,
                encoder_hidden_states,
                encoder_mask,
                temb=temb,
                cross_attention_kwargs=cross_attention_kwargs,
            )

        hidden_states = self.norm_out(hidden_states, temb)

        # unpatchify
        height_tokens = width_tokens = self.patch_size
        height, width = img_size[0]
        batch_size = hidden_states.size(0)
        sequence_length = (height // height_tokens) * (width // width_tokens)
        hidden_states = hidden_states[:, :sequence_length].view(
            batch_size, height // height_tokens, width // width_tokens, height_tokens, width_tokens, self.out_channels
        )
        output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
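
# Likewise, a construction-only sketch of the full model with an assumed tiny config;
# head_dim = 64 // 4 = 16 satisfies the "divisible by 4" assertion required by the 2D RoPE.
from diffusers.models.transformers.lumina_nextdit2d import LuminaNextDiT2DModel

model = LuminaNextDiT2DModel(
    sample_size=16,
    patch_size=2,
    in_channels=4,
    hidden_size=64,
    num_layers=2,
    num_attention_heads=4,
    num_kv_heads=4,
    cross_attention_dim=32,
)
# forward expects latents (N, 4, H, W), timesteps (N,), caption features (N, L, 32) with a mask
# (N, L), and precomputed `image_rotary_emb`; with learn_sigma=True the output has 2 * in_channels.
print(model.out_channels)  # 8
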

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dit_transformer_2d.py

class DiTTransformer2DModel(ModelMixin, ConfigMixin):
    r"""
    A 2D Transformer model as introduced in DiT (https://arxiv.org/abs/2212.09748).

    Parameters:
        num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (int, optional, defaults to 72): The number of channels in each head.
        in_channels (int, defaults to 4): The number of channels in the input.
        out_channels (int, optional):
            The number of channels in the output. Specify this parameter if the output channel number differs from
            the input.
        num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use.
        dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks.
        norm_num_groups (int, optional, defaults to 32):
            Number of groups for group normalization within Transformer blocks.
        attention_bias (bool, optional, defaults to True):
            Configure if the Transformer blocks' attention should contain a bias parameter.
        sample_size (int, defaults to 32):
            The width of the latent images. This parameter is fixed during training.
        patch_size (int, defaults to 2):
            Size of the patches the model processes, relevant for architectures working on non-sequential data.
        activation_fn (str, optional, defaults to "gelu-approximate"):
            Activation function to use in feed-forward networks within Transformer blocks.
        num_embeds_ada_norm (int, optional, defaults to 1000):
            Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps
            during inference.
        upcast_attention (bool, optional, defaults to False):
            If true, upcasts the attention mechanism dimensions for potentially improved performance.
        norm_type (str, optional, defaults to "ada_norm_zero"):
            Specifies the type of normalization used, can be 'ada_norm_zero'.
        norm_elementwise_affine (bool, optional, defaults to False):
            If true, enables element-wise affine parameters in the normalization layers.
        norm_eps (float, optional, defaults to 1e-5):
            A small constant added to the denominator in normalization layers to prevent division by zero.
    """
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 72,
        in_channels: int = 4,
        out_channels: Optional[int] = None,
        num_layers: int = 28,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        attention_bias: bool = True,
        sample_size: int = 32,
        patch_size: int = 2,
        activation_fn: str = "gelu-approximate",
        num_embeds_ada_norm: Optional[int] = 1000,
        upcast_attention: bool = False,
        norm_type: str = "ada_norm_zero",
        norm_elementwise_affine: bool = False,
        norm_eps: float = 1e-5,
    ):
        super().__init__()

        # Validate inputs.
        if norm_type != "ada_norm_zero":
            raise NotImplementedError(
                f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
            )
        elif norm_type == "ada_norm_zero" and num_embeds_ada_norm is None:
            raise ValueError(
                f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
            )

        # Set some common variables used across the board.
        self.attention_head_dim = attention_head_dim
        self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
        self.out_channels = in_channels if out_channels is None else out_channels
        self.gradient_checkpointing = False

        # 2. Initialize the position embedding and transformer blocks.
        self.height = self.config.sample_size
        self.width = self.config.sample_size

        self.patch_size = self.config.patch_size
        self.pos_embed = PatchEmbed(
            height=self.config.sample_size,
            width=self.config.sample_size,
            patch_size=self.config.patch_size,
            in_channels=self.config.in_channels,
            embed_dim=self.inner_dim,
        )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    self.inner_dim,
                    self.config.num_attention_heads,
                    self.config.attention_head_dim,
                    dropout=self.config.dropout,
                    activation_fn=self.config.activation_fn,
                    num_embeds_ada_norm=self.config.num_embeds_ada_norm,
                    attention_bias=self.config.attention_bias,
                    upcast_attention=self.config.upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=self.config.norm_elementwise_affine,
                    norm_eps=self.config.norm_eps,
                )
                for _ in range(self.config.num_layers)
            ]
        )
        # 3. Output blocks.
        self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
        self.proj_out_2 = nn.Linear(
            self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
        )

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: Optional[torch.LongTensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        return_dict: bool = True,
    ):
        """
        The [`DiTTransformer2DModel`] forward method.

        Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete,
                `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
                Input `hidden_states`.
            timestep (`torch.LongTensor`, *optional*):
                Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
            class_labels (`torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
                Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
                `AdaLayerNormZero`.
            cross_attention_kwargs (`Dict[str, Any]`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a
                plain tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        # 1. Input
        height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
        hidden_states = self.pos_embed(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    None,
                    None,
                    timestep,
                    cross_attention_kwargs,
                    class_labels,
                    **ckpt_kwargs,
                )
            else:
                hidden_states = block(
                    hidden_states,
                    attention_mask=None,
                    encoder_hidden_states=None,
                    encoder_attention_mask=None,
                    timestep=timestep,
                    cross_attention_kwargs=cross_attention_kwargs,
                    class_labels=class_labels,
                )

        # 3. Output
        conditioning = self.transformer_blocks[0].norm1.emb(timestep, class_labels, hidden_dtype=hidden_states.dtype)
        shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
        hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
        hidden_states = self.proj_out_2(hidden_states)

        # unpatchify
        height = width = int(hidden_states.shape[1] ** 0.5)
        hidden_states = hidden_states.reshape(
            shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
        )

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
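
# The class-conditional model above can be smoke-tested end to end; the tiny config below is an
# illustrative assumption (released DiT checkpoints use the defaults documented above).
import torch

from diffusers import DiTTransformer2DModel

model = DiTTransformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,   # inner_dim = 16
    in_channels=4,
    num_layers=2,
    sample_size=8,
)
hidden_states = torch.randn(1, 4, 8, 8)   # (batch, in_channels, height, width) latents
timestep = torch.tensor([999])            # denoising step index
class_labels = torch.tensor([0])          # class id, must be < num_embeds_ada_norm
sample = model(hidden_states, timestep=timestep, class_labels=class_labels).sample
print(sample.shape)                       # torch.Size([1, 4, 8, 8])
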

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py

class DualTransformer2DModel(nn.Module):
    """
    Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        sample_size (`int`, *optional*):
            Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time
            as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during
            training. Note that this is fixed at training time as it is used to learn a number of embeddings that
            are added to the hidden states. During inference, you can denoise for up to but not more steps than
            `num_embeds_ada_norm`.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
    """
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0] + condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
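A hedged sketch of the pipeline-side concatenation implied by `condition_lengths` above: two condition streams are concatenated along the sequence dimension before entering `forward()`, which later slices the spans back out. The tensors and the feature size of 768 are dummy illustration values.

import torch

text_cond = torch.randn(2, 77, 768)    # stand-in for condition 0 (77 tokens)
image_cond = torch.randn(2, 257, 768)  # stand-in for condition 1 (257 tokens)

# Both conditions must share batch size and feature dimension; the sequence
# dimension carries condition_lengths[0] + condition_lengths[1] tokens.
encoder_hidden_states = torch.cat([text_cond, image_cond], dim=1)
print(encoder_hidden_states.shape)  # torch.Size([2, 334, 768])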
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """
        Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor`
                of shape `(batch size, channel, height, width)` if continuous):
                Input hidden_states.
            encoder_hidden_states (`torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep (`torch.long`, *optional*):
                Optional timestep to be applied as an embedding in `AdaLayerNorm`. Used to indicate the denoising step.
            attention_mask (`torch.Tensor`, *optional*):
                Optional attention mask to be applied in Attention.
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformers.transformer_2d.Transformer2DModelOutput`] instead of
                a plain tuple.
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
        Returns:
            [`~models.transformers.transformer_2d.Transformer2DModelOutput`] or `tuple`:
                [`~models.transformers.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a
                `tuple`. When returning a tuple, the first element is the sample tensor.
        """
        input_states = hidden_states
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
1,112
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/dual_transformer_2d.py
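A toy numerical sketch (not the library API) of the residual mixing done in `forward()` above: each transformer's delta from the input is blended with `mix_ratio`, then the input is added back. All tensors here are random stand-ins.

import torch

input_states = torch.randn(1, 4, 8, 8)
# Stand-ins for the two transformers' outputs (each close to the input here).
transformer_outputs = [input_states + 0.1 * torch.randn_like(input_states) for _ in range(2)]

# forward() stores each output's delta from the input ...
deltas = [out - input_states for out in transformer_outputs]

# ... blends the deltas with `mix_ratio`, and adds the input back on top.
mix_ratio = 0.5
output_states = deltas[0] * mix_ratio + deltas[1] * (1 - mix_ratio) + input_states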
class TransformerTemporalModelOutput(BaseOutput):
    """
    The output of [`TransformerTemporalModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.Tensor
1,113
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """
    A Transformer model for video-like data.
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            The number of channels in the input and output (specify if the input is **continuous**).
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
        attention_bias (`bool`, *optional*):
            Configure if the `TransformerBlock` attention should contain a bias parameter.
        sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
            This is fixed during training since it is used to learn a number of position embeddings.
        activation_fn (`str`, *optional*, defaults to `"geglu"`):
            Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for
            supported activation functions.
        norm_elementwise_affine (`bool`, *optional*):
            Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization.
        double_self_attention (`bool`, *optional*):
            Configure if each `TransformerBlock` should contain two self-attention layers.
        positional_embeddings (`str`, *optional*):
            The type of positional embeddings to apply to the sequence input before passing it to the transformer
            blocks.
        num_positional_embeddings (`int`, *optional*):
            The maximum length of the sequence over which to apply positional embeddings.
    """
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
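A hedged usage sketch: constructing a small `TransformerTemporalModel` directly, with the documented defaults overridden to toy sizes. The chosen values are illustrative only and do not correspond to any shipped checkpoint.

from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel

model = TransformerTemporalModel(
    num_attention_heads=2,
    attention_head_dim=8,  # inner_dim = 2 * 8 = 16
    in_channels=16,        # must be divisible by norm_num_groups for GroupNorm
    num_layers=1,
    norm_num_groups=8,
)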
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformer blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                    positional_embeddings=positional_embeddings,
                    num_positional_embeddings=num_positional_embeddings,
                )
                for _ in range(num_layers)
            ]
        )
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.LongTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        num_frames: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> TransformerTemporalModelOutput:
        """
        The [`TransformerTemporalModel`] forward method.
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
        Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor`
                of shape `(batch size, channel, height, width)` if continuous):
                Input hidden_states.
            encoder_hidden_states (`torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep (`torch.LongTensor`, *optional*):
                Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
            class_labels (`torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
                Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
                `AdaLayerNormZero`.
            num_frames (`int`, *optional*, defaults to 1):
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
                The number of frames to be processed per batch. This is used to reshape the hidden states.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a
                [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain tuple.
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
        Returns:
            [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
                If `return_dict` is True, an
                [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a
                `tuple` where the first element is the sample tensor.
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)
1,114
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_temporal.py
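A standalone sketch of the reshaping in the chunk above: a `(batch * frames, C, H, W)` video batch is rearranged so that every spatial location becomes one sequence of `num_frames` tokens, which is what lets the transformer blocks attend across time. All sizes are dummy illustration values, and the GroupNorm step from `forward()` is skipped for brevity.

import torch

batch_size, num_frames, channel, height, width = 2, 8, 4, 16, 16
hidden_states = torch.randn(batch_size * num_frames, channel, height, width)

# (B*F, C, H, W) -> (B, F, C, H, W) -> (B, H, W, F, C) -> (B*H*W, F, C)
hidden_states = hidden_states.reshape(batch_size, num_frames, channel, height, width)
hidden_states = hidden_states.permute(0, 3, 4, 1, 2)
hidden_states = hidden_states.reshape(batch_size * height * width, num_frames, channel)
print(hidden_states.shape)  # torch.Size([512, 8, 4])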