Dataset columns: text (string, lengths 1–1.02k), class_index (int64, 0–1.38k), source (string, 431 distinct values).
class PixArtTransformer2DModel(ModelMixin, ConfigMixin): r""" A 2D Transformer model as introduced in PixArt family of models (https://arxiv.org/abs/2310.00426, https://arxiv.org/abs/2403.04692).
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
Parameters: num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (int, optional, defaults to 72): The number of channels in each head. in_channels (int, defaults to 4): The number of channels in the input. out_channels (int, optional): The number of channels in the output. Specify this parameter if the output channel number differs from the input. num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use. dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks. norm_num_groups (int, optional, defaults to 32): Number of groups for group normalization within Transformer blocks. cross_attention_dim (int, optional): The dimensionality for cross-attention layers, typically matching the encoder's hidden dimension.
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
attention_bias (bool, optional, defaults to True): Configure if the Transformer blocks' attention should contain a bias parameter. sample_size (int, defaults to 128): The width of the latent images. This parameter is fixed during training. patch_size (int, defaults to 2): Size of the patches the model processes, relevant for architectures working on non-sequential data. activation_fn (str, optional, defaults to "gelu-approximate"): Activation function to use in feed-forward networks within Transformer blocks. num_embeds_ada_norm (int, optional, defaults to 1000): Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during inference. upcast_attention (bool, optional, defaults to False): If true, upcasts the attention mechanism dimensions for potentially improved performance.
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
norm_type (str, optional, defaults to "ada_norm_single"): Specifies the type of normalization used; only 'ada_norm_single' is supported (see the validation in __init__ below). norm_elementwise_affine (bool, optional, defaults to False): If true, enables element-wise affine parameters in the normalization layers. norm_eps (float, optional, defaults to 1e-6): A small constant added to the denominator in normalization layers to prevent division by zero. interpolation_scale (int, optional): Scale factor to use when interpolating the position embeddings. use_additional_conditions (bool, optional): Whether additional conditions are used as inputs. attention_type (str, optional, defaults to "default"): Kind of attention mechanism to be used. caption_channels (int, optional, defaults to None): Number of channels to use for projecting the caption embeddings. use_linear_projection (bool, optional, defaults to False):
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
Deprecated argument. Will be removed in a future version. num_vector_embeds (bool, optional, defaults to False): Deprecated argument. Will be removed in a future version. """
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
_supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "PatchEmbed"]
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
@register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 72, in_channels: int = 4, out_channels: Optional[int] = 8, num_layers: int = 28, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = 1152, attention_bias: bool = True, sample_size: int = 128, patch_size: int = 2, activation_fn: str = "gelu-approximate", num_embeds_ada_norm: Optional[int] = 1000, upcast_attention: bool = False, norm_type: str = "ada_norm_single", norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, interpolation_scale: Optional[int] = None, use_additional_conditions: Optional[bool] = None, caption_channels: Optional[int] = None, attention_type: Optional[str] = "default", ): super().__init__()
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
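For orientation, a minimal usage sketch of this constructor (assuming a recent diffusers release that exports PixArtTransformer2DModel; the tiny sizes below are illustrative, not a pretrained configuration):

import torch
from diffusers import PixArtTransformer2DModel  # assumed import path; adjust for your diffusers version

# Toy configuration for illustration only; the pretrained PixArt checkpoints use the defaults
# documented above (28 layers, 16 heads x 72 dims, sample_size=128, cross_attention_dim=1152).
model = PixArtTransformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,
    in_channels=4,
    num_layers=2,
    cross_attention_dim=16,
    sample_size=32,
    caption_channels=16,
)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model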
# Validate inputs. if norm_type != "ada_norm_single": raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type == "ada_norm_single" and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." )
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# Set some common variables used across the board. self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.out_channels = in_channels if out_channels is None else out_channels if use_additional_conditions is None: if sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions self.gradient_checkpointing = False # 2. Initialize the position embedding and transformer blocks. self.height = self.config.sample_size self.width = self.config.sample_size
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
interpolation_scale = ( self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) ) self.pos_embed = PatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale, )
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
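A quick check of the fallback arithmetic for interpolation_scale shown above (pure Python, no dependencies):

def default_interpolation_scale(sample_size: int) -> int:
    # mirrors max(sample_size // 64, 1) from the constructor above
    return max(sample_size // 64, 1)

for size in (32, 64, 128, 256):
    print(size, default_interpolation_scale(size))
# 32 -> 1, 64 -> 1, 128 -> 2, 256 -> 4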
self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] )
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# 3. Output blocks. self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5) self.proj_out = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) self.adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=self.use_additional_conditions ) self.caption_projection = None if self.config.caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection( in_features=self.config.caption_channels, hidden_size=self.inner_dim ) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
@property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys())
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. Safe to just use `AttnProcessor()` as PixArt doesn't have any exotic attention processors in default model. """ self.set_attn_processor(AttnProcessor()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ The [`PixArtTransformer2DModel`] forward method.
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep (`torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. added_cond_kwargs: (`Dict[str, Any]`, *optional*): Additional conditions to be used as inputs. cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). attention_mask ( `torch.Tensor`, *optional*): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. encoder_attention_mask ( `torch.Tensor`, *optional*): Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
* Mask `(batch, sequence_length)` True = keep, False = discard. * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format above. This bias will be added to the cross-attention scores. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError("`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.")
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores:
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1)
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
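The comments above describe converting a {0, 1} keep/discard mask into an additive bias with a singleton query dimension. A standalone torch sketch of that conversion:

import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])   # (batch, key_tokens): 1 = keep, 0 = discard
bias = (1 - attention_mask.float()) * -10000.0      # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                            # (batch, 1, key_tokens), broadcasts over query tokens
print(bias.shape)                                   # torch.Size([1, 1, 5])
print(bias)                                         # tensor([[[0., 0., 0., -10000., -10000.]]])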
# convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input batch_size = hidden_states.shape[0] height, width = ( hidden_states.shape[-2] // self.config.patch_size, hidden_states.shape[-1] // self.config.patch_size, ) hidden_states = self.pos_embed(hidden_states) timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype )
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) # 2. Blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, None, **ckpt_kwargs, ) else: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=None, )
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
# 3. Output shift, scale = ( self.scale_shift_table[None] + embedded_timestep[:, None].to(self.scale_shift_table.device) ).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify hidden_states = hidden_states.reshape( shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
1,144
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py
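The unpatchify step above folds per-token patch predictions back into an image. A toy shape check of the same reshape/einsum, assuming patch_size=2 and out_channels=4:

import torch

batch, height, width, p, c = 1, 3, 3, 2, 4              # height/width counted in patches
tokens = torch.randn(batch, height * width, p * p * c)  # proj_out output: one patch per token
x = tokens.reshape(-1, height, width, p, p, c)
x = torch.einsum("nhwpqc->nchpwq", x)
image = x.reshape(-1, c, height * p, width * p)
print(image.shape)                                       # torch.Size([1, 4, 6, 6])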
class HunyuanVideoAttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "HunyuanVideoAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: if attn.add_q_proj is None and encoder_hidden_states is not None: hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) # 1. QKV projections query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states)
1,145
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) # 2. QK normalization if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # 3. Rotational positional embeddings applied to latent stream if image_rotary_emb is not None: from ..embeddings import apply_rotary_emb
1,145
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
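The unflatten/transpose pattern above turns flat projections into per-head tensors for scaled_dot_product_attention. A minimal shape check:

import torch

batch, seq_len, heads, head_dim = 2, 7, 4, 8
q = torch.randn(batch, seq_len, heads * head_dim)
q = q.unflatten(2, (heads, -1)).transpose(1, 2)   # (batch, heads, seq_len, head_dim)
print(q.shape)                                    # torch.Size([2, 4, 7, 8])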
if attn.add_q_proj is None and encoder_hidden_states is not None: query = torch.cat( [ apply_rotary_emb(query[:, :, : -encoder_hidden_states.shape[1]], image_rotary_emb), query[:, :, -encoder_hidden_states.shape[1] :], ], dim=2, ) key = torch.cat( [ apply_rotary_emb(key[:, :, : -encoder_hidden_states.shape[1]], image_rotary_emb), key[:, :, -encoder_hidden_states.shape[1] :], ], dim=2, ) else: query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb)
1,145
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 4. Encoder condition QKV projection and normalization if attn.add_q_proj is not None and encoder_hidden_states is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) encoder_query = encoder_query.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_key = encoder_key.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_value = encoder_value.unflatten(2, (attn.heads, -1)).transpose(1, 2) if attn.norm_added_q is not None: encoder_query = attn.norm_added_q(encoder_query) if attn.norm_added_k is not None: encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([query, encoder_query], dim=2) key = torch.cat([key, encoder_key], dim=2) value = torch.cat([value, encoder_value], dim=2)
1,145
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 5. Attention hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).flatten(2, 3) hidden_states = hidden_states.to(query.dtype) # 6. Output projection if encoder_hidden_states is not None: hidden_states, encoder_hidden_states = ( hidden_states[:, : -encoder_hidden_states.shape[1]], hidden_states[:, -encoder_hidden_states.shape[1] :], ) if getattr(attn, "to_out", None) is not None: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if getattr(attn, "to_add_out", None) is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states
1,145
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
class HunyuanVideoPatchEmbed(nn.Module): def __init__( self, patch_size: Union[int, Tuple[int, int, int]] = 16, in_chans: int = 3, embed_dim: int = 768, ) -> None: super().__init__() patch_size = (patch_size, patch_size, patch_size) if isinstance(patch_size, int) else patch_size self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.proj(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) # BCFHW -> BNC return hidden_states
1,146
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
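The patch embedder above is a strided Conv3d followed by a flatten/transpose. A standalone shape sketch with an illustrative (1, 2, 2) patch size:

import torch
import torch.nn as nn

patch_size = (1, 2, 2)                                  # (temporal, height, width); illustrative values
proj = nn.Conv3d(3, 32, kernel_size=patch_size, stride=patch_size)
video = torch.randn(1, 3, 4, 8, 8)                      # (B, C, F, H, W)
tokens = proj(video).flatten(2).transpose(1, 2)         # BCFHW -> BNC
print(tokens.shape)                                     # torch.Size([1, 64, 32]); 64 = 4 * 4 * 4 patches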
class HunyuanVideoAdaNorm(nn.Module): def __init__(self, in_features: int, out_features: Optional[int] = None) -> None: super().__init__() out_features = out_features or 2 * in_features self.linear = nn.Linear(in_features, out_features) self.nonlinearity = nn.SiLU() def forward( self, temb: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: temb = self.linear(self.nonlinearity(temb)) gate_msa, gate_mlp = temb.chunk(2, dim=1) gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1) return gate_msa, gate_mlp
1,147
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
class HunyuanVideoIndividualTokenRefinerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_width_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, heads=num_attention_heads, dim_head=attention_head_dim, bias=attention_bias, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate) self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size)
1,148
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=None, attention_mask=attention_mask, ) gate_msa, gate_mlp = self.norm_out(temb) hidden_states = hidden_states + attn_output * gate_msa ff_output = self.ff(self.norm2(hidden_states)) hidden_states = hidden_states + ff_output * gate_mlp return hidden_states
1,148
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
class HunyuanVideoIndividualTokenRefiner(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_width_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() self.refiner_blocks = nn.ModuleList( [ HunyuanVideoIndividualTokenRefinerBlock( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, mlp_width_ratio=mlp_width_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, ) for _ in range(num_layers) ] )
1,149
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: self_attn_mask = None if attention_mask is not None: batch_size = attention_mask.shape[0] seq_len = attention_mask.shape[1] attention_mask = attention_mask.to(hidden_states.device).bool() self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool() self_attn_mask[:, :, :, 0] = True for block in self.refiner_blocks: hidden_states = block(hidden_states, temb, self_attn_mask) return hidden_states
1,149
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
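The refiner above builds a square self-attention mask by AND-ing a per-token keep mask with its transpose, then forces the first key column to True so fully padded rows still have a valid key. A toy reproduction:

import torch

attention_mask = torch.tensor([[1, 1, 0]]).bool()       # (batch, seq_len); last token is padding
m1 = attention_mask.view(1, 1, 1, 3).repeat(1, 1, 3, 1)  # broadcast across query positions
m2 = m1.transpose(2, 3)
self_attn_mask = m1 & m2
self_attn_mask[:, :, :, 0] = True                        # keep at least one valid key per query
print(self_attn_mask[0, 0].int())
# tensor([[1, 1, 0],
#         [1, 1, 0],
#         [1, 0, 0]])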
class HunyuanVideoTokenRefiner(nn.Module): def __init__( self, in_channels: int, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.time_text_embed = CombinedTimestepTextProjEmbeddings( embedding_dim=hidden_size, pooled_projection_dim=in_channels ) self.proj_in = nn.Linear(in_channels, hidden_size, bias=True) self.token_refiner = HunyuanVideoIndividualTokenRefiner( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, num_layers=num_layers, mlp_width_ratio=mlp_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, )
1,150
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, attention_mask: Optional[torch.LongTensor] = None, ) -> torch.Tensor: if attention_mask is None: pooled_projections = hidden_states.mean(dim=1) else: original_dtype = hidden_states.dtype mask_float = attention_mask.float().unsqueeze(-1) pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1) pooled_projections = pooled_projections.to(original_dtype) temb = self.time_text_embed(timestep, pooled_projections) hidden_states = self.proj_in(hidden_states) hidden_states = self.token_refiner(hidden_states, temb, attention_mask) return hidden_states
1,150
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
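The pooled projection above is a masked mean over the valid text tokens. A small standalone check of that reduction:

import torch

hidden_states = torch.arange(6.0).reshape(1, 3, 2)   # (B, seq, dim)
attention_mask = torch.tensor([[1, 1, 0]])            # last token is padding
mask_float = attention_mask.float().unsqueeze(-1)     # (B, seq, 1)
pooled = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1)
print(pooled)                                         # tensor([[1., 2.]]): the mean of the two valid tokens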
class HunyuanVideoRotaryPosEmbed(nn.Module): def __init__(self, patch_size: int, patch_size_t: int, rope_dim: List[int], theta: float = 256.0) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.rope_dim = rope_dim self.theta = theta def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape rope_sizes = [num_frames // self.patch_size_t, height // self.patch_size, width // self.patch_size]
1,151
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
axes_grids = [] for i in range(3): # Note: The following line diverges from original behaviour. We create the grid on the device, whereas # original implementation creates it on CPU and then moves it to device. This results in numerical # differences in layerwise debugging outputs, but visually it is the same. grid = torch.arange(0, rope_sizes[i], device=hidden_states.device, dtype=torch.float32) axes_grids.append(grid) grid = torch.meshgrid(*axes_grids, indexing="ij") # [W, H, T] grid = torch.stack(grid, dim=0) # [3, W, H, T] freqs = [] for i in range(3): freq = get_1d_rotary_pos_embed(self.rope_dim[i], grid[i].reshape(-1), self.theta, use_real=True) freqs.append(freq) freqs_cos = torch.cat([f[0] for f in freqs], dim=1) # (W * H * T, D / 2) freqs_sin = torch.cat([f[1] for f in freqs], dim=1) # (W * H * T, D / 2) return freqs_cos, freqs_sin
1,151
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
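get_1d_rotary_pos_embed is the diffusers helper used above; as a hedged illustration of the idea (not the exact diffusers math), a minimal 1D rotary cos/sin table for a single axis looks like this:

import torch

def rotary_cos_sin(dim: int, positions: torch.Tensor, theta: float = 256.0):
    # minimal sketch: one frequency per pair of channels, evaluated at each position
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))  # (dim / 2,)
    angles = torch.outer(positions.float(), freqs)                    # (seq, dim / 2)
    return angles.cos(), angles.sin()

grid = torch.arange(4)                                                # positions along one axis
cos, sin = rotary_cos_sin(16, grid)
print(cos.shape, sin.shape)                                           # torch.Size([4, 8]) torch.Size([4, 8])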
class HunyuanVideoSingleTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim mlp_dim = int(hidden_size * mlp_ratio) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, pre_only=True, ) self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm") self.proj_mlp = nn.Linear(hidden_size, mlp_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size)
1,152
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.shape[1] hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) residual = hidden_states # 1. Input normalization norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) norm_hidden_states, norm_encoder_hidden_states = ( norm_hidden_states[:, :-text_seq_length, :], norm_hidden_states[:, -text_seq_length:, :], )
1,152
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 2. Attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) attn_output = torch.cat([attn_output, context_attn_output], dim=1) # 3. Modulation and residual connection hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) hidden_states = gate.unsqueeze(1) * self.proj_out(hidden_states) hidden_states = hidden_states + residual hidden_states, encoder_hidden_states = ( hidden_states[:, :-text_seq_length, :], hidden_states[:, -text_seq_length:, :], ) return hidden_states, encoder_hidden_states
1,152
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
class HunyuanVideoTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, added_kv_proj_dim=hidden_size, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, context_pre_only=False, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, )
1,153
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # 1. Input normalization norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb )
1,153
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 2. Joint attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=freqs_cis, ) # 3. Modulation and residual connection hidden_states = hidden_states + attn_output * gate_msa.unsqueeze(1) encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa.unsqueeze(1) norm_hidden_states = self.norm2(hidden_states) norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] # 4. Feed-forward ff_output = self.ff(norm_hidden_states) context_ff_output = self.ff_context(norm_encoder_hidden_states)
1,153
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return hidden_states, encoder_hidden_states
1,153
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
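The pattern above (normalize, then x * (1 + scale) + shift, then a gated residual) is the AdaLN-Zero recipe used throughout these blocks. A minimal standalone sketch of one such step:

import torch
import torch.nn as nn

dim = 8
norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
hidden_states = torch.randn(2, 5, dim)
shift, scale, gate = torch.randn(3, 2, dim).unbind(0)     # per-sample modulation from the timestep embedding

modulated = norm(hidden_states) * (1 + scale[:, None]) + shift[:, None]
ff_output = torch.tanh(modulated)                         # stand-in for the feed-forward sub-block
hidden_states = hidden_states + gate[:, None] * ff_output # gated residual connection
print(hidden_states.shape)                                # torch.Size([2, 5, 8])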
class HunyuanVideoTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): r""" A Transformer model for video-like data used in [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo).
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
Args: in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, defaults to `16`): The number of channels in the output. num_attention_heads (`int`, defaults to `24`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `128`): The number of channels in each head. num_layers (`int`, defaults to `20`): The number of layers of dual-stream blocks to use. num_single_layers (`int`, defaults to `40`): The number of layers of single-stream blocks to use. num_refiner_layers (`int`, defaults to `2`): The number of layers of refiner blocks to use. mlp_ratio (`float`, defaults to `4.0`): The ratio of the hidden layer size to the input size in the feedforward network. patch_size (`int`, defaults to `2`):
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
The size of the spatial patches to use in the patch embedding layer. patch_size_t (`int`, defaults to `1`): The size of the temporal patches to use in the patch embedding layer. qk_norm (`str`, defaults to `rms_norm`): The normalization to use for the query and key projections in the attention layers. guidance_embeds (`bool`, defaults to `True`): Whether to use guidance embeddings in the model. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. pooled_projection_dim (`int`, defaults to `768`): The dimension of the pooled projection of the text embeddings. rope_theta (`float`, defaults to `256.0`): The value of theta to use in the RoPE layer. rope_axes_dim (`Tuple[int]`, defaults to `(16, 56, 56)`): The dimensions of the axes to use in the RoPE layer. """
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
_supports_gradient_checkpointing = True _no_split_modules = [ "HunyuanVideoTransformerBlock", "HunyuanVideoSingleTransformerBlock", "HunyuanVideoPatchEmbed", "HunyuanVideoTokenRefiner", ] @register_to_config def __init__( self, in_channels: int = 16, out_channels: int = 16, num_attention_heads: int = 24, attention_head_dim: int = 128, num_layers: int = 20, num_single_layers: int = 40, num_refiner_layers: int = 2, mlp_ratio: float = 4.0, patch_size: int = 2, patch_size_t: int = 1, qk_norm: str = "rms_norm", guidance_embeds: bool = True, text_embed_dim: int = 4096, pooled_projection_dim: int = 768, rope_theta: float = 256.0, rope_axes_dim: Tuple[int] = (16, 56, 56), ) -> None: super().__init__() inner_dim = num_attention_heads * attention_head_dim out_channels = out_channels or in_channels
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 1. Latent and condition embedders self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim) self.context_embedder = HunyuanVideoTokenRefiner( text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers ) self.time_text_embed = CombinedTimestepGuidanceTextProjEmbeddings(inner_dim, pooled_projection_dim) # 2. RoPE self.rope = HunyuanVideoRotaryPosEmbed(patch_size, patch_size_t, rope_axes_dim, rope_theta) # 3. Dual stream transformer blocks self.transformer_blocks = nn.ModuleList( [ HunyuanVideoTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_layers) ] )
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 4. Single stream transformer blocks self.single_transformer_blocks = nn.ModuleList( [ HunyuanVideoSingleTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_single_layers) ] ) # 5. Output projection self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels) self.gradient_checkpointing = False
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
@property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys())
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, pooled_projections: torch.Tensor, guidance: torch.Tensor = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) batch_size, num_channels, num_frames, height, width = hidden_states.shape p, p_t = self.config.patch_size, self.config.patch_size_t post_patch_num_frames = num_frames // p_t post_patch_height = height // p post_patch_width = width // p # 1. RoPE image_rotary_emb = self.rope(hidden_states)
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# 2. Conditional embeddings temb = self.time_text_embed(timestep, guidance, pooled_projections) hidden_states = self.x_embedder(hidden_states) encoder_hidden_states = self.context_embedder(encoder_hidden_states, timestep, encoder_attention_mask) # 3. Attention mask preparation latent_sequence_length = hidden_states.shape[1] condition_sequence_length = encoder_hidden_states.shape[1] sequence_length = latent_sequence_length + condition_sequence_length attention_mask = torch.zeros( batch_size, sequence_length, device=hidden_states.device, dtype=torch.bool ) # [B, N] effective_condition_sequence_length = encoder_attention_mask.sum(dim=1, dtype=torch.int) # [B,] effective_sequence_length = latent_sequence_length + effective_condition_sequence_length
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
for i in range(batch_size): attention_mask[i, : effective_sequence_length[i]] = True # [B, 1, 1, N], for broadcasting across attention heads attention_mask = attention_mask.unsqueeze(1).unsqueeze(1) # 4. Transformer blocks if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
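The attention mask built above marks the first latent_sequence_length + effective text tokens of each sample as valid and adds singleton dimensions for head broadcasting. A toy version:

import torch

batch_size, latent_len, text_len = 2, 4, 3
encoder_attention_mask = torch.tensor([[1, 1, 1], [1, 0, 0]])   # per-sample text padding
sequence_length = latent_len + text_len

attention_mask = torch.zeros(batch_size, sequence_length, dtype=torch.bool)
effective_lengths = latent_len + encoder_attention_mask.sum(dim=1)
for i in range(batch_size):
    attention_mask[i, : effective_lengths[i]] = True
attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)       # (B, 1, 1, N)
print(attention_mask.shape, attention_mask[1, 0, 0].int())
# torch.Size([2, 1, 1, 7]) tensor([1, 1, 1, 1, 1, 0, 0])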
for block in self.transformer_blocks: hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, **ckpt_kwargs, ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, **ckpt_kwargs, )
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
else: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) # 5. Output projection hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, -1, p_t, p, p ) hidden_states = hidden_states.permute(0, 4, 1, 5, 2, 6, 3, 7) hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
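The final reshape/permute/flatten above undoes the 3D patchify. A toy shape check with p_t=1, p=2, out_channels=4:

import torch

batch, c, p_t, p = 1, 4, 1, 2
frames, height, width = 3, 2, 2                                         # post-patch sizes
tokens = torch.randn(batch, frames * height * width, p_t * p * p * c)   # proj_out output
x = tokens.reshape(batch, frames, height, width, -1, p_t, p, p)
x = x.permute(0, 4, 1, 5, 2, 6, 3, 7)                                   # (B, C, F, p_t, H, p, W, p)
video = x.flatten(6, 7).flatten(4, 5).flatten(2, 3)
print(video.shape)                                                      # torch.Size([1, 4, 3, 4, 4])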
if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states)
1,154
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
class CogVideoXBlock(nn.Module): r""" Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model.
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. time_embed_dim (`int`): The number of channels in timestep embedding. dropout (`float`, defaults to `0.0`): The dropout probability to use. activation_fn (`str`, defaults to `"gelu-approximate"`): Activation function to be used in feed-forward. attention_bias (`bool`, defaults to `False`): Whether or not to use bias in attention projection layers. qk_norm (`bool`, defaults to `True`): Whether or not to use normalization after query and key projections in Attention. norm_elementwise_affine (`bool`, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization.
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
norm_eps (`float`, defaults to `1e-5`): Epsilon value for normalization layers. final_dropout (`bool`, defaults to `True`): Whether to apply a final dropout after the last feed-forward layer. ff_inner_dim (`int`, *optional*, defaults to `None`): Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used. ff_bias (`bool`, defaults to `True`): Whether or not to use bias in Feed-forward layer. attention_out_bias (`bool`, defaults to `True`): Whether or not to use bias in Attention output projection layer. """
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, time_embed_dim: int, dropout: float = 0.0, activation_fn: str = "gelu-approximate", attention_bias: bool = False, qk_norm: bool = True, norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, final_dropout: bool = True, ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, ): super().__init__() # 1. Self Attention self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
self.attn1 = Attention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="layer_norm" if qk_norm else None, eps=1e-6, bias=attention_bias, out_bias=attention_out_bias, processor=CogVideoXAttnProcessor2_0(), ) # 2. Feed Forward self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias, )
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) attention_kwargs = attention_kwargs or {} # norm & modulate norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( hidden_states, encoder_hidden_states, temb ) # attention attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **attention_kwargs, )
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states # norm & modulate norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( hidden_states, encoder_hidden_states, temb ) # feed-forward norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:] encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length] return hidden_states, encoder_hidden_states
1,155
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): """ A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo).
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
Parameters: num_attention_heads (`int`, defaults to `30`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `64`): The number of channels in each head. in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `16`): The number of channels in the output. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. time_embed_dim (`int`, defaults to `512`): Output dimension of timestep embeddings. ofs_embed_dim (`int`, *optional*, defaults to `None`): Output dimension of "ofs" embeddings used in CogVideoX1.5-5B I2V. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. num_layers (`int`, defaults to `30`):
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
The number of layers of Transformer blocks to use. dropout (`float`, defaults to `0.0`): The dropout probability to use. attention_bias (`bool`, defaults to `True`): Whether to use bias in the attention projection layers. sample_width (`int`, defaults to `90`): The width of the input latents. sample_height (`int`, defaults to `60`): The height of the input latents. sample_frames (`int`, defaults to `49`): The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49 instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings, but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1). patch_size (`int`, defaults to `2`):
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
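A quick check of the sample_frames relation described above:

def sample_frames_for_latent_frames(k: int, temporal_compression_ratio: int = 4) -> int:
    # ((K - 1) * temporal_compression_ratio + 1), as given in the docstring above
    return (k - 1) * temporal_compression_ratio + 1

print(sample_frames_for_latent_frames(13))   # 49, the default sample_frames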
The size of the patches to use in the patch embedding layer. temporal_compression_ratio (`int`, defaults to `4`): The compression ratio across the temporal dimension. See documentation for `sample_frames`. max_text_seq_length (`int`, defaults to `226`): The maximum sequence length of the input text embeddings. activation_fn (`str`, defaults to `"gelu-approximate"`): Activation function to use in feed-forward. timestep_activation_fn (`str`, defaults to `"silu"`): Activation function to use when generating the timestep embeddings. norm_elementwise_affine (`bool`, defaults to `True`): Whether to use elementwise affine in normalization layers. norm_eps (`float`, defaults to `1e-5`): The epsilon value to use in normalization layers. spatial_interpolation_scale (`float`, defaults to `1.875`):
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
Scaling factor to apply in 3D positional embeddings across spatial dimensions. temporal_interpolation_scale (`float`, defaults to `1.0`): Scaling factor to apply in 3D positional embeddings across temporal dimensions. """
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
_supports_gradient_checkpointing = True _no_split_modules = ["CogVideoXBlock", "CogVideoXPatchEmbed"]
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
@register_to_config def __init__( self, num_attention_heads: int = 30, attention_head_dim: int = 64, in_channels: int = 16, out_channels: Optional[int] = 16, flip_sin_to_cos: bool = True, freq_shift: int = 0, time_embed_dim: int = 512, ofs_embed_dim: Optional[int] = None, text_embed_dim: int = 4096, num_layers: int = 30, dropout: float = 0.0, attention_bias: bool = True, sample_width: int = 90, sample_height: int = 60, sample_frames: int = 49, patch_size: int = 2, patch_size_t: Optional[int] = None, temporal_compression_ratio: int = 4, max_text_seq_length: int = 226, activation_fn: str = "gelu-approximate", timestep_activation_fn: str = "silu", norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, spatial_interpolation_scale: float = 1.875, temporal_interpolation_scale: float = 1.0,
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
use_rotary_positional_embeddings: bool = False, use_learned_positional_embeddings: bool = False, patch_bias: bool = True, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
if not use_rotary_positional_embeddings and use_learned_positional_embeddings: raise ValueError( "There are no CogVideoX checkpoints available with rotary embeddings disabled and learned positional " "embeddings enabled. If you're using a custom model and/or believe this should be supported, please open an " "issue at https://github.com/huggingface/diffusers/issues." )
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
# 1. Patch embedding self.patch_embed = CogVideoXPatchEmbed( patch_size=patch_size, patch_size_t=patch_size_t, in_channels=in_channels, embed_dim=inner_dim, text_embed_dim=text_embed_dim, bias=patch_bias, sample_width=sample_width, sample_height=sample_height, sample_frames=sample_frames, temporal_compression_ratio=temporal_compression_ratio, max_text_seq_length=max_text_seq_length, spatial_interpolation_scale=spatial_interpolation_scale, temporal_interpolation_scale=temporal_interpolation_scale, use_positional_embeddings=not use_rotary_positional_embeddings, use_learned_positional_embeddings=use_learned_positional_embeddings, ) self.embedding_dropout = nn.Dropout(dropout) # 2. Time embeddings and ofs embedding (only used by CogVideoX 1.5-5B I2V)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
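As a rough sketch of the sequence the patch embedding produces (derived from the reshaping logic later in this file, so treat it as an inferred description rather than documented behaviour): the text tokens and the patchified video latents are concatenated into a single sequence.

# Sequence length for the CogVideoX 1.0 layout (patch_size_t is None), using the documented defaults.
max_text_seq_length, latent_frames = 226, 13
sample_height, sample_width, patch_size = 60, 90, 2
video_tokens = latent_frames * (sample_height // patch_size) * (sample_width // patch_size)
print(max_text_seq_length + video_tokens)  # 226 + 13 * 30 * 45 = 17776 tokens, each of width inner_dim = 30 * 64 = 1920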
self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) self.ofs_proj = None self.ofs_embedding = None if ofs_embed_dim: self.ofs_proj = Timesteps(ofs_embed_dim, flip_sin_to_cos, freq_shift) self.ofs_embedding = TimestepEmbedding( ofs_embed_dim, ofs_embed_dim, timestep_activation_fn ) # same as time embeddings, for ofs
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
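A minimal sketch of the timestep-embedding path above, using the `Timesteps`/`TimestepEmbedding` helpers this file already relies on; the shapes are inferred from the default configuration and the two example timesteps are arbitrary.

import torch
from diffusers.models.embeddings import TimestepEmbedding, Timesteps

inner_dim, time_embed_dim = 1920, 512
time_proj = Timesteps(inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, act_fn="silu")

t = torch.tensor([999, 500])   # one diffusion timestep per batch element
t_emb = time_proj(t)           # sinusoidal features, shape [2, 1920], always float32
emb = time_embedding(t_emb)    # learned projection, shape [2, 512]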
# 3. Define spatio-temporal transformers blocks self.transformer_blocks = nn.ModuleList( [ CogVideoXBlock( dim=inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, time_embed_dim=time_embed_dim, dropout=dropout, activation_fn=activation_fn, attention_bias=attention_bias, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, ) for _ in range(num_layers) ] ) self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
# 4. Output blocks self.norm_out = AdaLayerNorm( embedding_dim=time_embed_dim, output_dim=2 * inner_dim, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, chunk_dim=1, ) if patch_size_t is None: # For CogVideoX 1.0 output_dim = patch_size * patch_size * out_channels else: # For CogVideoX 1.5 output_dim = patch_size * patch_size * patch_size_t * out_channels self.proj_out = nn.Linear(inner_dim, output_dim) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
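The `output_dim` branch above is plain patch arithmetic; a small restatement with the documented defaults (a sketch, not source code):

patch_size, patch_size_t, out_channels = 2, None, 16
if patch_size_t is None:   # CogVideoX 1.0
    output_dim = patch_size * patch_size * out_channels                 # 2 * 2 * 16 = 64
else:                      # CogVideoX 1.5
    output_dim = patch_size * patch_size * patch_size_t * out_channels  # 2 * 2 * 2 * 16 = 128
print(output_dim)  # 64: proj_out maps inner_dim to one patch worth of output channels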
@property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by their weight names. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
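The property above can be used to inspect which processor each attention layer runs; a usage sketch against the small `transformer` instantiated earlier (the printed key is an expected pattern, not captured output):

procs = transformer.attn_processors
for name, proc in list(procs.items())[:3]:
    print(name, type(proc).__name__)
# e.g. transformer_blocks.0.attn1.processor CogVideoXAttnProcessor2_0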
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys())
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
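A usage sketch for `set_attn_processor`, assuming the `CogVideoXAttnProcessor2_0` class from `diffusers.models.attention_processor` and the `transformer` from the earlier sketch:

from diffusers.models.attention_processor import CogVideoXAttnProcessor2_0

# Single processor instance applied to every Attention layer...
transformer.set_attn_processor(CogVideoXAttnProcessor2_0())

# ...or a dict keyed by the same paths that `attn_processors` returns.
transformer.set_attn_processor({name: CogVideoXAttnProcessor2_0() for name in transformer.attn_processors})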
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0 def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
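Typical usage of the two experimental methods above, as a sketch:

transformer.fuse_qkv_projections()    # swaps in FusedCogVideoXAttnProcessor2_0
# ... run inference with fused Q/K/V projections ...
transformer.unfuse_qkv_projections()  # restores the original processors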
def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, timestep: Union[int, float, torch.LongTensor], timestep_cond: Optional[torch.Tensor] = None, ofs: Optional[Union[int, float, torch.LongTensor]] = None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ): if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
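The input shapes expected by `forward` follow from the body below (the unpacking `batch_size, num_frames, channels, height, width = hidden_states.shape` and the text/video concatenation). The smoke test here reuses the hypothetical small configuration from the instantiation sketch and is an assumption, not a documented example; latent and text dimensions must match the `sample_*` and `max_text_seq_length` values the model was built with.

hidden_states = torch.randn(1, 3, 16, 8, 16)     # [B, latent_frames, in_channels, height, width]
encoder_hidden_states = torch.randn(1, 8, 32)    # [B, max_text_seq_length, text_embed_dim]
timestep = torch.tensor([999])                   # one timestep per batch element

with torch.no_grad():
    out = transformer(hidden_states, encoder_hidden_states, timestep).sample
print(out.shape)  # expected torch.Size([1, 3, 16, 8, 16]): same layout with out_channels channels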
if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) batch_size, num_frames, channels, height, width = hidden_states.shape # 1. Time embedding timesteps = timestep t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_embedding(t_emb, timestep_cond)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
if self.ofs_embedding is not None: ofs_emb = self.ofs_proj(ofs) ofs_emb = ofs_emb.to(dtype=hidden_states.dtype) ofs_emb = self.ofs_embedding(ofs_emb) emb = emb + ofs_emb # 2. Patch embedding hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] hidden_states = hidden_states[:, text_seq_length:] # 3. Transformer blocks for i, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, emb, image_rotary_emb, attention_kwargs, **ckpt_kwargs, ) else: hidden_states, encoder_hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=emb, image_rotary_emb=image_rotary_emb, attention_kwargs=attention_kwargs, )
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
if not self.config.use_rotary_positional_embeddings: # CogVideoX-2B hidden_states = self.norm_final(hidden_states) else: # CogVideoX-5B hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) hidden_states = self.norm_final(hidden_states) hidden_states = hidden_states[:, text_seq_length:] # 4. Final block hidden_states = self.norm_out(hidden_states, temb=emb) hidden_states = self.proj_out(hidden_states) # 5. Unpatchify p = self.config.patch_size p_t = self.config.patch_size_t
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
if p_t is None: output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) else: output = hidden_states.reshape( batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
1,156
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/cogvideox_transformer_3d.py
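The unpatchify branch for CogVideoX 1.0 (`p_t` is None) can be checked in isolation; a shape-only sketch that mirrors the reshape/permute/flatten sequence above with arbitrary small dimensions:

import torch

B, F, C, H, W, p = 1, 3, 16, 8, 16, 2
x = torch.randn(B, F * (H // p) * (W // p), p * p * C)  # proj_out output for the video tokens
x = x.reshape(B, F, H // p, W // p, -1, p, p)           # [B, F, H/p, W/p, C, p, p]
x = x.permute(0, 1, 4, 2, 5, 3, 6)                      # [B, F, C, H/p, p, W/p, p]
x = x.flatten(5, 6).flatten(3, 4)                       # [B, F, C, H, W]
assert x.shape == (B, F, C, H, W)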
class CogView3PlusTransformerBlock(nn.Module): r""" Transformer block used in the [CogView3](https://github.com/THUDM/CogView3) model. Args: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. time_embed_dim (`int`): The number of channels in the timestep embedding. """ def __init__( self, dim: int = 2560, num_attention_heads: int = 64, attention_head_dim: int = 40, time_embed_dim: int = 512, ): super().__init__() self.norm1 = CogView3PlusAdaLayerNormZeroTextImage(embedding_dim=time_embed_dim, dim=dim)
1,157
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_cogview3plus.py
self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, out_dim=dim, bias=True, qk_norm="layer_norm", elementwise_affine=False, eps=1e-6, processor=CogVideoXAttnProcessor2_0(), ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5) self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, emb: torch.Tensor, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1)
1,157
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_cogview3plus.py
# norm & modulate ( norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp, ) = self.norm1(hidden_states, encoder_hidden_states, emb) # attention attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states ) hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_hidden_states encoder_hidden_states = encoder_hidden_states + c_gate_msa.unsqueeze(1) * attn_encoder_hidden_states # norm & modulate norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
1,157
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_cogview3plus.py
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] # feed-forward norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output[:, text_seq_length:] encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * ff_output[:, :text_seq_length] if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return hidden_states, encoder_hidden_states
1,157
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_cogview3plus.py
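A minimal sketch of calling the block above with small hypothetical dimensions (assumptions for illustration; CogView3-Plus itself uses dim=2560 and time_embed_dim=512). The block returns the updated image and text streams as a pair.

import torch
from diffusers.models.transformers.transformer_cogview3plus import CogView3PlusTransformerBlock

block = CogView3PlusTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16, time_embed_dim=32)

hidden_states = torch.randn(1, 128, 64)          # image tokens: [B, image_seq_len, dim]
encoder_hidden_states = torch.randn(1, 16, 64)   # text tokens:  [B, text_seq_len, dim]
emb = torch.randn(1, 32)                         # conditioning embedding: [B, time_embed_dim]

hidden_states, encoder_hidden_states = block(hidden_states, encoder_hidden_states, emb)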