text
stringlengths
1
1.02k
class_index
int64
0
1.38k
source
stringclasses
431 values
class CogView3PlusTransformer2DModel(ModelMixin, ConfigMixin):
    r"""
    The Transformer model introduced in [CogView3: Finer and Faster Text-to-Image Generation via Relay
    Diffusion](https://huggingface.co/papers/2403.05121).

    Args:
        patch_size (`int`, defaults to `2`):
            The size of the patches to use in the patch embedding layer.
        in_channels (`int`, defaults to `16`):
            The number of channels in the input.
        num_layers (`int`, defaults to `30`):
            The number of layers of Transformer blocks to use.
        attention_head_dim (`int`, defaults to `40`):
            The number of channels in each head.
        num_attention_heads (`int`, defaults to `64`):
            The number of heads to use for multi-head attention.
        out_channels (`int`, defaults to `16`):
            The number of channels in the output.
        text_embed_dim (`int`, defaults to `4096`):
            Input dimension of text embeddings from the text encoder.
        time_embed_dim (`int`, defaults to `512`):
            Output dimension of timestep embeddings.
        condition_dim (`int`, defaults to `256`):
            The embedding dimension of the input SDXL-style resolution conditions (original_size, target_size,
            crop_coords).
        pos_embed_max_size (`int`, defaults to `128`):
            The maximum resolution of the positional embeddings, from which slices of shape `H x W` are taken and
            added to input patched latents, where `H` and `W` are the latent height and width respectively. A value of
            128 means that the maximum supported height and width for image generation is `128 * vae_scale_factor *
            patch_size => 128 * 8 * 2 => 2048`.
        sample_size (`int`, defaults to `128`):
            The base resolution of input latents. If height/width is not provided during generation, this value is
            used to determine the resolution as `sample_size * vae_scale_factor => 128 * 8 => 1024`
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["CogView3PlusTransformerBlock", "CogView3PlusPatchEmbed"]

    @register_to_config
    def __init__(
        self,
        patch_size: int = 2,
        in_channels: int = 16,
        num_layers: int = 30,
        attention_head_dim: int = 40,
        num_attention_heads: int = 64,
        out_channels: int = 16,
        text_embed_dim: int = 4096,
        time_embed_dim: int = 512,
        condition_dim: int = 256,
        pos_embed_max_size: int = 128,
        sample_size: int = 128,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.inner_dim = num_attention_heads * attention_head_dim

        # CogView3 uses 3 additional SDXL-like conditions - original_size, target_size, crop_coords
        # Each of these are sincos embeddings of shape 2 * condition_dim
        self.pooled_projection_dim = 3 * 2 * condition_dim

        self.patch_embed = CogView3PlusPatchEmbed(
            in_channels=in_channels,
            hidden_size=self.inner_dim,
            patch_size=patch_size,
            text_hidden_size=text_embed_dim,
            pos_embed_max_size=pos_embed_max_size,
        )

        self.time_condition_embed = CogView3CombinedTimestepSizeEmbeddings(
            embedding_dim=time_embed_dim,
            condition_dim=condition_dim,
            pooled_projection_dim=self.pooled_projection_dim,
            timesteps_dim=self.inner_dim,
        )

        self.transformer_blocks = nn.ModuleList(
            [
                CogView3PlusTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    time_embed_dim=time_embed_dim,
                )
                for _ in range(num_layers)
            ]
        )

        self.norm_out = AdaLayerNormContinuous(
            embedding_dim=self.inner_dim,
            conditioning_embedding_dim=time_embed_dim,
            elementwise_affine=False,
            eps=1e-6,
        )
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)

        self.gradient_checkpointing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle checkpointing on any submodule that exposes the flag.
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        original_size: torch.Tensor,
        target_size: torch.Tensor,
        crop_coords: torch.Tensor,
        return_dict: bool = True,
    ) -> Union[torch.Tensor, Transformer2DModelOutput]:
        """
        The [`CogView3PlusTransformer2DModel`] forward method.

        Args:
            hidden_states (`torch.Tensor`):
                Input `hidden_states` of shape `(batch size, channel, height, width)`.
            encoder_hidden_states (`torch.Tensor`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) of shape
                `(batch_size, sequence_len, text_embed_dim)`
            timestep (`torch.LongTensor`):
                Used to indicate denoising step.
            original_size (`torch.Tensor`):
                CogView3 uses SDXL-like micro-conditioning for original image size as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`torch.Tensor`):
                CogView3 uses SDXL-like micro-conditioning for target image size as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crop_coords (`torch.Tensor`):
                CogView3 uses SDXL-like micro-conditioning for crop coordinates as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.

        Returns:
            `torch.Tensor` or [`~models.transformer_2d.Transformer2DModelOutput`]:
                The denoised latents using provided inputs as conditioning.
        """
        height, width = hidden_states.shape[-2:]
        text_seq_length = encoder_hidden_states.shape[1]

        hidden_states = self.patch_embed(
            hidden_states, encoder_hidden_states
        )  # takes care of adding positional embeddings too.
        emb = self.time_condition_embed(timestep, original_size, target_size, crop_coords, hidden_states.dtype)

        # The patch embed concatenates text tokens in front of image tokens; split them back apart.
        encoder_hidden_states = hidden_states[:, :text_seq_length]
        hidden_states = hidden_states[:, text_seq_length:]

        for index_block, block in enumerate(self.transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    emb,
                    **ckpt_kwargs,
                )
            else:
                hidden_states, encoder_hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    emb=emb,
                )

        hidden_states = self.norm_out(hidden_states, emb)
        hidden_states = self.proj_out(hidden_states)  # (batch_size, height*width, patch_size*patch_size*out_channels)

        # unpatchify
        patch_size = self.config.patch_size
        height = height // patch_size
        width = width // patch_size

        hidden_states = hidden_states.reshape(
            shape=(hidden_states.shape[0], height, width, self.out_channels, patch_size, patch_size)
        )
        hidden_states = torch.einsum("nhwcpq->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
        )

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)
1,158
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_cogview3plus.py
class StableAudioGaussianFourierProjection(nn.Module):
    """Gaussian Fourier embeddings for noise levels."""

    # Copied from diffusers.models.embeddings.GaussianFourierProjection.__init__
    def __init__(
        self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
    ):
        super().__init__()
        # Fixed (non-trainable) random projection matrix.
        self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
        self.log = log
        self.flip_sin_to_cos = flip_sin_to_cos

        if set_W_to_weight:
            # to delete later
            # Kept for backward compatibility with checkpoints that stored the
            # projection under the name `W`: re-sample and alias it to `weight`.
            del self.weight
            self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
            self.weight = self.W
            del self.W

    def forward(self, x):
        # Optionally embed the log of the noise level instead of the raw value.
        if self.log:
            x = torch.log(x)

        # (batch,) -> (batch, embedding_size): random Fourier features.
        x_proj = 2 * np.pi * x[:, None] @ self.weight[None, :]
        if self.flip_sin_to_cos:
            out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
        else:
            out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
        return out
1,159
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/stable_audio_transformer.py
class StableAudioDiTBlock(nn.Module):
    r"""
    Transformer block used in Stable Audio model (https://github.com/Stability-AI/stable-audio-tools). Allow skip
    connection and QKNorm

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for the query states.
        num_key_value_attention_heads (`int`): The number of heads to use for the key and value states.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        num_key_value_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        upcast_attention: bool = False,
        norm_eps: float = 1e-5,
        ff_inner_dim: Optional[int] = None,
    ):
        super().__init__()
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=norm_eps)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=False,
            upcast_attention=upcast_attention,
            out_bias=False,
            processor=StableAudioAttnProcessor2_0(),
        )

        # 2. Cross-Attn
        self.norm2 = nn.LayerNorm(dim, norm_eps, True)

        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            kv_heads=num_key_value_attention_heads,
            dropout=dropout,
            bias=False,
            upcast_attention=upcast_attention,
            out_bias=False,
            processor=StableAudioAttnProcessor2_0(),
        )  # is self-attn if encoder_hidden_states is none

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, norm_eps, True)
        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn="swiglu",
            final_dropout=False,
            inner_dim=ff_inner_dim,
            bias=True,
        )

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        rotary_embedding: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        norm_hidden_states = self.norm1(hidden_states)

        attn_output = self.attn1(
            norm_hidden_states,
            attention_mask=attention_mask,
            rotary_emb=rotary_embedding,
        )
        # Residual (skip) connection around self-attention.
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        norm_hidden_states = self.norm2(hidden_states)

        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=encoder_attention_mask,
        )
        hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        ff_output = self.ff(norm_hidden_states)
        hidden_states = ff_output + hidden_states

        return hidden_states
1,160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/stable_audio_transformer.py
class StableAudioDiTModel(ModelMixin, ConfigMixin):
    """
    The Diffusion Transformer model introduced in Stable Audio.

    Reference: https://github.com/Stability-AI/stable-audio-tools

    Parameters:
        sample_size ( `int`, *optional*, defaults to 1024): The size of the input sample.
        in_channels (`int`, *optional*, defaults to 64): The number of channels in the input.
        num_layers (`int`, *optional*, defaults to 24): The number of layers of Transformer blocks to use.
        attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
        num_attention_heads (`int`, *optional*, defaults to 24): The number of heads to use for the query states.
        num_key_value_attention_heads (`int`, *optional*, defaults to 12):
            The number of heads to use for the key and value states.
        out_channels (`int`, defaults to 64): Number of output channels.
        cross_attention_dim ( `int`, *optional*, defaults to 768): Dimension of the cross-attention projection.
        time_proj_dim ( `int`, *optional*, defaults to 256): Dimension of the timestep inner projection.
        global_states_input_dim ( `int`, *optional*, defaults to 1536):
            Input dimension of the global hidden states projection.
        cross_attention_input_dim ( `int`, *optional*, defaults to 768):
            Input dimension of the cross-attention projection
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: int = 1024,
        in_channels: int = 64,
        num_layers: int = 24,
        attention_head_dim: int = 64,
        num_attention_heads: int = 24,
        num_key_value_attention_heads: int = 12,
        out_channels: int = 64,
        cross_attention_dim: int = 768,
        time_proj_dim: int = 256,
        global_states_input_dim: int = 1536,
        cross_attention_input_dim: int = 768,
    ):
        super().__init__()
        self.sample_size = sample_size
        self.out_channels = out_channels
        self.inner_dim = num_attention_heads * attention_head_dim

        # Fourier features of the noise level; `time_proj_dim // 2` because sin and
        # cos halves are concatenated to give `time_proj_dim` outputs.
        self.time_proj = StableAudioGaussianFourierProjection(
            embedding_size=time_proj_dim // 2,
            flip_sin_to_cos=True,
            log=False,
            set_W_to_weight=False,
        )

        self.timestep_proj = nn.Sequential(
            nn.Linear(time_proj_dim, self.inner_dim, bias=True),
            nn.SiLU(),
            nn.Linear(self.inner_dim, self.inner_dim, bias=True),
        )

        self.global_proj = nn.Sequential(
            nn.Linear(global_states_input_dim, self.inner_dim, bias=False),
            nn.SiLU(),
            nn.Linear(self.inner_dim, self.inner_dim, bias=False),
        )

        self.cross_attention_proj = nn.Sequential(
            nn.Linear(cross_attention_input_dim, cross_attention_dim, bias=False),
            nn.SiLU(),
            nn.Linear(cross_attention_dim, cross_attention_dim, bias=False),
        )

        self.preprocess_conv = nn.Conv1d(in_channels, in_channels, 1, bias=False)
        self.proj_in = nn.Linear(in_channels, self.inner_dim, bias=False)

        self.transformer_blocks = nn.ModuleList(
            [
                StableAudioDiTBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    num_key_value_attention_heads=num_key_value_attention_heads,
                    attention_head_dim=attention_head_dim,
                    cross_attention_dim=cross_attention_dim,
                )
                for i in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(self.inner_dim, self.out_channels, bias=False)
        self.postprocess_conv = nn.Conv1d(self.out_channels, self.out_channels, 1, bias=False)

        self.gradient_checkpointing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    # Copied from diffusers.models.transformers.hunyuan_transformer_2d.HunyuanDiT2DModel.set_default_attn_processor with Hunyuan->StableAudio
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(StableAudioAttnProcessor2_0())

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle checkpointing on any submodule that exposes the flag.
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        timestep: torch.LongTensor = None,
        encoder_hidden_states: torch.FloatTensor = None,
        global_hidden_states: torch.FloatTensor = None,
        rotary_embedding: torch.FloatTensor = None,
        return_dict: bool = True,
        attention_mask: Optional[torch.LongTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
        """
        The [`StableAudioDiTModel`] forward method.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch size, in_channels, sequence_len)`):
                Input `hidden_states`.
            timestep ( `torch.LongTensor`):
                Used to indicate denoising step.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, encoder_sequence_len, cross_attention_input_dim)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            global_hidden_states (`torch.FloatTensor` of shape `(batch size, global_sequence_len, global_states_input_dim)`):
                Global embeddings that will be prepended to the hidden states.
            rotary_embedding (`torch.Tensor`):
                The rotary embeddings to apply on query and key tensors during attention calculation.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
                Mask to avoid performing attention on padding token indices, formed by concatenating the attention
                masks for the two text encoders together. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
                Mask to avoid performing attention on padding token cross-attention indices, formed by concatenating
                the attention masks for the two text encoders together. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        cross_attention_hidden_states = self.cross_attention_proj(encoder_hidden_states)
        global_hidden_states = self.global_proj(global_hidden_states)
        time_hidden_states = self.timestep_proj(self.time_proj(timestep.to(self.dtype)))

        # Inject the timestep signal into the (single-token) global states.
        global_hidden_states = global_hidden_states + time_hidden_states.unsqueeze(1)

        hidden_states = self.preprocess_conv(hidden_states) + hidden_states
        # (batch_size, dim, sequence_length) -> (batch_size, sequence_length, dim)
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.proj_in(hidden_states)

        # prepend global states to hidden states
        hidden_states = torch.cat([global_hidden_states, hidden_states], dim=-2)
        if attention_mask is not None:
            # The prepended global token must always be attended to.
            prepend_mask = torch.ones((hidden_states.shape[0], 1), device=hidden_states.device, dtype=torch.bool)
            attention_mask = torch.cat([prepend_mask, attention_mask], dim=-1)

        for block in self.transformer_blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    attention_mask,
                    cross_attention_hidden_states,
                    encoder_attention_mask,
                    rotary_embedding,
                    **ckpt_kwargs,
                )

            else:
                hidden_states = block(
                    hidden_states=hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=cross_attention_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    rotary_embedding=rotary_embedding,
                )

        hidden_states = self.proj_out(hidden_states)

        # (batch_size, sequence_length, dim) -> (batch_size, dim, sequence_length)
        # remove prepend length that has been added by global hidden states
        hidden_states = hidden_states.transpose(1, 2)[:, :, 1:]
        hidden_states = self.postprocess_conv(hidden_states) + hidden_states

        if not return_dict:
            return (hidden_states,)

        return Transformer2DModelOutput(sample=hidden_states)
1,161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/stable_audio_transformer.py
class MochiModulatedRMSNorm(nn.Module):
    """RMSNorm computed in float32 with an optional multiplicative modulation `scale`."""

    def __init__(self, eps: float):
        super().__init__()
        self.eps = eps
        # dim=0 with elementwise_affine=False: a weight-free RMSNorm.
        self.norm = RMSNorm(0, eps, False)

    def forward(self, hidden_states, scale=None):
        # Normalize in float32 for numerical stability, then cast back.
        hidden_states_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)

        hidden_states = self.norm(hidden_states)

        if scale is not None:
            hidden_states = hidden_states * scale

        hidden_states = hidden_states.to(hidden_states_dtype)

        return hidden_states
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class MochiLayerNormContinuous(nn.Module):
    """AdaLN-style norm: a conditioning embedding produces a per-channel scale for RMSNorm."""

    def __init__(
        self,
        embedding_dim: int,
        conditioning_embedding_dim: int,
        eps=1e-5,
        bias=True,
    ):
        super().__init__()

        # AdaLN
        self.silu = nn.SiLU()
        self.linear_1 = nn.Linear(conditioning_embedding_dim, embedding_dim, bias=bias)
        self.norm = MochiModulatedRMSNorm(eps=eps)

    def forward(
        self,
        x: torch.Tensor,
        conditioning_embedding: torch.Tensor,
    ) -> torch.Tensor:
        input_dtype = x.dtype

        # convert back to the original dtype in case `conditioning_embedding`` is upcasted to float32 (needed for hunyuanDiT)
        scale = self.linear_1(self.silu(conditioning_embedding).to(x.dtype))
        x = self.norm(x, (1 + scale.unsqueeze(1).to(torch.float32)))

        return x.to(input_dtype)
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class MochiRMSNormZero(nn.Module):
    r"""
    Adaptive RMS Norm used in Mochi.

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
    """

    def __init__(
        self, embedding_dim: int, hidden_dim: int, eps: float = 1e-5, elementwise_affine: bool = False
    ) -> None:
        super().__init__()

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, hidden_dim)
        # dim=0 with elementwise_affine=False: a weight-free RMSNorm.
        self.norm = RMSNorm(0, eps, False)

    def forward(
        self, hidden_states: torch.Tensor, emb: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        hidden_states_dtype = hidden_states.dtype

        # Project the conditioning embedding into 4 chunks: scale/gate for MSA and MLP.
        emb = self.linear(self.silu(emb))
        scale_msa, gate_msa, scale_mlp, gate_mlp = emb.chunk(4, dim=1)
        # Normalize in float32 and apply the MSA scale, then cast back.
        hidden_states = self.norm(hidden_states.to(torch.float32)) * (1 + scale_msa[:, None].to(torch.float32))
        hidden_states = hidden_states.to(hidden_states_dtype)

        return hidden_states, gate_msa, scale_mlp, gate_mlp
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class MochiTransformerBlock(nn.Module): r""" Transformer block used in [Mochi](https://huggingface.co/genmo/mochi-1-preview). Args: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. qk_norm (`str`, defaults to `"rms_norm"`): The normalization layer to use. activation_fn (`str`, defaults to `"swiglu"`): Activation function to use in feed-forward. context_pre_only (`bool`, defaults to `False`): Whether or not to process context-related conditions with additional layers. eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. """
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, pooled_projection_dim: int, qk_norm: str = "rms_norm", activation_fn: str = "swiglu", context_pre_only: bool = False, eps: float = 1e-6, ) -> None: super().__init__() self.context_pre_only = context_pre_only self.ff_inner_dim = (4 * dim * 2) // 3 self.ff_context_inner_dim = (4 * pooled_projection_dim * 2) // 3 self.norm1 = MochiRMSNormZero(dim, 4 * dim, eps=eps, elementwise_affine=False) if not context_pre_only: self.norm1_context = MochiRMSNormZero(dim, 4 * pooled_projection_dim, eps=eps, elementwise_affine=False) else: self.norm1_context = MochiLayerNormContinuous( embedding_dim=pooled_projection_dim, conditioning_embedding_dim=dim, eps=eps, )
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
self.attn1 = MochiAttention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, bias=False, added_kv_proj_dim=pooled_projection_dim, added_proj_bias=False, out_dim=dim, out_context_dim=pooled_projection_dim, context_pre_only=context_pre_only, processor=MochiAttnProcessor2_0(), eps=1e-5, ) # TODO(aryan): norm_context layers are not needed when `context_pre_only` is True self.norm2 = MochiModulatedRMSNorm(eps=eps) self.norm2_context = MochiModulatedRMSNorm(eps=eps) if not self.context_pre_only else None self.norm3 = MochiModulatedRMSNorm(eps) self.norm3_context = MochiModulatedRMSNorm(eps=eps) if not self.context_pre_only else None
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
self.ff = FeedForward(dim, inner_dim=self.ff_inner_dim, activation_fn=activation_fn, bias=False) self.ff_context = None if not context_pre_only: self.ff_context = FeedForward( pooled_projection_dim, inner_dim=self.ff_context_inner_dim, activation_fn=activation_fn, bias=False, ) self.norm4 = MochiModulatedRMSNorm(eps=eps) self.norm4_context = MochiModulatedRMSNorm(eps=eps) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, encoder_attention_mask: torch.Tensor, image_rotary_emb: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb)
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
if not self.context_pre_only: norm_encoder_hidden_states, enc_gate_msa, enc_scale_mlp, enc_gate_mlp = self.norm1_context( encoder_hidden_states, temb ) else: norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb) attn_hidden_states, context_attn_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, attention_mask=encoder_attention_mask, ) hidden_states = hidden_states + self.norm2(attn_hidden_states, torch.tanh(gate_msa).unsqueeze(1)) norm_hidden_states = self.norm3(hidden_states, (1 + scale_mlp.unsqueeze(1).to(torch.float32))) ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + self.norm4(ff_output, torch.tanh(gate_mlp).unsqueeze(1))
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
if not self.context_pre_only: encoder_hidden_states = encoder_hidden_states + self.norm2_context( context_attn_hidden_states, torch.tanh(enc_gate_msa).unsqueeze(1) ) norm_encoder_hidden_states = self.norm3_context( encoder_hidden_states, (1 + enc_scale_mlp.unsqueeze(1).to(torch.float32)) ) context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + self.norm4_context( context_ff_output, torch.tanh(enc_gate_mlp).unsqueeze(1) ) return hidden_states, encoder_hidden_states
1,165
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class MochiRoPE(nn.Module): r""" RoPE implementation used in [Mochi](https://huggingface.co/genmo/mochi-1-preview). Args: base_height (`int`, defaults to `192`): Base height used to compute interpolation scale for rotary positional embeddings. base_width (`int`, defaults to `192`): Base width used to compute interpolation scale for rotary positional embeddings. """ def __init__(self, base_height: int = 192, base_width: int = 192) -> None: super().__init__() self.target_area = base_height * base_width def _centers(self, start, stop, num, device, dtype) -> torch.Tensor: edges = torch.linspace(start, stop, num + 1, device=device, dtype=dtype) return (edges[:-1] + edges[1:]) / 2
1,166
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
def _get_positions( self, num_frames: int, height: int, width: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> torch.Tensor: scale = (self.target_area / (height * width)) ** 0.5 t = torch.arange(num_frames, device=device, dtype=dtype) h = self._centers(-height * scale / 2, height * scale / 2, height, device, dtype) w = self._centers(-width * scale / 2, width * scale / 2, width, device, dtype) grid_t, grid_h, grid_w = torch.meshgrid(t, h, w, indexing="ij") positions = torch.stack([grid_t, grid_h, grid_w], dim=-1).view(-1, 3) return positions def _create_rope(self, freqs: torch.Tensor, pos: torch.Tensor) -> torch.Tensor: with torch.autocast(freqs.device.type, torch.float32): # Always run ROPE freqs computation in FP32 freqs = torch.einsum("nd,dhf->nhf", pos.to(torch.float32), freqs.to(torch.float32))
1,166
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
freqs_cos = torch.cos(freqs) freqs_sin = torch.sin(freqs) return freqs_cos, freqs_sin def forward( self, pos_frequencies: torch.Tensor, num_frames: int, height: int, width: int, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: pos = self._get_positions(num_frames, height, width, device, dtype) rope_cos, rope_sin = self._create_rope(pos_frequencies, pos) return rope_cos, rope_sin
1,166
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class MochiTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): r""" A Transformer model for video-like data introduced in [Mochi](https://huggingface.co/genmo/mochi-1-preview).
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
Args: patch_size (`int`, defaults to `2`): The size of the patches to use in the patch embedding layer. num_attention_heads (`int`, defaults to `24`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `128`): The number of channels in each head. num_layers (`int`, defaults to `48`): The number of layers of Transformer blocks to use. in_channels (`int`, defaults to `12`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. qk_norm (`str`, defaults to `"rms_norm"`): The normalization layer to use. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. time_embed_dim (`int`, defaults to `256`): Output dimension of timestep embeddings.
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
activation_fn (`str`, defaults to `"swiglu"`): Activation function to use in feed-forward. max_sequence_length (`int`, defaults to `256`): The maximum sequence length of text embeddings supported. """
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
_supports_gradient_checkpointing = True _no_split_modules = ["MochiTransformerBlock"] @register_to_config def __init__( self, patch_size: int = 2, num_attention_heads: int = 24, attention_head_dim: int = 128, num_layers: int = 48, pooled_projection_dim: int = 1536, in_channels: int = 12, out_channels: Optional[int] = None, qk_norm: str = "rms_norm", text_embed_dim: int = 4096, time_embed_dim: int = 256, activation_fn: str = "swiglu", max_sequence_length: int = 256, ) -> None: super().__init__() inner_dim = num_attention_heads * attention_head_dim out_channels = out_channels or in_channels self.patch_embed = PatchEmbed( patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, pos_embed_type=None, )
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
self.time_embed = MochiCombinedTimestepCaptionEmbedding( embedding_dim=inner_dim, pooled_projection_dim=pooled_projection_dim, text_embed_dim=text_embed_dim, time_embed_dim=time_embed_dim, num_attention_heads=8, ) self.pos_frequencies = nn.Parameter(torch.full((3, num_attention_heads, attention_head_dim // 2), 0.0)) self.rope = MochiRoPE() self.transformer_blocks = nn.ModuleList( [ MochiTransformerBlock( dim=inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, pooled_projection_dim=pooled_projection_dim, qk_norm=qk_norm, activation_fn=activation_fn, context_pre_only=i == num_layers - 1, ) for i in range(num_layers) ] )
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
self.norm_out = AdaLayerNormContinuous( inner_dim, inner_dim, elementwise_affine=False, eps=1e-6, norm_type="layer_norm", ) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_attention_mask: torch.Tensor, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> torch.Tensor: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) batch_size, num_channels, num_frames, height, width = hidden_states.shape p = self.config.patch_size post_patch_height = height // p post_patch_width = width // p temb, encoder_hidden_states = self.time_embed( timestep, encoder_hidden_states, encoder_attention_mask, hidden_dtype=hidden_states.dtype, )
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) hidden_states = self.patch_embed(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).flatten(1, 2) image_rotary_emb = self.rope( self.pos_frequencies, num_frames, post_patch_height, post_patch_width, device=hidden_states.device, dtype=torch.float32, ) for i, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, encoder_attention_mask, image_rotary_emb, **ckpt_kwargs, ) else: hidden_states, encoder_hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, encoder_attention_mask=encoder_attention_mask, image_rotary_emb=image_rotary_emb, ) hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states)
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
hidden_states = hidden_states.reshape(batch_size, num_frames, post_patch_height, post_patch_width, p, p, -1) hidden_states = hidden_states.permute(0, 6, 1, 2, 4, 3, 5) output = hidden_states.reshape(batch_size, -1, num_frames, height, width) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
1,167
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_mochi.py
class TemporalDecoder(nn.Module): def __init__( self, in_channels: int = 4, out_channels: int = 3, block_out_channels: Tuple[int] = (128, 256, 512, 512), layers_per_block: int = 2, ): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.mid_block = MidBlockTemporalDecoder( num_layers=self.layers_per_block, in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], attention_head_dim=block_out_channels[-1], ) # up self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i in range(len(block_out_channels)): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i]
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
is_final_block = i == len(block_out_channels) - 1 up_block = UpBlockTemporalDecoder( num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6) self.conv_act = nn.SiLU() self.conv_out = torch.nn.Conv2d( in_channels=block_out_channels[0], out_channels=out_channels, kernel_size=3, padding=1, )
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
conv_out_kernel_size = (3, 1, 1) padding = [int(k // 2) for k in conv_out_kernel_size] self.time_conv_out = torch.nn.Conv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=conv_out_kernel_size, padding=padding, ) self.gradient_checkpointing = False def forward( self, sample: torch.Tensor, image_only_indicator: torch.Tensor, num_frames: int = 1, ) -> torch.Tensor: r"""The forward method of the `Decoder` class.""" sample = self.conv_in(sample) upscale_dtype = next(itertools.chain(self.up_blocks.parameters(), self.up_blocks.buffers())).dtype if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
if is_torch_version(">=", "1.11.0"): # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, use_reentrant=False, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( create_custom_forward(up_block), sample, image_only_indicator, use_reentrant=False, ) else: # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, ) sample = sample.to(upscale_dtype)
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
# up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( create_custom_forward(up_block), sample, image_only_indicator, ) else: # middle sample = self.mid_block(sample, image_only_indicator=image_only_indicator) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = up_block(sample, image_only_indicator=image_only_indicator) # post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) batch_frames, channels, height, width = sample.shape batch_size = batch_frames // num_frames sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) sample = self.time_conv_out(sample)
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return sample
1,168
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving).
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE can be fine-tuned / trained to a lower range without loosing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix """
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
_supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, latent_channels: int = 4, sample_size: int = 32, scaling_factor: float = 0.18215, force_upcast: float = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, double_z=True, )
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
# pass init params to Decoder self.decoder = TemporalDecoder( in_channels=latent_channels, out_channels=out_channels, block_out_channels=block_out_channels, layers_per_block=layers_per_block, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, TemporalDecoder)): module.gradient_checkpointing = value @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {}
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention.
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents.
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) @apply_forward_hook def decode( self, z: torch.Tensor, num_frames: int, return_dict: bool = True, ) -> Union[DecoderOutput, torch.Tensor]: """ Decode a batch of images.
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size = z.shape[0] // num_frames image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded)
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, num_frames: int = 1, ) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, num_frames=num_frames).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
1,169
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
class CogVideoXSafeConv3d(nn.Conv3d): r""" A 3D convolution layer that splits the input tensor into smaller parts to avoid OOM in CogVideoX Model. """ def forward(self, input: torch.Tensor) -> torch.Tensor: memory_count = ( (input.shape[0] * input.shape[1] * input.shape[2] * input.shape[3] * input.shape[4]) * 2 / 1024**3 ) # Set to 2GB, suitable for CuDNN if memory_count > 2: kernel_size = self.kernel_size[0] part_num = int(memory_count / 2) + 1 input_chunks = torch.chunk(input, part_num, dim=2) if kernel_size > 1: input_chunks = [input_chunks[0]] + [ torch.cat((input_chunks[i - 1][:, :, -kernel_size + 1 :], input_chunks[i]), dim=2) for i in range(1, len(input_chunks)) ]
1,170
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
output_chunks = [] for input_chunk in input_chunks: output_chunks.append(super().forward(input_chunk)) output = torch.cat(output_chunks, dim=2) return output else: return super().forward(input)
1,170
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
class CogVideoXCausalConv3d(nn.Module): r"""A 3D causal convolution layer that pads the input tensor to ensure causality in CogVideoX Model. Args: in_channels (`int`): Number of channels in the input tensor. out_channels (`int`): Number of output channels produced by the convolution. kernel_size (`int` or `Tuple[int, int, int]`): Kernel size of the convolutional kernel. stride (`int`, defaults to `1`): Stride of the convolution. dilation (`int`, defaults to `1`): Dilation rate of the convolution. pad_mode (`str`, defaults to `"constant"`): Padding mode. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int, int]], stride: int = 1, dilation: int = 1, pad_mode: str = "constant", ): super().__init__() if isinstance(kernel_size, int): kernel_size = (kernel_size,) * 3
1,171
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
time_kernel_size, height_kernel_size, width_kernel_size = kernel_size # TODO(aryan): configure calculation based on stride and dilation in the future. # Since CogVideoX does not use it, it is currently tailored to "just work" with Mochi time_pad = time_kernel_size - 1 height_pad = (height_kernel_size - 1) // 2 width_pad = (width_kernel_size - 1) // 2 self.pad_mode = pad_mode self.height_pad = height_pad self.width_pad = width_pad self.time_pad = time_pad self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0) self.temporal_dim = 2 self.time_kernel_size = time_kernel_size
1,171
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
stride = stride if isinstance(stride, tuple) else (stride, 1, 1) dilation = (dilation, 1, 1) self.conv = CogVideoXSafeConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, ) def fake_context_parallel_forward( self, inputs: torch.Tensor, conv_cache: Optional[torch.Tensor] = None ) -> torch.Tensor: if self.pad_mode == "replicate": inputs = F.pad(inputs, self.time_causal_padding, mode="replicate") else: kernel_size = self.time_kernel_size if kernel_size > 1: cached_inputs = [conv_cache] if conv_cache is not None else [inputs[:, :, :1]] * (kernel_size - 1) inputs = torch.cat(cached_inputs + [inputs], dim=2) return inputs
1,171
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
def forward(self, inputs: torch.Tensor, conv_cache: Optional[torch.Tensor] = None) -> torch.Tensor: inputs = self.fake_context_parallel_forward(inputs, conv_cache) if self.pad_mode == "replicate": conv_cache = None else: padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad) conv_cache = inputs[:, :, -self.time_kernel_size + 1 :].clone() inputs = F.pad(inputs, padding_2d, mode="constant", value=0) output = self.conv(inputs) return output, conv_cache
1,171
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
class CogVideoXSpatialNorm3D(nn.Module):
    r"""
    Spatially conditioned normalization (https://arxiv.org/abs/2209.09002) adapted to 3D
    video-like data: group-normalizes `f` and modulates it with a scale/shift computed
    from the quantized conditioning `zq`.

    CogVideoXSafeConv3d is used instead of nn.Conv3d to avoid OOM in the CogVideoX model.

    Args:
        f_channels (`int`):
            The number of channels for input to group normalization layer, and output of the spatial norm layer.
        zq_channels (`int`):
            The number of channels for the quantized vector as described in the paper.
        groups (`int`):
            Number of groups to separate the channels into for group normalization.
    """

    def __init__(
        self,
        f_channels: int,
        zq_channels: int,
        groups: int = 32,
    ):
        super().__init__()
        self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-6, affine=True)
        self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
        self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)

    def forward(
        self, f: torch.Tensor, zq: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
    ) -> torch.Tensor:
        conv_cache = conv_cache or {}
        new_conv_cache = {}

        num_frames = f.shape[2]
        if num_frames > 1 and num_frames % 2 == 1:
            # Odd frame count: interpolate the (causal) first frame and the remaining
            # frames separately so they are resized independently, then re-join.
            zq_head = F.interpolate(zq[:, :, :1], size=f[:, :, :1].shape[-3:])
            zq_tail = F.interpolate(zq[:, :, 1:], size=f[:, :, 1:].shape[-3:])
            zq = torch.cat([zq_head, zq_tail], dim=2)
        else:
            zq = F.interpolate(zq, size=f.shape[-3:])

        # Scale/shift are produced by cached causal 1x1x1 convolutions over zq.
        scale, new_conv_cache["conv_y"] = self.conv_y(zq, conv_cache=conv_cache.get("conv_y"))
        shift, new_conv_cache["conv_b"] = self.conv_b(zq, conv_cache=conv_cache.get("conv_b"))

        return self.norm_layer(f) * scale + shift, new_conv_cache
1,172
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
class CogVideoXResnetBlock3D(nn.Module):
    r"""
    A 3D ResNet block used in the CogVideoX model.

    Args:
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`, *optional*):
            Number of output channels. If None, defaults to `in_channels`.
        dropout (`float`, defaults to `0.0`):
            Dropout rate.
        temb_channels (`int`, defaults to `512`):
            Number of time embedding channels.
        groups (`int`, defaults to `32`):
            Number of groups to separate the channels into for group normalization.
        eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        non_linearity (`str`, defaults to `"swish"`):
            Activation function to use.
        conv_shortcut (bool, defaults to `False`):
            Whether or not to use a convolution shortcut.
        spatial_norm_dim (`int`, *optional*):
            The dimension to use for spatial norm if it is to be used instead of group norm.
        pad_mode (str, defaults to `"first"`):
            Padding mode.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: Optional[int] = None,
        dropout: float = 0.0,
        temb_channels: int = 512,
        groups: int = 32,
        eps: float = 1e-6,
        non_linearity: str = "swish",
        conv_shortcut: bool = False,
        spatial_norm_dim: Optional[int] = None,
        pad_mode: str = "first",
    ):
        super().__init__()

        out_channels = out_channels or in_channels

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.nonlinearity = get_activation(non_linearity)
        self.use_conv_shortcut = conv_shortcut
        self.spatial_norm_dim = spatial_norm_dim

        # Plain GroupNorm when no spatial conditioning dimension is given; otherwise
        # the zq-conditioned spatial norm (used in the decoder path).
        if spatial_norm_dim is None:
            self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps)
            self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps)
        else:
            self.norm1 = CogVideoXSpatialNorm3D(
                f_channels=in_channels,
                zq_channels=spatial_norm_dim,
                groups=groups,
            )
            self.norm2 = CogVideoXSpatialNorm3D(
                f_channels=out_channels,
                zq_channels=spatial_norm_dim,
                groups=groups,
            )

        self.conv1 = CogVideoXCausalConv3d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
        )

        # Timestep-embedding projection only exists when temb_channels > 0.
        if temb_channels > 0:
            self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels)

        self.dropout = nn.Dropout(dropout)
        self.conv2 = CogVideoXCausalConv3d(
            in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
        )

        # Shortcut is only needed when the channel count changes: either a causal 3x3x3
        # conv or a 1x1x1 projection, depending on `conv_shortcut`.
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = CogVideoXCausalConv3d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
                )
            else:
                self.conv_shortcut = CogVideoXSafeConv3d(
                    in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0
                )

    def forward(
        self,
        inputs: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        zq: Optional[torch.Tensor] = None,
        conv_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Run the residual block; returns `(hidden_states, new_conv_cache)`.

        `conv_cache` threads per-submodule temporal caches (keyed "norm1", "conv1",
        "norm2", "conv2", "conv_shortcut") between chunked forward passes.
        """
        new_conv_cache = {}
        conv_cache = conv_cache or {}

        hidden_states = inputs

        # norm1 -> act -> conv1. Spatial norm takes zq and a cache; GroupNorm does not.
        if zq is not None:
            hidden_states, new_conv_cache["norm1"] = self.norm1(hidden_states, zq, conv_cache=conv_cache.get("norm1"))
        else:
            hidden_states = self.norm1(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)
        hidden_states, new_conv_cache["conv1"] = self.conv1(hidden_states, conv_cache=conv_cache.get("conv1"))

        # Inject the timestep embedding between conv1 and norm2, broadcast over T/H/W.
        if temb is not None:
            hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None]

        # norm2 -> act -> dropout -> conv2.
        if zq is not None:
            hidden_states, new_conv_cache["norm2"] = self.norm2(hidden_states, zq, conv_cache=conv_cache.get("norm2"))
        else:
            hidden_states = self.norm2(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states, new_conv_cache["conv2"] = self.conv2(hidden_states, conv_cache=conv_cache.get("conv2"))

        # Project the residual branch when channels change; only the causal-conv
        # variant carries a cache.
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                inputs, new_conv_cache["conv_shortcut"] = self.conv_shortcut(
                    inputs, conv_cache=conv_cache.get("conv_shortcut")
                )
            else:
                inputs = self.conv_shortcut(inputs)

        hidden_states = hidden_states + inputs
        return hidden_states, new_conv_cache
1,173
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
class CogVideoXDownBlock3D(nn.Module): r""" A downsampling block used in the CogVideoX model.
1,174
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. temb_channels (`int`, defaults to `512`): Number of time embedding channels. num_layers (`int`, defaults to `1`): Number of resnet layers. dropout (`float`, defaults to `0.0`): Dropout rate. resnet_eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. resnet_act_fn (`str`, defaults to `"swish"`): Activation function to use. resnet_groups (`int`, defaults to `32`): Number of groups to separate the channels into for group normalization. add_downsample (`bool`, defaults to `True`): Whether or not to use a downsampling layer. If not used, output dimension would be same as input dimension. compress_time (`bool`, defaults to `False`):
1,174
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
Whether or not to downsample across temporal dimension. pad_mode (str, defaults to `"first"`): Padding mode. """
1,174
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py
_supports_gradient_checkpointing = True def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_act_fn: str = "swish", resnet_groups: int = 32, add_downsample: bool = True, downsample_padding: int = 0, compress_time: bool = False, pad_mode: str = "first", ): super().__init__()
1,174
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py