Dataset columns:
text — string, lengths 1 to 1.02k
class_index — int64, values 0 to 1.38k
source — string, 431 distinct values
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: torch.FloatTensor = None,
        timestep: torch.LongTensor = None,
        return_dict: bool = True,
    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
        height, width = hidden_states.shape[-2:]
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
        # Apply patch embedding, timestep embedding, and project the caption embeddings.
        hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
        temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype)
        temb = self.time_step_proj(temb)
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)
        encoder_hidden_states = torch.cat(
            [self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1
        )

        # MMDiT blocks.
        for index_block, block in enumerate(self.joint_transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
                )
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
        # Single DiT blocks that combine the `hidden_states` (image) and `encoder_hidden_states` (text)
        if len(self.single_transformer_blocks) > 0:
            encoder_seq_len = encoder_hidden_states.size(1)
            combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

            for index_block, block in enumerate(self.single_transformer_blocks):
                if torch.is_grad_enabled() and self.gradient_checkpointing:

                    def create_custom_forward(module, return_dict=None):
                        def custom_forward(*inputs):
                            if return_dict is not None:
                                return module(*inputs, return_dict=return_dict)
                            else:
                                return module(*inputs)

                        return custom_forward
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
                    ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                    combined_hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(block),
                        combined_hidden_states,
                        temb,
                        **ckpt_kwargs,
                    )
                else:
                    combined_hidden_states = block(hidden_states=combined_hidden_states, temb=temb)

            hidden_states = combined_hidden_states[:, encoder_seq_len:]

        hidden_states = self.norm_out(hidden_states, temb)
        hidden_states = self.proj_out(hidden_states)

        # unpatchify
        patch_size = self.config.patch_size
        out_channels = self.config.out_channels
        height = height // patch_size
        width = width // patch_size
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
        hidden_states = hidden_states.reshape(
            shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size)
        )

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
1,129
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py
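The unpatchify step in the forward pass above can be sanity-checked in isolation. A minimal sketch, assuming dummy sizes that are purely hypothetical (batch 2, a 4x4 patch grid, patch size 2, 8 output channels); the reshape/einsum/reshape sequence mirrors the record above:

import torch

batch, height, width, patch_size, out_channels = 2, 4, 4, 2, 8

# (batch, num_patches, patch_size * patch_size * out_channels), as produced by proj_out
hidden_states = torch.randn(batch, height * width, patch_size * patch_size * out_channels)

hidden_states = hidden_states.reshape(batch, height, width, patch_size, patch_size, out_channels)
hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)  # move channels forward, interleave patches
output = hidden_states.reshape(batch, out_channels, height * patch_size, width * patch_size)

print(output.shape)  # torch.Size([2, 8, 8, 8])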
class T5FilmDecoder(ModelMixin, ConfigMixin):
    r"""
    T5 style decoder with FiLM conditioning.

    Args:
        input_dims (`int`, *optional*, defaults to `128`):
            The number of input dimensions.
        targets_length (`int`, *optional*, defaults to `256`):
            The length of the targets.
        d_model (`int`, *optional*, defaults to `768`):
            Size of the input hidden states.
        num_layers (`int`, *optional*, defaults to `12`):
            The number of `DecoderLayer`'s to use.
        num_heads (`int`, *optional*, defaults to `12`):
            The number of attention heads to use.
        d_kv (`int`, *optional*, defaults to `64`):
            Size of the key-value projection vectors.
        d_ff (`int`, *optional*, defaults to `2048`):
            The number of dimensions in the intermediate feed-forward layer of `DecoderLayer`'s.
        dropout_rate (`float`, *optional*, defaults to `0.1`):
            Dropout probability.
    """
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
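The `encoder_decoder_mask` helper in the record above builds a cross-attention mask as an outer product of the decoder keep-mask and the encoder keep-mask. A minimal sketch with hypothetical lengths (decoder length 3, encoder length 5):

import torch

decoder_mask = torch.ones(2, 3)                       # (batch, decoder_len), no padding on the decoder side
encoder_mask = torch.tensor([[1., 1., 1., 0., 0.],
                             [1., 1., 0., 0., 0.]])   # (batch, encoder_len)

# Same computation as T5FilmDecoder.encoder_decoder_mask
mask = torch.mul(decoder_mask.unsqueeze(-1), encoder_mask.unsqueeze(-2)).unsqueeze(-3)
print(mask.shape)  # torch.Size([2, 1, 3, 5]) -> broadcastable over attention heads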
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)

        return spec_out
1,130
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class DecoderLayer(nn.Module):
    r"""
    T5 decoder layer.

    Args:
        d_model (`int`):
            Size of the input hidden states.
        d_kv (`int`):
            Size of the key-value projection vectors.
        num_heads (`int`):
            Number of attention heads.
        d_ff (`int`):
            Size of the intermediate feed-forward layer.
        dropout_rate (`float`):
            Dropout probability.
        layer_norm_epsilon (`float`, *optional*, defaults to `1e-6`):
            A small value used for numerical stability to avoid dividing by zero.
    """

    def __init__(
        self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6
    ):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
1,131
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )
1,131
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        conditioning_emb: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        encoder_decoder_position_bias=None,
    ) -> Tuple[torch.Tensor]:
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )
1,131
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
1,131
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class T5LayerSelfAttentionCond(nn.Module):
    r"""
    T5 style self-attention layer with conditioning.

    Args:
        d_model (`int`):
            Size of the input hidden states.
        d_kv (`int`):
            Size of the key-value projection vectors.
        num_heads (`int`):
            Number of attention heads.
        dropout_rate (`float`):
            Dropout probability.
    """

    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
1,132
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        conditioning_emb: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
1,132
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class T5LayerCrossAttention(nn.Module):
    r"""
    T5 style cross-attention layer.

    Args:
        d_model (`int`):
            Size of the input hidden states.
        d_kv (`int`):
            Size of the key-value projection vectors.
        num_heads (`int`):
            Number of attention heads.
        dropout_rate (`float`):
            Dropout probability.
        layer_norm_epsilon (`float`):
            A small value used for numerical stability to avoid dividing by zero.
    """

    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
1,133
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
1,133
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class T5LayerFFCond(nn.Module):
    r"""
    T5 style feed-forward conditional layer.

    Args:
        d_model (`int`):
            Size of the input hidden states.
        d_ff (`int`):
            Size of the intermediate feed-forward layer.
        dropout_rate (`float`):
            Dropout probability.
        layer_norm_epsilon (`float`):
            A small value used for numerical stability to avoid dividing by zero.
    """

    def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
1,134
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor] = None) -> torch.Tensor:
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
1,134
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class T5DenseGatedActDense(nn.Module):
    r"""
    T5 style feed-forward layer with gated activations and dropout.

    Args:
        d_model (`int`):
            Size of the input hidden states.
        d_ff (`int`):
            Size of the intermediate feed-forward layer.
        dropout_rate (`float`):
            Dropout probability.
    """

    def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
1,135
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
        hidden_states = self.wo(hidden_states)
        return hidden_states
1,135
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
class T5LayerNorm(nn.Module):
    r"""
    T5 style layer normalization module.

    Args:
        hidden_size (`int`):
            Size of the input hidden states.
        eps (`float`, `optional`, defaults to `1e-6`):
            A small value used for numerical stability to avoid dividing by zero.
    """

    def __init__(self, hidden_size: int, eps: float = 1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
1,136
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
1,136
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
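The comment in the record above describes RMSNorm: scale by the root-mean-square of the features, with no mean subtraction and no bias. A minimal numeric check, independent of the class (the tensor values are arbitrary):

import torch

x = torch.tensor([[3.0, 4.0]])                # RMS^2 = (9 + 16) / 2 = 12.5
eps = 1e-6

variance = x.pow(2).mean(-1, keepdim=True)    # mean of squares, no mean subtraction
normed = x * torch.rsqrt(variance + eps)

print(variance)                               # tensor([[12.5000]])
print(normed)                                 # ~tensor([[0.8485, 1.1314]])
print(normed.pow(2).mean(-1))                 # ~tensor([1.0000]) -> unit RMS after normalization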
class NewGELUActivation(nn.Module):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
1,137
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
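This is the tanh approximation of GELU, so it should match PyTorch's built-in `nn.GELU(approximate="tanh")` up to floating-point error. A quick comparison sketch:

import math

import torch
import torch.nn as nn

x = torch.linspace(-4, 4, steps=9)

new_gelu = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
builtin = nn.GELU(approximate="tanh")(x)

print(torch.allclose(new_gelu, builtin, atol=1e-5))  # expected: True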
class T5FiLMLayer(nn.Module):
    """
    T5 style FiLM Layer.

    Args:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
    """

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor:
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
1,138
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/t5_film_transformer.py
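The FiLM layer projects the conditioning embedding to a per-channel scale and shift and applies `x * (1 + scale) + shift`. A standalone shape sketch with hypothetical sizes (d_model = 8, so the conditioning embedding has 4 * d_model features, matching how the layer is constructed in the surrounding modules):

import torch
import torch.nn as nn

d_model = 8
scale_bias = nn.Linear(4 * d_model, d_model * 2, bias=False)    # same shape as T5FiLMLayer.scale_bias

x = torch.randn(2, 10, d_model)                     # (batch, seq, d_model) hidden states
conditioning_emb = torch.randn(2, 1, 4 * d_model)   # (batch, 1, 4 * d_model) from the time-embedding MLP

emb = scale_bias(conditioning_emb)                  # (2, 1, 2 * d_model)
scale, shift = torch.chunk(emb, 2, dim=-1)          # each (2, 1, d_model), broadcast over the sequence
out = x * (1 + scale) + shift

print(out.shape)  # torch.Size([2, 10, 8])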
class FluxSingleTransformerBlock(nn.Module):
    r"""
    A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.

    Reference: https://arxiv.org/abs/2403.03206

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        context_pre_only (`bool`): Boolean to determine if we should add some blocks associated with the
            processing of `context` conditions.
    """

    def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0):
        super().__init__()
        self.mlp_hidden_dim = int(dim * mlp_ratio)

        self.norm = AdaLayerNormZeroSingle(dim)
        self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
        self.act_mlp = nn.GELU(approximate="tanh")
        self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
1,139
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        if is_torch_npu_available():
            processor = FluxAttnProcessor2_0_NPU()
        else:
            processor = FluxAttnProcessor2_0()
        self.attn = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=dim,
            bias=True,
            processor=processor,
            qk_norm="rms_norm",
            eps=1e-6,
            pre_only=True,
        )
1,139
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: torch.Tensor,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> torch.Tensor:
        residual = hidden_states
        norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
        mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
        joint_attention_kwargs = joint_attention_kwargs or {}
        attn_output = self.attn(
            hidden_states=norm_hidden_states,
            image_rotary_emb=image_rotary_emb,
            **joint_attention_kwargs,
        )

        hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
        gate = gate.unsqueeze(1)
        hidden_states = gate * self.proj_out(hidden_states)
        hidden_states = residual + hidden_states
        if hidden_states.dtype == torch.float16:
            hidden_states = hidden_states.clip(-65504, 65504)
1,139
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        return hidden_states
1,139
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
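The single-stream block above runs attention and the MLP in parallel on the same normalized input, concatenates the two results along the feature axis, and projects back to `dim` before the gated residual. A standalone shape sketch (the attention output is faked with a random tensor and the gate with random values; all sizes are hypothetical):

import torch
import torch.nn as nn

dim, mlp_ratio, batch, seq = 64, 4.0, 2, 16
mlp_hidden_dim = int(dim * mlp_ratio)

proj_mlp = nn.Linear(dim, mlp_hidden_dim)
act_mlp = nn.GELU(approximate="tanh")
proj_out = nn.Linear(dim + mlp_hidden_dim, dim)

hidden_states = torch.randn(batch, seq, dim)
gate = torch.randn(batch, dim)                     # per-sample gate from AdaLayerNormZeroSingle

attn_output = torch.randn(batch, seq, dim)         # stand-in for self.attn(...)
mlp_hidden_states = act_mlp(proj_mlp(hidden_states))

fused = torch.cat([attn_output, mlp_hidden_states], dim=2)    # (batch, seq, dim + mlp_hidden_dim)
out = hidden_states + gate.unsqueeze(1) * proj_out(fused)     # gated residual, back to (batch, seq, dim)
print(out.shape)  # torch.Size([2, 16, 64])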
class FluxTransformerBlock(nn.Module):
    r"""
    A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.

    Reference: https://arxiv.org/abs/2403.03206

    Args:
        dim (`int`):
            The embedding dimension of the block.
        num_attention_heads (`int`):
            The number of attention heads to use.
        attention_head_dim (`int`):
            The number of dimensions to use for each attention head.
        qk_norm (`str`, defaults to `"rms_norm"`):
            The normalization to use for the query and key tensors.
        eps (`float`, defaults to `1e-6`):
            The epsilon value to use for the normalization.
    """

    def __init__(
        self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6
    ):
        super().__init__()

        self.norm1 = AdaLayerNormZero(dim)

        self.norm1_context = AdaLayerNormZero(dim)
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        if hasattr(F, "scaled_dot_product_attention"):
            processor = FluxAttnProcessor2_0()
        else:
            raise ValueError(
                "The current PyTorch version does not support the `scaled_dot_product_attention` function."
            )
        self.attn = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            added_kv_proj_dim=dim,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=dim,
            context_pre_only=False,
            bias=True,
            processor=processor,
            qk_norm=qk_norm,
            eps=eps,
        )

        self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
        self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")

        self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
        self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
            encoder_hidden_states, emb=temb
        )
        joint_attention_kwargs = joint_attention_kwargs or {}
        # Attention.
        attention_outputs = self.attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            image_rotary_emb=image_rotary_emb,
            **joint_attention_kwargs,
        )

        if len(attention_outputs) == 2:
            attn_output, context_attn_output = attention_outputs
        elif len(attention_outputs) == 3:
            attn_output, context_attn_output, ip_attn_output = attention_outputs

        # Process attention outputs for the `hidden_states`.
        attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = hidden_states + attn_output
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        norm_hidden_states = self.norm2(hidden_states)
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        ff_output = self.ff(norm_hidden_states)
        ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = hidden_states + ff_output
        if len(attention_outputs) == 3:
            hidden_states = hidden_states + ip_attn_output

        # Process attention outputs for the `encoder_hidden_states`.
        context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
        encoder_hidden_states = encoder_hidden_states + context_attn_output

        norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
        norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        context_ff_output = self.ff_context(norm_encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
        if encoder_hidden_states.dtype == torch.float16:
            encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)

        return encoder_hidden_states, hidden_states
1,140
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
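The modulation pattern used above (AdaLayerNormZero) maps the conditioning embedding to shift/scale/gate triples and applies `norm(x) * (1 + scale) + shift`, with the gate scaling the branch output before the residual add. A minimal standalone sketch of that arithmetic (hypothetical sizes; a plain LayerNorm and random modulation vectors stand in for the fused implementation):

import torch
import torch.nn as nn

dim, batch, seq = 32, 2, 6
norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)

x = torch.randn(batch, seq, dim)
shift, scale, gate = (torch.randn(batch, dim) for _ in range(3))  # would come from a Linear over the conditioning embedding

modulated = norm(x) * (1 + scale[:, None]) + shift[:, None]       # same broadcasting as scale_mlp / shift_mlp above
branch_out = torch.randn(batch, seq, dim)                         # stand-in for the attention or feed-forward output
x = x + gate.unsqueeze(1) * branch_out                            # gated residual

print(modulated.shape, x.shape)  # torch.Size([2, 6, 32]) torch.Size([2, 6, 32])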
class FluxTransformer2DModel(
    ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin
):
    """
    The Transformer model introduced in Flux.

    Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    Args:
        patch_size (`int`, defaults to `1`):
            Patch size to turn the input data into small patches.
        in_channels (`int`, defaults to `64`):
            The number of channels in the input.
        out_channels (`int`, *optional*, defaults to `None`):
            The number of channels in the output. If not specified, it defaults to `in_channels`.
        num_layers (`int`, defaults to `19`):
            The number of layers of dual stream DiT blocks to use.
        num_single_layers (`int`, defaults to `38`):
            The number of layers of single stream DiT blocks to use.
        attention_head_dim (`int`, defaults to `128`):
            The number of dimensions to use for each attention head.
        num_attention_heads (`int`, defaults to `24`):
            The number of attention heads to use.
        joint_attention_dim (`int`, defaults to `4096`):
            The number of dimensions to use for the joint attention (embedding/channel dimension of
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
            `encoder_hidden_states`).
        pooled_projection_dim (`int`, defaults to `768`):
            The number of dimensions to use for the pooled projection.
        guidance_embeds (`bool`, defaults to `False`):
            Whether to use guidance embeddings for guidance-distilled variant of the model.
        axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
            The dimensions to use for the rotary positional embeddings.
    """
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    _supports_gradient_checkpointing = True
    _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]

    @register_to_config
    def __init__(
        self,
        patch_size: int = 1,
        in_channels: int = 64,
        out_channels: Optional[int] = None,
        num_layers: int = 19,
        num_single_layers: int = 38,
        attention_head_dim: int = 128,
        num_attention_heads: int = 24,
        joint_attention_dim: int = 4096,
        pooled_projection_dim: int = 768,
        guidance_embeds: bool = False,
        axes_dims_rope: Tuple[int] = (16, 56, 56),
    ):
        super().__init__()
        self.out_channels = out_channels or in_channels
        self.inner_dim = num_attention_heads * attention_head_dim

        self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        text_time_guidance_cls = (
            CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
        )
        self.time_text_embed = text_time_guidance_cls(
            embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
        )

        self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
        self.x_embedder = nn.Linear(in_channels, self.inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                FluxTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                )
                for _ in range(num_layers)
            ]
        )
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        self.single_transformer_blocks = nn.ModuleList(
            [
                FluxSingleTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                )
                for _ in range(num_single_layers)
            ]
        )

        self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)

        self.gradient_checkpointing = False
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        self.set_attn_processor(FusedFluxAttnProcessor2_0())

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
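A short usage sketch for the fuse/unfuse pair above. The checkpoint name is only an example and downloading it requires access to the corresponding weights; the point is the call order around inference:

import torch
from diffusers import FluxTransformer2DModel

# Example Flux checkpoint; any Flux-style transformer checkpoint with compatible weights would do.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

transformer.fuse_qkv_projections()    # swaps in FusedFluxAttnProcessor2_0, as set above
# ... run inference with fused projections ...
transformer.unfuse_qkv_projections()  # restores the original attention processors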
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor = None,
        pooled_projections: torch.Tensor = None,
        timestep: torch.LongTensor = None,
        img_ids: torch.Tensor = None,
        txt_ids: torch.Tensor = None,
        guidance: torch.Tensor = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_block_samples=None,
        controlnet_single_block_samples=None,
        return_dict: bool = True,
        controlnet_blocks_repeat: bool = False,
    ) -> Union[torch.Tensor, Transformer2DModelOutput]:
        """
        The [`FluxTransformer2DModel`] forward method.
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
                Input `hidden_states`.
            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`):
                Embeddings projected from the embeddings of input conditions.
            timestep (`torch.LongTensor`):
                Used to indicate denoising step.
            block_controlnet_hidden_states (`list` of `torch.Tensor`):
                A list of tensors that if specified are added to the residuals of transformer blocks.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        if joint_attention_kwargs is not None:
            joint_attention_kwargs = joint_attention_kwargs.copy()
            lora_scale = joint_attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the lora layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
                )

        hidden_states = self.x_embedder(hidden_states)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        timestep = timestep.to(hidden_states.dtype) * 1000
        if guidance is not None:
            guidance = guidance.to(hidden_states.dtype) * 1000
        else:
            guidance = None

        temb = (
            self.time_text_embed(timestep, pooled_projections)
            if guidance is None
            else self.time_text_embed(timestep, guidance, pooled_projections)
        )
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)

        if txt_ids.ndim == 3:
            logger.warning(
                "Passing `txt_ids` 3d torch.Tensor is deprecated."
                "Please remove the batch dimension and pass it as a 2d torch Tensor"
            )
            txt_ids = txt_ids[0]
        if img_ids.ndim == 3:
            logger.warning(
                "Passing `img_ids` 3d torch.Tensor is deprecated."
                "Please remove the batch dimension and pass it as a 2d torch Tensor"
            )
            img_ids = img_ids[0]
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        ids = torch.cat((txt_ids, img_ids), dim=0)
        image_rotary_emb = self.pos_embed(ids)

        if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs:
            ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds")
            ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds)
            joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states})

        for index_block, block in enumerate(self.transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    image_rotary_emb,
                    **ckpt_kwargs,
                )

            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    temb=temb,
                    image_rotary_emb=image_rotary_emb,
                    joint_attention_kwargs=joint_attention_kwargs,
                )
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
            # controlnet residual
            if controlnet_block_samples is not None:
                interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
                interval_control = int(np.ceil(interval_control))
                # For Xlabs ControlNet.
                if controlnet_blocks_repeat:
                    hidden_states = (
                        hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
                    )
                else:
                    hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]

        hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

        for index_block, block in enumerate(self.single_transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    temb,
                    image_rotary_emb,
                    **ckpt_kwargs,
                )
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
            else:
                hidden_states = block(
                    hidden_states=hidden_states,
                    temb=temb,
                    image_rotary_emb=image_rotary_emb,
                    joint_attention_kwargs=joint_attention_kwargs,
                )

            # controlnet residual
            if controlnet_single_block_samples is not None:
                interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
                interval_control = int(np.ceil(interval_control))
                hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
                    hidden_states[:, encoder_hidden_states.shape[1] :, ...]
                    + controlnet_single_block_samples[index_block // interval_control]
                )

        hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]

        hidden_states = self.norm_out(hidden_states, temb)
        output = self.proj_out(hidden_states)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
        if USE_PEFT_BACKEND:
            # remove `lora_scale` from each PEFT layer
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
1,141
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_flux.py
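Putting the pieces together, a tiny randomly initialized configuration can be pushed through this forward pass to check input/output shapes. A minimal sketch with deliberately small, hypothetical sizes (a real Flux checkpoint uses the defaults shown in `__init__`; note that the `axes_dims_rope` entries are chosen to sum to `attention_head_dim`):

import torch
from diffusers import FluxTransformer2DModel

# Tiny config for a shape check only.
model = FluxTransformer2DModel(
    patch_size=1, in_channels=4, num_layers=1, num_single_layers=1,
    attention_head_dim=8, num_attention_heads=2,
    joint_attention_dim=16, pooled_projection_dim=8, axes_dims_rope=(2, 2, 4),
)

batch, img_seq, txt_seq = 1, 16, 6
out = model(
    hidden_states=torch.randn(batch, img_seq, 4),           # packed latent tokens
    encoder_hidden_states=torch.randn(batch, txt_seq, 16),  # text embeddings
    pooled_projections=torch.randn(batch, 8),
    timestep=torch.tensor([1.0]),
    img_ids=torch.zeros(img_seq, 3),                         # 2d ids, as the deprecation warnings above expect
    txt_ids=torch.zeros(txt_seq, 3),
)
print(out.sample.shape)  # expected: torch.Size([1, 16, 4]) -> (batch, image_sequence_length, out_channels)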
class Transformer2DModelOutput(Transformer2DModelOutput):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead."
        deprecate("Transformer2DModelOutput", "1.0.0", deprecation_message)
        super().__init__(*args, **kwargs)
1,142
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin):
    """
    A 2D Transformer model for image-like data.
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            The number of channels in the input and output (specify if the input is **continuous**).
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
        sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
            This is fixed during training since it is used to learn a number of position embeddings.
        num_vector_embeds (`int`, *optional*):
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
            The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. Pass if at least one of the norm_layers is
            `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
            added to the hidden states.
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
            During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
        attention_bias (`bool`, *optional*):
            Configure if the `TransformerBlocks` attention should contain a bias parameter.
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["BasicTransformerBlock"]
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        patch_size: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_type: str = "layer_norm",  # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
        norm_elementwise_affine: bool = True,
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        norm_eps: float = 1e-5,
        attention_type: str = "default",
        caption_channels: int = None,
        interpolation_scale: float = None,
        use_additional_conditions: Optional[bool] = None,
    ):
        super().__init__()
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        # Validate inputs.
        if patch_size is not None:
            if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]:
                raise NotImplementedError(
                    f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
                )
            elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None:
                raise ValueError(
                    f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
                )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
        # Define whether input is continuous or discrete depending on configuration
        self.is_input_continuous = (in_channels is not None) and (patch_size is None)
        self.is_input_vectorized = num_vector_embeds is not None
        self.is_input_patches = in_channels is not None and patch_size is not None
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
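The three mutually exclusive input modes above are determined purely by which config fields are set. A small sketch of the same boolean logic applied to three hypothetical configs:

def classify_input_mode(in_channels=None, patch_size=None, num_vector_embeds=None):
    # Same flags as in Transformer2DModel.__init__
    is_input_continuous = (in_channels is not None) and (patch_size is None)
    is_input_vectorized = num_vector_embeds is not None
    is_input_patches = in_channels is not None and patch_size is not None
    return is_input_continuous, is_input_vectorized, is_input_patches

print(classify_input_mode(in_channels=4))                 # (True, False, False) -> continuous latents
print(classify_input_mode(num_vector_embeds=8192))        # (False, True, False) -> discrete VQ tokens
print(classify_input_mode(in_channels=4, patch_size=2))   # (False, False, True) -> patched inputs (DiT-style)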
        if self.is_input_continuous and self.is_input_vectorized:
            raise ValueError(
                f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is None."
            )
        elif self.is_input_vectorized and self.is_input_patches:
            raise ValueError(
                f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
                " sure that either `num_vector_embeds` or `num_patches` is None."
            )
        elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
            raise ValueError(
                f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
                f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
            )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
            deprecation_message = (
                f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
                " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
                " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect"
                " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
                " would be very nice if you could open a Pull request for the `transformer/config.json` file"
            )
            deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
            norm_type = "ada_norm"
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        # Set some common variables used across the board.
        self.use_linear_projection = use_linear_projection
        self.interpolation_scale = interpolation_scale
        self.caption_channels = caption_channels
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
        self.in_channels = in_channels
        self.out_channels = in_channels if out_channels is None else out_channels
        self.gradient_checkpointing = False

        if use_additional_conditions is None:
            if norm_type == "ada_norm_single" and sample_size == 128:
                use_additional_conditions = True
            else:
                use_additional_conditions = False
        self.use_additional_conditions = use_additional_conditions
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        # 2. Initialize the right blocks.
        # These functions follow a common structure:
        # a. Initialize the input blocks. b. Initialize the transformer blocks.
        # c. Initialize the output blocks and other projection blocks when necessary.
        if self.is_input_continuous:
            self._init_continuous_input(norm_type=norm_type)
        elif self.is_input_vectorized:
            self._init_vectorized_inputs(norm_type=norm_type)
        elif self.is_input_patches:
            self._init_patched_inputs(norm_type=norm_type)

    def _init_continuous_input(self, norm_type):
        self.norm = torch.nn.GroupNorm(
            num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-6, affine=True
        )
        if self.use_linear_projection:
            self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim)
        else:
            self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0)
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    self.inner_dim,
                    self.config.num_attention_heads,
                    self.config.attention_head_dim,
                    dropout=self.config.dropout,
                    cross_attention_dim=self.config.cross_attention_dim,
                    activation_fn=self.config.activation_fn,
                    num_embeds_ada_norm=self.config.num_embeds_ada_norm,
                    attention_bias=self.config.attention_bias,
                    only_cross_attention=self.config.only_cross_attention,
                    double_self_attention=self.config.double_self_attention,
                    upcast_attention=self.config.upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=self.config.norm_elementwise_affine,
                    norm_eps=self.config.norm_eps,
                    attention_type=self.config.attention_type,
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
                )
                for _ in range(self.config.num_layers)
            ]
        )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        if self.use_linear_projection:
            self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels)
        else:
            self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0)

    def _init_vectorized_inputs(self, norm_type):
        assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
        assert (
            self.config.num_vector_embeds is not None
        ), "Transformer2DModel over discrete input must provide num_embed"

        self.height = self.config.sample_size
        self.width = self.config.sample_size
        self.num_latent_pixels = self.height * self.width

        self.latent_image_embedding = ImagePositionalEmbeddings(
            num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width
        )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    self.inner_dim,
                    self.config.num_attention_heads,
                    self.config.attention_head_dim,
                    dropout=self.config.dropout,
                    cross_attention_dim=self.config.cross_attention_dim,
                    activation_fn=self.config.activation_fn,
                    num_embeds_ada_norm=self.config.num_embeds_ada_norm,
                    attention_bias=self.config.attention_bias,
                    only_cross_attention=self.config.only_cross_attention,
                    double_self_attention=self.config.double_self_attention,
                    upcast_attention=self.config.upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=self.config.norm_elementwise_affine,
                    norm_eps=self.config.norm_eps,
                    attention_type=self.config.attention_type,
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
                )
                for _ in range(self.config.num_layers)
            ]
        )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        self.norm_out = nn.LayerNorm(self.inner_dim)
        self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1)

    def _init_patched_inputs(self, norm_type):
        assert self.config.sample_size is not None, "Transformer2DModel over patched input must provide sample_size"

        self.height = self.config.sample_size
        self.width = self.config.sample_size

        self.patch_size = self.config.patch_size
        interpolation_scale = (
            self.config.interpolation_scale
            if self.config.interpolation_scale is not None
            else max(self.config.sample_size // 64, 1)
        )
        self.pos_embed = PatchEmbed(
            height=self.config.sample_size,
            width=self.config.sample_size,
            patch_size=self.config.patch_size,
            in_channels=self.in_channels,
            embed_dim=self.inner_dim,
            interpolation_scale=interpolation_scale,
        )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    self.inner_dim,
                    self.config.num_attention_heads,
                    self.config.attention_head_dim,
                    dropout=self.config.dropout,
                    cross_attention_dim=self.config.cross_attention_dim,
                    activation_fn=self.config.activation_fn,
                    num_embeds_ada_norm=self.config.num_embeds_ada_norm,
                    attention_bias=self.config.attention_bias,
                    only_cross_attention=self.config.only_cross_attention,
                    double_self_attention=self.config.double_self_attention,
                    upcast_attention=self.config.upcast_attention,
                    norm_type=norm_type,
                    norm_elementwise_affine=self.config.norm_elementwise_affine,
                    norm_eps=self.config.norm_eps,
                    attention_type=self.config.attention_type,
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
                )
                for _ in range(self.config.num_layers)
            ]
        )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        if self.config.norm_type != "ada_norm_single":
            self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
            self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
            self.proj_out_2 = nn.Linear(
                self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
            )
        elif self.config.norm_type == "ada_norm_single":
            self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
            self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
            self.proj_out = nn.Linear(
                self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
            )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
        # PixArt-Alpha blocks.
        self.adaln_single = None
        if self.config.norm_type == "ada_norm_single":
            # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
            # additional conditions until we find better name
            self.adaln_single = AdaLayerNormSingle(
                self.inner_dim, use_additional_conditions=self.use_additional_conditions
            )

        self.caption_projection = None
        if self.caption_channels is not None:
            self.caption_projection = PixArtAlphaTextProjection(
                in_features=self.caption_channels, hidden_size=self.inner_dim
            )

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        added_cond_kwargs: Dict[str, torch.Tensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ):
        """
        The [`Transformer2DModel`] forward method.
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous):
                Input `hidden_states`.
            encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep ( `torch.LongTensor`, *optional*):
                Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
            class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
                Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
                `AdaLayerNormZero`.
            cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            attention_mask ( `torch.Tensor`, *optional*):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            encoder_attention_mask ( `torch.Tensor`, *optional*):
                Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
* Mask `(batch, sequence_length)` True = keep, False = discard.
                * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.

                If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
                above. This bias will be added to the cross-attention scores.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformers.transformer_2d.Transformer2DModelOutput`] instead of
                a plain tuple.
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
Returns:
            If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned,
            otherwise a `tuple` where the first element is the sample tensor.
        """
        if cross_attention_kwargs is not None:
            if cross_attention_kwargs.get("scale", None) is not None:
                logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
        #   we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
        #   we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch, 1, key_tokens]
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
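The forward method documented above dispatches on whether the model was configured for continuous, vectorized, or patched input. A hedged usage sketch, assuming a tiny continuous-input configuration (the config values are illustrative, and the continuous-input path is assumed to be available in the installed diffusers version):

# Illustrative usage sketch; configuration values are made up for a tiny model.
import torch
from diffusers.models.transformers.transformer_2d import Transformer2DModel

model = Transformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,   # inner_dim = 2 * 8 = 16
    in_channels=32,         # continuous input: in_channels set, patch_size / num_vector_embeds unset
    num_layers=1,
    norm_num_groups=32,
)
sample = torch.randn(1, 32, 16, 16)
out = model(sample, return_dict=True).sample  # same spatial shape as the input
print(out.shape)  # torch.Size([1, 32, 16, 16])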
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None and attention_mask.ndim == 2:
            # assume that mask is expressed as:
            #   (1 = keep, 0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0, discard = -10000.0)
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
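A standalone sketch of the mask-to-bias conversion above, using a made-up (1, 3) mask:

# Illustrative sketch of the mask-to-bias conversion (made-up tensors).
import torch

attention_mask = torch.tensor([[1, 1, 0]])      # (batch=1, key_tokens=3), 1 = keep, 0 = discard
bias = (1 - attention_mask.float()) * -10000.0  # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                        # (batch, 1, key_tokens), broadcastable over query_tokens
print(bias)  # keep positions ~0.0 (may print as -0.), discard positions -10000.0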
# convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
            encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
# 1. Input
        if self.is_input_continuous:
            batch_size, _, height, width = hidden_states.shape
            residual = hidden_states
            hidden_states, inner_dim = self._operate_on_continuous_inputs(hidden_states)
        elif self.is_input_vectorized:
            hidden_states = self.latent_image_embedding(hidden_states)
        elif self.is_input_patches:
            height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
            hidden_states, encoder_hidden_states, timestep, embedded_timestep = self._operate_on_patched_inputs(
                hidden_states, encoder_hidden_states, timestep, added_cond_kwargs
            )

        # 2. Blocks
        for block in self.transformer_blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    timestep,
                    cross_attention_kwargs,
                    class_labels,
                    **ckpt_kwargs,
                )
            else:
                hidden_states = block(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    timestep=timestep,
                    cross_attention_kwargs=cross_attention_kwargs,
                    class_labels=class_labels,
                )
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
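The checkpointed branch above trades compute for memory: block activations are recomputed during backward instead of being stored. A minimal sketch of the same pattern on a stack of plain nn.Linear layers (the module is illustrative, not diffusers code):

# Illustrative sketch of gradient checkpointing over a stack of blocks.
import torch
import torch.nn as nn

blocks = nn.ModuleList([nn.Linear(16, 16) for _ in range(4)])
x = torch.randn(2, 16, requires_grad=True)

for block in blocks:
    # Activations inside `block` are not stored; they are recomputed during backward.
    x = torch.utils.checkpoint.checkpoint(block, x, use_reentrant=False)

x.sum().backward()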
# 3. Output
        if self.is_input_continuous:
            output = self._get_output_for_continuous_inputs(
                hidden_states=hidden_states,
                residual=residual,
                batch_size=batch_size,
                height=height,
                width=width,
                inner_dim=inner_dim,
            )
        elif self.is_input_vectorized:
            output = self._get_output_for_vectorized_inputs(hidden_states)
        elif self.is_input_patches:
            output = self._get_output_for_patched_inputs(
                hidden_states=hidden_states,
                timestep=timestep,
                class_labels=class_labels,
                embedded_timestep=embedded_timestep,
                height=height,
                width=width,
            )

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
def _operate_on_continuous_inputs(self, hidden_states):
        batch, _, height, width = hidden_states.shape
        hidden_states = self.norm(hidden_states)

        if not self.use_linear_projection:
            hidden_states = self.proj_in(hidden_states)
            inner_dim = hidden_states.shape[1]
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
        else:
            inner_dim = hidden_states.shape[1]
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
            hidden_states = self.proj_in(hidden_states)

        return hidden_states, inner_dim

    def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs):
        batch_size = hidden_states.shape[0]
        hidden_states = self.pos_embed(hidden_states)
        embedded_timestep = None
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
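`_operate_on_continuous_inputs` flattens the normalized feature map into a token sequence before the transformer blocks. A sketch of that reshape with made-up dimensions:

# Illustrative sketch of the (batch, channels, height, width) -> (batch, height * width, channels)
# flattening performed above (made-up shapes).
import torch

batch, channels, height, width = 2, 16, 8, 8
feature_map = torch.randn(batch, channels, height, width)
tokens = feature_map.permute(0, 2, 3, 1).reshape(batch, height * width, channels)
print(tokens.shape)  # torch.Size([2, 64, 16])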
if self.adaln_single is not None:
            if self.use_additional_conditions and added_cond_kwargs is None:
                raise ValueError(
                    "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
                )
            timestep, embedded_timestep = self.adaln_single(
                timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
            )

        if self.caption_projection is not None:
            encoder_hidden_states = self.caption_projection(encoder_hidden_states)
            encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])

        return hidden_states, encoder_hidden_states, timestep, embedded_timestep
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim):
        if not self.use_linear_projection:
            hidden_states = (
                hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
            )
            hidden_states = self.proj_out(hidden_states)
        else:
            hidden_states = self.proj_out(hidden_states)
            hidden_states = (
                hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
            )

        output = hidden_states + residual
        return output
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
def _get_output_for_vectorized_inputs(self, hidden_states):
        hidden_states = self.norm_out(hidden_states)
        logits = self.out(hidden_states)
        # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
        logits = logits.permute(0, 2, 1)
        # log(p(x_0))
        output = F.log_softmax(logits.double(), dim=1).float()
        return output
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
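For vectorized (discrete) inputs, the head above returns per-pixel log-probabilities over the latent classes. A sketch with made-up sizes showing that the probabilities normalize over the class dimension:

# Illustrative sketch of the log-probability head (made-up sizes).
import torch
import torch.nn.functional as F

batch, num_latent_pixels, num_classes = 1, 4, 8        # num_classes plays the role of num_vector_embeds - 1
logits = torch.randn(batch, num_latent_pixels, num_classes)
logits = logits.permute(0, 2, 1)                       # (batch, num_classes, num_latent_pixels)
log_p = F.log_softmax(logits.double(), dim=1).float()  # normalize over the class dimension
print(log_p.exp().sum(dim=1))                          # each pixel's probabilities sum to ~1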
def _get_output_for_patched_inputs(
        self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None
    ):
        if self.config.norm_type != "ada_norm_single":
            conditioning = self.transformer_blocks[0].norm1.emb(
                timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
            shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
            hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
            hidden_states = self.proj_out_2(hidden_states)
        elif self.config.norm_type == "ada_norm_single":
            shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
            hidden_states = self.norm_out(hidden_states)
            # Modulation
            hidden_states = hidden_states * (1 + scale) + shift
            hidden_states = self.proj_out(hidden_states)
            hidden_states = hidden_states.squeeze(1)
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
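In the `ada_norm_single` branch above, the learned scale-shift table is combined with the timestep embedding and applied to the layer-normed hidden states. A sketch with made-up sizes (the LayerNorm step is omitted for brevity):

# Illustrative sketch of the "ada_norm_single" shift/scale modulation (made-up sizes).
import torch

batch, seq_len, inner_dim = 2, 4, 16
hidden_states = torch.randn(batch, seq_len, inner_dim)
scale_shift_table = torch.randn(2, inner_dim) / inner_dim**0.5  # learned table, as in the module above
embedded_timestep = torch.randn(batch, inner_dim)               # per-sample timestep embedding

shift, scale = (scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)  # each (batch, 1, inner_dim)
modulated = hidden_states * (1 + scale) + shift                 # broadcast over the sequence dimension
print(modulated.shape)  # torch.Size([2, 4, 16])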
# unpatchify
        if self.adaln_single is None:
            height = width = int(hidden_states.shape[1] ** 0.5)
        hidden_states = hidden_states.reshape(
            shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
        )

        return output
1,143
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/transformer_2d.py
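The unpatchify step rearranges per-patch token features back into an image-shaped tensor. A sketch with made-up sizes:

# Illustrative sketch of the unpatchify step (made-up sizes).
import torch

batch, height, width, patch_size, out_channels = 1, 4, 4, 2, 3
tokens = torch.randn(batch, height * width, patch_size * patch_size * out_channels)

x = tokens.reshape(-1, height, width, patch_size, patch_size, out_channels)
x = torch.einsum("nhwpqc->nchpwq", x)
image = x.reshape(-1, out_channels, height * patch_size, width * patch_size)
print(image.shape)  # torch.Size([1, 3, 8, 8])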