class SkipBlock(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.skip_linear = nn.Linear(2 * dim, dim)
# Use torch.nn.LayerNorm for now, following the original code
self.norm = nn.LayerNorm(dim)
def forward(self, x, skip):
x = self.skip_linear(torch.cat([x, skip], dim=-1))
x = self.norm(x)
return x
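# A minimal shape sketch for SkipBlock, assuming dim=512 and a (batch, tokens, dim) token layout:
# the skip connection is concatenated on the feature axis, projected back to `dim`, and normalized.
import torch

block = SkipBlock(dim=512)
x = torch.randn(2, 16, 512)      # current hidden states
skip = torch.randn(2, 16, 512)   # hidden states saved from a matching "in" block
out = block(x, skip)             # cat -> (2, 16, 1024) -> Linear -> (2, 16, 512) -> LayerNorm
assert out.shape == (2, 16, 512)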
class UTransformerBlock(nn.Module):
r"""
A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations.
Parameters:
dim (`int`): The number of channels in the input and output.
num_attention_heads (`int`): The number of heads to use for multi-head attention.
attention_head_dim (`int`): The number of channels in each head.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
activation_fn (`str`, *optional*, defaults to `"geglu"`):
Activation function to be used in feed-forward.
num_embeds_ada_norm (:obj: `int`, *optional*):
The number of diffusion steps used during training. See `Transformer2DModel`.
attention_bias (:obj: `bool`, *optional*, defaults to `False`):
Configure if the attentions should contain a bias parameter.
only_cross_attention (`bool`, *optional*):
Whether to use only cross-attention layers. In this case two cross attention layers are used.
double_self_attention (`bool`, *optional*):
Whether to use two self-attention layers. In this case no cross attention layers are used.
upcast_attention (`bool`, *optional*):
Whether to upcast the query and key to float32 when performing the attention calculation.
norm_elementwise_affine (`bool`, *optional*):
Whether to use learnable per-element affine parameters during layer normalization.
norm_type (`str`, defaults to `"layer_norm"`):
The layer norm implementation to use.
pre_layer_norm (`bool`, *optional*):
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, i.e.
`pre_layer_norm = True`.
final_dropout (`bool`, *optional*):
Whether to use a final Dropout layer after the feedforward network.
"""
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
cross_attention_dim: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
attention_bias: bool = False,
only_cross_attention: bool = False,
double_self_attention: bool = False,
upcast_attention: bool = False,
norm_elementwise_affine: bool = True,
norm_type: str = "layer_norm",
pre_layer_norm: bool = True,
final_dropout: bool = False,
):
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
self.pre_layer_norm = pre_layer_norm
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
)
# 1. Self-Attn
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
upcast_attention=upcast_attention,
)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
) # is self-attn if encoder_hidden_states is none
else:
self.attn2 = None
if self.use_ada_layer_norm:
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
else:
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
self.norm2 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
)
else:
self.norm2 = None
# 3. Feed-forward
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
timestep=None,
cross_attention_kwargs=None,
class_labels=None,
):
# Pre-LayerNorm
if self.pre_layer_norm:
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(hidden_states, timestep)
else:
norm_hidden_states = self.norm1(hidden_states)
else:
norm_hidden_states = hidden_states
# 1. Self-Attention
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
# Post-LayerNorm
if not self.pre_layer_norm:
if self.use_ada_layer_norm:
attn_output = self.norm1(attn_output, timestep)
else:
attn_output = self.norm1(attn_output)
hidden_states = attn_output + hidden_states
if self.attn2 is not None:
# Pre-LayerNorm
if self.pre_layer_norm:
norm_hidden_states = (
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
)
else:
norm_hidden_states = hidden_states
# TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
# prepare attention mask here
# 2. Cross-Attention
attn_output = self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
**cross_attention_kwargs,
)
# Post-LayerNorm
if not self.pre_layer_norm:
attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output)
hidden_states = attn_output + hidden_states
# 3. Feed-forward
# Pre-LayerNorm
if self.pre_layer_norm:
norm_hidden_states = self.norm3(hidden_states)
else:
norm_hidden_states = hidden_states
ff_output = self.ff(norm_hidden_states)
# Post-LayerNorm
if not self.pre_layer_norm:
ff_output = self.norm3(ff_output)
hidden_states = ff_output + hidden_states
return hidden_states
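# A minimal usage sketch for UTransformerBlock with assumed dimensions, using pure self-attention
# (no `cross_attention_dim`) and the default pre-LayerNorm configuration (`pre_layer_norm=True`).
import torch

block = UTransformerBlock(dim=512, num_attention_heads=8, attention_head_dim=64)
hidden_states = torch.randn(2, 16, 512)   # (batch, tokens, dim)
out = block(hidden_states)                # norm -> self-attn -> residual, then norm -> ff -> residual
assert out.shape == hidden_states.shape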
class UniDiffuserBlock(nn.Module):
r"""
A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the
LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser
implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104).
Parameters:
dim (`int`): The number of channels in the input and output.
num_attention_heads (`int`): The number of heads to use for multi-head attention.
attention_head_dim (`int`): The number of channels in each head.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
activation_fn (`str`, *optional*, defaults to `"geglu"`):
Activation function to be used in feed-forward.
num_embeds_ada_norm (:obj: `int`, *optional*):
The number of diffusion steps used during training. See `Transformer2DModel`.
attention_bias (:obj: `bool`, *optional*, defaults to `False`):
Configure if the attentions should contain a bias parameter.
only_cross_attention (`bool`, *optional*):
Whether to use only cross-attention layers. In this case two cross attention layers are used.
double_self_attention (`bool`, *optional*):
Whether to use two self-attention layers. In this case no cross attention layers are used.
upcast_attention (`bool`, *optional*):
Whether to upcast the query and key to float32 when performing the attention calculation.
norm_elementwise_affine (`bool`, *optional*):
Whether to use learnable per-element affine parameters during layer normalization.
norm_type (`str`, defaults to `"layer_norm"`):
The layer norm implementation to use.
pre_layer_norm (`bool`, *optional*):
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
(`pre_layer_norm = False`).
final_dropout (`bool`, *optional*):
Whether to use a final Dropout layer after the feedforward network.
"""
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
cross_attention_dim: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
attention_bias: bool = False,
only_cross_attention: bool = False,
double_self_attention: bool = False,
upcast_attention: bool = False,
norm_elementwise_affine: bool = True,
norm_type: str = "layer_norm",
pre_layer_norm: bool = False,
final_dropout: bool = True,
):
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
self.pre_layer_norm = pre_layer_norm
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
)
# 1. Self-Attn
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
upcast_attention=upcast_attention,
)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
) # is self-attn if encoder_hidden_states is none
else:
self.attn2 = None
if self.use_ada_layer_norm:
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
else:
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
self.norm2 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
)
else:
self.norm2 = None
# 3. Feed-forward
self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
timestep=None,
cross_attention_kwargs=None,
class_labels=None,
):
# Following the diffusers transformer block implementation, put the LayerNorm on the
# residual backbone
# Pre-LayerNorm
if self.pre_layer_norm:
if self.use_ada_layer_norm:
hidden_states = self.norm1(hidden_states, timestep)
else:
hidden_states = self.norm1(hidden_states)
# 1. Self-Attention
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
attn_output = self.attn1(
hidden_states,
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
hidden_states = attn_output + hidden_states
# Following the diffusers transformer block implementation, put the LayerNorm on the
# residual backbone
# Post-LayerNorm
if not self.pre_layer_norm:
if self.use_ada_layer_norm:
hidden_states = self.norm1(hidden_states, timestep)
else:
hidden_states = self.norm1(hidden_states)
if self.attn2 is not None:
# Pre-LayerNorm
if self.pre_layer_norm:
hidden_states = (
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
)
# TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
# prepare attention mask here
# 2. Cross-Attention
attn_output = self.attn2(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
**cross_attention_kwargs,
)
hidden_states = attn_output + hidden_states
# Post-LayerNorm
if not self.pre_layer_norm:
hidden_states = (
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
)
# 3. Feed-forward
# Pre-LayerNorm
if self.pre_layer_norm:
hidden_states = self.norm3(hidden_states)
ff_output = self.ff(hidden_states)
hidden_states = ff_output + hidden_states
# Post-LayerNorm
if not self.pre_layer_norm:
hidden_states = self.norm3(hidden_states)
return hidden_states
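# The two blocks above differ only in where LayerNorm is applied. Roughly, in the post-LayerNorm,
# self-attention-only case:
#   UTransformerBlock:  hidden = norm(attn(hidden)) + hidden   # norm on the attention branch
#   UniDiffuserBlock:   hidden = norm(attn(hidden) + hidden)   # norm on the residual backbone
# A minimal sketch with assumed dimensions, using the defaults that match the original
# UniDiffuser implementation (`pre_layer_norm=False`):
import torch

block = UniDiffuserBlock(dim=512, num_attention_heads=8, attention_head_dim=64)
hidden_states = torch.randn(2, 16, 512)
assert block(hidden_states).shape == (2, 16, 512)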
class UTransformer2DModel(ModelMixin, ConfigMixin):
"""
Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared
to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion,
similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`]
layer and then reshaped to (b, t, d).
Parameters:
num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
in_channels (`int`, *optional*):
Pass if the input is continuous. The number of channels in the input.
out_channels (`int`, *optional*):
The number of output channels; if `None`, defaults to `in_channels`.
num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
norm_num_groups (`int`, *optional*, defaults to `32`):
The number of groups to use when performing Group Normalization.
cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
attention_bias (`bool`, *optional*):
Configure if the TransformerBlocks' attention should contain a bias parameter.
sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
Note that this is fixed at training time as it is used for learning a number of position embeddings. See
`ImagePositionalEmbeddings`.
num_vector_embeds (`int`, *optional*):
Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
Includes the class for the masked latent pixel.
patch_size (`int`, *optional*, defaults to 2):
The patch size to use in the patch embedding.
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
The number of diffusion steps used during training. Note that this is fixed at training time as it is used
to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
up to but not more than `num_embeds_ada_norm` steps.
use_linear_projection (int, *optional*): TODO: Not used
only_cross_attention (`bool`, *optional*):
Whether to use only cross-attention layers. In this case two cross attention layers are used in each
transformer block.
upcast_attention (`bool`, *optional*):
Whether to upcast the query and key to float32 when performing the attention calculation.
norm_type (`str`, *optional*, defaults to `"layer_norm"`):
The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
block_type (`str`, *optional*, defaults to `"unidiffuser"`):
The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
behavior in `diffusers`).
pre_layer_norm (`bool`, *optional*):
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
(`pre_layer_norm = False`).
norm_elementwise_affine (`bool`, *optional*):
Whether to use learnable per-element affine parameters during layer normalization.
use_patch_pos_embed (`bool`, *optional*):
Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
ff_final_dropout (`bool`, *optional*):
Whether to use a final Dropout layer after the feedforward network.
"""
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
out_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
sample_size: Optional[int] = None,
num_vector_embeds: Optional[int] = None,
patch_size: Optional[int] = 2,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
norm_type: str = "layer_norm",
block_type: str = "unidiffuser",
pre_layer_norm: bool = False,
norm_elementwise_affine: bool = True,
use_patch_pos_embed=False,
ff_final_dropout: bool = False,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
# 1. Input
# Only support patch input of shape (batch_size, num_channels, height, width) for now
assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size."
assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size"
# 2. Define input layers
self.height = sample_size
self.width = sample_size
self.patch_size = patch_size
self.pos_embed = PatchEmbed(
height=sample_size,
width=sample_size,
patch_size=patch_size,
in_channels=in_channels,
embed_dim=inner_dim,
use_pos_embed=use_patch_pos_embed,
)
# 3. Define transformers blocks
# Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block,
# and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in
# a "U"-shaped fashion (e.g. first in_block to last out_block, etc.).
# Quick hack to make the transformer block type configurable
if block_type == "unidiffuser":
block_cls = UniDiffuserBlock
else:
block_cls = UTransformerBlock
self.transformer_in_blocks = nn.ModuleList(
[
block_cls(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
norm_type=norm_type,
pre_layer_norm=pre_layer_norm,
norm_elementwise_affine=norm_elementwise_affine,
final_dropout=ff_final_dropout,
)
for d in range(num_layers // 2)
]
)
self.transformer_mid_block = block_cls(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
norm_type=norm_type,
pre_layer_norm=pre_layer_norm,
norm_elementwise_affine=norm_elementwise_affine,
final_dropout=ff_final_dropout,
)
# For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs
# before each transformer out_block.
self.transformer_out_blocks = nn.ModuleList(
[
nn.ModuleDict(
{
"skip": SkipBlock(
inner_dim,
),
"block": block_cls(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
norm_type=norm_type,
pre_layer_norm=pre_layer_norm,
norm_elementwise_affine=norm_elementwise_affine,
final_dropout=ff_final_dropout,
),
}
)
for d in range(num_layers // 2)
]
)
# 4. Define output layers
self.out_channels = in_channels if out_channels is None else out_channels
# Following the UniDiffuser U-ViT implementation, we process the transformer output with
# a LayerNorm layer with per-element affine params
self.norm_out = nn.LayerNorm(inner_dim)
def forward(
self,
hidden_states,
encoder_hidden_states=None,
timestep=None,
class_labels=None,
cross_attention_kwargs=None,
return_dict: bool = True,
hidden_states_is_embedding: bool = False,
unpatchify: bool = True,
):
"""
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` when discrete, or
`torch.Tensor` of shape `(batch size, channel, height, width)` when continuous): Input hidden_states.
encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.long`, *optional*):
Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Optional class labels to be applied as an embedding in AdaLayerNormZero. Used to indicate class labels
conditioning.
cross_attention_kwargs (*optional*):
Keyword arguments to supply to the cross attention layers, if used.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
hidden_states_is_embedding (`bool`, *optional*, defaults to `False`):
Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will
ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the
transformer blocks.
unpatchify (`bool`, *optional*, defaults to `True`):
Whether to unpatchify the transformer output.
Returns:
[`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
[`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
# 0. Check inputs
if not unpatchify and return_dict:
raise ValueError(
f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when"
f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)"
" rather than (batch_size, num_channels, height, width)."
)
# 1. Input
if not hidden_states_is_embedding:
hidden_states = self.pos_embed(hidden_states)
# 2. Blocks
# In ("downsample") blocks
skips = []
for in_block in self.transformer_in_blocks:
hidden_states = in_block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
class_labels=class_labels,
)
skips.append(hidden_states)
# Mid block
hidden_states = self.transformer_mid_block(hidden_states)
# Out ("upsample") blocks
for out_block in self.transformer_out_blocks:
hidden_states = out_block["skip"](hidden_states, skips.pop())
hidden_states = out_block["block"](
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
class_labels=class_labels,
)
# 3. Output
# Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic
hidden_states = self.norm_out(hidden_states)
# hidden_states = self.proj_out(hidden_states)
if unpatchify:
# unpatchify
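# The sequence dimension holds height * width patch tokens, and the reshape below treats each
# token's feature vector as a flattened (patch_size, patch_size, out_channels) patch. The einsum
# rearranges (N, h, w, p, q, C) -> (N, C, h, p, w, q) so the final reshape yields an image of
# shape (N, C, height * patch_size, width * patch_size). A square token grid (height == width)
# is assumed.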
height = width = int(hidden_states.shape[1] ** 0.5)
hidden_states = hidden_states.reshape(
shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
)
hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
output = hidden_states.reshape(
shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
)
else:
output = hidden_states
if not return_dict:
return (output,)
return Transformer2DModelOutput(sample=output)
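# A minimal sketch of running UTransformer2DModel on VAE-like latents, using a small assumed
# configuration (the numbers below are illustrative, not a real checkpoint config). Note that the
# UniDiffuser wrapper further down calls this model with `unpatchify=False` and `return_dict=False`
# and applies its own output projection before unpatchifying, so the raw token output is used here.
import torch

model = UTransformer2DModel(
    num_attention_heads=8,
    attention_head_dim=64,  # inner_dim = 8 * 64 = 512
    in_channels=4,          # e.g. VAE latent channels
    num_layers=5,           # 2 in_blocks, 1 mid_block, 2 out_blocks
    sample_size=32,
    patch_size=2,
)
latents = torch.randn(1, 4, 32, 32)
tokens = model(latents, return_dict=False, unpatchify=False)[0]
assert tokens.shape == (1, (32 // 2) ** 2, 8 * 64)  # (batch, num_patches, inner_dim)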
class UniDiffuserModel(ModelMixin, ConfigMixin):
"""
Transformer model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a
modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the
CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details).
Parameters:
text_dim (`int`): The hidden dimension of the CLIP text model used to embed prompts.
clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed images.
num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
in_channels (`int`, *optional*):
Pass if the input is continuous. The number of channels in the input.
out_channels (`int`, *optional*):
The number of output channels; if `None`, defaults to `in_channels`.
num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
norm_num_groups (`int`, *optional*, defaults to `32`):
The number of groups to use when performing Group Normalization.
cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
attention_bias (`bool`, *optional*):
Configure if the TransformerBlocks' attention should contain a bias parameter.
sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
Note that this is fixed at training time as it is used for learning a number of position embeddings. See
`ImagePositionalEmbeddings`.
num_vector_embeds (`int`, *optional*):
Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
Includes the class for the masked latent pixel.
patch_size (`int`, *optional*, defaults to 2):
The patch size to use in the patch embedding.
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
The number of diffusion steps used during training. Note that this is fixed at training time as it is used
to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
up to but not more than `num_embeds_ada_norm` steps.
use_linear_projection (int, *optional*): TODO: Not used
only_cross_attention (`bool`, *optional*):
Whether to use only cross-attention layers. In this case two cross attention layers are used in each
transformer block.
upcast_attention (`bool`, *optional*):
Whether to upcast the query and key to float32 when performing the attention calculation.
norm_type (`str`, *optional*, defaults to `"layer_norm"`):
The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
block_type (`str`, *optional*, defaults to `"unidiffuser"`):
The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
behavior in `diffusers`).
pre_layer_norm (`bool`, *optional*):
Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
(`pre_layer_norm = False`).
norm_elementwise_affine (`bool`, *optional*):
Whether to use learnable per-element affine parameters during layer normalization.
use_patch_pos_embed (`bool`, *optional*):
Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
ff_final_dropout (`bool`, *optional*):
Whether to use a final Dropout layer after the feedforward network.
use_data_type_embedding (`bool`, *optional*):
Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1
is further trained from UniDiffuser-v0 on non-publicly-available data and accepts a `data_type`
argument, which can either be `1` to use the weights trained on the non-publicly-available data or `0`
otherwise. This argument is subsequently embedded by the data type embedding, if used.
"""
@register_to_config
def __init__(
self,
text_dim: int = 768,
clip_img_dim: int = 512,
num_text_tokens: int = 77,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
out_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
sample_size: Optional[int] = None,
num_vector_embeds: Optional[int] = None,
patch_size: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
norm_type: str = "layer_norm",
block_type: str = "unidiffuser",
pre_layer_norm: bool = False,
use_timestep_embedding=False,
norm_elementwise_affine: bool = True,
use_patch_pos_embed=False,
ff_final_dropout: bool = True,
use_data_type_embedding: bool = False,
):
super().__init__()
# 0. Handle dimensions
self.inner_dim = num_attention_heads * attention_head_dim
assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size"
self.sample_size = sample_size
self.in_channels = in_channels
self.out_channels = in_channels if out_channels is None else out_channels
self.patch_size = patch_size
# Assume image is square...
self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size)
# 1. Define input layers
# 1.1 Input layers for text and image input
# For now, only support patch input for VAE latent image input
self.vae_img_in = PatchEmbed(
height=sample_size,
width=sample_size,
patch_size=patch_size,
in_channels=in_channels,
embed_dim=self.inner_dim,
use_pos_embed=use_patch_pos_embed,
)
self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim)
self.text_in = nn.Linear(text_dim, self.inner_dim)
# 1.2. Timestep embeddings for t_img, t_text
self.timestep_img_proj = Timesteps(
self.inner_dim,
flip_sin_to_cos=True,
downscale_freq_shift=0,
)
self.timestep_img_embed = (
TimestepEmbedding(
self.inner_dim,
4 * self.inner_dim,
out_dim=self.inner_dim,
)
if use_timestep_embedding
else nn.Identity()
)
self.timestep_text_proj = Timesteps(
self.inner_dim,
flip_sin_to_cos=True,
downscale_freq_shift=0,
)
self.timestep_text_embed = (
TimestepEmbedding(
self.inner_dim,
4 * self.inner_dim,
out_dim=self.inner_dim,
)
if use_timestep_embedding
else nn.Identity()
)
# 1.3. Positional embedding
self.num_text_tokens = num_text_tokens
self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches
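# Token layout, matching the concatenation order in `forward`:
#   [t_img token] + [t_text token] + [num_text_tokens text tokens] + [1 CLIP image token] + [num_patches VAE patch tokens]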
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim))
self.pos_embed_drop = nn.Dropout(p=dropout)
trunc_normal_(self.pos_embed, std=0.02)
# 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary
self.use_data_type_embedding = use_data_type_embedding
if self.use_data_type_embedding:
self.data_type_token_embedding = nn.Embedding(2, self.inner_dim)
self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim))
# 2. Define transformer blocks
self.transformer = UTransformer2DModel(
num_attention_heads=num_attention_heads,
attention_head_dim=attention_head_dim,
in_channels=in_channels,
out_channels=out_channels,
num_layers=num_layers,
dropout=dropout,
norm_num_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attention_bias=attention_bias,
sample_size=sample_size,
num_vector_embeds=num_vector_embeds,
patch_size=patch_size,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
norm_type=norm_type,
block_type=block_type,
pre_layer_norm=pre_layer_norm,
norm_elementwise_affine=norm_elementwise_affine,
use_patch_pos_embed=use_patch_pos_embed,
ff_final_dropout=ff_final_dropout,
)
# 3. Define output layers
patch_dim = (patch_size**2) * out_channels
self.vae_img_out = nn.Linear(self.inner_dim, patch_dim)
self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim)
self.text_out = nn.Linear(self.inner_dim, text_dim)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed"}
def forward(
self,
latent_image_embeds: torch.Tensor,
image_embeds: torch.Tensor,
prompt_embeds: torch.Tensor,
timestep_img: Union[torch.Tensor, float, int],
timestep_text: Union[torch.Tensor, float, int],
data_type: Optional[Union[torch.Tensor, float, int]] = 1,
encoder_hidden_states=None,
cross_attention_kwargs=None,
):
"""
Args:
latent_image_embeds (`torch.Tensor` of shape `(batch size, latent channels, height, width)`):
Latent image representation from the VAE encoder.
image_embeds (`torch.Tensor` of shape `(batch size, 1, clip_img_dim)`):
CLIP-embedded image representation (unsqueezed in the first dimension).
prompt_embeds (`torch.Tensor` of shape `(batch size, seq_len, text_dim)`):
CLIP-embedded text representation.
timestep_img (`torch.long` or `float` or `int`):
Current denoising step for the image.
timestep_text (`torch.long` or `float` or `int`):
Current denoising step for the text.
data_type (`torch.int` or `float` or `int`, *optional*, defaults to `1`):
Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data,
or `0` otherwise.
encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
cross_attention_kwargs (*optional*):
Keyword arguments to supply to the cross attention layers, if used.
Returns:
`tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is the VAE
image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text
embedding.
"""
batch_size = latent_image_embeds.shape[0]
# 1. Input
# 1.1. Map inputs to shape (B, N, inner_dim)
vae_hidden_states = self.vae_img_in(latent_image_embeds)
clip_hidden_states = self.clip_img_in(image_embeds)
text_hidden_states = self.text_in(prompt_embeds)
num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1)
# 1.2. Encode image timesteps to single token (B, 1, inner_dim)
if not torch.is_tensor(timestep_img):
timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device)
timestep_img_token = self.timestep_img_proj(timestep_img)
# t_img_token does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
timestep_img_token = timestep_img_token.to(dtype=self.dtype)
timestep_img_token = self.timestep_img_embed(timestep_img_token)
timestep_img_token = timestep_img_token.unsqueeze(dim=1)
# 1.3. Encode text timesteps to single token (B, 1, inner_dim)
if not torch.is_tensor(timestep_text):
timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device)
timestep_text_token = self.timestep_text_proj(timestep_text)
# t_text_token does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
timestep_text_token = timestep_text_token.to(dtype=self.dtype)
timestep_text_token = self.timestep_text_embed(timestep_text_token)
timestep_text_token = timestep_text_token.unsqueeze(dim=1)
# 1.4. Concatenate all of the embeddings together.
if self.use_data_type_embedding:
assert data_type is not None, "data_type must be supplied if the model uses a data type embedding"
if not torch.is_tensor(data_type):
data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device)
data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1)
hidden_states = torch.cat(
[
timestep_img_token,
timestep_text_token,
data_type_token,
text_hidden_states,
clip_hidden_states,
vae_hidden_states,
],
dim=1,
)
else:
hidden_states = torch.cat(
[timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states],
dim=1,
)
# 1.5. Prepare the positional embeddings and add to hidden states
# Note: I think img_vae should always have the proper shape, so there's no need to interpolate
# the position embeddings.
if self.use_data_type_embedding:
pos_embed = torch.cat(
[self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1
)
else:
pos_embed = self.pos_embed
hidden_states = hidden_states + pos_embed
hidden_states = self.pos_embed_drop(hidden_states)
# 2. Blocks
hidden_states = self.transformer(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=None,
class_labels=None,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
hidden_states_is_embedding=True,
unpatchify=False,
)[0]
# 3. Output
# Split out the predicted noise representation.
if self.use_data_type_embedding:
(
t_img_token_out,
t_text_token_out,
data_type_token_out,
text_out,
img_clip_out,
img_vae_out,
) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1)
else:
t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split(
(1, 1, num_text_tokens, 1, num_img_tokens), dim=1
)
img_vae_out = self.vae_img_out(img_vae_out)
# unpatchify
height = width = int(img_vae_out.shape[1] ** 0.5)
img_vae_out = img_vae_out.reshape(
shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
)
img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out)
img_vae_out = img_vae_out.reshape(
shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
)
img_clip_out = self.clip_img_out(img_clip_out)
text_out = self.text_out(text_out)
return img_vae_out, img_clip_out, text_out
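# A minimal calling sketch for UniDiffuserModel with small, assumed dimensions (real UniDiffuser
# checkpoints use larger values, e.g. text_dim=768 and clip_img_dim=512):
import torch

model = UniDiffuserModel(
    text_dim=32,
    clip_img_dim=16,
    num_text_tokens=77,
    num_attention_heads=8,
    attention_head_dim=64,
    in_channels=4,
    num_layers=5,
    sample_size=32,
    patch_size=2,
)
latent_image_embeds = torch.randn(1, 4, 32, 32)  # VAE latents
image_embeds = torch.randn(1, 1, 16)             # CLIP image embedding
prompt_embeds = torch.randn(1, 77, 32)           # CLIP text embedding
img_vae_out, img_clip_out, text_out = model(
    latent_image_embeds, image_embeds, prompt_embeds, timestep_img=10, timestep_text=10
)
assert img_vae_out.shape == latent_image_embeds.shape
assert img_clip_out.shape == image_embeds.shape
assert text_out.shape == prompt_embeds.shape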
class CrossAttnStoreProcessor:
def __init__(self):
self.attention_probs = None
def __call__(
self,
attn,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
):
batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
query = attn.to_q(hidden_states)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
query = attn.head_to_batch_dim(query)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
self.attention_probs = attn.get_attention_scores(query, key, attention_mask)
hidden_states = torch.bmm(self.attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
return hidden_states
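# A minimal sketch of attaching the processor to a diffusers `Attention` module and reading back
# the stored attention probabilities; in the SAG pipeline the same idea is applied to a UNet
# mid-block self-attention layer. Dimensions below are assumed.
import torch
from diffusers.models.attention_processor import Attention

attn = Attention(query_dim=64, heads=4, dim_head=16)
store_processor = CrossAttnStoreProcessor()
attn.set_processor(store_processor)

hidden_states = torch.randn(2, 10, 64)
out = attn(hidden_states)                        # routed through CrossAttnStoreProcessor.__call__
probs = store_processor.attention_probs          # (batch * heads, query_len, key_len)
assert probs.shape == (2 * 4, 10, 10)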
class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for
more details about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
_exclude_from_cpu_offload = ["safety_checker"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder: Optional[CLIPVisionModelWithProjection] = None,
requires_safety_checker: bool = True,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
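# A typical usage sketch (checkpoint id and dtype are illustrative; `sag_scale` controls the
# strength of Self-Attention Guidance, with 0 disabling it):
#
#   import torch
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained(
#       "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75).images[0]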
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
lora_scale: Optional[float] = None,
**kwargs,
):
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards comp
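# `encode_prompt` returns (prompt_embeds, negative_prompt_embeds); the deprecated concatenated
# format puts the unconditional (negative) embeddings first, followed by the text embeddings.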
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
return prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
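                # The slice `model_max_length - 1 : -1` recovers exactly the tokens that were cut off
                # (excluding the final EOS token), so the warning shows the dropped text verbatim.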
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
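                # For example, clip_skip=1 selects hidden_states[-2], i.e. the output of the
                # penultimate encoder layer instead of the final one.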
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
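        # Shape: (batch_size, seq_len, dim) -> (batch_size * num_images_per_prompt, seq_len, dim)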
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
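            # Same duplication as for the positive embeddings above:
            # (batch_size, seq_len, dim) -> (batch_size * num_images_per_prompt, seq_len, dim)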
if self.text_encoder is not None:
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
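            # PIL images / numpy arrays are converted to pixel tensors by the CLIP feature extractor;
            # tensors passed in are assumed to already be preprocessed.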
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_enc_hidden_states = self.image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
num_images_per_prompt, dim=0
)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
def prepare_ip_adapter_image_embeds(
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
):
if ip_adapter_image_embeds is None:
if not isinstance(ip_adapter_image, list):
ip_adapter_image = [ip_adapter_image]
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
raise ValueError(
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
image_embeds = []
for single_ip_adapter_image, image_proj_layer in zip(
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
):
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
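                # `ImageProjection` (the plain IP-Adapter head) consumes pooled CLIP image embeddings,
                # so hidden states are only requested for other projection types (e.g. IP-Adapter Plus).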
single_image_embeds, single_negative_image_embeds = self.encode_image(
single_ip_adapter_image, device, 1, output_hidden_state
)
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
single_negative_image_embeds = torch.stack(
[single_negative_image_embeds] * num_images_per_prompt, dim=0
)
if do_classifier_free_guidance:
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
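                        # With classifier-free guidance the zero-image (negative) embedding is concatenated
                        # in front, matching the [uncond, cond] ordering used for the prompt embeddings.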
single_image_embeds = single_image_embeds.to(device)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
image_embeds.append(single_image_embeds)
else:
image_embeds = ip_adapter_image_embeds
        return image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
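        # For example, DDIMScheduler.step accepts `eta` while most other schedulers do not,
        # so the signature is inspected rather than assuming a particular scheduler.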
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
# Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
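        # The VAE downsamples spatial dimensions by a factor of 8, so height and width must be
        # multiples of 8 (e.g. a 512x512 image corresponds to a 64x64 latent).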
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
| 131 |
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py
|