# src/diffusers/models/autoencoders/vq_model.py
[High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
norm_type (`str`, *optional*, defaults to `"group"`):
Type of normalization layer to use. Can be one of `"group"` or `"spatial"`.
""" | 1,213 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vq_model.py |
@register_to_config
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int, ...] = (64,),
layers_per_block: int = 1,
act_fn: str = "silu",
latent_channels: int = 3,
sample_size: int = 32,
num_vq_embeddings: int = 256,
norm_num_groups: int = 32,
vq_embed_dim: Optional[int] = None,
scaling_factor: float = 0.18215,
norm_type: str = "group", # group, spatial
mid_block_add_attention=True,
lookup_from_codebook=False,
force_upcast=False,
):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels,
out_channels=latent_channels,
down_block_types=down_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
double_z=False,
mid_block_add_attention=mid_block_add_attention,
)
vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels,
out_channels=out_channels,
up_block_types=up_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
norm_type=norm_type,
mid_block_add_attention=mid_block_add_attention,
)
@apply_forward_hook
def encode(self, x: torch.Tensor, return_dict: bool = True) -> VQEncoderOutput:
h = self.encoder(x)
h = self.quant_conv(h)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=h)
@apply_forward_hook
def decode(
self, h: torch.Tensor, force_not_quantize: bool = False, return_dict: bool = True, shape=None
) -> Union[DecoderOutput, torch.Tensor]:
# also go through quantization layer
if not force_not_quantize:
quant, commit_loss, _ = self.quantize(h)
elif self.config.lookup_from_codebook:
quant = self.quantize.get_codebook_entry(h, shape)
commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
else:
quant = h
commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
quant2 = self.post_quant_conv(quant)
dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
if not return_dict:
return dec, commit_loss
return DecoderOutput(sample=dec, commit_loss=commit_loss)
def forward(
self, sample: torch.Tensor, return_dict: bool = True
) -> Union[DecoderOutput, Tuple[torch.Tensor, ...]]:
r"""
The [`VQModel`] forward method.
Args:
sample (`torch.Tensor`): Input sample.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.autoencoders.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.autoencoders.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.autoencoders.vae.DecoderOutput`] is returned, otherwise a
plain `tuple` is returned.
"""
h = self.encode(sample).latents
dec = self.decode(h)
if not return_dict:
return dec.sample, dec.commit_loss
return dec
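# Usage sketch (not part of the source above): a minimal VQModel round trip with the default
# config; shapes are illustrative and assume a diffusers install that exports VQModel.
import torch
from diffusers import VQModel

model = VQModel()                                # defaults: RGB in/out, a single down/up block
image = torch.randn(1, 3, 32, 32)                # (batch, channels, height, width)
latents = model.encode(image).latents            # continuous latents before quantization
output = model(image)                            # quantize + decode in one call
reconstruction, commit_loss = output.sample, output.commit_loss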
# src/diffusers/models/autoencoders/autoencoder_dc.py
class ResBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
norm_type: str = "batch_norm",
act_fn: str = "relu6",
) -> None:
super().__init__()
self.norm_type = norm_type
self.nonlinearity = get_activation(act_fn) if act_fn is not None else nn.Identity()
self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False)
self.norm = get_normalization(norm_type, out_channels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
residual = hidden_states
hidden_states = self.conv1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.norm_type == "rms_norm":
# move channels to the last dimension so that RMSNorm is applied across the channel dimension
hidden_states = self.norm(hidden_states.movedim(1, -1)).movedim(-1, 1)
else:
hidden_states = self.norm(hidden_states)
return hidden_states + residual
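# Sketch of the channels-last normalization trick used above (plain PyTorch, no affine weights):
# RMS-normalize over the channel axis by moving channels to the last dimension and back.
import torch

x = torch.randn(2, 64, 16, 16)                   # (batch, channels, height, width)
x_cl = x.movedim(1, -1)                          # -> (2, 16, 16, 64), channels last
rms = torch.sqrt(x_cl.pow(2).mean(dim=-1, keepdim=True) + 1e-5)
x_normed = (x_cl / rms).movedim(-1, 1)           # back to (2, 64, 16, 16)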
class EfficientViTBlock(nn.Module):
def __init__(
self,
in_channels: int,
mult: float = 1.0,
attention_head_dim: int = 32,
qkv_multiscales: Tuple[int, ...] = (5,),
norm_type: str = "batch_norm",
) -> None:
super().__init__()
self.attn = SanaMultiscaleLinearAttention(
in_channels=in_channels,
out_channels=in_channels,
mult=mult,
attention_head_dim=attention_head_dim,
norm_type=norm_type,
kernel_sizes=qkv_multiscales,
residual_connection=True,
)
self.conv_out = GLUMBConv(
in_channels=in_channels,
out_channels=in_channels,
norm_type="rms_norm",
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.attn(x)
x = self.conv_out(x)
return x
class DCDownBlock2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, downsample: bool = False, shortcut: bool = True) -> None:
super().__init__()
self.downsample = downsample
self.factor = 2
self.stride = 1 if downsample else 2
self.group_size = in_channels * self.factor**2 // out_channels
self.shortcut = shortcut
out_ratio = self.factor**2
if downsample:
assert out_channels % out_ratio == 0
out_channels = out_channels // out_ratio
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=self.stride,
padding=1,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
x = self.conv(hidden_states)
if self.downsample:
x = F.pixel_unshuffle(x, self.factor)
if self.shortcut:
y = F.pixel_unshuffle(hidden_states, self.factor)
y = y.unflatten(1, (-1, self.group_size))
y = y.mean(dim=2)
hidden_states = x + y
else:
hidden_states = x
return hidden_states
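# Shape sketch for the downsampling path above (illustrative channel counts): pixel_unshuffle
# trades spatial resolution for channels, and the shortcut averages channel groups so the
# residual matches the conv output.
import torch
import torch.nn.functional as F

hidden = torch.randn(1, 64, 32, 32)                # in_channels=64, out_channels=128, factor=2
down = F.pixel_unshuffle(hidden, 2)                # -> (1, 256, 16, 16): 64 * 2**2 channels
shortcut = down.unflatten(1, (-1, 2)).mean(dim=2)  # group_size = 64 * 4 // 128 = 2 -> (1, 128, 16, 16)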
class DCUpBlock2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
interpolate: bool = False,
shortcut: bool = True,
interpolation_mode: str = "nearest",
) -> None:
super().__init__()
self.interpolate = interpolate
self.interpolation_mode = interpolation_mode
self.shortcut = shortcut
self.factor = 2
self.repeats = out_channels * self.factor**2 // in_channels
out_ratio = self.factor**2
if not interpolate:
out_channels = out_channels * out_ratio
self.conv = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if self.interpolate:
x = F.interpolate(hidden_states, scale_factor=self.factor, mode=self.interpolation_mode)
x = self.conv(x)
else:
x = self.conv(hidden_states)
x = F.pixel_shuffle(x, self.factor)
if self.shortcut:
y = hidden_states.repeat_interleave(self.repeats, dim=1)
y = F.pixel_shuffle(y, self.factor)
hidden_states = x + y
else:
hidden_states = x
return hidden_states
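# Shape sketch for the upsampling path above (illustrative channel counts): repeat_interleave
# inflates the channels so that pixel_shuffle can fold them back into a 2x larger feature map.
import torch
import torch.nn.functional as F

hidden = torch.randn(1, 128, 16, 16)             # in_channels=128, out_channels=64, factor=2
shortcut = hidden.repeat_interleave(2, dim=1)    # repeats = 64 * 2**2 // 128 = 2 -> (1, 256, 16, 16)
shortcut = F.pixel_shuffle(shortcut, 2)          # -> (1, 64, 32, 32)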
class Encoder(nn.Module):
def __init__(
self,
in_channels: int,
latent_channels: int,
attention_head_dim: int = 32,
block_type: Union[str, Tuple[str]] = "ResBlock",
block_out_channels: Tuple[int] = (128, 256, 512, 512, 1024, 1024),
layers_per_block: Tuple[int] = (2, 2, 2, 2, 2, 2),
qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (), (5,), (5,), (5,)),
downsample_block_type: str = "pixel_unshuffle",
out_shortcut: bool = True,
):
super().__init__()
num_blocks = len(block_out_channels)
if isinstance(block_type, str):
block_type = (block_type,) * num_blocks
if layers_per_block[0] > 0:
self.conv_in = nn.Conv2d(
in_channels,
block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1],
kernel_size=3,
stride=1,
padding=1,
)
else:
self.conv_in = DCDownBlock2d(
in_channels=in_channels,
out_channels=block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1],
downsample=downsample_block_type == "pixel_unshuffle",
shortcut=False,
)
down_blocks = []
for i, (out_channel, num_layers) in enumerate(zip(block_out_channels, layers_per_block)):
down_block_list = []
for _ in range(num_layers):
block = get_block(
block_type[i],
out_channel,
out_channel,
attention_head_dim=attention_head_dim,
norm_type="rms_norm",
act_fn="silu",
qkv_mutliscales=qkv_multiscales[i],
)
down_block_list.append(block)
if i < num_blocks - 1 and num_layers > 0:
downsample_block = DCDownBlock2d(
in_channels=out_channel,
out_channels=block_out_channels[i + 1],
downsample=downsample_block_type == "pixel_unshuffle",
shortcut=True,
)
down_block_list.append(downsample_block)
down_blocks.append(nn.Sequential(*down_block_list))
self.down_blocks = nn.ModuleList(down_blocks)
self.conv_out = nn.Conv2d(block_out_channels[-1], latent_channels, 3, 1, 1)
self.out_shortcut = out_shortcut
if out_shortcut:
self.out_shortcut_average_group_size = block_out_channels[-1] // latent_channels
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv_in(hidden_states)
for down_block in self.down_blocks:
hidden_states = down_block(hidden_states)
if self.out_shortcut:
x = hidden_states.unflatten(1, (-1, self.out_shortcut_average_group_size))
x = x.mean(dim=2)
hidden_states = self.conv_out(hidden_states) + x
else:
hidden_states = self.conv_out(hidden_states)
return hidden_states
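# Quick check of the compression implied by the default encoder config above (a sketch): with
# six blocks there are five DCDownBlock2d stages, giving a 2**5 = 32x spatial reduction.
block_out_channels = (128, 256, 512, 512, 1024, 1024)
spatial_compression = 2 ** (len(block_out_channels) - 1)
assert spatial_compression == 32                 # e.g. a 512x512 image maps to a 16x16 latent grid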
class Decoder(nn.Module):
def __init__(
self,
in_channels: int,
latent_channels: int,
attention_head_dim: int = 32,
block_type: Union[str, Tuple[str]] = "ResBlock",
block_out_channels: Tuple[int] = (128, 256, 512, 512, 1024, 1024),
layers_per_block: Tuple[int] = (2, 2, 2, 2, 2, 2),
qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (), (5,), (5,), (5,)),
norm_type: Union[str, Tuple[str]] = "rms_norm",
act_fn: Union[str, Tuple[str]] = "silu",
upsample_block_type: str = "pixel_shuffle",
in_shortcut: bool = True,
):
super().__init__()
num_blocks = len(block_out_channels)
if isinstance(block_type, str):
block_type = (block_type,) * num_blocks
if isinstance(norm_type, str):
norm_type = (norm_type,) * num_blocks
if isinstance(act_fn, str):
act_fn = (act_fn,) * num_blocks
self.conv_in = nn.Conv2d(latent_channels, block_out_channels[-1], 3, 1, 1)
self.in_shortcut = in_shortcut
if in_shortcut:
self.in_shortcut_repeats = block_out_channels[-1] // latent_channels
up_blocks = []
for i, (out_channel, num_layers) in reversed(list(enumerate(zip(block_out_channels, layers_per_block)))):
up_block_list = []
if i < num_blocks - 1 and num_layers > 0:
upsample_block = DCUpBlock2d(
block_out_channels[i + 1],
out_channel,
interpolate=upsample_block_type == "interpolate",
shortcut=True,
)
up_block_list.append(upsample_block)
for _ in range(num_layers):
block = get_block(
block_type[i],
out_channel,
out_channel,
attention_head_dim=attention_head_dim,
norm_type=norm_type[i],
act_fn=act_fn[i],
qkv_mutliscales=qkv_multiscales[i],
)
up_block_list.append(block)
up_blocks.insert(0, nn.Sequential(*up_block_list))
self.up_blocks = nn.ModuleList(up_blocks)
channels = block_out_channels[0] if layers_per_block[0] > 0 else block_out_channels[1]
self.norm_out = RMSNorm(channels, 1e-5, elementwise_affine=True, bias=True)
self.conv_act = nn.ReLU()
self.conv_out = None
if layers_per_block[0] > 0:
self.conv_out = nn.Conv2d(channels, in_channels, 3, 1, 1)
else:
self.conv_out = DCUpBlock2d(
channels, in_channels, interpolate=upsample_block_type == "interpolate", shortcut=False
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if self.in_shortcut:
x = hidden_states.repeat_interleave(self.in_shortcut_repeats, dim=1)
hidden_states = self.conv_in(hidden_states) + x
else:
hidden_states = self.conv_in(hidden_states)
for up_block in reversed(self.up_blocks):
hidden_states = up_block(hidden_states)
hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1)
hidden_states = self.conv_act(hidden_states)
hidden_states = self.conv_out(hidden_states)
return hidden_states
class AutoencoderDC(ModelMixin, ConfigMixin, FromOriginalModelMixin):
r"""
An Autoencoder model introduced in [DCAE](https://arxiv.org/abs/2410.10733) and used in
[SANA](https://arxiv.org/abs/2410.10629).
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Args:
in_channels (`int`, defaults to `3`):
The number of input channels in samples.
latent_channels (`int`, defaults to `32`):
The number of channels in the latent space representation.
encoder_block_types (`Union[str, Tuple[str]]`, defaults to `"ResBlock"`):
The type(s) of block to use in the encoder.
decoder_block_types (`Union[str, Tuple[str]]`, defaults to `"ResBlock"`):
The type(s) of block to use in the decoder.
encoder_block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512, 1024, 1024)`):
The number of output channels for each block in the encoder.
decoder_block_out_channels (`Tuple[int, ...]`, defaults to `(128, 256, 512, 512, 1024, 1024)`):
The number of output channels for each block in the decoder.
encoder_layers_per_block (`Tuple[int]`, defaults to `(2, 2, 2, 3, 3, 3)`):
The number of layers per block in the encoder.
decoder_layers_per_block (`Tuple[int]`, defaults to `(3, 3, 3, 3, 3, 3)`):
The number of layers per block in the decoder.
encoder_qkv_multiscales (`Tuple[Tuple[int, ...], ...]`, defaults to `((), (), (), (5,), (5,), (5,))`):
Multi-scale configurations for the encoder's QKV (query-key-value) transformations.
decoder_qkv_multiscales (`Tuple[Tuple[int, ...], ...]`, defaults to `((), (), (), (5,), (5,), (5,))`):
Multi-scale configurations for the decoder's QKV (query-key-value) transformations.
upsample_block_type (`str`, defaults to `"pixel_shuffle"`):
The type of block to use for upsampling in the decoder.
downsample_block_type (`str`, defaults to `"pixel_unshuffle"`):
The type of block to use for downsampling in the encoder.
decoder_norm_types (`Union[str, Tuple[str]]`, defaults to `"rms_norm"`):
The normalization type(s) to use in the decoder.
decoder_act_fns (`Union[str, Tuple[str]]`, defaults to `"silu"`):
The activation function(s) to use in the decoder.
scaling_factor (`float`, defaults to `1.0`):
The multiplicative inverse of the root mean square of the latent features. This is used to scale the latent
space to have unit variance when training the diffusion model. The latents are scaled with the formula `z =
z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back
to the original scale with the formula: `z = 1 / scaling_factor * z`.
""" | 1,220 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_dc.py |
_supports_gradient_checkpointing = False
@register_to_config
def __init__(
self,
in_channels: int = 3,
latent_channels: int = 32,
attention_head_dim: int = 32,
encoder_block_types: Union[str, Tuple[str]] = "ResBlock",
decoder_block_types: Union[str, Tuple[str]] = "ResBlock",
encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512, 1024, 1024),
decoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512, 1024, 1024),
encoder_layers_per_block: Tuple[int] = (2, 2, 2, 3, 3, 3),
decoder_layers_per_block: Tuple[int] = (3, 3, 3, 3, 3, 3),
encoder_qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (), (5,), (5,), (5,)),
decoder_qkv_multiscales: Tuple[Tuple[int, ...], ...] = ((), (), (), (5,), (5,), (5,)),
upsample_block_type: str = "pixel_shuffle",
downsample_block_type: str = "pixel_unshuffle",
decoder_norm_types: Union[str, Tuple[str]] = "rms_norm",
decoder_act_fns: Union[str, Tuple[str]] = "silu",
scaling_factor: float = 1.0,
) -> None:
super().__init__()
self.encoder = Encoder(
in_channels=in_channels,
latent_channels=latent_channels,
attention_head_dim=attention_head_dim,
block_type=encoder_block_types,
block_out_channels=encoder_block_out_channels,
layers_per_block=encoder_layers_per_block,
qkv_multiscales=encoder_qkv_multiscales,
downsample_block_type=downsample_block_type,
)
self.decoder = Decoder(
in_channels=in_channels,
latent_channels=latent_channels,
attention_head_dim=attention_head_dim,
block_type=decoder_block_types,
block_out_channels=decoder_block_out_channels,
layers_per_block=decoder_layers_per_block,
qkv_multiscales=decoder_qkv_multiscales,
norm_type=decoder_norm_types,
act_fn=decoder_act_fns,
upsample_block_type=upsample_block_type,
)
self.spatial_compression_ratio = 2 ** (len(encoder_block_out_channels) - 1)
self.temporal_compression_ratio = 1
# When decoding a batch of latents, memory can be saved by slicing across the batch dimension
# and decoding a single latent at a time.
self.use_slicing = False
# When decoding spatially large latents, the memory requirement is very high. By breaking the latent
# spatially into smaller tiles, performing multiple forward passes for decoding, and then blending the
# intermediate tiles together, the memory requirement can be lowered.
self.use_tiling = False
# The minimal tile height and width for spatial tiling to be used
self.tile_sample_min_height = 512
self.tile_sample_min_width = 512
# The minimal distance between two spatial tiles
self.tile_sample_stride_height = 448
self.tile_sample_stride_width = 448
self.tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
self.tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
def enable_tiling(
self,
tile_sample_min_height: Optional[int] = None,
tile_sample_min_width: Optional[int] = None,
tile_sample_stride_height: Optional[float] = None,
tile_sample_stride_width: Optional[float] = None,
) -> None:
r"""
Enable tiled AE decoding. When this option is enabled, the AE will split the input tensor into tiles to compute
decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
Args:
tile_sample_min_height (`int`, *optional*):
The minimum height required for a sample to be separated into tiles across the height dimension.
tile_sample_min_width (`int`, *optional*):
The minimum width required for a sample to be separated into tiles across the width dimension.
tile_sample_stride_height (`int`, *optional*):
The stride between two consecutive vertical tiles. The overlap (minimum tile height minus this
stride) is blended to ensure that there are no tiling artifacts across the height dimension.
tile_sample_stride_width (`int`, *optional*):
The stride between two consecutive horizontal tiles. The overlap (minimum tile width minus this
stride) is blended to ensure that there are no tiling artifacts across the width dimension.
"""
self.use_tiling = True
self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
self.tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
self.tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
def disable_tiling(self) -> None:
r"""
Disable tiled AE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_tiling = False
def enable_slicing(self) -> None:
r"""
Enable sliced AE decoding. When this option is enabled, the AE will split the input tensor in slices to compute
decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.use_slicing = True
def disable_slicing(self) -> None:
r"""
Disable sliced AE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_slicing = False
def _encode(self, x: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = x.shape
if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
return self.tiled_encode(x, return_dict=False)[0]
encoded = self.encoder(x)
return encoded
@apply_forward_hook
def encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[EncoderOutput, Tuple[torch.Tensor]]:
r"""
Encode a batch of images into latents.
Args:
x (`torch.Tensor`): Input batch of images.
return_dict (`bool`, defaults to `True`):
Whether to return a [`~models.vae.EncoderOutput`] instead of a plain tuple.
Returns:
The latent representations of the encoded images. If `return_dict` is True, a
[`~models.vae.EncoderOutput`] is returned, otherwise a plain `tuple` is returned.
"""
if self.use_slicing and x.shape[0] > 1:
encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
encoded = torch.cat(encoded_slices)
else:
encoded = self._encode(x)
if not return_dict:
return (encoded,)
return EncoderOutput(latent=encoded)
def _decode(self, z: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = z.shape
if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height):
return self.tiled_decode(z, return_dict=False)[0]
decoded = self.decoder(z)
return decoded
@apply_forward_hook
def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
r"""
Decode a batch of images.
Args:
z (`torch.Tensor`): Input batch of latent vectors.
return_dict (`bool`, defaults to `True`):
Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
returned.
"""
if self.use_slicing and z.size(0) > 1:
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
decoded = torch.cat(decoded_slices)
else:
decoded = self._decode(z)
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded)
def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[2], b.shape[2], blend_extent)
for y in range(blend_extent):
b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[3], b.shape[3], blend_extent)
for x in range(blend_extent):
b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> torch.Tensor:
batch_size, num_channels, height, width = x.shape
latent_height = height // self.spatial_compression_ratio
latent_width = width // self.spatial_compression_ratio
tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
blend_height = tile_latent_min_height - tile_latent_stride_height
blend_width = tile_latent_min_width - tile_latent_stride_width
# Split x into overlapping tiles and encode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, x.shape[2], self.tile_sample_stride_height):
row = []
for j in range(0, x.shape[3], self.tile_sample_stride_width):
tile = x[:, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
if (
tile.shape[2] % self.spatial_compression_ratio != 0
or tile.shape[3] % self.spatial_compression_ratio != 0
):
pad_h = (self.spatial_compression_ratio - tile.shape[2]) % self.spatial_compression_ratio
pad_w = (self.spatial_compression_ratio - tile.shape[3]) % self.spatial_compression_ratio
tile = F.pad(tile, (0, pad_w, 0, pad_h))
tile = self.encoder(tile)
row.append(tile)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_height)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_width)
result_row.append(tile[:, :, :tile_latent_stride_height, :tile_latent_stride_width])
result_rows.append(torch.cat(result_row, dim=3))
encoded = torch.cat(result_rows, dim=2)[:, :, :latent_height, :latent_width]
if not return_dict:
return (encoded,)
return EncoderOutput(latent=encoded)
def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
batch_size, num_channels, height, width = z.shape
tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
# Split z into overlapping tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, height, tile_latent_stride_height):
row = []
for j in range(0, width, tile_latent_stride_width):
tile = z[:, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width]
decoded = self.decoder(tile)
row.append(decoded)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_height)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_width)
result_row.append(tile[:, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
result_rows.append(torch.cat(result_row, dim=3))
decoded = torch.cat(result_rows, dim=2)
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded)
def forward(self, sample: torch.Tensor, return_dict: bool = True) -> torch.Tensor:
encoded = self.encode(sample, return_dict=False)[0]
decoded = self.decode(encoded, return_dict=False)[0]
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded)
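# Usage sketch (not part of the source above): an AutoencoderDC round trip with the default
# config, assuming a diffusers release that exports AutoencoderDC. Shapes are illustrative.
import torch
from diffusers import AutoencoderDC

dc_ae = AutoencoderDC()                          # default config: 32x spatial compression, 32 latent channels
dc_ae.enable_tiling()                            # only kicks in for inputs larger than 512x512 by default
image = torch.randn(1, 3, 512, 512)
latent = dc_ae.encode(image).latent              # -> (1, 32, 16, 16) with the defaults
z = latent * dc_ae.config.scaling_factor         # scaled latent for the diffusion model (1.0 here)
reconstruction = dc_ae.decode(z / dc_ae.config.scaling_factor).sample   # -> (1, 3, 512, 512)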
# src/diffusers/models/autoencoders/vae.py
class EncoderOutput(BaseOutput):
r"""
Output of encoding method.
Args:
latent (`torch.Tensor` of shape `(batch_size, num_channels, latent_height, latent_width)`):
The encoded latent.
"""
latent: torch.Tensor
class DecoderOutput(BaseOutput):
r"""
Output of decoding method.
Args:
sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
The decoded output sample from the last layer of the model.
"""
sample: torch.Tensor
commit_loss: Optional[torch.FloatTensor] = None
class Encoder(nn.Module):
r"""
The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 3):
The number of output channels.
down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
options.
block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
The number of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2):
The number of layers per block.
norm_num_groups (`int`, *optional*, defaults to 32):
The number of groups for normalization.
act_fn (`str`, *optional*, defaults to `"silu"`):
The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
double_z (`bool`, *optional*, defaults to `True`):
Whether to double the number of output channels for the last block.
""" | 1,223 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
block_out_channels: Tuple[int, ...] = (64,),
layers_per_block: int = 2,
norm_num_groups: int = 32,
act_fn: str = "silu",
double_z: bool = True,
mid_block_add_attention=True,
):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(
in_channels,
block_out_channels[0],
kernel_size=3,
stride=1,
padding=1,
)
self.down_blocks = nn.ModuleList([])
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=self.layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
add_downsample=not is_final_block,
resnet_eps=1e-6,
downsample_padding=0,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attention_head_dim=output_channel,
temb_channels=None,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_eps=1e-6,
resnet_act_fn=act_fn,
output_scale_factor=1,
resnet_time_scale_shift="default",
attention_head_dim=block_out_channels[-1],
resnet_groups=norm_num_groups,
temb_channels=None,
add_attention=mid_block_add_attention,
)
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
self.conv_act = nn.SiLU()
conv_out_channels = 2 * out_channels if double_z else out_channels
self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
self.gradient_checkpointing = False
def forward(self, sample: torch.Tensor) -> torch.Tensor:
r"""The forward method of the `Encoder` class."""
sample = self.conv_in(sample)
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
# down
if is_torch_version(">=", "1.11.0"):
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(down_block), sample, use_reentrant=False
)
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block), sample, use_reentrant=False
)
else:
for down_block in self.down_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
# middle
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
else:
# down
for down_block in self.down_blocks:
sample = down_block(sample)
# middle
sample = self.mid_block(sample)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
return sample
class Decoder(nn.Module):
r"""
The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 3):
The number of output channels.
up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
The number of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2):
The number of layers per block.
norm_num_groups (`int`, *optional*, defaults to 32):
The number of groups for normalization.
act_fn (`str`, *optional*, defaults to `"silu"`):
The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
norm_type (`str`, *optional*, defaults to `"group"`):
The normalization type to use. Can be either `"group"` or `"spatial"`.
""" | 1,224 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int, ...] = (64,),
layers_per_block: int = 2,
norm_num_groups: int = 32,
act_fn: str = "silu",
norm_type: str = "group", # group, spatial
mid_block_add_attention=True,
):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(
in_channels,
block_out_channels[-1],
kernel_size=3,
stride=1,
padding=1,
)
self.up_blocks = nn.ModuleList([])
temb_channels = in_channels if norm_type == "spatial" else None
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_eps=1e-6,
resnet_act_fn=act_fn,
output_scale_factor=1,
resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
attention_head_dim=block_out_channels[-1],
resnet_groups=norm_num_groups,
temb_channels=temb_channels,
add_attention=mid_block_add_attention,
)
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
up_block = get_up_block(
up_block_type,
num_layers=self.layers_per_block + 1,
in_channels=prev_output_channel,
out_channels=output_channel,
prev_output_channel=None,
add_upsample=not is_final_block,
resnet_eps=1e-6,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attention_head_dim=output_channel,
temb_channels=temb_channels,
resnet_time_scale_shift=norm_type,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_type == "spatial":
self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
self.gradient_checkpointing = False
def forward(
self,
sample: torch.Tensor,
latent_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
r"""The forward method of the `Decoder` class."""
sample = self.conv_in(sample)
upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block),
sample,
latent_embeds,
use_reentrant=False,
)
sample = sample.to(upscale_dtype)
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(up_block),
sample,
latent_embeds,
use_reentrant=False,
)
else:
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block), sample, latent_embeds
)
sample = sample.to(upscale_dtype)
# up
for up_block in self.up_blocks:
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
else:
# middle
sample = self.mid_block(sample, latent_embeds)
sample = sample.to(upscale_dtype)
# up
for up_block in self.up_blocks:
sample = up_block(sample, latent_embeds)
# post-process
if latent_embeds is None:
sample = self.conv_norm_out(sample)
else:
sample = self.conv_norm_out(sample, latent_embeds)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
return sample
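# These Encoder/Decoder modules are the building blocks of diffusers' KL-regularized VAEs; a
# minimal sketch (not from this file) using the public AutoencoderKL wrapper with its small
# default config, assuming a diffusers install that exports AutoencoderKL.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL()                            # uses this Encoder (double_z=True) and Decoder
image = torch.randn(1, 3, 64, 64)
posterior = vae.encode(image).latent_dist        # a DiagonalGaussianDistribution (mean + logvar)
z = posterior.sample()                           # reparameterized sample
reconstruction = vae.decode(z).sample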
class UpSample(nn.Module):
r"""
The `UpSample` layer of a variational autoencoder that upsamples its input.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 3):
The number of output channels.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
) -> None:
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""The forward method of the `UpSample` class."""
x = torch.relu(x)
x = self.deconv(x)
return x
class MaskConditionEncoder(nn.Module):
"""
used in AsymmetricAutoencoderKL
"""
def __init__(
self,
in_ch: int,
out_ch: int = 192,
res_ch: int = 768,
stride: int = 16,
) -> None:
super().__init__()
channels = []
while stride > 1:
stride = stride // 2
in_ch_ = out_ch * 2
if out_ch > res_ch:
out_ch = res_ch
if stride == 1:
in_ch_ = res_ch
channels.append((in_ch_, out_ch))
out_ch *= 2
out_channels = []
for _in_ch, _out_ch in channels:
out_channels.append(_out_ch)
out_channels.append(channels[-1][0])
layers = []
in_ch_ = in_ch
for l in range(len(out_channels)):
out_ch_ = out_channels[l]
if l == 0 or l == 1:
layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1))
else:
layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1))
in_ch_ = out_ch_
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
r"""The forward method of the `MaskConditionEncoder` class."""
out = {}
for l in range(len(self.layers)):
layer = self.layers[l]
x = layer(x)
out[str(tuple(x.shape))] = x
x = torch.relu(x)
return out
class MaskConditionDecoder(nn.Module):
r"""The `MaskConditionDecoder` should be used in combination with [`AsymmetricAutoencoderKL`] to enhance the model's
decoder with a conditioner on the mask and masked image.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 3):
The number of output channels.
up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
The number of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2):
The number of layers per block.
norm_num_groups (`int`, *optional*, defaults to 32):
The number of groups for normalization.
act_fn (`str`, *optional*, defaults to `"silu"`):
The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
norm_type (`str`, *optional*, defaults to `"group"`):
The normalization type to use. Can be either `"group"` or `"spatial"`.
""" | 1,227 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int, ...] = (64,),
layers_per_block: int = 2,
norm_num_groups: int = 32,
act_fn: str = "silu",
norm_type: str = "group", # group, spatial
):
super().__init__()
self.layers_per_block = layers_per_block
self.conv_in = nn.Conv2d(
in_channels,
block_out_channels[-1],
kernel_size=3,
stride=1,
padding=1,
)
self.up_blocks = nn.ModuleList([])
temb_channels = in_channels if norm_type == "spatial" else None
# mid
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
resnet_eps=1e-6,
resnet_act_fn=act_fn,
output_scale_factor=1,
resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
attention_head_dim=block_out_channels[-1],
resnet_groups=norm_num_groups,
temb_channels=temb_channels,
)
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
up_block = get_up_block(
up_block_type,
num_layers=self.layers_per_block + 1,
in_channels=prev_output_channel,
out_channels=output_channel,
prev_output_channel=None,
add_upsample=not is_final_block,
resnet_eps=1e-6,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
attention_head_dim=output_channel,
temb_channels=temb_channels,
resnet_time_scale_shift=norm_type,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# condition encoder
self.condition_encoder = MaskConditionEncoder(
in_ch=out_channels,
out_ch=block_out_channels[0],
res_ch=block_out_channels[-1],
)
# out
if norm_type == "spatial":
self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
else:
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
self.conv_act = nn.SiLU()
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
self.gradient_checkpointing = False
def forward(
self,
z: torch.Tensor,
image: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
latent_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
r"""The forward method of the `MaskConditionDecoder` class."""
sample = z
sample = self.conv_in(sample)
upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block),
sample,
latent_embeds,
use_reentrant=False,
)
sample = sample.to(upscale_dtype)
# condition encoder
if image is not None and mask is not None:
masked_image = (1 - mask) * image
im_x = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.condition_encoder),
masked_image,
mask,
use_reentrant=False,
)
# up
for up_block in self.up_blocks:
if image is not None and mask is not None:
sample_ = im_x[str(tuple(sample.shape))]
mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
sample = sample * mask_ + sample_ * (1 - mask_)
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(up_block),
sample,
latent_embeds,
use_reentrant=False,
)
if image is not None and mask is not None:
sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
else:
# middle
sample = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block), sample, latent_embeds
)
sample = sample.to(upscale_dtype)
# condition encoder
if image is not None and mask is not None:
masked_image = (1 - mask) * image
im_x = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.condition_encoder),
masked_image,
mask,
)
# up
for up_block in self.up_blocks:
if image is not None and mask is not None:
sample_ = im_x[str(tuple(sample.shape))]
mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
sample = sample * mask_ + sample_ * (1 - mask_)
sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
if image is not None and mask is not None:
sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
else:
# middle
sample = self.mid_block(sample, latent_embeds)
sample = sample.to(upscale_dtype)
# condition encoder
if image is not None and mask is not None:
masked_image = (1 - mask) * image
im_x = self.condition_encoder(masked_image, mask)
# up
for up_block in self.up_blocks:
if image is not None and mask is not None:
sample_ = im_x[str(tuple(sample.shape))]
mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
sample = sample * mask_ + sample_ * (1 - mask_)
sample = up_block(sample, latent_embeds)
if image is not None and mask is not None:
sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
# post-process
if latent_embeds is None:
sample = self.conv_norm_out(sample)
else:
sample = self.conv_norm_out(sample, latent_embeds)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
return sample
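# Sketch of the per-resolution conditioning performed above (plain PyTorch, illustrative shapes):
# encoder features are stored keyed by their shape string, and at each decoder resolution the mask
# (1 = region to inpaint, 0 = known region) is downsampled so known regions come from the condition
# features while masked regions come from the decoder.
import torch
import torch.nn.functional as F

features = {str((1, 64, 32, 32)): torch.randn(1, 64, 32, 32)}  # e.g. output of MaskConditionEncoder
sample = torch.randn(1, 64, 32, 32)                            # current decoder features
mask = torch.randint(0, 2, (1, 1, 256, 256)).float()           # 1 = inpaint, 0 = keep original

cond = features[str(tuple(sample.shape))]
mask_ = F.interpolate(mask, size=sample.shape[-2:], mode="nearest")
sample = sample * mask_ + cond * (1 - mask_)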
class VectorQuantizer(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
multiplications and allows for post-hoc remapping of indices.
"""
# NOTE: due to a bug the beta term was applied to the wrong term. for
# backwards compatibility we use the buggy version by default, but you can
# specify legacy=False to fix it.
def __init__(
self,
n_e: int,
vq_embed_dim: int,
beta: float,
remap=None,
unknown_index: str = "random",
sane_index_shape: bool = False,
legacy: bool = True,
):
super().__init__()
self.n_e = n_e
self.vq_embed_dim = vq_embed_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.used: torch.Tensor
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices."
)
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor:
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1)
unknown = match.sum(2) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor:
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
return back.reshape(ishape)
def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Tuple]:
# reshape z -> (batch, height, width, channel) and flatten
z = z.permute(0, 2, 3, 1).contiguous()
z_flattened = z.view(-1, self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q: torch.Tensor = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices)
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.Tensor:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
indices = indices.reshape(shape[0], -1) # add batch axis
indices = self.unmap_to_all(indices)
indices = indices.reshape(-1) # flatten again
# get quantized latent vectors
z_q: torch.Tensor = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q | 1,228 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
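Given flat token indices and a target (batch, height, width, channel) shape, `get_codebook_entry` rebuilds the channels-first quantized feature map with a plain embedding lookup (plus the optional un-remapping). A hedged sketch, reusing the hypothetical constructor arguments from the previous example:

import torch

quantizer = VectorQuantizer(n_e=256, vq_embed_dim=4, beta=0.25)   # assumed argument names

indices = torch.randint(0, 256, (2 * 8 * 8,))                     # flat token ids
z_q = quantizer.get_codebook_entry(indices, shape=(2, 8, 8, 4))   # (batch, height, width, channel)
print(z_q.shape)  # torch.Size([2, 4, 8, 8]) -- permuted back to channels-first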
class DiagonalGaussianDistribution(object):
def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean, device=self.parameters.device, dtype=self.parameters.dtype
)
def sample(self, generator: Optional[torch.Generator] = None) -> torch.Tensor:
# make sure sample is on the same device as the parameters and has same dtype
sample = randn_tensor(
self.mean.shape,
generator=generator,
device=self.parameters.device,
dtype=self.parameters.dtype,
)
x = self.mean + self.std * sample
return x | 1,229 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
    def kl(self, other: Optional["DiagonalGaussianDistribution"] = None) -> torch.Tensor:
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(
torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
dim=[1, 2, 3],
)
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,
dim=[1, 2, 3],
) | 1,229 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
    def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] = (1, 2, 3)) -> torch.Tensor:
if self.deterministic:
return torch.Tensor([0.0])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims,
)
def mode(self) -> torch.Tensor:
return self.mean | 1,229 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
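`DiagonalGaussianDistribution` splits its parameter tensor along the channel dimension into a mean half and a log-variance half; `sample` draws with the reparameterization trick, `kl` gives the closed-form KL divergence to a standard normal (or to another diagonal Gaussian), `nll` the Gaussian negative log-likelihood, and `mode` simply returns the mean. A minimal sketch with random parameters:

import torch

params = torch.randn(2, 8, 16, 16)     # channels = 2 * latent_channels (mean + logvar)
dist = DiagonalGaussianDistribution(params)

z = dist.sample()                      # reparameterized sample, shape (2, 4, 16, 16)
kl = dist.kl()                         # KL to N(0, I), one value per batch element
nll = dist.nll(z)                      # Gaussian negative log-likelihood of the sample
print(z.shape, kl.shape, nll.shape)    # torch.Size([2, 4, 16, 16]) torch.Size([2]) torch.Size([2])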
class EncoderTiny(nn.Module):
r"""
The `EncoderTiny` layer is a simpler version of the `Encoder` layer.
Args:
in_channels (`int`):
The number of input channels.
out_channels (`int`):
The number of output channels.
num_blocks (`Tuple[int, ...]`):
            Each value of the tuple gives the number of `AutoencoderTinyBlock` layers that follow the Conv2d layer of
            the corresponding stage.
block_out_channels (`Tuple[int, ...]`):
The number of output channels for each block.
act_fn (`str`):
The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
num_blocks: Tuple[int, ...],
block_out_channels: Tuple[int, ...],
act_fn: str,
):
super().__init__() | 1,230 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
layers = []
for i, num_block in enumerate(num_blocks):
num_channels = block_out_channels[i]
if i == 0:
layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1))
else:
layers.append(
nn.Conv2d(
num_channels,
num_channels,
kernel_size=3,
padding=1,
stride=2,
bias=False,
)
)
for _ in range(num_block):
layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))
layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1))
self.layers = nn.Sequential(*layers)
self.gradient_checkpointing = False | 1,230 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""The forward method of the `EncoderTiny` class."""
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
else:
x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)
else:
# scale image from [-1, 1] to [0, 1] to match TAESD convention
x = self.layers(x.add(1).div(2))
return x | 1,230 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
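Every stage after the first starts with a stride-2 convolution, so an encoder built with `n` stages downsamples the input by `2 ** (n - 1)`. A small sketch using the constructor signature shown above; the concrete channel and block counts are illustrative:

import torch

encoder = EncoderTiny(
    in_channels=3,
    out_channels=4,                      # latent channels
    num_blocks=(1, 3, 3, 3),             # AutoencoderTinyBlocks per stage
    block_out_channels=(64, 64, 64, 64),
    act_fn="relu",
)

image = torch.rand(1, 3, 256, 256) * 2 - 1   # image in [-1, 1], diffusers convention
latents = encoder(image)                      # rescaled to [0, 1] internally, then encoded
print(latents.shape)                          # torch.Size([1, 4, 32, 32]) -- 2**3 downsampling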
class DecoderTiny(nn.Module):
r"""
The `DecoderTiny` layer is a simpler version of the `Decoder` layer.
Args:
in_channels (`int`):
The number of input channels.
out_channels (`int`):
The number of output channels.
num_blocks (`Tuple[int, ...]`):
            Each value of the tuple gives the number of `AutoencoderTinyBlock` layers that follow the Conv2d layer of
            the corresponding stage.
block_out_channels (`Tuple[int, ...]`):
The number of output channels for each block.
upsampling_scaling_factor (`int`):
The scaling factor to use for upsampling.
act_fn (`str`):
The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
""" | 1,231 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
def __init__(
self,
in_channels: int,
out_channels: int,
num_blocks: Tuple[int, ...],
block_out_channels: Tuple[int, ...],
upsampling_scaling_factor: int,
act_fn: str,
upsample_fn: str,
):
super().__init__()
layers = [
nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1),
get_activation(act_fn),
]
for i, num_block in enumerate(num_blocks):
is_final_block = i == (len(num_blocks) - 1)
num_channels = block_out_channels[i]
for _ in range(num_block):
layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))
if not is_final_block:
layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor, mode=upsample_fn)) | 1,231 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
conv_out_channel = num_channels if not is_final_block else out_channels
layers.append(
nn.Conv2d(
num_channels,
conv_out_channel,
kernel_size=3,
padding=1,
bias=is_final_block,
)
)
self.layers = nn.Sequential(*layers)
self.gradient_checkpointing = False
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""The forward method of the `DecoderTiny` class."""
        # Soft-clamp the latents to (-3, 3) with a scaled tanh.
        x = torch.tanh(x / 3) * 3
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward | 1,231 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
if is_torch_version(">=", "1.11.0"):
x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
else:
x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)
else:
x = self.layers(x)
# scale image from [0, 1] to [-1, 1] to match diffusers convention
return x.mul(2).sub(1) | 1,231 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/vae.py |
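`DecoderTiny` mirrors the encoder: it soft-clamps the incoming latents with a scaled tanh, upsamples between stages, and maps its [0, 1] output back to the [-1, 1] convention. A sketch that pairs with the encoder example above (again, the concrete values are illustrative):

import torch

decoder = DecoderTiny(
    in_channels=4,                       # latent channels
    out_channels=3,
    num_blocks=(3, 3, 3, 1),
    block_out_channels=(64, 64, 64, 64),
    upsampling_scaling_factor=2,
    act_fn="relu",
    upsample_fn="nearest",
)

latents = torch.randn(1, 4, 32, 32)
image = decoder(latents)                 # three 2x upsamplings: 32 -> 256
print(image.shape)                       # torch.Size([1, 3, 256, 256])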
class AutoencoderTinyOutput(BaseOutput):
"""
Output of AutoencoderTiny encoding method.
Args:
latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
"""
latents: torch.Tensor | 1,232 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_tiny.py |
class AutoencoderTiny(ModelMixin, ConfigMixin):
r"""
A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
[`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
all models (such as downloading or saving). | 1,233 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_tiny.py |
Parameters:
in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
Tuple of integers representing the number of output channels for each encoder block. The length of the
tuple should be equal to the number of encoder blocks.
decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
Tuple of integers representing the number of output channels for each decoder block. The length of the
tuple should be equal to the number of decoder blocks.
act_fn (`str`, *optional*, defaults to `"relu"`):
Activation function to be used throughout the model.
latent_channels (`int`, *optional*, defaults to 4): | 1,233 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_tiny.py |
Number of channels in the latent representation. The latent space acts as a compressed representation of
the input image.
upsampling_scaling_factor (`int`, *optional*, defaults to 2):
Scaling factor for upsampling in the decoder. It determines the size of the output image during the
upsampling process.
num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
number of encoder blocks.
num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
            length of the tuple should be equal to the number of stages in the decoder. Each stage has a different number of decoder blocks. | 1,233 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_tiny.py |
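A short, hedged usage sketch of the wrapper class documented above. The `madebyollin/taesd` repository is the commonly used TAESD checkpoint and is assumed here rather than taken from this file; the shapes assume its default 8x downsampling and 4 latent channels:

import torch
from diffusers import AutoencoderTiny

# Assumed checkpoint id for the distilled TAESD weights.
vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")

image = torch.rand(1, 3, 512, 512) * 2 - 1        # fake image in [-1, 1]
with torch.no_grad():
    latents = vae.encode(image).latents           # AutoencoderTinyOutput.latents
    recon = vae.decode(latents).sample            # DecoderOutput.sample
print(latents.shape, recon.shape)                 # (1, 4, 64, 64) and (1, 3, 512, 512)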