class SimpleCrossAttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
resolution_idx: Optional[int] = None,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attention_head_dim: int = 1,
cross_attention_dim: int = 1280,
output_scale_factor: float = 1.0,
add_upsample: bool = True,
skip_time_act: bool = False,
only_cross_attention: bool = False,
cross_attention_norm: Optional[str] = None,
):
super().__init__()
resnets = []
attentions = []
self.has_cross_attention = True
self.attention_head_dim = attention_head_dim
self.num_heads = out_channels // self.attention_head_dim
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
skip_time_act=skip_time_act,
)
)
processor = (
AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
)
attentions.append(
Attention(
query_dim=out_channels,
cross_attention_dim=out_channels,
heads=self.num_heads,
dim_head=self.attention_head_dim,
added_kv_proj_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
bias=True,
upcast_softmax=True,
only_cross_attention=only_cross_attention,
cross_attention_norm=cross_attention_norm,
processor=processor,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList(
[
ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
skip_time_act=skip_time_act,
up=True,
)
]
)
else:
self.upsamplers = None
self.gradient_checkpointing = False
self.resolution_idx = resolution_idx
def forward(
self,
hidden_states: torch.Tensor,
res_hidden_states_tuple: Tuple[torch.Tensor, ...],
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
if attention_mask is None:
# if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
mask = None if encoder_hidden_states is None else encoder_attention_mask
else:
# when attention_mask is defined: we don't even check for encoder_attention_mask.
# this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
# TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
# then we can simplify this whole if/else block to:
# mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
mask = attention_mask
for resnet, attn in zip(self.resnets, self.attentions):
# resnet
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=mask,
**cross_attention_kwargs,
)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=mask,
**cross_attention_kwargs,
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, temb)
return hidden_states
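# --- Hedged usage sketch (illustrative, not part of the library source) ---
# Rough illustration of driving a single-layer SimpleCrossAttnUpBlock2D. The batch size,
# channel counts, and sequence length below are assumptions chosen to satisfy the
# constraints in __init__ (out_channels divisible by attention_head_dim and by
# resnet_groups); they are not taken from any real UNet configuration.
import torch

block = SimpleCrossAttnUpBlock2D(
    in_channels=320,
    out_channels=640,
    prev_output_channel=640,
    temb_channels=1280,
    num_layers=1,
    attention_head_dim=64,                  # 640 // 64 = 10 attention heads
    cross_attention_dim=1280,
    add_upsample=True,
)
hidden = torch.randn(2, 640, 32, 32)        # matches prev_output_channel
skips = (torch.randn(2, 320, 32, 32),)      # one skip per layer, consumed from the end
temb = torch.randn(2, 1280)
context = torch.randn(2, 77, 1280)          # matches cross_attention_dim
out = block(hidden, skips, temb=temb, encoder_hidden_states=context)
# add_upsample=True doubles the spatial size: out.shape == (2, 640, 64, 64)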
class KUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
resolution_idx: int,
dropout: float = 0.0,
num_layers: int = 5,
resnet_eps: float = 1e-5,
resnet_act_fn: str = "gelu",
resnet_group_size: Optional[int] = 32,
add_upsample: bool = True,
):
super().__init__()
resnets = []
k_in_channels = 2 * out_channels
k_out_channels = in_channels
num_layers = num_layers - 1
for i in range(num_layers):
in_channels = k_in_channels if i == 0 else out_channels
groups = in_channels // resnet_group_size
groups_out = out_channels // resnet_group_size
resnets.append(
ResnetBlockCondNorm2D(
in_channels=in_channels,
out_channels=k_out_channels if (i == num_layers - 1) else out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=groups,
groups_out=groups_out,
dropout=dropout,
non_linearity=resnet_act_fn,
time_embedding_norm="ada_group",
conv_shortcut_bias=False,
)
)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([KUpsample2D()])
else:
self.upsamplers = None
self.gradient_checkpointing = False
self.resolution_idx = resolution_idx
def forward(
self,
hidden_states: torch.Tensor,
res_hidden_states_tuple: Tuple[torch.Tensor, ...],
temb: Optional[torch.Tensor] = None,
upsample_size: Optional[int] = None,
*args,
**kwargs,
) -> torch.Tensor:
if len(args) > 0 or kwargs.get("scale", None) is not None:
deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
deprecate("scale", "1.0.0", deprecation_message)
res_hidden_states_tuple = res_hidden_states_tuple[-1]
if res_hidden_states_tuple is not None:
hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1)
for resnet in self.resnets:
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states)
return hidden_states
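# --- Hedged usage sketch (illustrative, not part of the library source) ---
# Shows the channel bookkeeping of KUpBlock2D under assumed sizes: the skip tensor is
# concatenated to give k_in_channels = 2 * out_channels, the last resnet narrows back to
# k_out_channels = in_channels, and KUpsample2D doubles the spatial resolution.
import torch

k_up = KUpBlock2D(
    in_channels=256,                       # becomes the block's output channel count
    out_channels=512,
    temb_channels=512,
    resolution_idx=0,
)
hidden = torch.randn(2, 512, 16, 16)
skip = (torch.randn(2, 512, 16, 16),)      # concatenated with hidden along the channel dim
temb = torch.randn(2, 512)
out = k_up(hidden, skip, temb=temb)
# out.shape == (2, 256, 32, 32)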
class KCrossAttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
resolution_idx: int,
dropout: float = 0.0,
num_layers: int = 4,
resnet_eps: float = 1e-5,
resnet_act_fn: str = "gelu",
resnet_group_size: int = 32,
attention_head_dim: int = 1, # attention dim_head
cross_attention_dim: int = 768,
add_upsample: bool = True,
upcast_attention: bool = False,
):
super().__init__()
resnets = []
attentions = []
is_first_block = in_channels == out_channels == temb_channels
is_middle_block = in_channels != out_channels
add_self_attention = True if is_first_block else False
self.has_cross_attention = True
self.attention_head_dim = attention_head_dim
# in_channels, and out_channels for the block (k-unet)
k_in_channels = out_channels if is_first_block else 2 * out_channels
k_out_channels = in_channels
num_layers = num_layers - 1
for i in range(num_layers):
in_channels = k_in_channels if i == 0 else out_channels
groups = in_channels // resnet_group_size
groups_out = out_channels // resnet_group_size
if is_middle_block and (i == num_layers - 1):
conv_2d_out_channels = k_out_channels
else:
conv_2d_out_channels = None
resnets.append(
ResnetBlockCondNorm2D(
in_channels=in_channels,
out_channels=out_channels,
conv_2d_out_channels=conv_2d_out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=groups,
groups_out=groups_out,
dropout=dropout,
non_linearity=resnet_act_fn,
time_embedding_norm="ada_group",
conv_shortcut_bias=False,
)
)
attentions.append(
KAttentionBlock(
k_out_channels if (i == num_layers - 1) else out_channels,
k_out_channels // attention_head_dim
if (i == num_layers - 1)
else out_channels // attention_head_dim,
attention_head_dim,
cross_attention_dim=cross_attention_dim,
temb_channels=temb_channels,
attention_bias=True,
add_self_attention=add_self_attention,
cross_attention_norm="layer_norm",
upcast_attention=upcast_attention,
)
)
self.resnets = nn.ModuleList(resnets)
self.attentions = nn.ModuleList(attentions)
if add_upsample:
self.upsamplers = nn.ModuleList([KUpsample2D()])
else:
self.upsamplers = None
self.gradient_checkpointing = False
self.resolution_idx = resolution_idx
def forward(
self,
hidden_states: torch.Tensor,
res_hidden_states_tuple: Tuple[torch.Tensor, ...],
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
res_hidden_states_tuple = res_hidden_states_tuple[-1]
if res_hidden_states_tuple is not None:
hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1)
for resnet, attn in zip(self.resnets, self.attentions):
if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
emb=temb,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
emb=temb,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states)
return hidden_states
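# --- Hedged usage sketch (illustrative, not part of the library source) ---
# Exercises the "middle" configuration of KCrossAttnUpBlock2D (in_channels != out_channels)
# under assumed sizes: the skip is concatenated to give 2 * out_channels, the last resnet's
# output conv narrows to k_out_channels = in_channels, and KUpsample2D doubles the resolution.
import torch

k_cross_up = KCrossAttnUpBlock2D(
    in_channels=256,
    out_channels=512,
    temb_channels=512,
    resolution_idx=1,
    attention_head_dim=64,
    cross_attention_dim=768,
)
hidden = torch.randn(2, 512, 16, 16)
skip = (torch.randn(2, 512, 16, 16),)
temb = torch.randn(2, 512)
context = torch.randn(2, 77, 768)          # matches cross_attention_dim
out = k_cross_up(hidden, skip, temb=temb, encoder_hidden_states=context)
# out.shape == (2, 256, 32, 32)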
class KAttentionBlock(nn.Module):
r"""
A basic Transformer block.
Parameters:
dim (`int`): The number of channels in the input and output.
num_attention_heads (`int`): The number of heads to use for multi-head attention.
attention_head_dim (`int`): The number of channels in each head.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
attention_bias (`bool`, *optional*, defaults to `False`):
Configure if the attention layers should contain a bias parameter.
upcast_attention (`bool`, *optional*, defaults to `False`):
Set to `True` to upcast the attention computation to `float32`.
temb_channels (`int`, *optional*, defaults to 768):
The number of channels in the token embedding.
add_self_attention (`bool`, *optional*, defaults to `False`):
Set to `True` to add self-attention to the block.
cross_attention_norm (`str`, *optional*, defaults to `None`):
The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.
group_size (`int`, *optional*, defaults to 32):
The number of groups to separate the channels into for group normalization.
""" | 1,062 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/unets/unet_2d_blocks.py |
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout: float = 0.0,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
upcast_attention: bool = False,
temb_channels: int = 768, # for ada_group_norm
add_self_attention: bool = False,
cross_attention_norm: Optional[str] = None,
group_size: int = 32,
):
super().__init__()
self.add_self_attention = add_self_attention
# 1. Self-Attn
if add_self_attention:
self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size))
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
cross_attention_dim=None,
cross_attention_norm=None,
)
# 2. Cross-Attn
self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size))
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
cross_attention_norm=cross_attention_norm,
)
def _to_3d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor:
return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1)
def _to_4d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor:
return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight)
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
# TODO: mark emb as non-optional (self.norm2 requires it).
# requires assessing impact of change to positional param interface.
emb: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
# 1. Self-Attention
if self.add_self_attention:
norm_hidden_states = self.norm1(hidden_states, emb)
height, weight = norm_hidden_states.shape[2:]
norm_hidden_states = self._to_3d(norm_hidden_states, height, weight)
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
attn_output = self._to_4d(attn_output, height, weight)
hidden_states = attn_output + hidden_states
# 2. Cross-Attention/None
norm_hidden_states = self.norm2(hidden_states, emb)
height, weight = norm_hidden_states.shape[2:]
norm_hidden_states = self._to_3d(norm_hidden_states, height, weight)
attn_output = self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask,
**cross_attention_kwargs,
)
attn_output = self._to_4d(attn_output, height, weight)
hidden_states = attn_output + hidden_states
return hidden_states
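# --- Hedged illustration (assumed sizes, not part of the library source) ---
# The _to_3d / _to_4d helpers above flatten a (batch, channels, height, width) feature map
# into a (batch, height * width, channels) token sequence for attention and back again.
import torch

attn_block = KAttentionBlock(dim=64, num_attention_heads=8, attention_head_dim=8)
feature_map = torch.randn(2, 64, 8, 8)
tokens = attn_block._to_3d(feature_map, 8, 8)      # (2, 64, 64): one token per spatial position
restored = attn_block._to_4d(tokens, 8, 8)         # back to (2, 64, 8, 8)
assert restored.shape == feature_map.shape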
class MultiControlNetModel(ModelMixin):
r"""
Multiple `ControlNetModel` wrapper class for Multi-ControlNet
This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
compatible with `ControlNetModel`.
Args:
controlnets (`List[ControlNetModel]`):
Provides additional conditioning to the unet during the denoising process. You must set multiple
`ControlNetModel` as a list.
"""
def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
super().__init__()
self.nets = nn.ModuleList(controlnets)
def forward(
self,
sample: torch.Tensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
controlnet_cond: List[torch.Tensor],
conditioning_scale: List[float],
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guess_mode: bool = False,
return_dict: bool = True,
) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
down_samples, mid_sample = controlnet(
sample=sample,
timestep=timestep,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=image,
conditioning_scale=scale,
class_labels=class_labels,
timestep_cond=timestep_cond,
attention_mask=attention_mask,
added_cond_kwargs=added_cond_kwargs,
cross_attention_kwargs=cross_attention_kwargs,
guess_mode=guess_mode,
return_dict=return_dict,
)
# merge samples
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
is_main_process: bool = True,
save_function: Callable = None,
safe_serialization: bool = True,
variant: Optional[str] = None,
):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
[`~models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained`] class method.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to which to save. Will be created if it doesn't exist.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful when in distributed training like
TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
the main process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful on distributed training like TPUs when one
need to replace `torch.save` by another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
variant (`str`, *optional*):
If specified, weights are saved in the format pytorch_model.<variant>.bin.
"""
for idx, controlnet in enumerate(self.nets):
suffix = "" if idx == 0 else f"_{idx}"
controlnet.save_pretrained(
save_directory + suffix,
is_main_process=is_main_process,
save_function=save_function,
safe_serialization=safe_serialization,
variant=variant,
)
@classmethod
def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
r"""
Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you should first set it back in training mode with `model.train()`.
The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_path (`os.PathLike`):
A path to a *directory* containing model weights saved using
[`~models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained`], e.g.,
`./my_model_directory/controlnet`.
torch_dtype (`str` or `torch.dtype`, *optional*):
Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
will be automatically derived from the model's weights.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each
parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
same device.
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
GPU and the available CPU RAM if unset.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
setting this argument to `True` will raise an error.
variant (`str`, *optional*):
If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
ignored when using `from_flax`.
use_safetensors (`bool`, *optional*, defaults to `None`):
If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
`safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
`safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
"""
idx = 0
controlnets = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
model_path_to_load = pretrained_model_path
while os.path.isdir(model_path_to_load):
controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
controlnets.append(controlnet)
idx += 1
model_path_to_load = pretrained_model_path + f"_{idx}"
logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
if len(controlnets) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
)
return cls(controlnets)
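# --- Hedged usage sketch (illustrative, not part of the library source) ---
# save_pretrained writes the first ControlNet to <dir> and subsequent ones to <dir>_1,
# <dir>_2, ...; from_pretrained walks the same suffix pattern until a directory is missing.
# The directory name below is made up and the default-config ControlNets are only
# lightweight-in-concept placeholders for real, trained checkpoints.
multi = MultiControlNetModel([ControlNetModel(), ControlNetModel()])
multi.save_pretrained("./controlnet")                            # writes ./controlnet and ./controlnet_1
restored = MultiControlNetModel.from_pretrained("./controlnet")  # loads both directories back
assert len(restored.nets) == 2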
class HunyuanControlNetOutput(BaseOutput):
controlnet_block_samples: Tuple[torch.Tensor]
class HunyuanDiT2DControlNetModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
conditioning_channels: int = 3,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
patch_size: Optional[int] = None,
activation_fn: str = "gelu-approximate",
sample_size=32,
hidden_size=1152,
transformer_num_layers: int = 40,
mlp_ratio: float = 4.0,
cross_attention_dim: int = 1024,
cross_attention_dim_t5: int = 2048,
pooled_projection_dim: int = 1024,
text_len: int = 77,
text_len_t5: int = 256,
use_style_cond_and_image_meta_size: bool = True,
):
super().__init__()
self.num_heads = num_attention_heads
self.inner_dim = num_attention_heads * attention_head_dim
self.text_embedder = PixArtAlphaTextProjection(
in_features=cross_attention_dim_t5,
hidden_size=cross_attention_dim_t5 * 4,
out_features=cross_attention_dim,
act_fn="silu_fp32",
)
self.text_embedding_padding = nn.Parameter(
torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)
)
self.pos_embed = PatchEmbed(
height=sample_size,
width=sample_size,
in_channels=in_channels,
embed_dim=hidden_size,
patch_size=patch_size,
pos_embed_type=None,
)
self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(
hidden_size,
pooled_projection_dim=pooled_projection_dim,
seq_len=text_len_t5,
cross_attention_dim=cross_attention_dim_t5,
use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size,
)
# controlnet_blocks
self.controlnet_blocks = nn.ModuleList([])
# HunyuanDiT Blocks
self.blocks = nn.ModuleList(
[
HunyuanDiTBlock(
dim=self.inner_dim,
num_attention_heads=self.config.num_attention_heads,
activation_fn=activation_fn,
ff_inner_dim=int(self.inner_dim * mlp_ratio),
cross_attention_dim=cross_attention_dim,
qk_norm=True, # See http://arxiv.org/abs/2302.05442 for details.
skip=False, # always False as it is the first half of the model
)
for layer in range(transformer_num_layers // 2 - 1)
]
)
self.input_block = zero_module(nn.Linear(hidden_size, hidden_size))
for _ in range(len(self.blocks)):
controlnet_block = nn.Linear(hidden_size, hidden_size)
controlnet_block = zero_module(controlnet_block)
self.controlnet_blocks.append(controlnet_block)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the
corresponding cross attention processor. This is strongly recommended when setting trainable attention
processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
@classmethod
def from_transformer(
cls, transformer, conditioning_channels=3, transformer_num_layers=None, load_weights_from_transformer=True
):
config = transformer.config
activation_fn = config.activation_fn
attention_head_dim = config.attention_head_dim
cross_attention_dim = config.cross_attention_dim
cross_attention_dim_t5 = config.cross_attention_dim_t5
hidden_size = config.hidden_size
in_channels = config.in_channels
mlp_ratio = config.mlp_ratio
num_attention_heads = config.num_attention_heads
patch_size = config.patch_size
sample_size = config.sample_size
text_len = config.text_len
text_len_t5 = config.text_len_t5
conditioning_channels = conditioning_channels
transformer_num_layers = transformer_num_layers or config.transformer_num_layers
controlnet = cls(
conditioning_channels=conditioning_channels,
transformer_num_layers=transformer_num_layers,
activation_fn=activation_fn,
attention_head_dim=attention_head_dim,
cross_attention_dim=cross_attention_dim,
cross_attention_dim_t5=cross_attention_dim_t5,
hidden_size=hidden_size,
in_channels=in_channels,
mlp_ratio=mlp_ratio,
num_attention_heads=num_attention_heads,
patch_size=patch_size,
sample_size=sample_size,
text_len=text_len,
text_len_t5=text_len_t5,
)
if load_weights_from_transformer:
key = controlnet.load_state_dict(transformer.state_dict(), strict=False)
logger.warning(f"controlnet load from Hunyuan-DiT. missing_keys: {key[0]}")
return controlnet
def forward(
self,
hidden_states,
timestep,
controlnet_cond: torch.Tensor,
conditioning_scale: float = 1.0,
encoder_hidden_states=None,
text_embedding_mask=None,
encoder_hidden_states_t5=None,
text_embedding_mask_t5=None,
image_meta_size=None,
style=None,
image_rotary_emb=None,
return_dict=True,
):
"""
The [`HunyuanDiT2DControlNetModel`] forward method.
Args:
hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
The input tensor.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step.
controlnet_cond ( `torch.Tensor` ):
The conditioning input to ControlNet.
conditioning_scale ( `float` ):
Indicate the conditioning scale.
encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of `BertModel`.
text_embedding_mask: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of `BertModel`.
encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
text_embedding_mask_t5: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of T5 Text Encoder.
image_meta_size (torch.Tensor):
Conditional embedding indicating the image sizes
style: torch.Tensor:
Conditional embedding indicating the style
image_rotary_emb (`torch.Tensor`):
The image rotary embeddings to apply on query and key tensors during attention calculation.
return_dict: bool
Whether to return a dictionary.
""" | 1,065 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_hunyuan.py |
height, width = hidden_states.shape[-2:]
hidden_states = self.pos_embed(hidden_states) # b,c,H,W -> b, N, C
# 2. pre-process
hidden_states = hidden_states + self.input_block(self.pos_embed(controlnet_cond))
temb = self.time_extra_emb(
timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype
) # [B, D]
# text projection
batch_size, sequence_length, _ = encoder_hidden_states_t5.shape
encoder_hidden_states_t5 = self.text_embedder(
encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])
)
encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1)
encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1)
text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1)
text_embedding_mask = text_embedding_mask.unsqueeze(2).bool()
encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding)
block_res_samples = ()
for layer, block in enumerate(self.blocks):
hidden_states = block(
hidden_states,
temb=temb,
encoder_hidden_states=encoder_hidden_states,
image_rotary_emb=image_rotary_emb,
) # (N, L, D)
block_res_samples = block_res_samples + (hidden_states,)
controlnet_block_res_samples = ()
for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
block_res_sample = controlnet_block(block_res_sample)
controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
# 6. scaling
controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
if not return_dict:
return (controlnet_block_res_samples,)
return HunyuanControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
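# --- Hedged usage sketch (illustrative, not part of the library source) ---
# from_transformer copies the config (and optionally the weights) of an existing HunyuanDiT
# transformer into the control branch. The checkpoint id below is only an assumption for
# illustration; substitute whatever HunyuanDiT weights you actually use.
#
# from diffusers import HunyuanDiT2DModel
#
# transformer = HunyuanDiT2DModel.from_pretrained(
#     "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
# )
# controlnet = HunyuanDiT2DControlNetModel.from_transformer(transformer)
# # the control branch keeps roughly the first half of the transformer blocks and emits one
# # zero-initialized residual per block, scaled by `conditioning_scale` at forward time.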
class HunyuanDiT2DMultiControlNetModel(ModelMixin):
r"""
`HunyuanDiT2DMultiControlNetModel` wrapper class for Multi-HunyuanDiT2DControlNetModel
This module is a wrapper for multiple instances of the `HunyuanDiT2DControlNetModel`. The `forward()` API is
designed to be compatible with `HunyuanDiT2DControlNetModel`.
Args:
controlnets (`List[HunyuanDiT2DControlNetModel]`):
Provides additional conditioning to the unet during the denoising process. You must set multiple
`HunyuanDiT2DControlNetModel` as a list.
"""
def __init__(self, controlnets):
super().__init__()
self.nets = nn.ModuleList(controlnets)
def forward(
self,
hidden_states,
timestep,
controlnet_cond: torch.Tensor,
conditioning_scale: float = 1.0,
encoder_hidden_states=None,
text_embedding_mask=None,
encoder_hidden_states_t5=None,
text_embedding_mask_t5=None,
image_meta_size=None,
style=None,
image_rotary_emb=None,
return_dict=True,
):
"""
The [`HunyuanDiT2DControlNetModel`] forward method.
Args:
hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
The input tensor.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step.
controlnet_cond ( `torch.Tensor` ):
The conditioning input to ControlNet.
conditioning_scale ( `float` ):
Indicate the conditioning scale.
encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of `BertModel`.
text_embedding_mask: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of `BertModel`.
encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
text_embedding_mask_t5: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of T5 Text Encoder.
image_meta_size (torch.Tensor):
Conditional embedding indicating the image sizes
style: torch.Tensor:
Conditional embedding indicating the style
image_rotary_emb (`torch.Tensor`):
The image rotary embeddings to apply on query and key tensors during attention calculation.
return_dict: bool
Whether to return a dictionary.
"""
for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
block_samples = controlnet(
hidden_states=hidden_states,
timestep=timestep,
controlnet_cond=image,
conditioning_scale=scale,
encoder_hidden_states=encoder_hidden_states,
text_embedding_mask=text_embedding_mask,
encoder_hidden_states_t5=encoder_hidden_states_t5,
text_embedding_mask_t5=text_embedding_mask_t5,
image_meta_size=image_meta_size,
style=style,
image_rotary_emb=image_rotary_emb,
return_dict=return_dict,
)
# merge samples
if i == 0:
control_block_samples = block_samples
else:
control_block_samples = [
control_block_sample + block_sample
for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
]
control_block_samples = (control_block_samples,)
return control_block_samples
class ControlNetOutput(BaseOutput):
"""
The output of [`ControlNetModel`].
Args:
down_block_res_samples (`tuple[torch.Tensor]`):
A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
used to condition the original UNet's downsampling activations.
mid_block_res_sample (`torch.Tensor`):
The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
`(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
Output can be used to condition the original UNet's middle block activation.
"""
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
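# --- Hedged shape check (illustrative, not part of the library source) ---
# With the default block_out_channels (16, 32, 96, 256) the embedder applies three stride-2
# convolutions, so a 512x512 conditioning image comes out at the 64x64 latent resolution.
# The output channel count 320 below is an assumption matching a typical SD UNet.
import torch

cond_embed = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
image = torch.randn(1, 3, 512, 512)        # conditioning image in pixel space
features = cond_embed(image)
# features.shape == (1, 320, 64, 64)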
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
"""
A ControlNet model.
Args:
in_channels (`int`, defaults to 4):
The number of channels in the input sample.
flip_sin_to_cos (`bool`, defaults to `True`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, defaults to 0):
The frequency shift to apply to the time embedding.
down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, defaults to 2):
The number of layers per block.
downsample_padding (`int`, defaults to 1):
The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, defaults to 1):
The scale factor to use for the mid block.
act_fn (`str`, defaults to "silu"):
The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32):
The number of groups to use for the normalization. If None, normalization and activation layers are skipped
in post-processing.
norm_eps (`float`, defaults to 1e-5):
The epsilon to use for the normalization.
cross_attention_dim (`int`, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
The dimension of the attention heads.
use_linear_projection (`bool`, defaults to `False`):
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
num_class_embeds (`int`, *optional*, defaults to 0):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
upcast_attention (`bool`, defaults to `False`):
resnet_time_scale_shift (`str`, defaults to `"default"`):
Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
`class_embed_type="projection"`.
controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
The tuple of output channel for each block in the `conditioning_embedding` layer.
global_pool_conditions (`bool`, defaults to `False`):
TODO(Patrick) - unused parameter.
addition_embed_type_num_heads (`int`, defaults to 64):
The number of heads to use for the `TextTimeEmbedding` layer.
""" | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
_supports_gradient_checkpointing = True | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
@register_to_config
def __init__(
self,
in_channels: int = 4,
conditioning_channels: int = 3,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str, ...] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int, ...]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
global_pool_conditions: bool = False,
addition_embed_type_num_heads: int = 64,
):
super().__init__()
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
)
else:
self.encoder_hid_proj = None
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much,
# they are set to `cross_attention_dim` here, as this is exactly the required dimension for the currently
# only use case when `addition_embed_type == "text_image"` (Kandinsky 2.1).
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
elif addition_embed_type is not None:
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
# control net conditioning embedding
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0],
block_out_channels=conditioning_embedding_out_channels,
conditioning_channels=conditioning_channels,
)
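# The conditioning embedding above (defined elsewhere in this file) is a small strided conv stack;
# roughly, it maps the raw conditioning image, e.g. a (batch, 3, 512, 512) edge or depth map, down to
# the latent resolution with `block_out_channels[0]` channels so it can be added to the output of
# `conv_in`. The shapes here are an illustrative assumption for a 512x512 pipeline.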
self.down_blocks = nn.ModuleList([])
self.controlnet_down_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
# down
output_channel = block_out_channels[0]
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
controlnet_block = zero_module(controlnet_block)
self.controlnet_down_blocks.append(controlnet_block)
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1 | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads[i],
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
downsample_padding=downsample_padding,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention, | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
resnet_time_scale_shift=resnet_time_scale_shift,
)
self.down_blocks.append(down_block) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
for _ in range(layers_per_block):
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
controlnet_block = zero_module(controlnet_block)
self.controlnet_down_blocks.append(controlnet_block)
if not is_final_block:
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
controlnet_block = zero_module(controlnet_block)
self.controlnet_down_blocks.append(controlnet_block)
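# Worked example of the wiring above (assuming the common SD 1.5-style config with
# block_out_channels = (320, 640, 1280, 1280) and layers_per_block = 2, not values read from this file):
#   block 0: 320 -> 320, with downsampler      block 2: 640 -> 1280, with downsampler
#   block 1: 320 -> 640, with downsampler      block 3: 1280 -> 1280, final, no downsampler
# Together with the initial 1x1 conv added before the loop, this yields 1 + 4*2 + 3 = 12
# zero-initialized 1x1 convolutions in `controlnet_down_blocks`, one per down-block residual.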
# mid
mid_block_channel = block_out_channels[-1]
controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
controlnet_block = zero_module(controlnet_block)
self.controlnet_mid_block = controlnet_block | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
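# `zero_module` is used above but defined elsewhere in this file. A minimal sketch of the usual
# ControlNet "zero convolution" helper (an assumption about its behaviour, not a copy of the actual
# implementation): zero every parameter so the control branch contributes nothing at initialization
# and the copied UNet behaviour is preserved.
def _zero_module_sketch(module: nn.Module) -> nn.Module:
    # Zero-initialize all weights and biases of the given module in place.
    for p in module.parameters():
        nn.init.zeros_(p)
    return module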
if mid_block_type == "UNetMidBlock2DCrossAttn":
self.mid_block = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block[-1],
in_channels=mid_block_channel,
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads[-1],
resnet_groups=norm_num_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
elif mid_block_type == "UNetMidBlock2D":
self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
num_layers=0, | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_groups=norm_num_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
add_attention=False,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}") | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
@classmethod
def from_unet(
cls,
unet: UNet2DConditionModel,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
load_weights_from_unet: bool = True,
conditioning_channels: int = 3,
):
r"""
Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`]. | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
Parameters:
unet (`UNet2DConditionModel`):
The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
where applicable.
"""
transformer_layers_per_block = (
unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
)
encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
addition_time_embed_dim = (
unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
controlnet = cls(
encoder_hid_dim=encoder_hid_dim,
encoder_hid_dim_type=encoder_hid_dim_type,
addition_embed_type=addition_embed_type,
addition_time_embed_dim=addition_time_embed_dim,
transformer_layers_per_block=transformer_layers_per_block,
in_channels=unet.config.in_channels,
flip_sin_to_cos=unet.config.flip_sin_to_cos,
freq_shift=unet.config.freq_shift,
down_block_types=unet.config.down_block_types,
only_cross_attention=unet.config.only_cross_attention,
block_out_channels=unet.config.block_out_channels,
layers_per_block=unet.config.layers_per_block,
downsample_padding=unet.config.downsample_padding,
mid_block_scale_factor=unet.config.mid_block_scale_factor,
act_fn=unet.config.act_fn,
norm_num_groups=unet.config.norm_num_groups,
norm_eps=unet.config.norm_eps, | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
cross_attention_dim=unet.config.cross_attention_dim,
attention_head_dim=unet.config.attention_head_dim,
num_attention_heads=unet.config.num_attention_heads,
use_linear_projection=unet.config.use_linear_projection,
class_embed_type=unet.config.class_embed_type,
num_class_embeds=unet.config.num_class_embeds,
upcast_attention=unet.config.upcast_attention,
resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
mid_block_type=unet.config.mid_block_type,
controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
conditioning_embedding_out_channels=conditioning_embedding_out_channels,
conditioning_channels=conditioning_channels,
) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
if load_weights_from_unet:
controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
if controlnet.class_embedding:
controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
if hasattr(controlnet, "add_embedding"):
controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict())
controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
return controlnet | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
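# Minimal usage sketch for `from_unet` (the checkpoint name is illustrative only):
#
#     from diffusers import UNet2DConditionModel
#
#     unet = UNet2DConditionModel.from_pretrained(
#         "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet"
#     )
#     controlnet = ControlNetModel.from_unet(unet, conditioning_channels=3)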
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
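# Usage sketch: keys mirror the module paths, so a typical entry (illustrative name) looks like
#
#     procs = controlnet.attn_processors
#     # {"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": AttnProcessor2_0(), ...}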
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys()) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
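# Usage sketch (processor classes shown are illustrative; any compatible AttentionProcessor works):
#
#     controlnet.set_attn_processor(AttnProcessor2_0())          # one processor shared by all layers
#     procs = {name: AttnProcessor2_0() for name in controlnet.attn_processors}
#     controlnet.set_attn_processor(procs)                       # or one processor per layer, keyed by path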
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor) | 1,069 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet.py |
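# Usage sketch: after experimenting with custom processors, this restores the built-in default.
#
#     controlnet.set_default_attn_processor()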