class ControlNetUnionModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): """ A ControlNetUnion model.
Source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_union.py
Args: in_channels (`int`, defaults to 4): The number of channels in the input sample. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, defaults to 2): The number of layers per block. downsample_padding (`int`, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, defaults to 1):
The scale factor to use for the mid block. act_fn (`str`, defaults to "silu"): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If None, normalization and activation layers are skipped in post-processing. norm_eps (`float`, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`. attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): The dimension of the attention heads. use_linear_projection (`bool`, defaults to `False`): class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. upcast_attention (`bool`, defaults to `False`): resnet_time_scale_shift (`str`, defaults to `"default"`): Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
The channel order of the conditioning image. Will be converted to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(48, 96, 192, 384)`): The tuple of output channels for each block in the `conditioning_embedding` layer. global_pool_conditions (`bool`, defaults to `False`): """
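The configuration arguments documented above map directly onto the constructor that follows. A minimal construction sketch, using the signature defaults plus `addition_time_embed_dim`, which the control-type embedding in `__init__` multiplies by `num_control_type` and which real checkpoints therefore always set (256 is a typical value, assumed here):

from diffusers import ControlNetUnionModel

controlnet = ControlNetUnionModel(
    in_channels=4,                              # latent channels of the base UNet
    block_out_channels=(320, 640, 1280, 1280),  # per-block widths, mirrors SD-style UNets
    layers_per_block=2,
    cross_attention_dim=1280,
    addition_time_embed_dim=256,                # required by the control-type embedding; not a signature default
    num_control_type=6,                         # number of control modalities handled by one model
)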
_supports_gradient_checkpointing = True
@register_to_config def __init__( self, in_channels: int = 4, conditioning_channels: int = 3, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int, ...]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", projection_class_embeddings_input_dim: Optional[int] = None, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (48, 96, 192, 384), global_pool_conditions: bool = False, addition_embed_type_num_heads: int = 64, num_control_type: int = 6, num_trans_channel: int = 320, num_trans_head: int = 8, num_trans_layer: int = 1, num_proj_channel: int = 320, ): super().__init__()
# If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." )
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
# input conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, ) if encoder_hid_dim_type is not None: raise ValueError(f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None.") else: self.encoder_hid_proj = None
# class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. #
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None
if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") # control net conditioning embedding self.controlnet_cond_embedding = ControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, )
task_scale_factor = num_trans_channel**0.5 self.task_embedding = nn.Parameter(task_scale_factor * torch.randn(num_control_type, num_trans_channel)) self.transformer_layes = nn.ModuleList( [ResidualAttentionBlock(num_trans_channel, num_trans_head) for _ in range(num_trans_layer)] ) self.spatial_ch_projs = zero_module(nn.Linear(num_trans_channel, num_proj_channel)) self.control_type_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.control_add_embedding = TimestepEmbedding(addition_time_embed_dim * num_control_type, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block( down_block_type, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, downsample_padding=downsample_padding, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift, ) self.down_blocks.append(down_block)
for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) # mid mid_block_channel = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block
self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=mid_block_channel, temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, )
@classmethod def from_unet( cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), load_weights_from_unet: bool = True, ): r""" Instantiate a [`ControlNetUnionModel`] from [`UNet2DConditionModel`].
Parameters: unet (`UNet2DConditionModel`): The UNet model weights to copy to the [`ControlNetUnionModel`]. All configuration options are also copied where applicable. """ transformer_layers_per_block = ( unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 ) encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None addition_time_embed_dim = ( unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None )
controlnet = cls( encoder_hid_dim=encoder_hid_dim, encoder_hid_dim_type=encoder_hid_dim_type, addition_embed_type=addition_embed_type, addition_time_embed_dim=addition_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block, in_channels=unet.config.in_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps,
cross_attention_dim=unet.config.cross_attention_dim, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, class_embed_type=unet.config.class_embed_type, num_class_embeds=unet.config.num_class_embeds, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, conditioning_embedding_out_channels=conditioning_embedding_out_channels, )
if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if controlnet.class_embedding: controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False) return controlnet
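A rough usage sketch for `from_unet`; the checkpoint id is only a placeholder for any `UNet2DConditionModel` (an SDXL UNet here), and `load_weights_from_unet=True` copies the matching submodules as shown in the code above:

from diffusers import UNet2DConditionModel, ControlNetUnionModel

# Placeholder repo id; any UNet2DConditionModel can be used here.
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)
controlnet = ControlNetUnionModel.from_unet(
    unet,
    conditioning_embedding_out_channels=(16, 32, 96, 256),
    load_weights_from_unet=True,
)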
@property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
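A short sketch of how `attn_processors` and `set_attn_processor` are typically used together, assuming `controlnet` is an instance of this model; `AttnProcessor2_0` is an existing processor class in `diffusers.models.attention_processor`, used here only as an example:

from diffusers.models.attention_processor import AttnProcessor2_0

# Set a single processor instance for every Attention layer...
controlnet.set_attn_processor(AttnProcessor2_0())

# ...or pass a dict keyed by the names returned by `attn_processors`
# (the dict must contain exactly one entry per attention layer).
processors = {name: AttnProcessor2_0() for name in controlnet.attn_processors}
controlnet.set_attn_processor(processors)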
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop())
for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value: bool = False) -> None: if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): module.gradient_checkpointing = value
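Per the docstring above, a usage sketch for the accepted `slice_size` values (again assuming `controlnet` is an instance of this model):

controlnet.set_attention_slice("auto")   # halve each sliceable head dim: attention runs in two steps
controlnet.set_attention_slice("max")    # one slice at a time: maximum memory savings
controlnet.set_attention_slice(2)        # the same integer slice size for every sliceable layer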
def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.Tensor], control_type: torch.Tensor, control_type_idx: List[int], conditioning_scale: float = 1.0, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: """ The [`ControlNetUnionModel`] forward method.
Args: sample (`torch.Tensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`List[torch.Tensor]`): The conditional input tensors. control_type (`torch.Tensor`): A tensor of shape `(batch, num_control_type)` with values `0` or `1` depending on whether the control type is used. control_type_idx (`List[int]`): The indices of `control_type`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
guess_mode (`bool`, defaults to `False`): In this mode, the ControlNet encoder tries its best to recognize the content of the input even if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order != "rgb": raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1)
# 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) control_embeds = self.control_type_proj(control_type.flatten()) control_embeds = control_embeds.reshape((t_emb.shape[0], -1)) control_embeds = control_embeds.to(emb.dtype) control_emb = self.control_add_embedding(control_embeds) emb = emb + control_emb emb = emb + aug_emb if aug_emb is not None else emb # 2. pre-process sample = self.conv_in(sample) inputs = [] condition_list = [] for cond, control_idx in zip(controlnet_cond, control_type_idx): condition = self.controlnet_cond_embedding(cond) feat_seq = torch.mean(condition, dim=(2, 3)) feat_seq = feat_seq + self.task_embedding[control_idx] inputs.append(feat_seq.unsqueeze(1)) condition_list.append(condition)
condition = sample feat_seq = torch.mean(condition, dim=(2, 3)) inputs.append(feat_seq.unsqueeze(1)) condition_list.append(condition) x = torch.cat(inputs, dim=1) for layer in self.transformer_layes: x = layer(x) controlnet_cond_fuser = sample * 0.0 for idx, condition in enumerate(condition_list[:-1]): alpha = self.spatial_ch_projs(x[:, idx]) alpha = alpha.unsqueeze(-1).unsqueeze(-1) controlnet_cond_fuser += condition + alpha sample = sample + controlnet_cond_fuser
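A shape-level sketch (comments only) of the condition fusion implemented above, assuming the default `num_trans_channel=320` and two active control images, so the token sequence has three entries (two conditions plus the plain latent features); B is the batch size and H x W the latent resolution after `conv_in`:

# condition             : (B, 320, H, W)  output of controlnet_cond_embedding for one control image
# feat_seq              : (B, 320)        spatial mean of `condition`, plus the matching task embedding row
# x = cat(inputs, dim=1): (B, 3, 320)     per-condition tokens exchange information in `transformer_layes`
# alpha                 : (B, 320, 1, 1)  zero-initialized projection of a token, broadcast over H and W
# sample                : (B, 320, H, W)  conv_in features plus the summed `condition + alpha` terms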
# 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples
# 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) # 5. Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample)
# 6. scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample)
return ControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
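A hedged end-to-end call sketch for the forward method above, assuming `controlnet` is a checkpoint configured for SDXL (`cross_attention_dim=2048`, `addition_embed_type="text_time"`, `projection_class_embeddings_input_dim=2816`); every shape below is illustrative only:

import torch

batch = 1
down_samples, mid_sample = controlnet(
    sample=torch.randn(batch, 4, 128, 128),                   # noisy latents
    timestep=torch.tensor(999),
    encoder_hidden_states=torch.randn(batch, 77, 2048),       # prompt embeddings
    controlnet_cond=[torch.randn(batch, 3, 1024, 1024)],      # one preprocessed control image
    control_type=torch.tensor([[1, 0, 0, 0, 0, 0]] * batch),  # mark control type 0 as active
    control_type_idx=[0],
    conditioning_scale=1.0,
    added_cond_kwargs={
        "text_embeds": torch.randn(batch, 1280),
        "time_ids": torch.randn(batch, 6),
    },
    return_dict=False,
)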
class FluxControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] controlnet_single_block_samples: Tuple[torch.Tensor]
Source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flux.py
class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: List[int] = [16, 56, 56], num_mode: int = None, conditioning_embedding_channels: int = None, ): super().__init__() self.out_channels = in_channels self.inner_dim = num_attention_heads * attention_head_dim
self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = ( CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings ) self.time_text_embed = text_time_guidance_cls( embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim ) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ FluxTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_layers) ] )
self.single_transformer_blocks = nn.ModuleList( [ FluxSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_single_layers) ] ) # controlnet_blocks self.controlnet_blocks = nn.ModuleList([]) for _ in range(len(self.transformer_blocks)): self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.controlnet_single_blocks = nn.ModuleList([]) for _ in range(len(self.single_transformer_blocks)): self.controlnet_single_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.union = num_mode is not None if self.union: self.controlnet_mode_embedder = nn.Embedding(num_mode, self.inner_dim)
if conditioning_embedding_channels is not None: self.input_hint_block = ControlNetConditioningEmbedding( conditioning_embedding_channels=conditioning_embedding_channels, block_out_channels=(16, 16, 16, 16) ) self.controlnet_x_embedder = torch.nn.Linear(in_channels, self.inner_dim) else: self.input_hint_block = None self.controlnet_x_embedder = zero_module(torch.nn.Linear(in_channels, self.inner_dim)) self.gradient_checkpointing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self): r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by its weight name. """ # set recursively processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor): r""" Sets the attention processor to use to compute attention.
Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value
@classmethod def from_transformer( cls, transformer, num_layers: int = 4, num_single_layers: int = 10, attention_head_dim: int = 128, num_attention_heads: int = 24, load_weights_from_transformer=True, ): config = dict(transformer.config) config["num_layers"] = num_layers config["num_single_layers"] = num_single_layers config["attention_head_dim"] = attention_head_dim config["num_attention_heads"] = num_attention_heads controlnet = cls.from_config(config)
if load_weights_from_transformer: controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict()) controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict()) controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict()) controlnet.x_embedder.load_state_dict(transformer.x_embedder.state_dict()) controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False) controlnet.single_transformer_blocks.load_state_dict( transformer.single_transformer_blocks.state_dict(), strict=False ) controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder) return controlnet
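A usage sketch for `from_transformer`; the repo id is a placeholder for any Flux transformer checkpoint:

from diffusers import FluxTransformer2DModel, FluxControlNetModel

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer"
)
# A shallow ControlNet (4 double + 10 single blocks) initialized from the transformer weights.
controlnet = FluxControlNetModel.from_transformer(
    transformer, num_layers=4, num_single_layers=10
)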
def forward( self, hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, controlnet_mode: torch.Tensor = None, conditioning_scale: float = 1.0, encoder_hidden_states: torch.Tensor = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: """ The [`FluxControlNetModel`] forward method.
Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. controlnet_mode (`torch.Tensor`): The mode tensor of shape `(batch_size, 1)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected from the embeddings of input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step.
block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple.
Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." ) hidden_states = self.x_embedder(hidden_states)
if self.input_hint_block is not None: controlnet_cond = self.input_hint_block(controlnet_cond) batch_size, channels, height_pw, width_pw = controlnet_cond.shape height = height_pw // self.config.patch_size width = width_pw // self.config.patch_size controlnet_cond = controlnet_cond.reshape( batch_size, channels, height, self.config.patch_size, width, self.config.patch_size ) controlnet_cond = controlnet_cond.permute(0, 2, 4, 1, 3, 5) controlnet_cond = controlnet_cond.reshape(batch_size, height * width, -1) # add hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond)
timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 else: guidance = None temb = ( self.time_text_embed(timestep, pooled_projections) if guidance is None else self.time_text_embed(timestep, guidance, pooled_projections) ) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if self.union: # union mode if controlnet_mode is None: raise ValueError("`controlnet_mode` cannot be `None` when applying ControlNet-Union") # union mode emb controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode) encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1) txt_ids = torch.cat([txt_ids[:1], txt_ids], dim=0)
if txt_ids.ndim == 3: logger.warning( "Passing `txt_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning( "Passing `img_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) block_samples = () for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, image_rotary_emb, **ckpt_kwargs, )
else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) block_samples = block_samples + (hidden_states,) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) single_block_samples = () for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, temb, image_rotary_emb, **ckpt_kwargs, ) else: hidden_states = block( hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) single_block_samples = single_block_samples + (hidden_states[:, encoder_hidden_states.shape[1] :],) # controlnet block controlnet_block_samples = () for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks): block_sample = controlnet_block(block_sample) controlnet_block_samples = controlnet_block_samples + (block_sample,)
controlnet_single_block_samples = () for single_block_sample, controlnet_block in zip(single_block_samples, self.controlnet_single_blocks): single_block_sample = controlnet_block(single_block_sample) controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,) # scaling controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples] controlnet_single_block_samples = [sample * conditioning_scale for sample in controlnet_single_block_samples] controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples controlnet_single_block_samples = ( None if len(controlnet_single_block_samples) == 0 else controlnet_single_block_samples ) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale)
if not return_dict: return (controlnet_block_samples, controlnet_single_block_samples) return FluxControlNetOutput( controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples, )
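A shape-level call sketch for the forward method above, assuming `controlnet` is the FLUX.1-dev-initialized instance from the earlier `from_transformer` sketch (so `in_channels=64`, `joint_attention_dim=4096`, `pooled_projection_dim=768`, `guidance_embeds=True`); all sizes are illustrative:

import torch

batch, img_seq, txt_seq = 1, 4096, 512                 # 1024x1024 image -> 64x64 packed latent tokens
block_samples, single_block_samples = controlnet(
    hidden_states=torch.randn(batch, img_seq, 64),
    controlnet_cond=torch.randn(batch, img_seq, 64),   # control image packed the same way as the latents
    conditioning_scale=0.7,
    encoder_hidden_states=torch.randn(batch, txt_seq, 4096),
    pooled_projections=torch.randn(batch, 768),
    timestep=torch.tensor([1.0]),
    img_ids=torch.zeros(img_seq, 3),
    txt_ids=torch.zeros(txt_seq, 3),
    guidance=torch.tensor([3.5]),
    return_dict=False,
)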
class FluxMultiControlNetModel(ModelMixin): r""" A wrapper class for multiple `FluxControlNetModel` instances. This module wraps multiple instances of `FluxControlNetModel`; the `forward()` API is designed to be compatible with `FluxControlNetModel`. Args: controlnets (`List[FluxControlNetModel]`): Provide additional conditioning to the transformer during the denoising process. Pass multiple `FluxControlNetModel` instances as a list. """ def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets)
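A construction sketch for the wrapper; the repo ids are placeholders for any two `FluxControlNetModel` checkpoints:

from diffusers import FluxControlNetModel, FluxMultiControlNetModel

# Placeholder checkpoint ids; the same model could also be reused twice.
controlnet_a = FluxControlNetModel.from_pretrained("org/flux-controlnet-canny")
controlnet_b = FluxControlNetModel.from_pretrained("org/flux-controlnet-depth")
multi_controlnet = FluxMultiControlNetModel([controlnet_a, controlnet_b])
# forward() mirrors FluxControlNetModel.forward, but `controlnet_cond`,
# `controlnet_mode` and `conditioning_scale` are lists with one entry per net.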
def forward( self, hidden_states: torch.FloatTensor, controlnet_cond: List[torch.Tensor], controlnet_mode: List[torch.Tensor], conditioning_scale: List[float], encoder_hidden_states: torch.Tensor = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[FluxControlNetOutput, Tuple]: # ControlNet-Union with multiple conditions # only one ControlNet is loaded, to save memory if len(self.nets) == 1 and self.nets[0].union: controlnet = self.nets[0]
for i, (image, mode, scale) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale)): block_samples, single_block_samples = controlnet( hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict, )
# merge samples if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: control_block_samples = [ control_block_sample + block_sample for control_block_sample, block_sample in zip(control_block_samples, block_samples) ] control_single_block_samples = [ control_single_block_sample + block_sample for control_single_block_sample, block_sample in zip( control_single_block_samples, single_block_samples ) ]
# Regular Multi-ControlNets # all ControlNets are loaded into memory else: for i, (image, mode, scale, controlnet) in enumerate( zip(controlnet_cond, controlnet_mode, conditioning_scale, self.nets) ): block_samples, single_block_samples = controlnet( hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict, )
# merge samples if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: if block_samples is not None and control_block_samples is not None: control_block_samples = [ control_block_sample + block_sample for control_block_sample, block_sample in zip(control_block_samples, block_samples) ] if single_block_samples is not None and control_single_block_samples is not None: control_single_block_samples = [ control_single_block_sample + block_sample for control_single_block_sample, block_sample in zip( control_single_block_samples, single_block_samples ) ]
return control_block_samples, control_single_block_samples
1,091
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flux.py
class FlaxControlNetOutput(BaseOutput):
    """
    The output of [`FlaxControlNetModel`].

    Args:
        down_block_res_samples (`jnp.ndarray`):
            The residual samples from each down block, to be added to the corresponding UNet down block residuals.
        mid_block_res_sample (`jnp.ndarray`):
            The residual sample from the mid block, to be added to the UNet mid block residual.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
1,092
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] = (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
1,093
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv( channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv1) conv2 = nn.Conv( channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
1,093
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding
1,093
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
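A small shape-check sketch for the conditioning embedding above. With the default `block_out_channels=(16, 32, 96, 256)` there are three stride-2 convolutions, so the spatial resolution drops by a factor of 8; inputs are NHWC, matching the transpose performed later in `FlaxControlNetModel.__call__`. (Assumes `FlaxControlNetConditioningEmbedding` is importable from this module; the exact path may vary by diffusers version.)

import jax
import jax.numpy as jnp

emb = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=320)
cond = jnp.zeros((1, 64, 64, 3))                 # (batch, height, width, channels)
params = emb.init(jax.random.PRNGKey(0), cond)
out = emb.apply(params, cond)
print(out.shape)                                  # (1, 8, 8, 320): 64 / 2**3 = 8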
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    A ControlNet model.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
    implemented for all models (such as downloading or saving).

    This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768):
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
            The dimension of the cross attention features.
        dropout (`float`, *optional*, defaults to 0):
            Dropout probability for down, up and bottleneck blocks.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0):
            The frequency shift to apply to the time embedding.
        controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`):
            The channel order of the conditioning image. Will convert to `rgb` if it's `bgr`.
        conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`):
            The tuple of output channels for each block in the `conditioning_embedding` layer.
    """
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
sample_size: int = 32 in_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) only_cross_attention: Union[bool, Tuple[bool, ...]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 controlnet_conditioning_channel_order: str = "rgb" conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
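A sketch of initializing the Flax model from scratch with `init_weights` (loading pretrained weights via `FlaxControlNetModel.from_pretrained` is the usual route; this only exercises the method above):

import jax

model = FlaxControlNetModel(sample_size=32)          # other fields keep their defaults
params = model.init_weights(jax.random.PRNGKey(0))   # FrozenDict of parameters
# init_weights builds its own dummy inputs: a (1, 4, 32, 32) sample, a single timestep,
# (1, 1, cross_attention_dim) encoder states and a (1, 3, 256, 256) conditioning image.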
# If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
# time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) only_cross_attention = self.only_cross_attention if isinstance(only_cross_attention, bool): only_cross_attention = (only_cross_attention,) * len(self.down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(self.down_block_types) # down down_blocks = [] controlnet_down_blocks = [] output_channel = block_out_channels[0]
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
if down_block_type == "CrossAttnDownBlock2D": down_block = FlaxCrossAttnDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: down_block = FlaxDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
down_blocks.append(down_block) for _ in range(self.layers_per_block): controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block)
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
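For the default configuration (four down blocks, `layers_per_block=2`), the loop above creates 1 + 4 * 2 + 3 = 12 zero-initialized 1x1 convolutions, one per entry of `down_block_res_samples`. A quick sanity-check sketch of that count:

# Counting the zero convs produced for the default config.
block_out_channels = (320, 640, 1280, 1280)
layers_per_block = 2

num_zero_convs = 1  # the conv applied to the conv_in output
for i in range(len(block_out_channels)):
    num_zero_convs += layers_per_block              # one conv per resnet output
    if i != len(block_out_channels) - 1:
        num_zero_convs += 1                         # one conv per downsample output
print(num_zero_convs)  # 12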
self.down_blocks = down_blocks self.controlnet_down_blocks = controlnet_down_blocks # mid mid_block_channel = block_out_channels[-1] self.mid_block = FlaxUNetMidBlock2DCrossAttn( in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) self.controlnet_mid_block = nn.Conv( mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
    def __call__(
        self,
        sample: jnp.ndarray,
        timesteps: Union[jnp.ndarray, float, int],
        encoder_hidden_states: jnp.ndarray,
        controlnet_cond: jnp.ndarray,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]:
        r"""
        Args:
            sample (`jnp.ndarray`): (batch, channel, height, width) noisy input tensor
            timesteps (`jnp.ndarray` or `float` or `int`): timesteps
            encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
            controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor
            conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs
            return_dict (`bool`, *optional*, defaults to `True`):
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
                Whether or not to return a [`~models.controlnets.controlnet_flax.FlaxControlNetOutput`] instead of a
                plain tuple.
            train (`bool`, *optional*, defaults to `False`):
                Use deterministic functions and disable dropout when not training.
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
        Returns:
            [`~models.controlnets.controlnet_flax.FlaxControlNetOutput`] or `tuple`:
                [`~models.controlnets.controlnet_flax.FlaxControlNetOutput`] if `return_dict` is True, otherwise a
                `tuple`. When returning a tuple, the first element is the tuple of down block residual samples and
                the second element is the mid block residual sample.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
# 2. pre-process sample = jnp.transpose(sample, (0, 2, 3, 1)) sample = self.conv_in(sample) controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample += controlnet_cond # 3. down down_block_res_samples = (sample,) for down_block in self.down_blocks: if isinstance(down_block, FlaxCrossAttnDownBlock2D): sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) else: sample, res_samples = down_block(sample, t_emb, deterministic=not train) down_block_res_samples += res_samples # 4. mid sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()

        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
1,094
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnets/controlnet_flax.py
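Putting it together, a hedged end-to-end sketch of the Flax forward pass. Shapes follow the defaults assumed by `init_weights`; this is untested and intended only to show the calling convention and the structure of the outputs.

import jax
import jax.numpy as jnp

model = FlaxControlNetModel(sample_size=32)
params = model.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32))                  # NCHW latents
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280))    # text states, cross_attention_dim=1280
controlnet_cond = jnp.zeros((1, 3, 256, 256))       # conditioning image, 8x the latent size

down_res, mid_res = model.apply(
    {"params": params},
    sample,
    timesteps,
    encoder_hidden_states,
    controlnet_cond,
    conditioning_scale=1.0,
    return_dict=False,
)
print(len(down_res), mid_res.shape)                 # 12 scaled residuals and one mid-block sample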
class PerceiverAttention(nn.Module): def __init__(self, dim: int, dim_head: int = 64, heads: int = 8, kv_dim: Optional[int] = None): super().__init__() self.scale = dim_head**-0.5 self.dim_head = dim_head self.heads = heads inner_dim = dim_head * heads self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim) self.norm2 = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, dim, bias=False) def forward(self, image_embeds: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: # Apply normalization image_embeds = self.norm1(image_embeds) latents = self.norm2(latents) batch_size, seq_len, _ = latents.shape # Get batch size and sequence length
1,095
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/transformers/consisid_transformer_3d.py