# Excerpts from the diffusers model implementations. The snippets below start mid-file,
# so only the standard imports are reproduced here; the diffusers-internal building
# blocks they reference (ResnetBlock2D, TemporalConvLayer, Downsample2D, Upsample2D,
# Transformer2DModel, TransformerTemporalModel, SpatioTemporalResBlock,
# TransformerSpatioTemporalModel, Attention, BasicTransformerBlock, RMSNorm,
# GlobalResponseNorm, SkipFFTransformerBlock, TimestepEmbedding, ConvNextBlock helpers,
# apply_freeu, is_torch_version, deprecate, register_to_config, ...) are imported
# elsewhere in each source file and are elided in this excerpt.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

# Source: src/diffusers/models/unets/unet_3d_blocks.py
        # The excerpt opens inside the constructor of the 3D UNet mid block
        # (UNetMidBlock3DCrossAttn in this file); the signature above this point is elided.
        for _ in range(num_layers):
            attentions.append(
                Transformer2DModel(
                    in_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=in_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    upcast_attention=upcast_attention,
                )
            )
            temp_attentions.append(
                TransformerTemporalModel(
                    in_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=in_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                )
            )
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            temp_convs.append(
                TemporalConvLayer(
                    in_channels,
                    in_channels,
                    dropout=0.1,
                    norm_num_groups=resnet_groups,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        self.attentions = nn.ModuleList(attentions)
        self.temp_attentions = nn.ModuleList(temp_attentions)
    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        num_frames: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> torch.Tensor:
        hidden_states = self.resnets[0](hidden_states, temb)
        hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)
        for attn, temp_attn, resnet, temp_conv in zip(
            self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]
        ):
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            hidden_states = temp_attn(
                hidden_states,
                num_frames=num_frames,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)

        return hidden_states
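# --- Example (not part of the source file) ---
# A minimal smoke-test sketch for the mid block above (UNetMidBlock3DCrossAttn in this
# file). Video frames are folded into the batch dimension, so a (batch, frames, C, H, W)
# video enters as (batch * frames, C, H, W). The channel counts, spatial size, and
# text-embedding width below are illustrative assumptions, not values the class requires.
import torch
from diffusers.models.unets.unet_3d_blocks import UNetMidBlock3DCrossAttn

mid_block = UNetMidBlock3DCrossAttn(
    in_channels=64, temb_channels=128, num_attention_heads=8, cross_attention_dim=32, resnet_groups=8
)
batch, num_frames = 2, 4
sample = torch.randn(batch * num_frames, 64, 8, 8)   # frames folded into the batch dim
temb = torch.randn(batch * num_frames, 128)          # timestep embedding, repeated per frame
context = torch.randn(batch * num_frames, 77, 32)    # e.g. text-encoder hidden states
out = mid_block(sample, temb=temb, encoder_hidden_states=context, num_frames=num_frames)
assert out.shape == sample.shape                     # the mid block preserves the shape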
class CrossAttnDownBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        output_scale_factor: float = 1.0,
        downsample_padding: int = 1,
        add_downsample: bool = True,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
    ):
        super().__init__()
        resnets = []
        attentions = []
        temp_attentions = []
        temp_convs = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            temp_convs.append(
                TemporalConvLayer(
                    out_channels,
                    out_channels,
                    dropout=0.1,
                    norm_num_groups=resnet_groups,
                )
            )
            attentions.append(
                Transformer2DModel(
                    out_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=out_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                )
            )
            temp_attentions.append(
                TransformerTemporalModel(
                    out_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=out_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        self.attentions = nn.ModuleList(attentions)
        self.temp_attentions = nn.ModuleList(temp_attentions)
        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=downsample_padding,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        num_frames: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        # TODO(Patrick, William) - attention mask is not used
        output_states = ()

        for resnet, temp_conv, attn, temp_attn in zip(
            self.resnets, self.temp_convs, self.attentions, self.temp_attentions
        ):
            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            hidden_states = temp_attn(
                hidden_states,
                num_frames=num_frames,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]

            output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states += (hidden_states,)

        return hidden_states, output_states
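# --- Example (not part of the source file) ---
# A hedged sketch of the down block above: besides the downsampled activations it
# returns one residual per resnet layer plus one taken after the downsampler, which the
# up blocks later pop off for skip connections. All sizes are illustrative assumptions.
import torch
from diffusers.models.unets.unet_3d_blocks import CrossAttnDownBlock3D

down = CrossAttnDownBlock3D(
    in_channels=32, out_channels=64, temb_channels=128, num_layers=2,
    resnet_groups=8, num_attention_heads=8, cross_attention_dim=32,
)
x = torch.randn(8, 32, 16, 16)                       # (batch * num_frames, C, H, W)
x, res_states = down(
    x, temb=torch.randn(8, 128), encoder_hidden_states=torch.randn(8, 77, 32), num_frames=4
)
print(x.shape)                                       # torch.Size([8, 64, 8, 8])
print([tuple(r.shape[-2:]) for r in res_states])     # [(16, 16), (16, 16), (8, 8)]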
class DownBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor: float = 1.0,
        add_downsample: bool = True,
        downsample_padding: int = 1,
    ):
        super().__init__()
        resnets = []
        temp_convs = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            temp_convs.append(
                TemporalConvLayer(
                    out_channels,
                    out_channels,
                    dropout=0.1,
                    norm_num_groups=resnet_groups,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=downsample_padding,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        num_frames: int = 1,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        output_states = ()

        for resnet, temp_conv in zip(self.resnets, self.temp_convs):
            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)

            output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states += (hidden_states,)

        return hidden_states, output_states
class CrossAttnUpBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        output_scale_factor: float = 1.0,
        add_upsample: bool = True,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        resolution_idx: Optional[int] = None,
    ):
        super().__init__()
        resnets = []
        temp_convs = []
        attentions = []
        temp_attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            temp_convs.append(
                TemporalConvLayer(
                    out_channels,
                    out_channels,
                    dropout=0.1,
                    norm_num_groups=resnet_groups,
                )
            )
            attentions.append(
                Transformer2DModel(
                    out_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=out_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                )
            )
            temp_attentions.append(
                TransformerTemporalModel(
                    out_channels // num_attention_heads,
                    num_attention_heads,
                    in_channels=out_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        self.attentions = nn.ModuleList(attentions)
        self.temp_attentions = nn.ModuleList(temp_attentions)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.Tensor,
        res_hidden_states_tuple: Tuple[torch.Tensor, ...],
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.Tensor] = None,
        num_frames: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> torch.Tensor:
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )

        # TODO(Patrick, William) - attention mask is not used
        for resnet, temp_conv, attn, temp_attn in zip(
            self.resnets, self.temp_convs, self.attentions, self.temp_attentions
        ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            # FreeU: only operate on the first two stages
            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            hidden_states = temp_attn(
                hidden_states,
                num_frames=num_frames,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states
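# --- Example (not part of the source file) ---
# A sketch of how the up block consumes the skip tuple built by the matching down
# blocks: residuals are popped from the end and concatenated on the channel dim, which
# is why each ResnetBlock2D above takes resnet_in_channels + res_skip_channels inputs.
# Channel counts are illustrative assumptions.
import torch
from diffusers.models.unets.unet_3d_blocks import CrossAttnUpBlock3D

up = CrossAttnUpBlock3D(
    in_channels=32, out_channels=64, prev_output_channel=64, temb_channels=128,
    num_layers=3, resnet_groups=8, num_attention_heads=8, cross_attention_dim=32,
)
x = torch.randn(8, 64, 8, 8)
skips = (torch.randn(8, 32, 8, 8), torch.randn(8, 64, 8, 8), torch.randn(8, 64, 8, 8))
out = up(
    x, res_hidden_states_tuple=skips, temb=torch.randn(8, 128),
    encoder_hidden_states=torch.randn(8, 77, 32), num_frames=4,
)
print(out.shape)                                     # torch.Size([8, 64, 16, 16]) after upsampling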
class UpBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor: float = 1.0,
        add_upsample: bool = True,
        resolution_idx: Optional[int] = None,
    ):
        super().__init__()
        resnets = []
        temp_convs = []

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            temp_convs.append(
                TemporalConvLayer(
                    out_channels,
                    out_channels,
                    dropout=0.1,
                    norm_num_groups=resnet_groups,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.Tensor,
        res_hidden_states_tuple: Tuple[torch.Tensor, ...],
        temb: Optional[torch.Tensor] = None,
        upsample_size: Optional[int] = None,
        num_frames: int = 1,
    ) -> torch.Tensor:
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )
        for resnet, temp_conv in zip(self.resnets, self.temp_convs):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            # FreeU: only operate on the first two stages
            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states
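# --- Example (not part of the source file) ---
# FreeU is opt-in: the getattr checks in the forward passes above only fire once the
# four scaling factors exist as attributes, which the parent model's
# enable_freeu(s1, s2, b1, b2) normally arranges; resolution_idx tells apply_freeu
# which decoder stage this is (only the first two are rescaled). A hedged sketch that
# sets the attributes by hand, with values matching the FreeU paper's Stable Diffusion
# preset; sizes are illustrative assumptions.
import torch
from diffusers.models.unets.unet_3d_blocks import UpBlock3D

up = UpBlock3D(
    in_channels=32, prev_output_channel=64, out_channels=64, temb_channels=128,
    num_layers=1, resnet_groups=8, resolution_idx=0,
)
up.s1, up.s2, up.b1, up.b2 = 0.9, 0.2, 1.2, 1.4      # normally set via enable_freeu()
x = torch.randn(8, 64, 8, 8)
skips = (torch.randn(8, 32, 8, 8),)
out = up(x, res_hidden_states_tuple=skips, temb=torch.randn(8, 128), num_frames=4)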
class MidBlockTemporalDecoder(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        attention_head_dim: int = 512,
        num_layers: int = 1,
        upcast_attention: bool = False,
    ):
        super().__init__()

        resnets = []
        attentions = []
        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels
            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=input_channels,
                    out_channels=out_channels,
                    temb_channels=None,
                    eps=1e-6,
                    temporal_eps=1e-5,
                    merge_factor=0.0,
                    merge_strategy="learned",
                    switch_spatial_to_temporal_mix=True,
                )
            )

        attentions.append(
            Attention(
                query_dim=in_channels,
                heads=in_channels // attention_head_dim,
                dim_head=attention_head_dim,
                eps=1e-6,
                upcast_attention=upcast_attention,
                norm_num_groups=32,
                bias=True,
                residual_connection=True,
            )
        )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(
        self,
        hidden_states: torch.Tensor,
        image_only_indicator: torch.Tensor,
    ):
        hidden_states = self.resnets[0](
            hidden_states,
            image_only_indicator=image_only_indicator,
        )
        for resnet, attn in zip(self.resnets[1:], self.attentions):
            hidden_states = attn(hidden_states)
            hidden_states = resnet(
                hidden_states,
                image_only_indicator=image_only_indicator,
            )

        return hidden_states
class UpBlockTemporalDecoder(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_layers: int = 1,
        add_upsample: bool = True,
    ):
        super().__init__()
        resnets = []
        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels

            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=input_channels,
                    out_channels=out_channels,
                    temb_channels=None,
                    eps=1e-6,
                    temporal_eps=1e-5,
                    merge_factor=0.0,
                    merge_strategy="learned",
                    switch_spatial_to_temporal_mix=True,
                )
            )
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        image_only_indicator: torch.Tensor,
    ) -> torch.Tensor:
        for resnet in self.resnets:
            hidden_states = resnet(
                hidden_states,
                image_only_indicator=image_only_indicator,
            )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states
class UNetMidBlockSpatioTemporal(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
    ):
        super().__init__()

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        # support for variable transformer layers per block
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        # there is always at least one resnet
        resnets = [
            SpatioTemporalResBlock(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=1e-5,
            )
        ]
        attentions = []

        for i in range(num_layers):
            attentions.append(
                TransformerSpatioTemporalModel(
                    num_attention_heads,
                    in_channels // num_attention_heads,
                    in_channels=in_channels,
                    num_layers=transformer_layers_per_block[i],
                    cross_attention_dim=cross_attention_dim,
                )
            )
            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=1e-5,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        hidden_states = self.resnets[0](
            hidden_states,
            temb,
            image_only_indicator=image_only_indicator,
        )

        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            if torch.is_grad_enabled() and self.gradient_checkpointing:  # TODO

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    image_only_indicator,
                    **ckpt_kwargs,
                )
            else:
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]
                hidden_states = resnet(
                    hidden_states,
                    temb,
                    image_only_indicator=image_only_indicator,
                )

        return hidden_states
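# --- Example (not part of the source file) ---
# A hedged sketch of the spatio-temporal mid block. image_only_indicator is a
# (batch, num_frames) float tensor consumed by the learned spatial/temporal mixing;
# zeros mean "treat every frame as video". As the parent UNet does, temb and
# encoder_hidden_states are repeated per frame here. Sizes are illustrative assumptions.
import torch
from diffusers.models.unets.unet_3d_blocks import UNetMidBlockSpatioTemporal

mid = UNetMidBlockSpatioTemporal(
    in_channels=64, temb_channels=128, num_attention_heads=8, cross_attention_dim=32
)
batch, frames = 2, 4
x = torch.randn(batch * frames, 64, 8, 8)
out = mid(
    x,
    temb=torch.randn(batch * frames, 128),
    encoder_hidden_states=torch.randn(batch * frames, 1, 32),  # e.g. an image embedding
    image_only_indicator=torch.zeros(batch, frames),
)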
class DownBlockSpatioTemporal(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        num_layers: int = 1,
        add_downsample: bool = True,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=1e-5,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]:
        output_states = ()
        for resnet in self.resnets:
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        image_only_indicator,
                        use_reentrant=False,
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        image_only_indicator,
                    )
            else:
                hidden_states = resnet(
                    hidden_states,
                    temb,
                    image_only_indicator=image_only_indicator,
                )

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states
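# --- Example (not part of the source file) ---
# The create_custom_forward wrapper above closes over the module so that
# torch.utils.checkpoint can replay the call with positional tensors only, and the
# is_torch_version branch opts into non-reentrant checkpointing where available.
# A hedged sketch of flipping the flag directly on the block (the parent model
# normally does this via enable_gradient_checkpointing()); torch >= 1.11 and the
# sizes below are assumptions.
import torch
from diffusers.models.unets.unet_3d_blocks import DownBlockSpatioTemporal

block = DownBlockSpatioTemporal(in_channels=64, out_channels=64, temb_channels=128)
block.gradient_checkpointing = True
x = torch.randn(8, 64, 16, 16, requires_grad=True)
out, res_states = block(x, temb=torch.randn(8, 128), image_only_indicator=torch.zeros(2, 4))
out.sum().backward()                                 # activations are recomputed, not stored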
class CrossAttnDownBlockSpatioTemporal(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        add_downsample: bool = True,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=1e-6,
                )
            )
            attentions.append(
                TransformerSpatioTemporalModel(
                    num_attention_heads,
                    out_channels // num_attention_heads,
                    in_channels=out_channels,
                    num_layers=transformer_layers_per_block[i],
                    cross_attention_dim=cross_attention_dim,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=1,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]:
        output_states = ()

        blocks = list(zip(self.resnets, self.attentions))
        for resnet, attn in blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:  # TODO

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    image_only_indicator,
                    **ckpt_kwargs,
                )

                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]
            else:
                hidden_states = resnet(
                    hidden_states,
                    temb,
                    image_only_indicator=image_only_indicator,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states
class UpBlockSpatioTemporal(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: Optional[int] = None,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        add_upsample: bool = True,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.Tensor,
        res_hidden_states_tuple: Tuple[torch.Tensor, ...],
        temb: Optional[torch.Tensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        upsample_size: Optional[int] = None,
    ) -> torch.Tensor:
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        image_only_indicator,
                        use_reentrant=False,
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        image_only_indicator,
                    )
            else:
                hidden_states = resnet(
                    hidden_states,
                    temb,
                    image_only_indicator=image_only_indicator,
                )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states
class CrossAttnUpBlockSpatioTemporal(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        resolution_idx: Optional[int] = None,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        add_upsample: bool = True,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                SpatioTemporalResBlock(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                )
            )
            attentions.append(
                TransformerSpatioTemporalModel(
                    num_attention_heads,
                    out_channels // num_attention_heads,
                    in_channels=out_channels,
                    num_layers=transformer_layers_per_block[i],
                    cross_attention_dim=cross_attention_dim,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.Tensor,
        res_hidden_states_tuple: Tuple[torch.Tensor, ...],
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        upsample_size: Optional[int] = None,
    ) -> torch.Tensor:
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if torch.is_grad_enabled() and self.gradient_checkpointing:  # TODO

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    image_only_indicator,
                    **ckpt_kwargs,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]
            else:
                hidden_states = resnet(
                    hidden_states,
                    temb,
                    image_only_indicator=image_only_indicator,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    image_only_indicator=image_only_indicator,
                    return_dict=False,
                )[0]

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states
# Source: src/diffusers/models/unets/uvit_2d.py

class UVit2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        # global config
        hidden_size: int = 1024,
        use_bias: bool = False,
        hidden_dropout: float = 0.0,
        # conditioning dimensions
        cond_embed_dim: int = 768,
        micro_cond_encode_dim: int = 256,
        micro_cond_embed_dim: int = 1280,
        encoder_hidden_size: int = 768,
        # num tokens
        vocab_size: int = 8256,  # codebook_size + 1 (for the mask token) rounded
        codebook_size: int = 8192,
        # `UVit2DConvEmbed`
        in_channels: int = 768,
        block_out_channels: int = 768,
        num_res_blocks: int = 3,
        downsample: bool = False,
        upsample: bool = False,
        block_num_heads: int = 12,
        # `TransformerLayer`
        num_hidden_layers: int = 22,
        num_attention_heads: int = 16,
        # `Attention`
        attention_dropout: float = 0.0,
        # `FeedForward`
        intermediate_size: int = 2816,
        # `Norm`
        layer_norm_eps: float = 1e-6,
        ln_elementwise_affine: bool = True,
        sample_size: int = 64,
    ):
        super().__init__()

        self.encoder_proj = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias)
        self.encoder_proj_layer_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine)

        self.embed = UVit2DConvEmbed(
            in_channels, block_out_channels, vocab_size, ln_elementwise_affine, layer_norm_eps, use_bias
        )

        self.cond_embed = TimestepEmbedding(
            micro_cond_embed_dim + cond_embed_dim, hidden_size, sample_proj_bias=use_bias
        )

        self.down_block = UVitBlock(
            block_out_channels,
            num_res_blocks,
            hidden_size,
            hidden_dropout,
            ln_elementwise_affine,
            layer_norm_eps,
            use_bias,
            block_num_heads,
            attention_dropout,
            downsample,
            False,
        )

        self.project_to_hidden_norm = RMSNorm(block_out_channels, layer_norm_eps, ln_elementwise_affine)
        self.project_to_hidden = nn.Linear(block_out_channels, hidden_size, bias=use_bias)

        self.transformer_layers = nn.ModuleList(
            [
                BasicTransformerBlock(
                    dim=hidden_size,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=hidden_size // num_attention_heads,
                    dropout=hidden_dropout,
                    cross_attention_dim=hidden_size,
                    attention_bias=use_bias,
                    norm_type="ada_norm_continuous",
                    ada_norm_continous_conditioning_embedding_dim=hidden_size,
                    norm_elementwise_affine=ln_elementwise_affine,
                    norm_eps=layer_norm_eps,
                    ada_norm_bias=use_bias,
                    ff_inner_dim=intermediate_size,
                    ff_bias=use_bias,
                    attention_out_bias=use_bias,
                )
                for _ in range(num_hidden_layers)
            ]
        )

        self.project_from_hidden_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine)
        self.project_from_hidden = nn.Linear(hidden_size, block_out_channels, bias=use_bias)

        self.up_block = UVitBlock(
            block_out_channels,
            num_res_blocks,
            hidden_size,
            hidden_dropout,
            ln_elementwise_affine,
            layer_norm_eps,
            use_bias,
            block_num_heads,
            attention_dropout,
            downsample=False,
            upsample=upsample,
        )

        self.mlm_layer = ConvMlmLayer(
            block_out_channels, in_channels, use_bias, ln_elementwise_affine, layer_norm_eps, codebook_size
        )

        self.gradient_checkpointing = False

    def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
        pass
    def forward(self, input_ids, encoder_hidden_states, pooled_text_emb, micro_conds, cross_attention_kwargs=None):
        encoder_hidden_states = self.encoder_proj(encoder_hidden_states)
        encoder_hidden_states = self.encoder_proj_layer_norm(encoder_hidden_states)

        micro_cond_embeds = get_timestep_embedding(
            micro_conds.flatten(), self.config.micro_cond_encode_dim, flip_sin_to_cos=True, downscale_freq_shift=0
        )
        micro_cond_embeds = micro_cond_embeds.reshape((input_ids.shape[0], -1))

        pooled_text_emb = torch.cat([pooled_text_emb, micro_cond_embeds], dim=1)
        pooled_text_emb = pooled_text_emb.to(dtype=self.dtype)
        pooled_text_emb = self.cond_embed(pooled_text_emb).to(encoder_hidden_states.dtype)

        hidden_states = self.embed(input_ids)

        hidden_states = self.down_block(
            hidden_states,
            pooled_text_emb=pooled_text_emb,
            encoder_hidden_states=encoder_hidden_states,
            cross_attention_kwargs=cross_attention_kwargs,
        )

        batch_size, channels, height, width = hidden_states.shape
        hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels)

        hidden_states = self.project_to_hidden_norm(hidden_states)
        hidden_states = self.project_to_hidden(hidden_states)

        for layer in self.transformer_layers:
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def layer_(*args):
                    return checkpoint(layer, *args)

            else:
                layer_ = layer

            hidden_states = layer_(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                added_cond_kwargs={"pooled_text_emb": pooled_text_emb},
            )

        hidden_states = self.project_from_hidden_norm(hidden_states)
        hidden_states = self.project_from_hidden(hidden_states)

        hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)

        hidden_states = self.up_block(
            hidden_states,
            pooled_text_emb=pooled_text_emb,
            encoder_hidden_states=encoder_hidden_states,
            cross_attention_kwargs=cross_attention_kwargs,
        )

        logits = self.mlm_layer(hidden_states)

        return logits
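# --- Example (not part of the source file) ---
# A numeric sketch of the micro-conditioning math in forward above: each scalar
# micro-condition is sinusoidally embedded to micro_cond_encode_dim channels, and the
# per-sample results are concatenated, so micro_cond_embed_dim must equal
# (number of micro-conditions) * micro_cond_encode_dim (5 * 256 = 1280 with the
# defaults). The five example values below are illustrative assumptions.
import torch
from diffusers.models.embeddings import get_timestep_embedding

micro_conds = torch.tensor([[1024.0, 1024.0, 0.0, 0.0, 6.0]])   # (batch=1, 5 conditions)
emb = get_timestep_embedding(
    micro_conds.flatten(), 256, flip_sin_to_cos=True, downscale_freq_shift=0
)
print(emb.reshape(micro_conds.shape[0], -1).shape)   # torch.Size([1, 1280])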
    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)
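# --- Example (not part of the source file) ---
# A hedged usage sketch of the processor plumbing above; `model` is assumed to be an
# already-instantiated UVit2DModel, and AttnProcessor is the library's default
# processor class.
from diffusers.models.attention_processor import AttnProcessor

processors = model.attn_processors                   # dict keyed by module path, e.g. "...attn1.processor"
model.set_attn_processor(AttnProcessor())            # one shared instance for every attention layer
model.set_default_attn_processor()                   # restore the library default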
class UVit2DConvEmbed(nn.Module):
    def __init__(self, in_channels, block_out_channels, vocab_size, elementwise_affine, eps, bias):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size, in_channels)
        self.layer_norm = RMSNorm(in_channels, eps, elementwise_affine)
        self.conv = nn.Conv2d(in_channels, block_out_channels, kernel_size=1, bias=bias)

    def forward(self, input_ids):
        embeddings = self.embeddings(input_ids)
        embeddings = self.layer_norm(embeddings)
        embeddings = embeddings.permute(0, 3, 1, 2)
        embeddings = self.conv(embeddings)
        return embeddings
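# --- Example (not part of the source file) ---
# A small sketch of the conv embedding above: token ids become channel-last embeddings,
# are RMS-normalized, then moved to channel-first for the 1x1 convolution. The vocab
# and channel sizes are illustrative assumptions.
import torch
from diffusers.models.unets.uvit_2d import UVit2DConvEmbed

embed = UVit2DConvEmbed(
    in_channels=16, block_out_channels=32, vocab_size=100,
    elementwise_affine=True, eps=1e-6, bias=False,
)
token_ids = torch.randint(0, 100, (2, 8, 8))         # a (batch, height, width) token grid
features = embed(token_ids)
print(features.shape)                                # torch.Size([2, 32, 8, 8])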
class UVitBlock(nn.Module):
    def __init__(
        self,
        channels,
        num_res_blocks: int,
        hidden_size,
        hidden_dropout,
        ln_elementwise_affine,
        layer_norm_eps,
        use_bias,
        block_num_heads,
        attention_dropout,
        downsample: bool,
        upsample: bool,
    ):
        super().__init__()

        if downsample:
            self.downsample = Downsample2D(
                channels,
                use_conv=True,
                padding=0,
                name="Conv2d_0",
                kernel_size=2,
                norm_type="rms_norm",
                eps=layer_norm_eps,
                elementwise_affine=ln_elementwise_affine,
                bias=use_bias,
            )
        else:
            self.downsample = None

        self.res_blocks = nn.ModuleList(
            [
                ConvNextBlock(
                    channels,
                    layer_norm_eps,
                    ln_elementwise_affine,
                    use_bias,
                    hidden_dropout,
                    hidden_size,
                )
                for i in range(num_res_blocks)
            ]
        )

        self.attention_blocks = nn.ModuleList(
            [
                SkipFFTransformerBlock(
                    channels,
                    block_num_heads,
                    channels // block_num_heads,
                    hidden_size,
                    use_bias,
                    attention_dropout,
                    channels,
                    attention_bias=use_bias,
                    attention_out_bias=use_bias,
                )
                for _ in range(num_res_blocks)
            ]
        )

        if upsample:
            self.upsample = Upsample2D(
                channels,
                use_conv_transpose=True,
                kernel_size=2,
                padding=0,
                name="conv",
                norm_type="rms_norm",
                eps=layer_norm_eps,
                elementwise_affine=ln_elementwise_affine,
                bias=use_bias,
                interpolate=False,
            )
        else:
            self.upsample = None

    def forward(self, x, pooled_text_emb, encoder_hidden_states, cross_attention_kwargs):
        if self.downsample is not None:
            x = self.downsample(x)

        for res_block, attention_block in zip(self.res_blocks, self.attention_blocks):
            x = res_block(x, pooled_text_emb)

            batch_size, channels, height, width = x.shape
            x = x.view(batch_size, channels, height * width).permute(0, 2, 1)
            x = attention_block(
                x, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs
            )
            x = x.permute(0, 2, 1).view(batch_size, channels, height, width)

        if self.upsample is not None:
            x = self.upsample(x)

        return x
class ConvNextBlock(nn.Module):
    def __init__(
        self, channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size, res_ffn_factor=4
    ):
        super().__init__()
        self.depthwise = nn.Conv2d(
            channels,
            channels,
            kernel_size=3,
            padding=1,
            groups=channels,
            bias=use_bias,
        )
        self.norm = RMSNorm(channels, layer_norm_eps, ln_elementwise_affine)
        self.channelwise_linear_1 = nn.Linear(channels, int(channels * res_ffn_factor), bias=use_bias)
        self.channelwise_act = nn.GELU()
        self.channelwise_norm = GlobalResponseNorm(int(channels * res_ffn_factor))
        self.channelwise_linear_2 = nn.Linear(int(channels * res_ffn_factor), channels, bias=use_bias)
        self.channelwise_dropout = nn.Dropout(hidden_dropout)
        self.cond_embeds_mapper = nn.Linear(hidden_size, channels * 2, use_bias)

    def forward(self, x, cond_embeds):
        x_res = x

        x = self.depthwise(x)

        x = x.permute(0, 2, 3, 1)
        x = self.norm(x)

        x = self.channelwise_linear_1(x)
        x = self.channelwise_act(x)
        x = self.channelwise_norm(x)
        x = self.channelwise_linear_2(x)
        x = self.channelwise_dropout(x)

        x = x.permute(0, 3, 1, 2)

        x = x + x_res

        scale, shift = self.cond_embeds_mapper(F.silu(cond_embeds)).chunk(2, dim=1)
        x = x * (1 + scale[:, :, None, None]) + shift[:, :, None, None]

        return x
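# --- Example (not part of the source file) ---
# The last two lines of forward above are FiLM-style modulation: the conditioning
# vector is projected to 2 * channels, split into a per-channel scale and shift, and
# broadcast over the spatial dimensions. A self-contained numeric sketch of that step:
import torch
import torch.nn.functional as F

channels, cond_dim = 4, 16
x = torch.randn(2, channels, 8, 8)
cond_embeds = torch.randn(2, cond_dim)
mapper = torch.nn.Linear(cond_dim, channels * 2)
scale, shift = mapper(F.silu(cond_embeds)).chunk(2, dim=1)
x = x * (1 + scale[:, :, None, None]) + shift[:, :, None, None]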
class ConvMlmLayer(nn.Module):
    def __init__(
        self,
        block_out_channels: int,
        in_channels: int,
        use_bias: bool,
        ln_elementwise_affine: bool,
        layer_norm_eps: float,
        codebook_size: int,
    ):
        super().__init__()
        self.conv1 = nn.Conv2d(block_out_channels, in_channels, kernel_size=1, bias=use_bias)
        self.layer_norm = RMSNorm(in_channels, layer_norm_eps, ln_elementwise_affine)
        self.conv2 = nn.Conv2d(in_channels, codebook_size, kernel_size=1, bias=use_bias)

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.layer_norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        logits = self.conv2(hidden_states)
        return logits
# Source: src/diffusers/models/unets/unet_motion_model.py

@dataclass
class UNetMotionOutput(BaseOutput):
    """
    The output of [`UNetMotionModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, num_frames, height, width)`):
            The hidden states output conditioned on the `encoder_hidden_states` input. Output of the last layer of
            the model.
    """

    sample: torch.Tensor
class AnimateDiffTransformer3D(nn.Module):
    """
    A Transformer model for video-like data.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            The number of channels in the input and output (specify if the input is **continuous**).
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
        attention_bias (`bool`, *optional*):
            Configure if the `TransformerBlock` attention should contain a bias parameter.
        sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
            This is fixed during training since it is used to learn a number of position embeddings.
        activation_fn (`str`, *optional*, defaults to `"geglu"`):
            Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for
            supported activation functions.
        norm_elementwise_affine (`bool`, *optional*):
            Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization.
        double_self_attention (`bool`, *optional*):
            Configure if each `TransformerBlock` should contain two self-attention layers.
        positional_embeddings (`str`, *optional*):
            The type of positional embeddings to apply to the sequence input before passing it to the Transformer
            blocks.
        num_positional_embeddings (`int`, *optional*):
            The maximum length of the sequence over which to apply positional embeddings.
    """
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # Define transformer blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                    positional_embeddings=positional_embeddings,
                    num_positional_embeddings=num_positional_embeddings,
                )
                for _ in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.LongTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        num_frames: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> torch.Tensor:
        """
        The [`AnimateDiffTransformer3D`] forward method.

        Args:
            hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous):
                Input hidden_states.
            encoder_hidden_states (`torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for the cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep (`torch.LongTensor`, *optional*):
                Used to indicate the denoising step. Optional timestep to be applied as an embedding in
                `AdaLayerNorm`.
            class_labels (`torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
                Used to indicate class-label conditioning. Optional class labels to be applied as an embedding in
                `AdaLayerNormZero`.
            num_frames (`int`, *optional*, defaults to 1):
                The number of frames to be processed per batch. This is used to reshape the hidden states.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

        Returns:
            torch.Tensor: The output tensor.
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(input=hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(input=hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual
        return output
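# --- Example (not part of the source file) ---
# A worked sketch of the reshapes above: temporal attention treats every spatial
# location as its own sequence of length num_frames, so frames folded into the batch
# dimension end up as (batch * height * width, num_frames, channels) before the blocks
# and are restored afterwards. Sizes are illustrative.
import torch

batch, frames, C, H, W = 2, 8, 4, 3, 3
x = torch.randn(batch * frames, C, H, W)
y = x[None, :].reshape(batch, frames, C, H, W).permute(0, 2, 1, 3, 4)    # (B, C, F, H, W)
y = y.permute(0, 3, 4, 2, 1).reshape(batch * H * W, frames, C)           # (B*H*W, F, C)
print(y.shape)                                       # torch.Size([18, 8, 4])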
class DownBlockMotion(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor: float = 1.0,
        add_downsample: bool = True,
        downsample_padding: int = 1,
        temporal_num_attention_heads: Union[int, Tuple[int]] = 1,
        temporal_cross_attention_dim: Optional[int] = None,
        temporal_max_seq_length: int = 32,
        temporal_transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        temporal_double_self_attention: bool = True,
    ):
        super().__init__()
        resnets = []
        motion_modules = []

        # support for variable transformer layers per temporal block
        if isinstance(temporal_transformer_layers_per_block, int):
            temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers
        elif len(temporal_transformer_layers_per_block) != num_layers:
            raise ValueError(
                f"`temporal_transformer_layers_per_block` must be an integer or a tuple of integers of length {num_layers}"
            )

        # support for a variable number of attention heads per temporal layer
        if isinstance(temporal_num_attention_heads, int):
            temporal_num_attention_heads = (temporal_num_attention_heads,) * num_layers
        elif len(temporal_num_attention_heads) != num_layers:
            raise ValueError(
                f"`temporal_num_attention_heads` must be an integer or a tuple of integers of length {num_layers}"
            )

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            motion_modules.append(
                AnimateDiffTransformer3D(
                    num_attention_heads=temporal_num_attention_heads[i],
                    in_channels=out_channels,
                    num_layers=temporal_transformer_layers_per_block[i],
                    norm_num_groups=resnet_groups,
                    cross_attention_dim=temporal_cross_attention_dim,
                    attention_bias=False,
                    activation_fn="geglu",
                    positional_embeddings="sinusoidal",
                    num_positional_embeddings=temporal_max_seq_length,
                    attention_head_dim=out_channels // temporal_num_attention_heads[i],
                    double_self_attention=temporal_double_self_attention,
                )
            )
        self.resnets = nn.ModuleList(resnets)
        self.motion_modules = nn.ModuleList(motion_modules)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=downsample_padding,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        num_frames: int = 1,
        *args,
        **kwargs,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)

        output_states = ()

        blocks = zip(self.resnets, self.motion_modules)
        for resnet, motion_module in blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward