            if time_compression_ratio == 4:
                add_spatial_upsample = bool(i < num_spatial_upsample_layers)
                add_time_upsample = bool(
                    i >= len(block_out_channels) - 1 - num_time_upsample_layers and not is_final_block
                )
            else:
                raise ValueError(f"Unsupported time_compression_ratio: {time_compression_ratio}")

            upsample_scale_factor_HW = (2, 2) if add_spatial_upsample else (1, 1)
            upsample_scale_factor_T = (2,) if add_time_upsample else (1,)
            upsample_scale_factor = tuple(upsample_scale_factor_T + upsample_scale_factor_HW)
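The branch above decides, per decoder block, whether to upsample spatially, temporally, or both. A standalone sketch of the decision (the layer counts are assumptions derived from the default 8x spatial / 4x temporal compression, i.e. log2 of each ratio):

```py
# Not part of the model: enumerate which decoder blocks upsample, assuming
# 4 blocks, 3 spatial upsample layers (log2(8)) and 2 time upsample layers
# (log2(4)) -- values inferred from the defaults, so treat as assumptions.
block_out_channels = (128, 256, 512, 512)
num_spatial_upsample_layers, num_time_upsample_layers = 3, 2

for i in range(len(block_out_channels)):
    is_final_block = i == len(block_out_channels) - 1
    add_spatial = i < num_spatial_upsample_layers
    add_time = i >= len(block_out_channels) - 1 - num_time_upsample_layers and not is_final_block
    scale = ((2,) if add_time else (1,)) + ((2, 2) if add_spatial else (1, 1))
    print(f"block {i}: upsample_scale_factor={scale}")
# block 0: (1, 2, 2), block 1: (2, 2, 2), block 2: (2, 2, 2), block 3: (1, 1, 1)
# i.e. 2*2*2 = 8x spatial and 2*2 = 4x temporal upsampling overall.
```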
            up_block = HunyuanVideoUpBlock3D(
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                add_upsample=bool(add_spatial_upsample or add_time_upsample),
                upsample_scale_factor=upsample_scale_factor,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = HunyuanVideoCausalConv3d(block_out_channels[0], out_channels, kernel_size=3)

        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.conv_in(hidden_states)
        if torch.is_grad_enabled() and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}

            hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(self.mid_block), hidden_states, **ckpt_kwargs
            )

            for up_block in self.up_blocks:
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(up_block), hidden_states, **ckpt_kwargs
                )
        else:
            hidden_states = self.mid_block(hidden_states)
            for up_block in self.up_blocks:
                hidden_states = up_block(hidden_states)

        # post-process
        hidden_states = self.conv_norm_out(hidden_states)
        hidden_states = self.conv_act(hidden_states)
        hidden_states = self.conv_out(hidden_states)

        return hidden_states
class AutoencoderKLHunyuanVideo(ModelMixin, ConfigMixin):
    r"""
    A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos.
    Introduced in [HunyuanVideo](https://huggingface.co/papers/2412.03603).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).
    """

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        latent_channels: int = 16,
        down_block_types: Tuple[str, ...] = (
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
        ),
        up_block_types: Tuple[str, ...] = (
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
        ),
        block_out_channels: Tuple[int] = (128, 256, 512, 512),
        layers_per_block: int = 2,
        act_fn: str = "silu",
        norm_num_groups: int = 32,
        scaling_factor: float = 0.476986,
        spatial_compression_ratio: int = 8,
        temporal_compression_ratio: int = 4,
        mid_block_add_attention: bool = True,
    ) -> None:
        super().__init__()

        self.time_compression_ratio = temporal_compression_ratio
        self.encoder = HunyuanVideoEncoder3D(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            double_z=True,
            mid_block_add_attention=mid_block_add_attention,
            temporal_compression_ratio=temporal_compression_ratio,
            spatial_compression_ratio=spatial_compression_ratio,
        )
        self.decoder = HunyuanVideoDecoder3D(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            time_compression_ratio=temporal_compression_ratio,
            spatial_compression_ratio=spatial_compression_ratio,
            mid_block_add_attention=mid_block_add_attention,
        )

        self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1)
        self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1)

        self.spatial_compression_ratio = spatial_compression_ratio
        self.temporal_compression_ratio = temporal_compression_ratio
        # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
        # to perform decoding of a single video latent at a time.
        self.use_slicing = False

        # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
        # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
        # intermediate tiles together, the memory requirement can be lowered.
        self.use_tiling = False

        # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
        # in smaller temporal tiles (based on `self.tile_sample_min_num_frames`), the memory requirement can be lowered.
        self.use_framewise_encoding = True
        self.use_framewise_decoding = True
        # The minimal tile height and width for spatial tiling to be used
        self.tile_sample_min_height = 256
        self.tile_sample_min_width = 256
        self.tile_sample_min_num_frames = 16

        # The minimal distance between two spatial tiles
        self.tile_sample_stride_height = 192
        self.tile_sample_stride_width = 192
        self.tile_sample_stride_num_frames = 12

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (HunyuanVideoEncoder3D, HunyuanVideoDecoder3D)):
            module.gradient_checkpointing = value
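Note that these thresholds are expressed in sample (pixel/frame) space, while the tiled code paths further down work in latent space. A quick arithmetic check, using the defaults above and the default compression ratios:

```py
# Not part of the model: map the sample-space tiling defaults into latent
# space with the default 8x spatial and 4x temporal compression ratios.
spatial_ratio, temporal_ratio = 8, 4

tile_latent_min_height = 256 // spatial_ratio       # 32 latent rows per tile
tile_latent_stride_height = 192 // spatial_ratio    # 24 latent rows between tile starts
blend_height = tile_latent_min_height - tile_latent_stride_height  # 8 rows of overlap to blend

tile_latent_min_num_frames = 16 // temporal_ratio       # 4 latent frames per temporal tile
tile_latent_stride_num_frames = 12 // temporal_ratio    # 3 latent frames between tile starts
print(blend_height, tile_latent_min_num_frames - tile_latent_stride_num_frames)  # 8 1
```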
    def enable_tiling(
        self,
        tile_sample_min_height: Optional[int] = None,
        tile_sample_min_width: Optional[int] = None,
        tile_sample_min_num_frames: Optional[int] = None,
        tile_sample_stride_height: Optional[int] = None,
        tile_sample_stride_width: Optional[int] = None,
        tile_sample_stride_num_frames: Optional[int] = None,
    ) -> None:
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        Args:
            tile_sample_min_height (`int`, *optional*):
                The minimum height required for a sample to be separated into tiles across the height dimension.
            tile_sample_min_width (`int`, *optional*):
                The minimum width required for a sample to be separated into tiles across the width dimension.
            tile_sample_min_num_frames (`int`, *optional*):
                The minimum number of frames required for a sample to be separated into tiles across the frame
                dimension.
            tile_sample_stride_height (`int`, *optional*):
                The stride between two consecutive vertical tiles. This is to ensure that there are no tiling
                artifacts produced across the height dimension.
            tile_sample_stride_width (`int`, *optional*):
                The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
                artifacts produced across the width dimension.
            tile_sample_stride_num_frames (`int`, *optional*):
                The stride between two consecutive frame tiles. This is to ensure that there are no tiling artifacts
                produced across the frame dimension.
        """
        self.use_tiling = True
        self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
        self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
        self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames
        self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
        self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
        self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames
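A minimal usage sketch (the repo id and subfolder are assumptions, not taken from this file):

```py
import torch
from diffusers import AutoencoderKLHunyuanVideo

# Repo id/subfolder are illustrative; substitute the checkpoint you actually use.
vae = AutoencoderKLHunyuanVideo.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16
)
# Unset arguments keep their current values (the `or` fallbacks above).
vae.enable_tiling(tile_sample_min_height=256, tile_sample_stride_height=160)
```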
    def disable_tiling(self) -> None:
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_tiling = False

    def enable_slicing(self) -> None:
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    def disable_slicing(self) -> None:
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    def _encode(self, x: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, num_frames, height, width = x.shape
        # Note: the encode path should gate on the encoding flag (the original
        # checked `use_framewise_decoding` here, which looks like a copy-paste slip).
        if self.use_framewise_encoding and num_frames > self.tile_sample_min_num_frames:
            return self._temporal_tiled_encode(x)

        if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
            return self.tiled_encode(x)

        x = self.encoder(x)
        enc = self.quant_conv(x)
        return enc

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        r"""
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
        Returns:
            The latent representations of the encoded videos. If `return_dict` is True, a
            [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self._encode(x)

        posterior = DiagonalGaussianDistribution(h)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        batch_size, num_channels, num_frames, height, width = z.shape
        tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
        # Bug fix: the min width must be derived from `tile_sample_min_width`
        # (the original divided `tile_sample_stride_width` here).
        tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
        tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio

        if self.use_framewise_decoding and num_frames > tile_latent_min_num_frames:
            return self._temporal_tiled_decode(z, return_dict=return_dict)

        if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    @apply_forward_hook
    def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
                y / blend_extent
            )
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
                x / blend_extent
            )
        return b
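Both helpers implement the same linear cross-fade over the overlap region; only the axis differs. A standalone check of the ramp produced by `blend_h`:

```py
# Not part of the model: over the overlap, the weight on tile `a` ramps from
# 1 to 0 while the weight on tile `b` ramps from 0 to 1.
import torch

blend_extent = 4
a = torch.ones(1, 1, 1, 1, 8)   # right edge of the left tile
b = torch.zeros(1, 1, 1, 1, 8)  # left edge of the right tile
for x in range(blend_extent):
    b[..., x] = a[..., -blend_extent + x] * (1 - x / blend_extent) + b[..., x] * (x / blend_extent)
print(b[0, 0, 0, 0])  # tensor([1.0000, 0.7500, 0.5000, 0.2500, 0., 0., 0., 0.])
```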
    def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-3], b.shape[-3], blend_extent)
        for x in range(blend_extent):
            b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * (
                x / blend_extent
            )
        return b

    def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder.

        Args:
            x (`torch.Tensor`): Input batch of videos.

        Returns:
            `torch.Tensor`:
                The latent representation of the encoded videos.
        """
        batch_size, num_channels, num_frames, height, width = x.shape
        latent_height = height // self.spatial_compression_ratio
        latent_width = width // self.spatial_compression_ratio
        tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
        tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
        tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
        tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio

        blend_height = tile_latent_min_height - tile_latent_stride_height
        blend_width = tile_latent_min_width - tile_latent_stride_width
        # Split x into overlapping tiles and encode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, height, self.tile_sample_stride_height):
            row = []
            for j in range(0, width, self.tile_sample_stride_width):
                tile = x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_height)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_width)
                result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
            result_rows.append(torch.cat(result_row, dim=4))

        enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
        return enc

    def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images using a tiled decoder.
        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        batch_size, num_channels, num_frames, height, width = z.shape
        sample_height = height * self.spatial_compression_ratio
        sample_width = width * self.spatial_compression_ratio
        tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
        tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
        tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
        tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio

        blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
        blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, height, tile_latent_stride_height):
            row = []
            for j in range(0, width, tile_latent_stride_width):
                tile = z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_height)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_width)
                result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
            result_rows.append(torch.cat(result_row, dim=-1))

        dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def _temporal_tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput:
        batch_size, num_channels, num_frames, height, width = x.shape
        latent_num_frames = (num_frames - 1) // self.temporal_compression_ratio + 1

        tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
        tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
        blend_num_frames = tile_latent_min_num_frames - tile_latent_stride_num_frames
        row = []
        for i in range(0, num_frames, self.tile_sample_stride_num_frames):
            tile = x[:, :, i : i + self.tile_sample_min_num_frames + 1, :, :]
            if self.use_tiling and (height > self.tile_sample_min_height or width > self.tile_sample_min_width):
                tile = self.tiled_encode(tile)
            else:
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
            if i > 0:
                tile = tile[:, :, 1:, :, :]
            row.append(tile)

        result_row = []
        for i, tile in enumerate(row):
            if i > 0:
                tile = self.blend_t(row[i - 1], tile, blend_num_frames)
                result_row.append(tile[:, :, :tile_latent_stride_num_frames, :, :])
            else:
                result_row.append(tile[:, :, : tile_latent_stride_num_frames + 1, :, :])

        enc = torch.cat(result_row, dim=2)[:, :, :latent_num_frames]
        return enc
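A quick check of the temporal tiling arithmetic with the default settings (16-frame sample tiles, stride 12, 4x temporal compression):

```py
# Not part of the model: the defaults give 4-latent-frame tiles with a
# stride of 3, leaving exactly 1 latent frame of overlap to blend.
temporal_ratio = 4
tile_sample_min_num_frames, tile_sample_stride_num_frames = 16, 12

tile_latent_min = tile_sample_min_num_frames // temporal_ratio        # 4
tile_latent_stride = tile_sample_stride_num_frames // temporal_ratio  # 3
blend_num_frames = tile_latent_min - tile_latent_stride               # 1

# The causal 3D convs map T sample frames to (T - 1) // 4 + 1 latent frames,
# e.g. a 33-frame video maps to 9 latent frames.
num_frames = 33
print((num_frames - 1) // temporal_ratio + 1)  # 9
```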
    def _temporal_tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        batch_size, num_channels, num_frames, height, width = z.shape
        num_sample_frames = (num_frames - 1) * self.temporal_compression_ratio + 1

        tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
        tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
        tile_latent_min_num_frames = self.tile_sample_min_num_frames // self.temporal_compression_ratio
        tile_latent_stride_num_frames = self.tile_sample_stride_num_frames // self.temporal_compression_ratio
        blend_num_frames = self.tile_sample_min_num_frames - self.tile_sample_stride_num_frames
        row = []
        for i in range(0, num_frames, tile_latent_stride_num_frames):
            tile = z[:, :, i : i + tile_latent_min_num_frames + 1, :, :]
            if self.use_tiling and (tile.shape[-1] > tile_latent_min_width or tile.shape[-2] > tile_latent_min_height):
                decoded = self.tiled_decode(tile, return_dict=True).sample
            else:
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
            if i > 0:
                decoded = decoded[:, :, 1:, :, :]
            row.append(decoded)

        result_row = []
        for i, tile in enumerate(row):
            if i > 0:
                tile = self.blend_t(row[i - 1], tile, blend_num_frames)
                result_row.append(tile[:, :, : self.tile_sample_stride_num_frames, :, :])
            else:
                result_row.append(tile[:, :, : self.tile_sample_stride_num_frames + 1, :, :])

        dec = torch.cat(result_row, dim=2)[:, :, :num_sample_frames]
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Args:
            sample (`torch.Tensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z, return_dict=return_dict)
        return dec

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py
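An end-to-end sketch of the public API (repo id, device, and shapes are assumptions, as in the sketch above):

```py
# Encode a short video and decode it back, with slicing and tiling enabled
# to bound memory use.
import torch
from diffusers import AutoencoderKLHunyuanVideo

vae = AutoencoderKLHunyuanVideo.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16
).to("cuda")
vae.enable_slicing()
vae.enable_tiling()

video = torch.randn(1, 3, 33, 512, 512, dtype=torch.float16, device="cuda")
with torch.no_grad():
    posterior = vae.encode(video).latent_dist
    z = posterior.sample()        # (1, 16, 9, 64, 64) with the default config
    recon = vae.decode(z).sample  # back to (1, 3, 33, 512, 512)
```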
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        peft_version = "not installed"
        if is_peft_available():
            import peft

            peft_version = peft.__version__

        bitsandbytes_version = "not installed"
        if is_bitsandbytes_available():
            import bitsandbytes

            bitsandbytes_version = bitsandbytes.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__

        platform_info = platform.platform()
        is_google_colab_str = "Yes" if is_google_colab() else "No"

        accelerator = "NA"
        if platform.system() in {"Linux", "Windows"}:
            try:
                sp = subprocess.Popen(
                    ["nvidia-smi", "--query-gpu=gpu_name,memory.total", "--format=csv,noheader"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                out_str, _ = sp.communicate()
                out_str = out_str.decode("utf-8")
                if len(out_str) > 0:
                    accelerator = out_str.strip()
            except FileNotFoundError:
                pass
        elif platform.system() == "Darwin":  # Mac OS
            try:
                sp = subprocess.Popen(
                    ["system_profiler", "SPDisplaysDataType"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                out_str, _ = sp.communicate()
                out_str = out_str.decode("utf-8")

                start = out_str.find("Chipset Model:")
                if start != -1:
                    start += len("Chipset Model:")
                    end = out_str.find("\n", start)
                    accelerator = out_str[start:end].strip()
                    start = out_str.find("VRAM (Total):")
                    if start != -1:
                        start += len("VRAM (Total):")
                        end = out_str.find("\n", start)
                        accelerator += " VRAM: " + out_str[start:end].strip()
            except FileNotFoundError:
                pass
        else:
            print("It seems you are running an unusual OS. Could you fill in the accelerator manually?")
        info = {
            "🤗 Diffusers version": version,
            "Platform": platform_info,
            "Running on Google Colab?": is_google_colab_str,
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": jax_version,
            "JaxLib version": jaxlib_version,
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "PEFT version": peft_version,
            "Bitsandbytes version": bitsandbytes_version,
            "Safetensors version": safetensors_version,
            "xFormers version": xformers_version,
            "Accelerator": accelerator,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d: dict) -> str: return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
1,251
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/commands/env.py
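The subcommand is registered under the name `env`, so it is invoked as `diffusers-cli env`. A standalone illustration of what `format_dict` produces (the values are made up):

```py
# Re-implementation of EnvironmentCommand.format_dict for demonstration:
# each key/value pair becomes a markdown-style bullet, ready to paste into
# a GitHub issue.
def format_dict(d: dict) -> str:
    return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"

print(format_dict({"🤗 Diffusers version": "0.32.0", "Platform": "Linux"}))
# - 🤗 Diffusers version: 0.32.0
# - Platform: Linux
```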
class BaseDiffusersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/commands/__init__.py
class FP16SafetensorsCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        conversion_parser = parser.add_parser("fp16_safetensors")
        conversion_parser.add_argument(
            "--ckpt_id",
            type=str,
            help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.",
        )
        conversion_parser.add_argument(
            "--fp16", action="store_true", help="If serializing the variables in FP16 precision."
        )
        conversion_parser.add_argument(
            "--use_safetensors", action="store_true", help="If serializing in the safetensors format."
        )
        conversion_parser.add_argument(
            "--use_auth_token",
            action="store_true",
            help="When working with checkpoints having private visibility. When used, `huggingface-cli login` needs to be run beforehand.",
        )
        conversion_parser.set_defaults(func=conversion_command_factory)
    def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool):
        self.logger = logging.get_logger("diffusers-cli/fp16_safetensors")
        self.ckpt_id = ckpt_id
        self.local_ckpt_dir = f"/tmp/{ckpt_id}"
        self.fp16 = fp16
        self.use_safetensors = use_safetensors

        if not self.use_safetensors and not self.fp16:
            raise NotImplementedError(
                "When `use_safetensors` and `fp16` are both False, this command is of no use."
            )

    def run(self):
        if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
            raise ImportError(
                "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
                " installation."
            )
        else:
            from huggingface_hub import create_commit
            from huggingface_hub._commit_api import CommitOperationAdd
        model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json")
        with open(model_index, "r") as f:
            pipeline_class_name = json.load(f)["_class_name"]
        pipeline_class = getattr(import_module("diffusers"), pipeline_class_name)
        self.logger.info(f"Pipeline class imported: {pipeline_class_name}.")

        # Load the appropriate pipeline. We could have used `DiffusionPipeline` here,
        # but just to avoid any rough edge cases.
        pipeline = pipeline_class.from_pretrained(
            self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32
        )
        pipeline.save_pretrained(
            self.local_ckpt_dir,
            safe_serialization=True if self.use_safetensors else False,
            variant="fp16" if self.fp16 else None,
        )
        self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.")
        # Fetch all the paths.
        if self.fp16:
            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*")
        elif self.use_safetensors:
            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors")

        # Prepare for the PR.
        commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}."
        operations = []
        for path in modified_paths:
            operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path))
        # Open the PR.
        commit_description = (
            "Variables converted by the [`diffusers`' `fp16_safetensors`"
            " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)."
        )
        hub_pr_url = create_commit(
            repo_id=self.ckpt_id,
            operations=operations,
            commit_message=commit_message,
            commit_description=commit_description,
            repo_type="model",
            create_pr=True,
        ).pr_url
        self.logger.info(f"PR created here: {hub_pr_url}.")

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/commands/fp16_safetensors.py
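A usage sketch (the repo id is only an example). Given the parser registration above, the CLI equivalent would be `diffusers-cli fp16_safetensors --ckpt_id openai/shap-e --fp16 --use_safetensors`:

```py
# Drive the command directly from Python instead of via the CLI.
from diffusers.commands.fp16_safetensors import FP16SafetensorsCommand

command = FP16SafetensorsCommand(ckpt_id="openai/shap-e", fp16=True, use_safetensors=True)
command.run()  # downloads, converts, and opens a PR with the fp16 safetensors weights
```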
class SD3Transformer2DLoadersMixin:
    """Load IP-Adapters and LoRA layers into a [`SD3Transformer2DModel`]."""

    def _load_ip_adapter_weights(self, state_dict: Dict, low_cpu_mem_usage: bool = _LOW_CPU_MEM_USAGE_DEFAULT) -> None:
        """Sets IP-Adapter attention processors, image projection, and loads state_dict.
        Args:
            state_dict (`Dict`):
                State dict with keys "ip_adapter", which contains parameters for attention processors, and
                "image_proj", which contains parameters for the image projection net.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
        """
        # IP-Adapter cross attention parameters
        hidden_size = self.config.attention_head_dim * self.config.num_attention_heads
        ip_hidden_states_dim = self.config.attention_head_dim * self.config.num_attention_heads
        timesteps_emb_dim = state_dict["ip_adapter"]["0.norm_ip.linear.weight"].shape[1]
        # Dict where key is transformer layer index, value is attention processor's state dict
        # ip_adapter state dict keys example: "0.norm_ip.linear.weight"
        layer_state_dict = {idx: {} for idx in range(len(self.attn_processors))}
        for key, weights in state_dict["ip_adapter"].items():
            idx, name = key.split(".", maxsplit=1)
            layer_state_dict[int(idx)][name] = weights

        # Create IP-Adapter attention processor
        attn_procs = {}
        for idx, name in enumerate(self.attn_processors.keys()):
            attn_procs[name] = SD3IPAdapterJointAttnProcessor2_0(
                hidden_size=hidden_size,
                ip_hidden_states_dim=ip_hidden_states_dim,
                head_dim=self.config.attention_head_dim,
                timesteps_emb_dim=timesteps_emb_dim,
            ).to(self.device, dtype=self.dtype)
            if not low_cpu_mem_usage:
                attn_procs[name].load_state_dict(layer_state_dict[idx], strict=True)
            else:
                load_model_dict_into_meta(
                    attn_procs[name], layer_state_dict[idx], device=self.device, dtype=self.dtype
                )

        self.set_attn_processor(attn_procs)

        # Image projection parameters
        embed_dim = state_dict["image_proj"]["proj_in.weight"].shape[1]
        output_dim = state_dict["image_proj"]["proj_out.weight"].shape[0]
        hidden_dim = state_dict["image_proj"]["proj_in.weight"].shape[0]
        heads = state_dict["image_proj"]["layers.0.attn.to_q.weight"].shape[0] // 64
        num_queries = state_dict["image_proj"]["latents"].shape[1]
        timestep_in_dim = state_dict["image_proj"]["time_embedding.linear_1.weight"].shape[1]
        # Image projection
        self.image_proj = IPAdapterTimeImageProjection(
            embed_dim=embed_dim,
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            heads=heads,
            num_queries=num_queries,
            timestep_in_dim=timestep_in_dim,
        ).to(device=self.device, dtype=self.dtype)

        if not low_cpu_mem_usage:
            self.image_proj.load_state_dict(state_dict["image_proj"], strict=True)
        else:
            load_model_dict_into_meta(self.image_proj, state_dict["image_proj"], device=self.device, dtype=self.dtype)

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/transformer_sd3.py
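The loader infers every image-projection hyperparameter from the checkpoint's weight shapes rather than from a config. A standalone sketch of that inference (the tensor shapes below are invented for illustration; note the assumed per-head dimension of 64, mirroring the `// 64` above):

```py
# Dummy tensors standing in for state_dict["image_proj"].
import torch

image_proj = {
    "proj_in.weight": torch.empty(1280, 1152),             # (hidden_dim, embed_dim)
    "proj_out.weight": torch.empty(2432, 1280),            # (output_dim, hidden_dim)
    "layers.0.attn.to_q.weight": torch.empty(1280, 1280),  # rows = heads * 64
    "latents": torch.empty(1, 64, 1280),                   # (1, num_queries, hidden_dim)
    "time_embedding.linear_1.weight": torch.empty(1280, 320),  # (_, timestep_in_dim)
}

embed_dim = image_proj["proj_in.weight"].shape[1]               # 1152
output_dim = image_proj["proj_out.weight"].shape[0]             # 2432
hidden_dim = image_proj["proj_in.weight"].shape[0]              # 1280
heads = image_proj["layers.0.attn.to_q.weight"].shape[0] // 64  # 20
num_queries = image_proj["latents"].shape[1]                    # 64
```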
class FromSingleFileMixin:
    """
    Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`].
    """

    @classmethod
    @validate_hf_hub_args
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
        format. The pipeline is set in evaluation mode (`model.eval()`) by default.
        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            original_config_file (`str`, *optional*):
                The path to the original config file that was used to train the model. If not provided, the config
                file will be inferred from the checkpoint file.
            config (`str`, *optional*):
                Can be either:
                    - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_pipeline_directory/`) containing the pipeline
                      component configs in Diffusers format.
            disable_mmap (`bool`, *optional*, defaults to `False`):
                Whether to disable mmap when loading a Safetensors model. This option can perform better when the
                model is on a network mount or hard drive.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipeline's `__init__` method. See
                example below for more information.
        Examples:

        ```py
        >>> from diffusers import StableDiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
        ... )

        >>> # Download pipeline from local file
        >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
        >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly.ckpt")

        >>> # Enable float16 and move to GPU
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
        ...     torch_dtype=torch.float16,
        ... )
        >>> pipeline.to("cuda")
        ```
""" original_config_file = kwargs.pop("original_config_file", None) config = kwargs.pop("config", None) original_config = kwargs.pop("original_config", None) if original_config_file is not None: deprecation_message = ( "`original_config_file` argument is deprecated and will be removed in future versions." "please use the `original_config` argument instead." ) deprecate("original_config_file", "1.0.0", deprecation_message) original_config = original_config_file force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) cache_dir = kwargs.pop("cache_dir", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) disable_mmap = kwargs.pop("disable_mmap", False)
1,255
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/single_file.py
        is_legacy_loading = False

        # We shouldn't allow configuring individual model components through a pipeline creation method.
        # These model kwargs should be deprecated.
        scaling_factor = kwargs.get("scaling_factor", None)
        if scaling_factor is not None:
            deprecation_message = (
                "Passing the `scaling_factor` argument to `from_single_file` is deprecated "
                "and will be ignored in future versions."
            )
            deprecate("scaling_factor", "1.0.0", deprecation_message)

        if original_config is not None:
            original_config = fetch_original_config(original_config, local_files_only=local_files_only)

        from ..pipelines.pipeline_utils import _get_pipeline_class

        pipeline_class = _get_pipeline_class(cls, config=None)
        checkpoint = load_single_file_checkpoint(
            pretrained_model_link_or_path,
            force_download=force_download,
            proxies=proxies,
            token=token,
            cache_dir=cache_dir,
            local_files_only=local_files_only,
            revision=revision,
            disable_mmap=disable_mmap,
        )

        if config is None:
            config = fetch_diffusers_config(checkpoint)
            default_pretrained_model_config_name = config["pretrained_model_name_or_path"]
        else:
            default_pretrained_model_config_name = config
        if not os.path.isdir(default_pretrained_model_config_name):
            # Provided config is a repo_id
            if default_pretrained_model_config_name.count("/") > 1:
                raise ValueError(
                    f'The provided config "{config}"'
                    " is neither a valid local path nor a valid repo id. Please check the parameter."
                )
            try:
                # Attempt to download the config files for the pipeline
                cached_model_config_path = _download_diffusers_model_config_from_hub(
                    default_pretrained_model_config_name,
                    cache_dir=cache_dir,
                    revision=revision,
                    proxies=proxies,
                    force_download=force_download,
                    local_files_only=local_files_only,
                    token=token,
                )
                config_dict = pipeline_class.load_config(cached_model_config_path)
            except LocalEntryNotFoundError:
                # `local_files_only=True` but a local diffusers format model config is not available in the cache.
                # If `original_config` is not provided, we need to override `local_files_only` to False
                # to fetch the config files from the hub so that we have a way
                # to configure the pipeline components.
                if original_config is None:
                    logger.warning(
                        "`local_files_only` is True but no local configs were found for this checkpoint.\n"
                        "Attempting to download the necessary config files for this pipeline.\n"
                    )
                    cached_model_config_path = _download_diffusers_model_config_from_hub(
                        default_pretrained_model_config_name,
                        cache_dir=cache_dir,
                        revision=revision,
                        proxies=proxies,
                        force_download=force_download,
                        local_files_only=False,
                        token=token,
                    )
                    config_dict = pipeline_class.load_config(cached_model_config_path)
                else:
                    # For backwards compatibility:
                    # if `original_config` is provided, we need to assume we are using legacy loading for pipeline components.
                    logger.warning(
                        "Detected legacy `from_single_file` loading behavior. Attempting to create the pipeline based on inferred components.\n"
                        "This may lead to errors if the model components are not correctly inferred. \n"
                        "To avoid this warning, please explicitly pass the `config` argument to `from_single_file` with a path to a local diffusers model repo \n"
                        "e.g. `from_single_file(<my model checkpoint path>, config=<path to local diffusers model repo>)` \n"
                        "or run `from_single_file` with `local_files_only=False` first to update the local cache directory with "
                        "the necessary config files.\n"
                    )
                    is_legacy_loading = True
                    cached_model_config_path = None
                    config_dict = _infer_pipeline_config_dict(pipeline_class)
                    config_dict["_class_name"] = pipeline_class.__name__

        else:
            # Provided config is a path to a local directory; attempt to load directly.
            cached_model_config_path = default_pretrained_model_config_name
            config_dict = pipeline_class.load_config(cached_model_config_path)

        # pop out "_ignore_files" as it is only needed for download
        config_dict.pop("_ignore_files", None)

        expected_modules, optional_kwargs = pipeline_class._get_signature_keys(cls)
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
        init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
        init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
        init_kwargs = {**init_kwargs, **passed_pipe_kwargs}

        from diffusers import pipelines

        # remove `null` components
        def load_module(name, value):
            if value[0] is None:
                return False
            if name in passed_class_obj and passed_class_obj[name] is None:
                return False
            if name in SINGLE_FILE_OPTIONAL_COMPONENTS:
                return False
            return True

        init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}

        for name, (library_name, class_name) in logging.tqdm(
            sorted(init_dict.items()), desc="Loading pipeline components..."
        ):
            loaded_sub_model = None
            is_pipeline_module = hasattr(pipelines, library_name)
            if name in passed_class_obj:
                loaded_sub_model = passed_class_obj[name]
            else:
                try:
                    loaded_sub_model = load_single_file_sub_model(
                        library_name=library_name,
                        class_name=class_name,
                        name=name,
                        checkpoint=checkpoint,
                        is_pipeline_module=is_pipeline_module,
                        cached_model_config_path=cached_model_config_path,
                        pipelines=pipelines,
                        torch_dtype=torch_dtype,
                        original_config=original_config,
                        local_files_only=local_files_only,
                        is_legacy_loading=is_legacy_loading,
                        disable_mmap=disable_mmap,
                        **kwargs,
                    )
                except SingleFileComponentError as e:
                    raise SingleFileComponentError(
                        (
                            f"{e.message}\n"
f"Please load the component before passing it in as an argument to `from_single_file`.\n" f"\n" f"{name} = {class_name}.from_pretrained('...')\n" f"pipe = {pipeline_class.__name__}.from_single_file(<checkpoint path>, {name}={name})\n" f"\n" ) )
1,255
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/single_file.py
            init_kwargs[name] = loaded_sub_model

        missing_modules = set(expected_modules) - set(init_kwargs.keys())
        passed_modules = list(passed_class_obj.keys())
        optional_modules = pipeline_class._optional_components

        if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):
            for module in missing_modules:
                init_kwargs[module] = passed_class_obj.get(module, None)
        elif len(missing_modules) > 0:
            passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs
            raise ValueError(
                f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
            )
        # deprecated kwargs
        load_safety_checker = kwargs.pop("load_safety_checker", None)
        if load_safety_checker is not None:
            deprecation_message = (
                "Please pass instances of `StableDiffusionSafetyChecker` and `AutoImageProcessor` "
                "using the `safety_checker` and `feature_extractor` arguments in `from_single_file`."
            )
            deprecate("load_safety_checker", "1.0.0", deprecation_message)

            safety_checker_components = _legacy_load_safety_checker(local_files_only, torch_dtype)
            init_kwargs.update(safety_checker_components)

        pipe = pipeline_class(**init_kwargs)

        return pipe

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/single_file.py
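When a component cannot be inferred from the checkpoint, the `SingleFileComponentError` message above suggests preloading it and passing it in. A sketch of that remedy (the model ids and file name are examples, not taken from this file):

```py
import torch
from diffusers import StableDiffusionXLPipeline
from transformers import CLIPTextModel

# Preload the component that from_single_file failed to infer...
text_encoder = CLIPTextModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", torch_dtype=torch.float16
)
# ...and hand it to from_single_file as a keyword argument.
pipe = StableDiffusionXLPipeline.from_single_file(
    "./sd_xl_base_1.0.safetensors", text_encoder=text_encoder, torch_dtype=torch.float16
)
```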
class PeftAdapterMixin:
    """
    A class containing all functions for loading and using adapter weights that are supported in the PEFT library.
    For more details about adapters and injecting them in a base model, check out the PEFT
    [documentation](https://huggingface.co/docs/peft/index).

    Install the latest version of PEFT, and use this mixin to:

    - Attach new adapters in the model.
    - Attach multiple adapters and iteratively activate/deactivate them.
    - Activate/deactivate all adapters from the model.
    - Get a list of the active adapters.
    """

    _hf_peft_config_loaded = False

    @classmethod
    # Copied from diffusers.loaders.lora_base.LoraBaseMixin._optionally_disable_offloading
    def _optionally_disable_offloading(cls, _pipeline):
        """
        Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.

        Args:
            _pipeline (`DiffusionPipeline`):
                The pipeline to disable offloading for.
        Returns:
            tuple:
                A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
        """
        return _func_optionally_disable_offloading(_pipeline=_pipeline)

    def load_lora_adapter(self, pretrained_model_name_or_path_or_dict, prefix="transformer", **kwargs):
        r"""
        Loads a LoRA adapter into the underlying model.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted
                      on the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
            prefix (`str`, *optional*): Prefix to filter the state dict.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
        """
        from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
        from peft.tuners.tuners_utils import BaseTunerLayer
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        adapter_name = kwargs.pop("adapter_name", None)
        network_alphas = kwargs.pop("network_alphas", None)
        _pipeline = kwargs.pop("_pipeline", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", False)
        allow_pickle = False

        if low_cpu_mem_usage and is_peft_version("<=", "0.13.0"):
            raise ValueError(
                "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
            )
        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        state_dict = _fetch_state_dict(
            pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict,
            weight_name=weight_name,
            use_safetensors=use_safetensors,
            local_files_only=local_files_only,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            token=token,
            revision=revision,
            subfolder=subfolder,
            user_agent=user_agent,
            allow_pickle=allow_pickle,
        )
        if network_alphas is not None and prefix is None:
            raise ValueError("`network_alphas` cannot be None when `prefix` is None.")
        if prefix is not None:
            keys = list(state_dict.keys())
            model_keys = [k for k in keys if k.startswith(f"{prefix}.")]
            if len(model_keys) > 0:
                state_dict = {k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in model_keys}

        if len(state_dict) > 0:
            if adapter_name in getattr(self, "peft_config", {}):
                raise ValueError(
                    f"Adapter name {adapter_name} already in use in the model - please select a new adapter name."
                )

            # Check with the first key whether the state dict is in PEFT format.
            first_key = next(iter(state_dict.keys()))
            if "lora_A" not in first_key:
                state_dict = convert_unet_state_dict_to_peft(state_dict)
            rank = {}
            for key, val in state_dict.items():
                # Cannot figure out rank from lora layers that don't have at least 2 dimensions.
                # Bias layers in LoRA only have a single dimension.
                if "lora_B" in key and val.ndim > 1:
                    rank[key] = val.shape[1]

            if network_alphas is not None and len(network_alphas) >= 1:
                alpha_keys = [k for k in network_alphas.keys() if k.startswith(f"{prefix}.")]
                network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}

            lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict)
            lora_config_kwargs = _maybe_adjust_config(lora_config_kwargs)
if "use_dora" in lora_config_kwargs: if lora_config_kwargs["use_dora"]: if is_peft_version("<", "0.9.0"): raise ValueError( "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." ) else: if is_peft_version("<", "0.9.0"): lora_config_kwargs.pop("use_dora") if "lora_bias" in lora_config_kwargs: if lora_config_kwargs["lora_bias"]: if is_peft_version("<=", "0.13.2"): raise ValueError( "You need `peft` 0.14.0 at least to use `lora_bias` in LoRAs. Please upgrade your installation of `peft`." ) else: if is_peft_version("<=", "0.13.2"): lora_config_kwargs.pop("lora_bias")
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
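The version gating above follows one pattern: a flag unknown to older `peft` releases is rejected when enabled and silently dropped when disabled. A standalone sketch of that pattern, with illustrative kwarg values (`is_peft_version` is the helper used throughout this file):

```py
lora_config_kwargs = {"r": 8, "lora_alpha": 8, "use_dora": False, "lora_bias": False}

# Drop disabled flags that the installed `peft` would not understand,
# so `LoraConfig(**lora_config_kwargs)` below stays constructible.
if is_peft_version("<", "0.9.0") and not lora_config_kwargs["use_dora"]:
    lora_config_kwargs.pop("use_dora")
if is_peft_version("<=", "0.13.2") and not lora_config_kwargs["lora_bias"]:
    lora_config_kwargs.pop("lora_bias")
```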
lora_config = LoraConfig(**lora_config_kwargs) # adapter_name if adapter_name is None: adapter_name = get_adapter_name(self) # <Unsafe code # We can be sure that the following works as it just sets attention processors and LoRA layers and puts everything in the same dtype # Now we remove any existing hooks to `_pipeline`. # In case the pipeline has already been offloaded to CPU - temporarily remove the hooks # otherwise loading LoRA weights will lead to an error is_model_cpu_offload, is_sequential_cpu_offload = self._optionally_disable_offloading(_pipeline) peft_kwargs = {} if is_peft_version(">=", "0.13.1"): peft_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
# To handle scenarios where we cannot successfully set the state dict. If it's unsuccessful, # we should also delete the `peft_config` associated with the `adapter_name`. try: inject_adapter_in_model(lora_config, self, adapter_name=adapter_name, **peft_kwargs) incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name, **peft_kwargs) except Exception as e: # In case `inject_adapter_in_model()` was unsuccessful even before injecting the `peft_config`. if hasattr(self, "peft_config"): for module in self.modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapters for active_adapter in active_adapters: if adapter_name in active_adapter: module.delete_adapter(adapter_name)
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
self.peft_config.pop(adapter_name) logger.error(f"Loading {adapter_name} was unsuccessful with the following error: \n{e}") raise warn_msg = "" if incompatible_keys is not None: # Check only for unexpected keys. unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: lora_unexpected_keys = [k for k in unexpected_keys if "lora_" in k and adapter_name in k] if lora_unexpected_keys: warn_msg = ( f"Loading adapter weights from state_dict led to unexpected keys found in the model:" f" {', '.join(lora_unexpected_keys)}. " )
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
# Filter missing keys specific to the current adapter. missing_keys = getattr(incompatible_keys, "missing_keys", None) if missing_keys: lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k] if lora_missing_keys: warn_msg += ( f"Loading adapter weights from state_dict led to missing keys in the model:" f" {', '.join(lora_missing_keys)}." ) if warn_msg: logger.warning(warn_msg) # Offload back. if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() # Unsafe code />
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
def save_lora_adapter( self, save_directory, adapter_name: str = "default", upcast_before_saving: bool = False, safe_serialization: bool = True, weight_name: Optional[str] = None, ): """ Save the LoRA parameters corresponding to the underlying model.
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. adapter_name (`str`, defaults to `"default"`): The name of the adapter to serialize. Useful when the underlying model has multiple adapters loaded. upcast_before_saving (`bool`, defaults to `False`): Whether to cast the underlying model to `torch.float32` before serialization. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
weight_name (`str`, *optional*, defaults to `None`): Name of the file to serialize the state dict with. """ from peft.utils import get_peft_model_state_dict
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
from .lora_base import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE if adapter_name is None: adapter_name = get_adapter_name(self) if adapter_name not in getattr(self, "peft_config", {}): raise ValueError(f"Adapter name {adapter_name} not found in the model.") lora_layers_to_save = get_peft_model_state_dict( self.to(dtype=torch.float32 if upcast_before_saving else None), adapter_name=adapter_name ) if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if safe_serialization: def save_function(weights, filename): return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"}) else: save_function = torch.save os.makedirs(save_directory, exist_ok=True)
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
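The `save_function` defined above wraps `safetensors.torch.save_file` with a `{"format": "pt"}` metadata tag. A minimal sketch of the resulting round trip; the tensor names, shapes, and file path are illustrative:

```py
import safetensors.torch
import torch

# Save a toy LoRA state dict the same way `save_function` does.
weights = {
    "to_q.lora_A.weight": torch.randn(4, 320),
    "to_q.lora_B.weight": torch.randn(320, 4),
}
safetensors.torch.save_file(weights, "lora.safetensors", metadata={"format": "pt"})

# Load it back; safetensors restores the flat name -> tensor mapping.
restored = safetensors.torch.load_file("lora.safetensors")
assert set(restored) == set(weights)
```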
if weight_name is None: if safe_serialization: weight_name = LORA_WEIGHT_NAME_SAFE else: weight_name = LORA_WEIGHT_NAME # TODO: we could consider saving the `peft_config` as well. save_path = Path(save_directory, weight_name).as_posix() save_function(lora_layers_to_save, save_path) logger.info(f"Model weights saved in {save_path}") def set_adapters( self, adapter_names: Union[List[str], str], weights: Optional[Union[float, Dict, List[float], List[Dict], List[None]]] = None, ): """ Set the currently active adapters for use in the UNet. Args: adapter_names (`List[str]` or `str`): The names of the adapters to use. weights (`Union[float, Dict, List[float], List[Dict]]`, *optional*): The adapter weight(s) to use with the UNet. If `None`, the weights are set to `1.0` for all the adapters.
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" ) pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5]) ``` """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for `set_adapters()`.") adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
# Expand weights into a list, one entry per adapter # examples for e.g. 2 adapters: 7 -> [7, 7] ; None -> [None, None] if not isinstance(weights, list): weights = [weights] * len(adapter_names) if len(adapter_names) != len(weights): raise ValueError( f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}." ) # Set None values to default of 1.0 # e.g. [{...}, 7] -> [{...}, 7] ; [None, None] -> [1.0, 1.0] weights = [w if w is not None else 1.0 for w in weights] # e.g. [{...}, 7] -> [{expanded dict...}, 7] scale_expansion_fn = _SET_ADAPTER_SCALE_FN_MAPPING[self.__class__.__name__] weights = scale_expansion_fn(self, weights) set_weights_and_activate_adapters(self, adapter_names, weights)
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
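The weight-expansion comments above can be traced with a small standalone example; the adapter names are illustrative:

```py
adapter_names = ["cinematic", "pixel"]

# A scalar (or a single dict) is broadcast to one entry per adapter.
weights = 0.7
if not isinstance(weights, list):
    weights = [weights] * len(adapter_names)  # -> [0.7, 0.7]

# `None` entries fall back to the default scale of 1.0.
weights = [w if w is not None else 1.0 for w in weights]
```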
def add_adapter(self, adapter_config, adapter_name: str = "default") -> None: r""" Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of the PEFT library. If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_config ([`~peft.PeftConfig`]): The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt methods. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION)
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
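As a usage sketch, `add_adapter` pairs with a `peft.LoraConfig`. The target module names below are illustrative attention projections, and `model` stands for any diffusers model with PEFT support; neither is prescribed by this file:

```py
from peft import LoraConfig

config = LoraConfig(
    r=8,
    lora_alpha=8,
    init_lora_weights="gaussian",
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],  # assumed module names
)
model.add_adapter(config, adapter_name="my_adapter")
```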
if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") from peft import PeftConfig, inject_adapter_in_model if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if not isinstance(adapter_config, PeftConfig): raise ValueError( f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." )
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
# Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is # handled by the `load_lora_layers` or `StableDiffusionLoraLoaderMixin`. Therefore we set it to `None` here. adapter_config.base_model_name_or_path = None inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: """ Sets a specific adapter by forcing the model to only use that adapter and disabling the other adapters. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_name (`Union[str, List[str]]`): The list of adapters to set or the adapter name in the case of a single adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION)
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") if isinstance(adapter_name, str): adapter_name = [adapter_name] missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError( f"The following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." f" The currently loaded adapters are: {list(self.peft_config.keys())}" ) from peft.tuners.tuners_utils import BaseTunerLayer _adapters_has_been_set = False
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "set_adapter"): module.set_adapter(adapter_name) # Previous versions of PEFT do not support multi-adapter inference elif not hasattr(module, "set_adapter") and len(adapter_name) != 1: raise ValueError( "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT." " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`" ) else: module.active_adapter = adapter_name _adapters_has_been_set = True
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
if not _adapters_has_been_set: raise ValueError( "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters." ) def disable_adapters(self) -> None: r""" Disable all adapters attached to the model and fall back to inference with the base model only. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer
1,256
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/loaders/peft.py
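Finally, a short sketch of switching between adapters with the methods above; the adapter names are illustrative and assumed to be loaded already:

```py
# Activate a single adapter.
model.set_adapter("cinematic")

# Activate several adapters at once (requires a PEFT version with multi-adapter support).
model.set_adapter(["cinematic", "pixel"])

# Turn all adapters off and fall back to the base model.
model.disable_adapters()
```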