# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_utils.py

        # this becomes applicable when the variant is not None.
        if variant is not None and (index_file is None or not os.path.exists(index_file)):
            index_file = _fetch_index_file_legacy(**index_file_kwargs)
        if index_file is not None and (dduf_entries or index_file.is_file()):
            is_sharded = True
        if is_sharded and from_flax:
            raise ValueError("Loading of sharded checkpoints is not supported when `from_flax=True`.")

        # load model
        model_file = None
        if from_flax:
            model_file = _get_model_file(
                pretrained_model_name_or_path,
                weights_name=FLAX_WEIGHTS_NAME,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder=subfolder,
                user_agent=user_agent,
                commit_hash=commit_hash,
            )
            model = cls.from_config(config, **unused_kwargs)

            # Convert the weights
            from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model
            model = load_flax_checkpoint_in_pytorch_model(model, model_file)
        else:
            # in the case it is sharded, we have already the index
            if is_sharded:
                sharded_ckpt_cached_folder, sharded_metadata = _get_checkpoint_shard_files(
                    pretrained_model_name_or_path,
                    index_file,
                    cache_dir=cache_dir,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder or "",
                    dduf_entries=dduf_entries,
                )
                # TODO: https://github.com/huggingface/diffusers/issues/10013
                if hf_quantizer is not None or dduf_entries:
                    model_file = _merge_sharded_checkpoints(
                        sharded_ckpt_cached_folder, sharded_metadata, dduf_entries=dduf_entries
                    )
                    logger.info("Merged sharded checkpoints as `hf_quantizer` is not None.")
                    is_sharded = False
            elif use_safetensors and not is_sharded:
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path,
                        weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        token=token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                        commit_hash=commit_hash,
                        dduf_entries=dduf_entries,
                    )
                except IOError as e:
                    logger.error(f"An error occurred while trying to fetch {pretrained_model_name_or_path}: {e}")
                    if not allow_pickle:
                        raise
                    logger.warning(
                        "Defaulting to unsafe serialization. Pass `allow_pickle=False` to raise an error instead."
                    )
            if model_file is None and not is_sharded:
                model_file = _get_model_file(
                    pretrained_model_name_or_path,
                    weights_name=_add_variant(WEIGHTS_NAME, variant),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                    commit_hash=commit_hash,
                    dduf_entries=dduf_entries,
                )

            if low_cpu_mem_usage:
                # Instantiate model with empty weights
                with accelerate.init_empty_weights():
                    model = cls.from_config(config, **unused_kwargs)
                if hf_quantizer is not None:
                    hf_quantizer.preprocess_model(
                        model=model, device_map=device_map, keep_in_fp32_modules=keep_in_fp32_modules
                    )
                # if device_map is None, load the state dict and move the params from meta device to the cpu
                if device_map is None and not is_sharded:
                    # `torch.cuda.current_device()` is fine here when `hf_quantizer` is not None.
                    # It would error out during the `validate_environment()` call above in the absence of cuda.
                    if hf_quantizer is None:
                        param_device = "cpu"
                    # TODO (sayakpaul, SunMarc): remove this after model loading refactor
                    else:
                        param_device = torch.device(torch.cuda.current_device())
                    state_dict = load_state_dict(
                        model_file, variant=variant, dduf_entries=dduf_entries, disable_mmap=disable_mmap
                    )
                    model._convert_deprecated_attention_blocks(state_dict)
                    # move the params from meta device to cpu
                    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
                    if hf_quantizer is not None:
                        missing_keys = hf_quantizer.update_missing_keys(model, missing_keys, prefix="")
                    if len(missing_keys) > 0:
                        raise ValueError(
                            f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are"
                            f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass"
                            " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize"
                            " those weights or else make sure your checkpoint file is correct."
                        )

                    named_buffers = model.named_buffers()
                    unexpected_keys = load_model_dict_into_meta(
                        model,
                        state_dict,
                        device=param_device,
                        dtype=torch_dtype,
                        model_name_or_path=pretrained_model_name_or_path,
                        hf_quantizer=hf_quantizer,
                        keep_in_fp32_modules=keep_in_fp32_modules,
                        named_buffers=named_buffers,
                    )

                    if cls._keys_to_ignore_on_load_unexpected is not None:
                        for pat in cls._keys_to_ignore_on_load_unexpected:
                            unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

                    if len(unexpected_keys) > 0:
                        logger.warning(
                            f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
                        )
                else:  # else let accelerate handle loading and dispatching.
                    # Load weights and dispatch according to the device_map
                    # by default the device_map is None and the weights are loaded on the CPU
                    device_map = _determine_device_map(
                        model, device_map, max_memory, torch_dtype, keep_in_fp32_modules, hf_quantizer
                    )
                    if device_map is None and is_sharded:
                        # we load the parameters on the cpu
                        device_map = {"": "cpu"}
                    try:
                        accelerate.load_checkpoint_and_dispatch(
                            model,
                            model_file if not is_sharded else index_file,
                            device_map,
                            max_memory=max_memory,
                            offload_folder=offload_folder,
                            offload_state_dict=offload_state_dict,
                            dtype=torch_dtype,
                            strict=True,
                        )
                    except AttributeError as e:
                        # When using accelerate loading, we do not have the ability to load the state
                        # dict and rename the weight names manually. Additionally, accelerate skips
                        # torch loading conventions and directly writes into `module.{_buffers, _parameters}`
                        # (which look like they should be private variables?), so we can't use the standard hooks
                        # to rename parameters on load. We need to mimic the original weight names so the correct
                        # attributes are available. After we have loaded the weights, we convert the deprecated
                        # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert
                        # the weights so we don't have to do this again.
if "'Attention' object has no attribute" in str(e): logger.warning( f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}" " was saved with deprecated attention block weight names. We will load it with the deprecated attention block" " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion," " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint," " please also re-upload it or open a PR on the original repository." ) model._temp_convert_self_to_deprecated_attention_blocks() accelerate.load_checkpoint_and_dispatch( model,
906
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_utils.py
                                model_file if not is_sharded else index_file,
                                device_map,
                                max_memory=max_memory,
                                offload_folder=offload_folder,
                                offload_state_dict=offload_state_dict,
                                dtype=torch_dtype,
                                strict=True,
                            )
                            model._undo_temp_convert_self_to_deprecated_attention_blocks()
                        else:
                            raise e
                loading_info = {
                    "missing_keys": [],
                    "unexpected_keys": [],
                    "mismatched_keys": [],
                    "error_msgs": [],
                }
            else:
                model = cls.from_config(config, **unused_kwargs)

                state_dict = load_state_dict(
                    model_file, variant=variant, dduf_entries=dduf_entries, disable_mmap=disable_mmap
                )
                model._convert_deprecated_attention_blocks(state_dict)

                model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
                    model,
                    state_dict,
                    model_file,
                    pretrained_model_name_or_path,
                    ignore_mismatched_sizes=ignore_mismatched_sizes,
                )
                loading_info = {
                    "missing_keys": missing_keys,
                    "unexpected_keys": unexpected_keys,
                    "mismatched_keys": mismatched_keys,
                    "error_msgs": error_msgs,
                }

        if hf_quantizer is not None:
            hf_quantizer.postprocess_model(model)
            model.hf_quantizer = hf_quantizer

        if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
            raise ValueError(
                f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}."
            )
        # When using `use_keep_in_fp32_modules` if we do a global `to()` here, then we will
        # completely lose the effectivity of `use_keep_in_fp32_modules`.
        elif torch_dtype is not None and hf_quantizer is None and not use_keep_in_fp32_modules:
            model = model.to(torch_dtype)
        if hf_quantizer is not None:
            # We also make sure to purge `_pre_quantization_dtype` when we serialize
            # the model config because `_pre_quantization_dtype` is `torch.dtype`, not JSON serializable.
            model.register_to_config(_name_or_path=pretrained_model_name_or_path, _pre_quantization_dtype=torch_dtype)
        else:
            model.register_to_config(_name_or_path=pretrained_model_name_or_path)

        # Set model in evaluation mode to deactivate DropOut modules by default
        model.eval()
        if output_loading_info:
            return model, loading_info

        return model
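# --- Editor's usage sketch for the `from_pretrained` excerpt above. The repo id is
# illustrative; any diffusers checkpoint with a `unet` subfolder behaves the same.
import torch
from diffusers import UNet2DConditionModel

unet, loading_info = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    subfolder="unet",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,  # takes the accelerate/meta-device branch above
    output_loading_info=True,  # returns the `loading_info` dict built above
)
print(loading_info["missing_keys"], loading_info["unexpected_keys"])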
    # Adapted from `transformers`.
    @wraps(torch.nn.Module.cuda)
    def cuda(self, *args, **kwargs):
        # Checks if the model has been loaded in 4-bit or 8-bit with BNB
        if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
            if getattr(self, "is_loaded_in_8bit", False):
                raise ValueError(
                    "Calling `cuda()` is not supported for `8-bit` quantized models. "
                    " Please use the model as it is, since the model has already been set to the correct devices."
                )
            elif is_bitsandbytes_version("<", "0.43.2"):
                raise ValueError(
                    "Calling `cuda()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. "
                    f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2."
                )
        return super().cuda(*args, **kwargs)
    # Adapted from `transformers`.
    @wraps(torch.nn.Module.to)
    def to(self, *args, **kwargs):
        dtype_present_in_args = "dtype" in kwargs

        if not dtype_present_in_args:
            for arg in args:
                if isinstance(arg, torch.dtype):
                    dtype_present_in_args = True
                    break

        if getattr(self, "is_quantized", False):
            if dtype_present_in_args:
                raise ValueError(
                    "Casting a quantized model to a new `dtype` is unsupported. To set the dtype of unquantized layers, please "
                    "use the `torch_dtype` argument when loading the model using `from_pretrained` or `from_single_file`"
                )
if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: if getattr(self, "is_loaded_in_8bit", False): raise ValueError( "`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the" " model has already been set to the correct devices and casted to the correct `dtype`." ) elif is_bitsandbytes_version("<", "0.43.2"): raise ValueError( "Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. " f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2." ) return super().to(*args, **kwargs)
906
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_utils.py
    # Taken from `transformers`.
    def half(self, *args):
        # Checks if the model is quantized
        if getattr(self, "is_quantized", False):
            raise ValueError(
                "`.half()` is not supported for quantized model. Please use the model as it is, since the"
                " model has already been cast to the correct `dtype`."
            )
        else:
            return super().half(*args)

    # Taken from `transformers`.
    def float(self, *args):
        # Checks if the model is quantized
        if getattr(self, "is_quantized", False):
            raise ValueError(
                "`.float()` is not supported for quantized model. Please use the model as it is, since the"
                " model has already been cast to the correct `dtype`."
            )
        else:
            return super().float(*args)
    @classmethod
    def _load_pretrained_model(
        cls,
        model,
        state_dict: OrderedDict,
        resolved_archive_file,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        ignore_mismatched_sizes: bool = False,
    ):
        # Retrieve missing & unexpected_keys
        model_state_dict = model.state_dict()
        loaded_keys = list(state_dict.keys())
        expected_keys = list(model_state_dict.keys())
        original_loaded_keys = loaded_keys
        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Make sure we are able to load base models as well as derived models (with heads)
        model_to_load = model
        def _find_mismatched_keys(
            state_dict,
            model_state_dict,
            loaded_keys,
            ignore_mismatched_sizes,
        ):
            mismatched_keys = []
            if ignore_mismatched_sizes:
                for checkpoint_key in loaded_keys:
                    model_key = checkpoint_key

                    if (
                        model_key in model_state_dict
                        and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
                    ):
                        mismatched_keys.append(
                            (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)
                        )
                        del state_dict[checkpoint_key]
            return mismatched_keys
        if state_dict is not None:
            # Whole checkpoint
            mismatched_keys = _find_mismatched_keys(
                state_dict,
                model_state_dict,
                original_loaded_keys,
                ignore_mismatched_sizes,
            )
            error_msgs = _load_state_dict_into_model(model_to_load, state_dict)

        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
            if "size mismatch" in error_msg:
                error_msg += (
                    "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
                )
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
                f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
                f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task"
                " or with another architecture (e.g. initializing a BertForSequenceClassification model from a"
                " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
                f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly"
                " identical (initializing a BertForSequenceClassification model from a"
                " BertForSequenceClassification model)."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
                " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the"
                f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions"
                " without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" " able to use it for predictions and inference." )
906
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_utils.py
        return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs

    @classmethod
    def _get_signature_keys(cls, obj):
        parameters = inspect.signature(obj.__init__).parameters
        required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
        optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
        expected_modules = set(required_parameters.keys()) - {"self"}

        return expected_modules, optional_parameters

    # Adapted from `transformers` modeling_utils.py
    def _get_no_split_modules(self, device_map: str):
        """
        Get the modules of the model that should not be split when using device_map. We iterate through the modules to
        get the underlying `_no_split_modules`.

        Args:
            device_map (`str`):
                The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]
        Returns:
            `List[str]`: List of modules that should not be split
        """
        _no_split_modules = set()
        modules_to_check = [self]
        while len(modules_to_check) > 0:
            module = modules_to_check.pop(-1)
            # if the module does not appear in _no_split_modules, we also check the children
            if module.__class__.__name__ not in _no_split_modules:
                if isinstance(module, ModelMixin):
                    if module._no_split_modules is None:
                        raise ValueError(
                            f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model "
                            "class needs to implement the `_no_split_modules` attribute."
                        )
                    else:
                        _no_split_modules = _no_split_modules | set(module._no_split_modules)
                modules_to_check += list(module.children())
        return list(_no_split_modules)
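# --- Editor's sketch of what `_get_no_split_modules` consumes: a model class
# declares `_no_split_modules` (class names whose submodules must stay on one
# device), and the walk above unions these sets over all `ModelMixin` children.
# `MyModel` and its `dim` argument are invented names for illustration only.
import torch.nn as nn
from diffusers import ModelMixin
from diffusers.configuration_utils import ConfigMixin, register_to_config

class MyModel(ModelMixin, ConfigMixin):
    _no_split_modules = ["Linear"]  # device_map placement will not split Linear blocks

    @register_to_config
    def __init__(self, dim: int = 8):
        super().__init__()
        self.block = nn.Linear(dim, dim)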
    @property
    def device(self) -> torch.device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return get_parameter_device(self)

    @property
    def dtype(self) -> torch.dtype:
        """
        `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        return get_parameter_dtype(self)

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (trainable or non-embedding) parameters in the module.

        Args:
            only_trainable (`bool`, *optional*, defaults to `False`):
                Whether or not to return only the number of trainable parameters.
            exclude_embeddings (`bool`, *optional*, defaults to `False`):
                Whether or not to return only the number of non-embedding parameters.
        Returns:
            `int`: The number of parameters.

        Example:

        ```py
        from diffusers import UNet2DConditionModel

        model_id = "runwayml/stable-diffusion-v1-5"
        unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")
        unet.num_parameters(only_trainable=True)
        859520964
        ```
        """
        is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False)

        if is_loaded_in_4bit:
            if is_bitsandbytes_available():
                import bitsandbytes as bnb
            else:
                raise ValueError(
                    "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong"
                    " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. "
                )
        if exclude_embeddings:
            embedding_param_names = [
                f"{name}.weight"
                for name, module_type in self.named_modules()
                if isinstance(module_type, nn.Embedding)
            ]
            total_parameters = [
                parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
            ]
        else:
            total_parameters = list(self.parameters())

        total_numel = []
        for param in total_parameters:
            if param.requires_grad or not only_trainable:
                # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are
                # used for the 4bit quantization (uint8 tensors are stored)
                if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):
                    if hasattr(param, "element_size"):
                        num_bytes = param.element_size()
                    elif hasattr(param, "quant_storage"):
                        num_bytes = param.quant_storage.itemsize
                    else:
                        num_bytes = 1
                    total_numel.append(param.numel() * 2 * num_bytes)
                else:
                    total_numel.append(param.numel())

        return sum(total_numel)
    def get_memory_footprint(self, return_buffers=True):
        r"""
        Get the memory footprint of a model. This will return the memory footprint of the current model in bytes.
        Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from
        the PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2
        Arguments:
            return_buffers (`bool`, *optional*, defaults to `True`):
                Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers
                are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch
                norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
        """
        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
        if return_buffers:
            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
            mem = mem + mem_bufs
        return mem

    def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None:
        deprecated_attention_block_paths = []
        def recursive_find_attn_block(name, module):
            if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
                deprecated_attention_block_paths.append(name)

            for sub_name, sub_module in module.named_children():
                sub_name = sub_name if name == "" else f"{name}.{sub_name}"
                recursive_find_attn_block(sub_name, sub_module)

        recursive_find_attn_block("", self)

        # NOTE: we have to check if the deprecated parameters are in the state dict
        # because it is possible we are loading from a state dict that was already
        # converted

        for path in deprecated_attention_block_paths:
            # group_norm path stays the same
            # query -> to_q
            if f"{path}.query.weight" in state_dict:
                state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight")
            if f"{path}.query.bias" in state_dict:
                state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias")

            # key -> to_k
            if f"{path}.key.weight" in state_dict:
                state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight")
            if f"{path}.key.bias" in state_dict:
                state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias")

            # value -> to_v
            if f"{path}.value.weight" in state_dict:
                state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight")
            if f"{path}.value.bias" in state_dict:
                state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias")
            # proj_attn -> to_out.0
            if f"{path}.proj_attn.weight" in state_dict:
                state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight")
            if f"{path}.proj_attn.bias" in state_dict:
                state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias")

    def _temp_convert_self_to_deprecated_attention_blocks(self) -> None:
        deprecated_attention_block_modules = []

        def recursive_find_attn_block(module):
            if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
                deprecated_attention_block_modules.append(module)

            for sub_module in module.children():
                recursive_find_attn_block(sub_module)

        recursive_find_attn_block(self)
        for module in deprecated_attention_block_modules:
            module.query = module.to_q
            module.key = module.to_k
            module.value = module.to_v
            module.proj_attn = module.to_out[0]

            # We don't _have_ to delete the old attributes, but it's helpful to ensure
            # that _all_ the weights are loaded into the new attributes and we're not
            # making an incorrect assumption that this model should be converted when
            # it really shouldn't be.
            del module.to_q
            del module.to_k
            del module.to_v
            del module.to_out

    def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None:
        deprecated_attention_block_modules = []

        def recursive_find_attn_block(module) -> None:
            if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
                deprecated_attention_block_modules.append(module)
            for sub_module in module.children():
                recursive_find_attn_block(sub_module)

        recursive_find_attn_block(self)

        for module in deprecated_attention_block_modules:
            module.to_q = module.query
            module.to_k = module.key
            module.to_v = module.value
            module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])

            del module.query
            del module.key
            del module.value
            del module.proj_attn
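# --- Editor's toy sketch of the key renaming performed by
# `_convert_deprecated_attention_blocks` above: old `query`/`proj_attn` entries
# are popped into the `to_q`/`to_out.0` layout (same pattern for key/value).
import torch

state_dict = {"mid.attn.query.weight": torch.zeros(4, 4), "mid.attn.proj_attn.bias": torch.zeros(4)}
path = "mid.attn"
if f"{path}.query.weight" in state_dict:
    state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight")
if f"{path}.proj_attn.bias" in state_dict:
    state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias")
assert sorted(state_dict) == ["mid.attn.to_out.0.bias", "mid.attn.to_q.weight"]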
class LegacyModelMixin(ModelMixin):
    r"""
    A subclass of `ModelMixin` to resolve class mapping from legacy classes (like `Transformer2DModel`) to more
    pipeline-specific classes (like `DiTTransformer2DModel`).
    """

    @classmethod
    @validate_hf_hub_args
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        # To prevent dependency import problem.
        from .model_loading_utils import _fetch_remapped_cls_from_config

        # Create a copy of the kwargs so that we don't mess with the keyword arguments in the downstream calls.
        kwargs_copy = kwargs.copy()

        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        # Load config if we don't provide a configuration
        config_path = pretrained_model_name_or_path

        user_agent = {
            "diffusers": __version__,
            "file_type": "model",
            "framework": "pytorch",
        }

        # load config
        config, _, _ = cls.load_config(
            config_path,
            cache_dir=cache_dir,
            return_unused_kwargs=True,
            return_commit_hash=True,
            force_download=force_download,
            proxies=proxies,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            subfolder=subfolder,
            user_agent=user_agent,
            **kwargs,
        )
        # resolve remapping
        remapped_class = _fetch_remapped_cls_from_config(config, cls)

        return remapped_class.from_pretrained(pretrained_model_name_or_path, **kwargs_copy)
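# --- Editor's hedged sketch of the remapping behaviour: loading through a legacy
# class is expected to return the pipeline-specific class resolved from the
# checkpoint config. The repo id is illustrative, and the exact returned class
# depends on the checkpoint and diffusers version.
from diffusers import Transformer2DModel

model = Transformer2DModel.from_pretrained("facebook/DiT-XL-2-256", subfolder="transformer")
print(type(model).__name__)  # e.g. "DiTTransformer2DModel"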
# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/embeddings_flax.py

class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step Embedding Module. Learns embeddings for input time steps.

    Args:
        time_embed_dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            The data type for the embedding parameters.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    r"""
    Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239

    Args:
        dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension.
        flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
            Whether to flip the sinusoidal function from sine to cosine.
        freq_shift (`float`, *optional*, defaults to `1`):
            Frequency shift applied to the sinusoidal embeddings.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
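# --- Editor's usage sketch chaining the two modules above: integer timesteps ->
# sinusoidal features -> learned MLP embedding. Shapes are illustrative.
import jax
import jax.numpy as jnp

ts = FlaxTimesteps(dim=32)
t = jnp.array([1, 500])
sin_emb = ts.apply(ts.init(jax.random.PRNGKey(0), t), t)  # (2, 32), no learned params

mlp = FlaxTimestepEmbedding(time_embed_dim=128)
params = mlp.init(jax.random.PRNGKey(1), sin_emb)
temb = mlp.apply(params, sin_emb)  # (2, 128)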
# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/lora.py

class PatchedLoraProjection(torch.nn.Module):
    def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        deprecation_message = "Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("PatchedLoraProjection", "1.0.0", deprecation_message)

        super().__init__()
        from ..models.lora import LoRALinearLayer

        self.regular_linear_layer = regular_linear_layer

        device = self.regular_linear_layer.weight.device
        if dtype is None:
            dtype = self.regular_linear_layer.weight.dtype

        self.lora_linear_layer = LoRALinearLayer(
            self.regular_linear_layer.in_features,
            self.regular_linear_layer.out_features,
            network_alpha=network_alpha,
            device=device,
            dtype=dtype,
            rank=rank,
        )

        self.lora_scale = lora_scale
    # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved
    # when saving the whole text encoder model and when LoRA is unloaded or fused
    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        if self.lora_linear_layer is None:
            return self.regular_linear_layer.state_dict(
                *args, destination=destination, prefix=prefix, keep_vars=keep_vars
            )

        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)

    def _fuse_lora(self, lora_scale=1.0, safe_fusing=False):
        if self.lora_linear_layer is None:
            return

        dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device

        w_orig = self.regular_linear_layer.weight.data.float()
        w_up = self.lora_linear_layer.up.weight.data.float()
        w_down = self.lora_linear_layer.down.weight.data.float()
        if self.lora_linear_layer.network_alpha is not None:
            w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank

        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_linear_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self.lora_scale = lora_scale
    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.regular_linear_layer.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, input):
        if self.lora_scale is None:
            self.lora_scale = 1.0
        if self.lora_linear_layer is None:
            return self.regular_linear_layer(input)
        return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input))
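# --- Editor's sketch verifying the fusion arithmetic used in `_fuse_lora` above:
# `torch.bmm(w_up[None, :], w_down[None, :])[0]` is just the matrix product
# `w_up @ w_down`, batched over a singleton dimension.
import torch

w_orig = torch.randn(8, 8)
w_up, w_down = torch.randn(8, 4), torch.randn(4, 8)
lora_scale = 0.5
fused = w_orig + lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0]
assert torch.allclose(fused, w_orig + lora_scale * (w_up @ w_down))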
class LoRALinearLayer(nn.Module):
    r"""
    A linear layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        device (`torch.device`, `optional`, defaults to `None`):
            The device to use for the layer's weights.
        dtype (`torch.dtype`, `optional`, defaults to `None`):
            The dtype to use for the layer's weights.
    """
    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        network_alpha: Optional[float] = None,
        device: Optional[Union[torch.device, str]] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()

        deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRALinearLayer", "1.0.0", deprecation_message)
        self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
        self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank
        self.out_features = out_features
        self.in_features = in_features

        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        dtype = self.down.weight.dtype

        down_hidden_states = self.down(hidden_states.to(dtype))
        up_hidden_states = self.up(down_hidden_states)

        if self.network_alpha is not None:
            up_hidden_states *= self.network_alpha / self.rank
        return up_hidden_states.to(orig_dtype)
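# --- Editor's sketch: because `up.weight` is zero-initialized above, a freshly
# constructed `LoRALinearLayer` contributes an exactly-zero delta until trained.
import torch

layer = LoRALinearLayer(16, 16, rank=4)
x = torch.randn(2, 16)
assert torch.equal(layer(x), torch.zeros(2, 16))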
class LoRAConv2dLayer(nn.Module):
    r"""
    A convolutional layer that is used with LoRA.
    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The kernel size of the convolution.
        stride (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The stride of the convolution.
        padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0):
            The padding of the convolution.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
    """
    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        kernel_size: Union[int, Tuple[int, int]] = (1, 1),
        stride: Union[int, Tuple[int, int]] = (1, 1),
        padding: Union[int, Tuple[int, int], str] = 0,
        network_alpha: Optional[float] = None,
    ):
        super().__init__()

        deprecation_message = "Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRAConv2dLayer", "1.0.0", deprecation_message)

        self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        # according to the official kohya_ss trainer kernel_size are always fixed for the up layer
        # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
        self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank

        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        dtype = self.down.weight.dtype

        down_hidden_states = self.down(hidden_states.to(dtype))
        up_hidden_states = self.up(down_hidden_states)

        if self.network_alpha is not None:
            up_hidden_states *= self.network_alpha / self.rank

        return up_hidden_states.to(orig_dtype)
class LoRACompatibleConv(nn.Conv2d):
    """
    A convolutional layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):
        deprecation_message = "Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRACompatibleConv", "1.0.0", deprecation_message)

        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("set_lora_layer", "1.0.0", deprecation_message)

        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device
        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1))
        fusion = fusion.reshape((w_orig.shape))
        fused_weight = w_orig + (lora_scale * fusion)

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None
        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale

    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.data.dtype, fused_weight.data.device

        self.w_up = self.w_up.to(device=device).float()
        self.w_down = self.w_down.to(device).float()

        fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1))
        fusion = fusion.reshape((fused_weight.shape))
        unfused_weight = fused_weight.float() - (self._lora_scale * fusion)
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None
    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        if self.padding_mode != "zeros":
            hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode)
            padding = (0, 0)
        else:
            padding = self.padding

        original_outputs = F.conv2d(
            hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups
        )

        if self.lora_layer is None:
            return original_outputs
        else:
            return original_outputs + (scale * self.lora_layer(hidden_states))
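# --- Editor's sketch of the conv-kernel fusion above: flatten both LoRA kernels,
# matrix-multiply, and reshape back into the original (out, in, kH, kW) kernel.
import torch

w_orig = torch.randn(8, 4, 3, 3)  # conv weight: (out, in, kH, kW)
w_up = torch.randn(8, 2, 1, 1)    # up: rank -> out channels, 1x1 kernel
w_down = torch.randn(2, 4, 3, 3)  # down: in -> rank channels, 3x3 kernel
fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1))  # (8, 36)
assert fusion.reshape(w_orig.shape).shape == w_orig.shape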
class LoRACompatibleLinear(nn.Linear):
    """
    A Linear layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
        deprecation_message = "Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRACompatibleLinear", "1.0.0", deprecation_message)

        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):
        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("set_lora_layer", "1.0.0", deprecation_message)

        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device
        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale
    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        if self.lora_layer is None:
            out = super().forward(hidden_states)
            return out
        else:
            out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))
            return out
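# --- Editor's sketch: `_fuse_lora` folds the LoRA delta into `weight` and
# `_unfuse_lora` subtracts it again, so a fuse/unfuse round trip should restore
# the original weight up to float32 round-off. (Both classes are deprecated and
# emit warnings on construction.)
import torch

linear = LoRACompatibleLinear(16, 16, lora_layer=LoRALinearLayer(16, 16, rank=4))
w_before = linear.weight.data.clone()
linear._fuse_lora(lora_scale=1.0)
linear._unfuse_lora()
assert torch.allclose(linear.weight.data, w_before, atol=1e-6)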
# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py

class FlaxDecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            The decoded output sample from the last layer of the model.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            The `dtype` of the parameters.
    """

    sample: jnp.ndarray
class FlaxAutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`FlaxDiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`.
            `FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    latent_dist: "FlaxDiagonalGaussianDistribution"
class FlaxUpsample2D(nn.Module):
    """
    Flax implementation of 2D Upsample layer

    Args:
        in_channels (`int`):
            Input channels
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.in_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
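# --- Editor's shape sketch for the upsampler above (NHWC layout, as used
# throughout this file): nearest-neighbour resize doubles H and W, and the
# padded 3x3 conv preserves them.
import jax
import jax.numpy as jnp

up = FlaxUpsample2D(in_channels=4)
x = jnp.ones((1, 8, 8, 4))
y = up.apply(up.init(jax.random.PRNGKey(0), x), x)
assert y.shape == (1, 16, 16, 4)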
class FlaxDownsample2D(nn.Module):
    """
    Flax implementation of 2D Downsample layer

    Args:
        in_channels (`int`):
            Input channels
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.in_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding="VALID",
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    """
    Flax implementation of 2D Resnet Block.

    Args:
        in_channels (`int`):
            Input channels
        out_channels (`int`):
            Output channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups to use for group norm.
        use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`):
            Whether to use `nin_shortcut`. This activates a new layer inside ResNet block
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    out_channels: int = None
    dropout: float = 0.0
    groups: int = 32
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
        self.dropout_layer = nn.Dropout(self.dropout)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )
    def __call__(self, hidden_states, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
class FlaxAttentionBlock(nn.Module):
    r"""
    Flax Convolutional based multi-head attention block for diffusion-based VAE.

    Parameters:
        channels (:obj:`int`):
            Input channels
        num_head_channels (:obj:`int`, *optional*, defaults to `None`):
            Number of attention heads
        num_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups to use for group norm
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    channels: int
    num_head_channels: int = None
    num_groups: int = 32
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1

        dense = partial(nn.Dense, self.channels, dtype=self.dtype)
        self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6)
        self.query, self.key, self.value = dense(), dense(), dense()
        self.proj_attn = dense()

    def transpose_for_scores(self, projection):
        new_projection_shape = projection.shape[:-1] + (self.num_heads, -1)
        # move heads to 2nd position (B, T, H * D) -> (B, T, H, D)
        new_projection = projection.reshape(new_projection_shape)
        # (B, T, H, D) -> (B, H, T, D)
        new_projection = jnp.transpose(new_projection, (0, 2, 1, 3))
        return new_projection

    def __call__(self, hidden_states):
        residual = hidden_states
        batch, height, width, channels = hidden_states.shape

        hidden_states = self.group_norm(hidden_states)

        hidden_states = hidden_states.reshape((batch, height * width, channels))

        query = self.query(hidden_states)
        key = self.key(hidden_states)
        value = self.value(hidden_states)
        # transpose
        query = self.transpose_for_scores(query)
        key = self.transpose_for_scores(key)
        value = self.transpose_for_scores(value)

        # compute attentions
        scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads))
        attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale)
        attn_weights = nn.softmax(attn_weights, axis=-1)

        # attend to values
        hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights)

        hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3))
        new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,)
        hidden_states = hidden_states.reshape(new_hidden_states_shape)

        hidden_states = self.proj_attn(hidden_states)
        hidden_states = hidden_states.reshape((batch, height, width, channels))
        hidden_states = hidden_states + residual
        return hidden_states
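# --- Editor's sketch of the scaling trick above: multiplying both query and key
# by 1/sqrt(sqrt(d)) before the einsum equals the usual 1/sqrt(d) applied once,
# but keeps intermediate magnitudes smaller in low-precision dtypes.
import math
import jax.numpy as jnp

d = 16
q = jnp.ones((1, 4, d))
k = jnp.ones((1, 4, d))
scale = 1 / math.sqrt(math.sqrt(d))
a = jnp.einsum("...qc,...kc->...qk", q * scale, k * scale)
b = jnp.einsum("...qc,...kc->...qk", q, k) / math.sqrt(d)
assert jnp.allclose(a, b)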
class FlaxDownEncoderBlock2D(nn.Module):
    r"""
    Flax Resnet blocks-based Encoder block for diffusion-based VAE.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        out_channels (:obj:`int`):
            Output channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of Resnet layer block
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups to use for the Resnet block group norm
        add_downsample (:obj:`bool`, *optional*, defaults to `True`):
            Whether to add downsample layer
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout=self.dropout,
                groups=self.resnet_groups,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, deterministic=deterministic)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)

        return hidden_states
class FlaxUpDecoderBlock2D(nn.Module):
    r"""
    Flax Resnet blocks-based Decoder block for diffusion-based VAE.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        out_channels (:obj:`int`):
            Output channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of Resnet layer block
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups to use for the Resnet block group norm
        add_upsample (:obj:`bool`, *optional*, defaults to `True`):
            Whether to add upsample layer
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout=self.dropout,
                groups=self.resnet_groups,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2D(nn.Module):
    r"""
    Flax Unet Mid-Block module.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of Resnet layer block
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups to use for the Resnet and Attention block group norm
        num_attention_heads (:obj:`int`, *optional*, defaults to `1`):
            Number of attention heads for each attention block
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    num_attention_heads: int = 1
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32)

        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout=self.dropout,
                groups=resnet_groups,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxAttentionBlock(
                channels=self.in_channels,
                num_head_channels=self.num_attention_heads,
                num_groups=resnet_groups,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
923
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout=self.dropout,
                groups=resnet_groups,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, deterministic=deterministic)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states)
            hidden_states = resnet(hidden_states, deterministic=deterministic)

        return hidden_states
923
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
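A shape-preserving sketch for the mid-block (hedged example; passing num_attention_heads=None mirrors how FlaxEncoder and FlaxDecoder instantiate it below, which makes FlaxAttentionBlock fall back to a single attention head):

import jax
import jax.numpy as jnp
from diffusers.models.vae_flax import FlaxUNetMidBlock2D

mid = FlaxUNetMidBlock2D(in_channels=64, num_attention_heads=None)
x = jnp.zeros((1, 8, 8, 64))
variables = mid.init(jax.random.PRNGKey(0), x)
y = mid.apply(variables, x)
assert y.shape == x.shape  # resnet -> (attention -> resnet) keeps channels and spatial dims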
class FlaxEncoder(nn.Module):
    r"""
    Flax implementation of the VAE Encoder.

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
924
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    Parameters:
        in_channels (:obj:`int`, *optional*, defaults to 3):
            Input channels
        out_channels (:obj:`int`, *optional*, defaults to 3):
            Output channels
        down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
            DownEncoder block type
        block_out_channels (:obj:`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple containing the number of output channels for each block
        layers_per_block (:obj:`int`, *optional*, defaults to `2`):
            Number of ResNet layers for each block
        norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups for normalization
        act_fn (:obj:`str`, *optional*, defaults to `silu`):
            Activation function
        double_z (:obj:`bool`, *optional*, defaults to `False`):
            Whether to double the last output channels
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
924
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    in_channels: int = 3
    out_channels: int = 3
    down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    act_fn: str = "silu"
    double_z: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        block_out_channels = self.block_out_channels

        # in
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # downsampling
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, _ in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
924
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
            down_block = FlaxDownEncoderBlock2D(
                in_channels=input_channel,
                out_channels=output_channel,
                num_layers=self.layers_per_block,
                resnet_groups=self.norm_num_groups,
                add_downsample=not is_final_block,
                dtype=self.dtype,
            )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # middle
        self.mid_block = FlaxUNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_groups=self.norm_num_groups,
            num_attention_heads=None,
            dtype=self.dtype,
        )
924
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
        # end
        conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels
        self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
        self.conv_out = nn.Conv(
            conv_out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, sample, deterministic: bool = True):
        # in
        sample = self.conv_in(sample)

        # downsampling
        for block in self.down_blocks:
            sample = block(sample, deterministic=deterministic)

        # middle
        sample = self.mid_block(sample, deterministic=deterministic)

        # end
        sample = self.conv_norm_out(sample)
        sample = nn.swish(sample)
        sample = self.conv_out(sample)

        return sample
924
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
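Putting the encoder together (hedged sketch under the same import assumptions; with two down blocks only the first one downsamples, and double_z=True doubles the output channels to carry mean and logvar):

import jax
import jax.numpy as jnp
from diffusers.models.vae_flax import FlaxEncoder

enc = FlaxEncoder(
    in_channels=3,
    out_channels=4,
    down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    double_z=True,
)
x = jnp.zeros((1, 32, 32, 3))
variables = enc.init(jax.random.PRNGKey(0), x)
moments = enc.apply(variables, x)
assert moments.shape == (1, 16, 16, 8)  # one downsample; 2 * out_channels because double_z=True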
class FlaxDecoder(nn.Module):
    r"""
    Flax implementation of the VAE Decoder.

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
925
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    Parameters:
        in_channels (:obj:`int`, *optional*, defaults to 3):
            Input channels
        out_channels (:obj:`int`, *optional*, defaults to 3):
            Output channels
        up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
            UpDecoder block type
        block_out_channels (:obj:`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple containing the number of output channels for each block
        layers_per_block (:obj:`int`, *optional*, defaults to `2`):
            Number of ResNet layers for each block
        norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
            The number of groups for normalization
        act_fn (:obj:`str`, *optional*, defaults to `silu`):
            Activation function
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
925
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    in_channels: int = 3
    out_channels: int = 3
    up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    act_fn: str = "silu"
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        block_out_channels = self.block_out_channels

        # z to block_in
        self.conv_in = nn.Conv(
            block_out_channels[-1],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # middle
        self.mid_block = FlaxUNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_groups=self.norm_num_groups,
            num_attention_heads=None,
            dtype=self.dtype,
        )
925
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
        # upsampling
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        up_blocks = []
        for i, _ in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = FlaxUpDecoderBlock2D(
                in_channels=prev_output_channel,
                out_channels=output_channel,
                num_layers=self.layers_per_block + 1,
                resnet_groups=self.norm_num_groups,
                add_upsample=not is_final_block,
                dtype=self.dtype,
            )
            up_blocks.append(up_block)

        self.up_blocks = up_blocks
925
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
        # end
        self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, sample, deterministic: bool = True):
        # z to block_in
        sample = self.conv_in(sample)

        # middle
        sample = self.mid_block(sample, deterministic=deterministic)

        # upsampling
        for block in self.up_blocks:
            sample = block(sample, deterministic=deterministic)

        sample = self.conv_norm_out(sample)
        sample = nn.swish(sample)
        sample = self.conv_out(sample)

        return sample
925
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
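And the matching decoder sketch (hedged; each up block stacks layers_per_block + 1 resnets, and the final block skips the upsample):

import jax
import jax.numpy as jnp
from diffusers.models.vae_flax import FlaxDecoder

dec = FlaxDecoder(
    in_channels=4,
    out_channels=3,
    up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
)
z = jnp.zeros((1, 16, 16, 4))
variables = dec.init(jax.random.PRNGKey(0), z)
img = dec.apply(variables, z)
assert img.shape == (1, 32, 32, 3)  # one upsample restores the spatial dims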
class FlaxDiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        # Last axis to account for channels-last
        self.mean, self.logvar = jnp.split(parameters, 2, axis=-1)
        self.logvar = jnp.clip(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = jnp.exp(0.5 * self.logvar)
        self.var = jnp.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = jnp.zeros_like(self.mean)

    def sample(self, key):
        return self.mean + self.std * jax.random.normal(key, self.mean.shape)

    def kl(self, other=None):
        if self.deterministic:
            return jnp.array([0.0])

        if other is None:
            return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3])

        return 0.5 * jnp.sum(
            jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,
            axis=[1, 2, 3],
        )
926
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    def nll(self, sample, axis=[1, 2, 3]):
        if self.deterministic:
            return jnp.array([0.0])

        logtwopi = jnp.log(2.0 * jnp.pi)
        return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis)

    def mode(self):
        return self.mean
926
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
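A small numeric check of the distribution helper (hedged example; with all-zero parameters the mean is 0 and logvar is 0, so the KL against a standard normal vanishes):

import jax
import jax.numpy as jnp
from diffusers.models.vae_flax import FlaxDiagonalGaussianDistribution

moments = jnp.zeros((1, 8, 8, 8))  # last axis holds mean and logvar for 4 latent channels
dist = FlaxDiagonalGaussianDistribution(moments)
z = dist.sample(jax.random.PRNGKey(0))
assert z.shape == (1, 8, 8, 4)
assert float(dist.kl()[0]) == 0.0  # 0.5 * sum(mean**2 + var - 1 - logvar) == 0 here
assert (dist.mode() == dist.mean).all()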
class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    Flax implementation of a VAE model with KL loss for decoding latent representations.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
    implemented for all models (such as downloading or saving).

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
    Parameters:
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3):
            Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `1`):
            Number of ResNet layers for each block.
        act_fn (`str`, *optional*, defaults to `silu`):
            The activation function to use.
        latent_channels (`int`, *optional*, defaults to `4`):
            Number of channels in the latent space.
        norm_num_groups (`int`, *optional*, defaults to `32`):
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
            The number of groups for normalization.
        sample_size (`int`, *optional*, defaults to 32):
            Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula:
            `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
            Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            The `dtype` of the parameters.
    """
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
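A worked example of the latent scaling described in the docstring above (hedged; the array is a stand-in for real encoder latents):

import jax.numpy as jnp

scaling_factor = 0.18215
z = jnp.ones((1, 32, 32, 4))          # hypothetical latents sampled from the posterior
z_diff = z * scaling_factor           # `z = z * scaling_factor` before the diffusion model
z_back = 1 / scaling_factor * z_diff  # `z = 1 / scaling_factor * z` before decoding
assert jnp.allclose(z_back, z)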
    in_channels: int = 3
    out_channels: int = 3
    down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
    up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 1
    act_fn: str = "silu"
    latent_channels: int = 4
    norm_num_groups: int = 32
    sample_size: int = 32
    scaling_factor: float = 0.18215
    dtype: jnp.dtype = jnp.float32
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py
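A hedged loading sketch: `FlaxModelMixin.from_pretrained` returns the module together with its parameters, so inference then goes through `apply`. This assumes the referenced checkpoint ships Flax weights in its `vae` subfolder:

from diffusers import FlaxAutoencoderKL

vae, vae_params = FlaxAutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# inference then goes through vae.apply({"params": vae_params}, ...) using the encode/decode methods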
    def setup(self):
        self.encoder = FlaxEncoder(
            in_channels=self.config.in_channels,
            out_channels=self.config.latent_channels,
            down_block_types=self.config.down_block_types,
            block_out_channels=self.config.block_out_channels,
            layers_per_block=self.config.layers_per_block,
            act_fn=self.config.act_fn,
            norm_num_groups=self.config.norm_num_groups,
            double_z=True,
            dtype=self.dtype,
        )
        self.decoder = FlaxDecoder(
            in_channels=self.config.latent_channels,
            out_channels=self.config.out_channels,
            up_block_types=self.config.up_block_types,
            block_out_channels=self.config.block_out_channels,
            layers_per_block=self.config.layers_per_block,
            norm_num_groups=self.config.norm_num_groups,
            act_fn=self.config.act_fn,
            dtype=self.dtype,
        )
        self.quant_conv = nn.Conv(
927
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/vae_flax.py