Dataset columns: text (string, lengths 1–1.02k) | class_index (int64, 0–1.38k) | source (string, 431 classes)
        encoder_hid_dim_type: Optional[str] = None,
        attention_head_dim: Union[int, Tuple[int, ...]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        addition_time_embed_dim: Optional[int] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        projection_class_embeddings_input_dim: Optional[int] = None,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
        global_pool_conditions: bool = False,
        addition_embed_type_num_heads: int = 64,
    ):
828
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet.py
deprecation_message = "Importing `ControlNetModel` from `diffusers.models.controlnet` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet import ControlNetModel`, instead." deprecate("diffusers.models.controlnet.ControlNetModel", "0.34", deprecation_message) super().__init__( in_channels=in_channels, conditioning_channels=conditioning_channels, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, down_block_types=down_block_types, mid_block_type=mid_block_type, only_cross_attention=only_cross_attention, block_out_channels=block_out_channels, layers_per_block=layers_per_block, downsample_padding=downsample_padding, mid_block_scale_factor=mid_block_scale_factor, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps,
828
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet.py
            cross_attention_dim=cross_attention_dim,
            transformer_layers_per_block=transformer_layers_per_block,
            encoder_hid_dim=encoder_hid_dim,
            encoder_hid_dim_type=encoder_hid_dim_type,
            attention_head_dim=attention_head_dim,
            num_attention_heads=num_attention_heads,
            use_linear_projection=use_linear_projection,
            class_embed_type=class_embed_type,
            addition_embed_type=addition_embed_type,
            addition_time_embed_dim=addition_time_embed_dim,
            num_class_embeds=num_class_embeds,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
            controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
            conditioning_embedding_out_channels=conditioning_embedding_out_channels,
            global_pool_conditions=global_pool_conditions,
828
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet.py
            addition_embed_type_num_heads=addition_embed_type_num_heads,
        )
828
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet.py
class ControlNetConditioningEmbedding(ControlNetConditioningEmbedding):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `ControlNetConditioningEmbedding` from `diffusers.models.controlnet` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet import ControlNetConditioningEmbedding`, instead."
        deprecate("diffusers.models.controlnet.ControlNetConditioningEmbedding", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)
829
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet.py
class FlaxModelMixin(PushToHubMixin):
    r"""
    Base class for all Flax models.

    [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
    saving models.

        - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].
    """

    config_name = CONFIG_NAME
    _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
    _flax_internal_args = ["name", "parent", "dtype"]

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
        """
        return cls(config, **kwargs)

    def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
        """
        Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
        """
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
        def conditional_cast(param):
            if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
                param = param.astype(dtype)
            return param

        if mask is None:
            return jax.tree_map(conditional_cast, params)

        flat_params = flatten_dict(params)
        flat_mask, _ = jax.tree_flatten(mask)

        for masked, key in zip(flat_mask, flat_params.keys()):
            if masked:
                param = flat_params[key]
                flat_params[key] = conditional_cast(param)

        return unflatten_dict(flat_params)

    def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
        the `params` in place.
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
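To see the masked-cast path above in isolation, here is a standalone sketch (not diffusers code; it flattens the mask with `flatten_dict` as well, which sidesteps any leaf-ordering mismatch between `jax.tree_flatten` and `flatten_dict`):

```python
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict

# toy parameter tree and a mask with the same structure (illustrative names)
toy_params = {"dense": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}
toy_mask = {"dense": {"kernel": True, "bias": False}}  # cast the kernel, skip the bias

flat_params = flatten_dict(toy_params)
flat_mask = flatten_dict(toy_mask)

for key, masked in flat_mask.items():
    if masked:
        flat_params[key] = flat_params[key].astype(jnp.bfloat16)

result = unflatten_dict(flat_params)
print(result["dense"]["kernel"].dtype)  # bfloat16
print(result["dense"]["bias"].dtype)    # float32
```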
        This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full
        half-precision training or to save weights in bfloat16 for inference in order to save memory and improve
        speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be
                `True` for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
        >>> params = model.to_bf16(params)
        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> params = model.to_bf16(params, mask)
        ```"""
        return self._cast_floating_to(params, jnp.bfloat16, mask)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
    def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
        model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be
                `True` for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        >>> # Download model and configuration from huggingface.co
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model params will be in fp32, to illustrate the use of this method,
        >>> # we'll first cast to fp16 and back to fp32
        >>> params = model.to_fp16(params)
        >>> # now cast back to fp32
        >>> params = model.to_fp32(params)
        ```"""
        return self._cast_floating_to(params, jnp.float32, mask)

    def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast
        the `params` in place.

        This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full
        half-precision training or to save weights in float16 for inference in order to save memory and improve
        speed.
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be
                `True` for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model params will be in fp32, to cast these to float16
        >>> params = model.to_fp16(params)
        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> params = model.to_fp16(params, mask)
        ```"""
        return self._cast_floating_to(params, jnp.float16, mask)

    def init_weights(self, rng: jax.Array) -> Dict:
        raise NotImplementedError(f"init_weights method has to be implemented for {self}")

    @classmethod
    @validate_hf_hub_args
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        dtype: jnp.dtype = jnp.float32,
        *model_args,
        **kwargs,
    ):
        r"""
        Instantiate a pretrained Flax model from a pretrained model configuration.
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      using [`~FlaxModelMixin.save_pretrained`].
            dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
                The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
                `jax.numpy.bfloat16` (on TPUs).

                This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
                specified, all the computation will be performed with the given `dtype`.

                <Tip>
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
                This only specifies the dtype of the *computation* and does not influence the dtype of model
                parameters. If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`]
                and [`~FlaxModelMixin.to_bf16`].

                </Tip>

            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments are passed to the underlying model's `__init__` method.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file.
            kwargs (remaining dictionary of keyword arguments, *optional*):
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
                Can be used to update the configuration object (after it is loaded) and initiate the model (for
                example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
                - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying model's
                  `__init__` method (we assume all relevant updates to the configuration have already been done).
                - If a configuration is not provided, `kwargs` are first passed to the configuration class
                  initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds to a
                  configuration attribute is used to override said attribute with the supplied `kwargs` value.
                  Remaining keys that do not correspond to any configuration attribute are passed to the underlying
                  model's `__init__` function.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        >>> # Download model and configuration from huggingface.co and cache.
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/")
        ```

        If you get the error message below, you need to finetune the weights for your downstream task:
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        ```bash
        Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
        - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
        You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
        ```
        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        from_pt = kwargs.pop("from_pt", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        user_agent = {
            "diffusers": __version__,
            "file_type": "model",
            "framework": "flax",
        }

        # Load config if we don't provide one
        if config is None:
            config, unused_kwargs = cls.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder=subfolder,
                **kwargs,
            )

        model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        # Load model
        pretrained_path_with_subfolder = (
            pretrained_model_name_or_path
            if subfolder is None
            else os.path.join(pretrained_model_name_or_path, subfolder)
        )

        if os.path.isdir(pretrained_path_with_subfolder):
            if from_pt:
                if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} "
                    )
                model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)
            elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):
                # Load from a Flax checkpoint
                model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)
            # Check if pytorch weights exist instead
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
            elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
                raise EnvironmentError(
                    f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model"
                    " using `from_pt=True`."
                )
            else:
                raise EnvironmentError(
                    f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
                    f"{pretrained_path_with_subfolder}."
                )
        else:
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
                    subfolder=subfolder,
                    revision=revision,
                )
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                    "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                    "token having permission to this repo with `token` or log in with `huggingface-cli login`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                    "this model name. Check the model page at "
                    f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                raise EnvironmentError(
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" f"{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your" " internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." )
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
                )
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        if from_pt:
            if is_torch_available():
                from .modeling_utils import load_state_dict
            else:
                raise EnvironmentError(
                    "Can't load the model in PyTorch format because PyTorch is not installed. "
                    "Please, install PyTorch or use native Flax weights."
                )

            # Step 1: Get the pytorch file
            pytorch_model_file = load_state_dict(model_file)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
            # Step 2: Convert the weights
            state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)
        else:
            try:
                with open(model_file, "rb") as state_f:
                    state = from_bytes(cls, state_f.read())
            except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
                try:
                    with open(model_file) as f:
                        if f.read().startswith("version"):
                            raise OSError(
                                "You seem to have cloned a repository without having git-lfs installed. Please"
                                " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                                " folder you cloned."
                            )
                        else:
                            raise ValueError from e
                except (UnicodeDecodeError, ValueError):
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ") # make sure all arrays are stored as jnp.ndarray # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: # https://github.com/google/flax/issues/1261 state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        # flatten dicts
        state = flatten_dict(state)

        params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))
        required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())

        shape_state = flatten_dict(unfreeze(params_shape_tree))

        missing_keys = required_params - set(state.keys())
        unexpected_keys = set(state.keys()) - required_params

        if missing_keys:
            logger.warning(
                f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
                "Make sure to call model.init_weights to initialize the missing weights."
            )
            cls._missing_keys = missing_keys
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
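The missing/unexpected bookkeeping above is plain set arithmetic over flattened key tuples; a toy illustration (hypothetical keys, not tied to any real checkpoint):

```python
# keys the model expects vs. keys found in the checkpoint (both as flat tuples)
required_params = {("conv_in", "kernel"), ("conv_in", "bias"), ("conv_out", "kernel")}
checkpoint_keys = {("conv_in", "kernel"), ("conv_in", "bias"), ("old_head", "kernel")}

missing_keys = required_params - checkpoint_keys     # {('conv_out', 'kernel')} -> must be initialized
unexpected_keys = checkpoint_keys - required_params  # {('old_head', 'kernel')} -> dropped from the state
```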
        for key in state.keys():
            if key in shape_state and state[key].shape != shape_state[key].shape:
                raise ValueError(
                    f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
                    f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. "
                )

        # remove unexpected keys to not be saved again
        for unexpected_key in unexpected_keys:
            del state[unexpected_key]
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
                f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
                f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
                " with another architecture."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
                " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        else:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
                f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
                " training."
            )

        return model, unflatten_dict(state)
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        params: Union[Dict, FrozenDict],
        is_main_process: bool = True,
        push_to_hub: bool = False,
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory so that it can be reloaded using the
        [`~FlaxModelMixin.from_pretrained`] class method.
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save a model and its configuration file to. Will be created if it doesn't exist.
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training when
                you need to call this function on all processes. In this case, set `is_main_process=True` only on the
                main process to avoid race conditions.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`Dict[str, Any]`, *optional*):
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            private = kwargs.pop("private", None)
            create_pr = kwargs.pop("create_pr", False)
            token = kwargs.pop("token", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id

        model_to_save = self

        # Attach architecture to the config
        # Save the config
        if is_main_process:
            model_to_save.save_config(save_directory)

        # save model
        output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)
        with open(output_model_file, "wb") as f:
            model_bytes = to_bytes(params)
            f.write(model_bytes)

        logger.info(f"Model weights saved in {output_model_file}")
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
        if push_to_hub:
            self._upload_folder(
                save_directory,
                repo_id,
                token=token,
                commit_message=commit_message,
                create_pr=create_pr,
            )
830
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_flax_utils.py
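Putting `save_pretrained` and `from_pretrained` together, a round trip might look like the sketch below (mirroring the model id used in the docstring examples above; the local path is a placeholder):

```python
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")

# writes the config plus the msgpack weights file into the directory
model.save_pretrained("./saved_unet", params=params)

# reload from the local directory
model, params = FlaxUNet2DConditionModel.from_pretrained("./saved_unet")
```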
class SD3ControlNetOutput(SD3ControlNetOutput):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SD3ControlNetOutput` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetOutput`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3ControlNetOutput", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)
831
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sd3.py
class SD3ControlNetModel(SD3ControlNetModel):
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: int = 2,
        in_channels: int = 16,
        num_layers: int = 18,
        attention_head_dim: int = 64,
        num_attention_heads: int = 18,
        joint_attention_dim: int = 4096,
        caption_projection_dim: int = 1152,
        pooled_projection_dim: int = 2048,
        out_channels: int = 16,
        pos_embed_max_size: int = 96,
        extra_conditioning_channels: int = 0,
    ):
        deprecation_message = "Importing `SD3ControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetModel`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3ControlNetModel", "0.34", deprecation_message)
        super().__init__(
            sample_size=sample_size,
            patch_size=patch_size,
            in_channels=in_channels,
832
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sd3.py
            num_layers=num_layers,
            attention_head_dim=attention_head_dim,
            num_attention_heads=num_attention_heads,
            joint_attention_dim=joint_attention_dim,
            caption_projection_dim=caption_projection_dim,
            pooled_projection_dim=pooled_projection_dim,
            out_channels=out_channels,
            pos_embed_max_size=pos_embed_max_size,
            extra_conditioning_channels=extra_conditioning_channels,
        )
832
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sd3.py
class SD3MultiControlNetModel(SD3MultiControlNetModel):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SD3MultiControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3MultiControlNetModel`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3MultiControlNetModel", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)
833
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sd3.py
class FP32SiLU(nn.Module):
    r"""
    SiLU activation function with input upcasted to torch.float32.
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        return F.silu(inputs.float(), inplace=False).to(inputs.dtype)
834
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
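`FP32SiLU` exists because SiLU in fp16/bf16 can lose precision inside the sigmoid; computing in float32 and casting back keeps the module's output dtype unchanged. A quick standalone check of that dtype round trip:

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, dtype=torch.float16)
y = F.silu(x.float(), inplace=False).to(x.dtype)  # compute in fp32, return in fp16
assert y.dtype == torch.float16
```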
class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support with `approximate="tanh"`.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)
        self.approximate = approximate
835
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type == "mps" and is_torch_version("<", "2.0.0"):
            # fp16 gelu not supported on mps before torch 2.0
            return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
        return F.gelu(gate, approximate=self.approximate)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
835
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
class GEGLU(nn.Module):
    r"""
    A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias)

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type == "mps" and is_torch_version("<", "2.0.0"):
            # fp16 gelu not supported on mps before torch 2.0
            return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
        return F.gelu(gate)
836
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
    def forward(self, hidden_states, *args, **kwargs):
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)
        hidden_states = self.proj(hidden_states)
        if is_torch_npu_available():
            # using torch_npu.npu_geglu can run faster and save memory on NPU.
            return torch_npu.npu_geglu(hidden_states, dim=-1, approximate=1)[0]
        else:
            hidden_states, gate = hidden_states.chunk(2, dim=-1)
            return hidden_states * self.gelu(gate)
836
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
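The gating in `GEGLU` is a single projection to `2 * dim_out` followed by a split: one half carries the values, the other is passed through GELU and used as a multiplicative gate (the `SwiGLU` variant below swaps GELU for SiLU). A standalone sketch of that data flow, with arbitrary shapes:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

dim_in, dim_out = 64, 128
proj = nn.Linear(dim_in, dim_out * 2)

x = torch.randn(2, 10, dim_in)    # (batch, seq, dim_in)
h = proj(x)                       # (batch, seq, 2 * dim_out)
value, gate = h.chunk(2, dim=-1)  # two (batch, seq, dim_out) halves
out = value * F.gelu(gate)        # gated output
assert out.shape == (2, 10, dim_out)
```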
class SwiGLU(nn.Module):
    r"""
    A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function. It's similar to
    `GEGLU` but uses SiLU / Swish instead of GeLU.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()

        self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias)
        self.activation = nn.SiLU()

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states, gate = hidden_states.chunk(2, dim=-1)
        return hidden_states * self.activation(gate)
837
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
    [paper](https://arxiv.org/abs/1606.08415).

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
838
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
class LinearActivation(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, bias: bool = True, activation: str = "silu"):
        super().__init__()

        self.proj = nn.Linear(dim_in, dim_out, bias=bias)
        self.activation = get_activation(activation)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        return self.activation(hidden_states)
839
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/activations.py
class MultiAdapter(ModelMixin):
    r"""
    MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
    user-assigned weighting.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for common methods such as downloading
    or saving.

    Args:
        adapters (`List[T2IAdapter]`):
            A list of `T2IAdapter` model instances.
    """

    def __init__(self, adapters: List["T2IAdapter"]):
        super(MultiAdapter, self).__init__()

        self.num_adapter = len(adapters)
        self.adapters = nn.ModuleList(adapters)

        if len(adapters) == 0:
            raise ValueError("Expecting at least one adapter")

        if len(adapters) == 1:
            raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        # The outputs from each adapter are added together with a weight.
        # This means that the change in dimensions from downsampling must
        # be the same for all adapters. Inductively, it also means the
        # downscale_factor and total_downscale_factor must be the same for all
        # adapters.
        first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
        first_adapter_downscale_factor = adapters[0].downscale_factor
        for idx in range(1, len(adapters)):
            if (
                adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
                or adapters[idx].downscale_factor != first_adapter_downscale_factor
            ):
                raise ValueError(
                    f"Expecting all adapters to have the same downscaling behavior, but got:\n"
                    f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n" f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n" f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}" )
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        self.total_downscale_factor = first_adapter_total_downscale_factor
        self.downscale_factor = first_adapter_downscale_factor

    def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
        r"""
        Args:
            xs (`torch.Tensor`):
                A tensor of shape (batch, channel, height, width) representing input images for multiple adapter
                models, concatenated along dimension 1 (the channel dimension). The `channel` dimension should be
                equal to `num_adapter` * number of channels per image.
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
            adapter_weights (`List[float]`, *optional*, defaults to None):
                A list of floats representing the weights which will be multiplied by each adapter's output before
                summing them together. If `None`, equal weights will be used for all adapters.
        """
        if adapter_weights is None:
            adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
        else:
            adapter_weights = torch.tensor(adapter_weights)

        accume_state = None
        for x, w, adapter in zip(xs, adapter_weights, self.adapters):
            features = adapter(x)
            if accume_state is None:
                accume_state = features
                for i in range(len(accume_state)):
                    accume_state[i] = w * accume_state[i]
            else:
                for i in range(len(features)):
                    accume_state[i] += w * features[i]
        return accume_state
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
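The accumulation loop above is a per-level weighted sum over the feature lists each adapter returns. A toy version with plain tensors (two stand-in "adapters", one feature level each):

```python
import torch

features_a = [torch.ones(1, 4, 8, 8)]         # pretend output of adapter 0
features_b = [torch.full((1, 4, 8, 8), 3.0)]  # pretend output of adapter 1
weights = [0.5, 0.5]

accum = None
for feats, w in zip([features_a, features_b], weights):
    if accum is None:
        accum = [w * f for f in feats]
    else:
        accum = [a + w * f for a, f in zip(accum, feats)]

print(accum[0].mean())  # tensor(2.) == 0.5 * 1 + 0.5 * 3
```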
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = True,
        variant: Optional[str] = None,
    ):
        """
        Save a model and its configuration file to a specified directory, allowing it to be re-loaded with the
        [`~models.adapter.MultiAdapter.from_pretrained`] class method.
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        Args:
            save_directory (`str` or `os.PathLike`):
                The directory where the model will be saved. If the directory does not exist, it will be created.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Indicates whether the current process is the main one. Useful for distributed training (e.g., TPUs)
                where you need to call this function on all processes. In this case, set `is_main_process=True` only
                for the main process to avoid race conditions.
            save_function (`Callable`):
                Function used to save the state dictionary. Useful for distributed training (e.g., TPUs) to replace
                `torch.save` with another method. Can also be configured using the `DIFFUSERS_SAVE_MODE` environment
                variable.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                If `True`, save the model using `safetensors`. If `False`, save the model with `pickle`.
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
            variant (`str`, *optional*):
                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
        """
        idx = 0
        model_path_to_save = save_directory
        for adapter in self.adapters:
            adapter.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a pretrained `MultiAdapter` model from multiple pre-trained adapter models.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To
        train the model, set it back to training mode using `model.train()`.

        Warnings:
            *Weights from XXX not initialized from pretrained model* means that the weights of XXX are not pretrained
            with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.

            *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, so those weights are
            discarded.
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        Args:
            pretrained_model_path (`os.PathLike`):
                A path to a *directory* containing model weights saved using
                [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed, the
                dtype will be automatically derived from the model's weights.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
                messages.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
same device.
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`.
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary mapping device identifiers to their maximum memory. Defaults to the maximum memory
                available for each GPU and the available CPU RAM if unset.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
                setting this argument to `True` will raise an error.
            variant (`str`, *optional*):
                If specified, load weights from a `variant` file (*e.g.* pytorch_model.<variant>.bin). `variant` will
                be ignored when using `from_flax`.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If `None`, the `safetensors` weights will be downloaded if available **and** if the `safetensors`
                library is installed. If `True`, the model will be forcibly loaded from `safetensors` weights. If
                `False`, `safetensors` is not used.
        """
        idx = 0
        adapters = []
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        # load adapter and append to list until no adapter directory exists anymore
        # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
            adapters.append(adapter)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")

        if len(adapters) == 0:
            raise ValueError(
                f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least "
                f"{pretrained_model_path + '_0'}."
            )

        return cls(adapters)
840
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
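The save/load pair relies entirely on the `_1`, `_2`, ... suffix convention, so a round trip looks like the sketch below (the adapter paths are hypothetical placeholders):

```python
from diffusers.models.adapter import MultiAdapter, T2IAdapter

# hypothetical: two pretrained adapters with identical downscaling behavior
adapters = [T2IAdapter.from_pretrained(p) for p in ["./sketch_adapter", "./depth_adapter"]]
multi = MultiAdapter(adapters)

multi.save_pretrained("./my_adapters")  # writes ./my_adapters and ./my_adapters_1
multi = MultiAdapter.from_pretrained("./my_adapters")  # walks the same suffix sequence
```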
class T2IAdapter(ModelMixin, ConfigMixin):
    r"""
    A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
    generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
    architecture follows the original implementation of
    [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
    and
    [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the common methods, such as
    downloading or saving.
841
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    Args:
        in_channels (`int`, *optional*, defaults to `3`):
            The number of channels in the adapter's input (*control image*). Set it to 1 if you're using a grayscale
            image.
        channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The number of channels in each downsample block's output hidden state. `len(channels)` determines the
            number of downsample blocks in the adapter.
        num_res_blocks (`int`, *optional*, defaults to `2`):
            Number of ResNet blocks in each downsample block.
        downscale_factor (`int`, *optional*, defaults to `8`):
            A factor that determines the total downscale factor of the Adapter.
        adapter_type (`str`, *optional*, defaults to `full_adapter`):
            Adapter type (`full_adapter` or `full_adapter_xl` or `light_adapter`) to use.
    """
841
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
        adapter_type: str = "full_adapter",
    ):
        super().__init__()

        if adapter_type == "full_adapter":
            self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "full_adapter_xl":
            self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "light_adapter":
            self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        else:
            raise ValueError(
                f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
                "'full_adapter_xl' or 'light_adapter'."
            )
841
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
        each representing information extracted at a different scale from the input. The length of the list is
        determined by the number of downsample blocks in the Adapter, as specified by the `channels` and
        `num_res_blocks` parameters during initialization.
        """
        return self.adapter(x)

    @property
    def total_downscale_factor(self):
        return self.adapter.total_downscale_factor

    @property
    def downscale_factor(self):
        """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's
        dimensions are not evenly divisible by the downscale_factor then an exception will be raised.
        """
        return self.adapter.unshuffle.downscale_factor
841
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class FullAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)

        self.body = nn.ModuleList(
            [
                AdapterBlock(channels[0], channels[0], num_res_blocks),
                *[
                    AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
                    for i in range(1, len(channels))
                ],
            ]
        )

        self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)
842
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method processes the input tensor `x` through the FullAdapter model and performs operations including
        pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each
        capturing information at a different stage of processing within the FullAdapter model. The number of feature
        tensors in the list is determined by the number of downsample blocks specified during initialization.
        """
        x = self.unshuffle(x)
        x = self.conv_in(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features
842
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
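To make the channel and resolution bookkeeping concrete: `nn.PixelUnshuffle(8)` turns a 3-channel image into `3 * 8**2 = 192` channels at 1/8 resolution, and every `down=True` block halves the resolution again, which is where `downscale_factor * 2 ** (len(channels) - 1)` comes from. A quick shape check under the default configuration (assuming `FullAdapter` is importable from `diffusers.models.adapter`):

```python
import torch

from diffusers.models.adapter import FullAdapter

adapter = FullAdapter()  # defaults: channels=[320, 640, 1280, 1280], downscale_factor=8
features = adapter(torch.randn(1, 3, 512, 512))

print([tuple(f.shape) for f in features])
# [(1, 320, 64, 64), (1, 640, 32, 32), (1, 1280, 16, 16), (1, 1280, 8, 8)]
assert adapter.total_downscale_factor == 8 * 2 ** 3  # 512 / 64 = 8
```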
class FullAdapterXL(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 16,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
843
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        self.body = []
        # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
        for i in range(len(channels)):
            if i == 1:
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
            elif i == 2:
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
            else:
                self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))

        self.body = nn.ModuleList(self.body)
        # XL has only one downsampling AdapterBlock.
        self.total_downscale_factor = downscale_factor * 2
843
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method processes the input tensor `x` through the FullAdapterXL model: it unshuffles pixels, applies the
        input convolution, then runs `x` through each block in turn, appending every block's output to a list of
        feature tensors.
        """
        x = self.unshuffle(x)
        x = self.conv_in(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features
843
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class AdapterBlock(nn.Module):
    r"""
    An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
    `FullAdapterXL` models.

    Args:
        in_channels (`int`): Number of channels of AdapterBlock's input.
        out_channels (`int`): Number of channels of AdapterBlock's output.
        num_res_blocks (`int`): Number of ResNet blocks in the AdapterBlock.
        down (`bool`, *optional*, defaults to `False`): If `True`, perform downsampling on AdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()

        self.downsample = None
        if down:
            self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)

        self.in_conv = None
        if in_channels != out_channels:
            self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
844
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        self.resnets = nn.Sequential(
            *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method applies the optional downsampling and input convolution (when `self.downsample` and
        `self.in_conv` are set), then runs the input through a series of residual blocks.
        """
        if self.downsample is not None:
            x = self.downsample(x)

        if self.in_conv is not None:
            x = self.in_conv(x)

        x = self.resnets(x)

        return x
844
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class AdapterResnetBlock(nn.Module):
    r"""
    An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.

    Args:
        channels (`int`): Number of channels of AdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method applies a convolutional layer, a ReLU activation, and another convolutional layer to the input
        tensor, then returns the result added to the input tensor (a residual connection).
        """
        h = self.act(self.block1(x))
        h = self.block2(h)

        return h + x
845
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class LightAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280],
        num_res_blocks: int = 4,
        downscale_factor: int = 8,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)

        self.body = nn.ModuleList(
            [
                LightAdapterBlock(in_channels, channels[0], num_res_blocks),
                *[
                    LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
                    for i in range(len(channels) - 1)
                ],
                LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
            ]
        )

        self.total_downscale_factor = downscale_factor * (2 ** len(channels))
846
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method unshuffles the input tensor `x`, then runs it through each block in turn, appending every block's
        output to a list of feature tensors. Each feature tensor corresponds to a different level of processing within
        the LightAdapter.
        """
        x = self.unshuffle(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features
846
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class LightAdapterBlock(nn.Module):
    r"""
    A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
    `LightAdapter` model.

    Args:
        in_channels (`int`): Number of channels of LightAdapterBlock's input.
        out_channels (`int`): Number of channels of LightAdapterBlock's output.
        num_res_blocks (`int`): Number of LightAdapterResnetBlocks in the LightAdapterBlock.
        down (`bool`, *optional*, defaults to `False`): If `True`, perform downsampling on LightAdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()
        mid_channels = out_channels // 4

        self.downsample = None
        if down:
            self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)
847
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
        self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
        self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
        self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method performs the optional downsampling, then applies the input convolution, a sequence of residual
        blocks, and the output convolution.
        """
        if self.downsample is not None:
            x = self.downsample(x)

        x = self.in_conv(x)
        x = self.resnets(x)
        x = self.out_conv(x)

        return x
847
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
class LightAdapterResnetBlock(nn.Module):
    """
    A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
    architecture than `AdapterResnetBlock`.

    Args:
        channels (`int`): Number of channels of LightAdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method processes the input tensor through one convolutional layer, a ReLU activation, and another
        convolutional layer, then adds the result to the input tensor (a residual connection).
        """
        h = self.act(self.block1(x))
        h = self.block2(h)

        return h + x
848
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/adapter.py
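A quick shape check (an illustrative sketch, not from the source): since each 3x3 convolution uses padding=1 and the channel count is unchanged, the residual addition `h + x` is always well-formed.

import torch
from diffusers.models.adapter import LightAdapterResnetBlock

# channels=80 matches mid_channels for a LightAdapterBlock with out_channels=320
block = LightAdapterResnetBlock(channels=80)
x = torch.randn(1, 80, 32, 32)
assert block(x).shape == x.shape  # kernel_size=3 with padding=1 keeps H and W fixed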
class SparseControlNetOutput(SparseControlNetOutput):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SparseControlNetOutput` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetOutput`, instead."
        deprecate("diffusers.models.controlnet_sparsectrl.SparseControlNetOutput", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)
849
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
class SparseControlNetConditioningEmbedding(SparseControlNetConditioningEmbedding):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SparseControlNetConditioningEmbedding` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetConditioningEmbedding`, instead."
        deprecate(
            "diffusers.models.controlnet_sparsectrl.SparseControlNetConditioningEmbedding", "0.34", deprecation_message
        )
        super().__init__(*args, **kwargs)
850
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
class SparseControlNetModel(SparseControlNetModel):
    def __init__(
        self,
        in_channels: int = 4,
        conditioning_channels: int = 4,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str, ...] = (
            "CrossAttnDownBlockMotion",
            "CrossAttnDownBlockMotion",
            "CrossAttnDownBlockMotion",
            "DownBlockMotion",
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 768,
        transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
        transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None,
851
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
        temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
        attention_head_dim: Union[int, Tuple[int, ...]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
        use_linear_projection: bool = False,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
        global_pool_conditions: bool = False,
        controlnet_conditioning_channel_order: str = "rgb",
        motion_max_seq_length: int = 32,
        motion_num_attention_heads: int = 8,
        concat_conditioning_mask: bool = True,
        use_simplified_condition_embedding: bool = True,
    ):
851
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
        deprecation_message = "Importing `SparseControlNetModel` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetModel`, instead."
        deprecate("diffusers.models.controlnet_sparsectrl.SparseControlNetModel", "0.34", deprecation_message)
        super().__init__(
            in_channels=in_channels,
            conditioning_channels=conditioning_channels,
            flip_sin_to_cos=flip_sin_to_cos,
            freq_shift=freq_shift,
            down_block_types=down_block_types,
            only_cross_attention=only_cross_attention,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            downsample_padding=downsample_padding,
            mid_block_scale_factor=mid_block_scale_factor,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_eps=norm_eps,
851
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
            cross_attention_dim=cross_attention_dim,
            transformer_layers_per_block=transformer_layers_per_block,
            transformer_layers_per_mid_block=transformer_layers_per_mid_block,
            temporal_transformer_layers_per_block=temporal_transformer_layers_per_block,
            attention_head_dim=attention_head_dim,
            num_attention_heads=num_attention_heads,
            use_linear_projection=use_linear_projection,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            conditioning_embedding_out_channels=conditioning_embedding_out_channels,
            global_pool_conditions=global_pool_conditions,
            controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
            motion_max_seq_length=motion_max_seq_length,
            motion_num_attention_heads=motion_num_attention_heads,
            concat_conditioning_mask=concat_conditioning_mask,
851
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
            use_simplified_condition_embedding=use_simplified_condition_embedding,
        )
851
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/controlnet_sparsectrl.py
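Per the deprecation messages above, the supported import path going forward is the `controlnets` subpackage; the shims here only forward to it.

# Recommended imports per the deprecation messages (removal slated for diffusers 0.34):
from diffusers.models.controlnets.controlnet_sparsectrl import (
    SparseControlNetConditioningEmbedding,
    SparseControlNetModel,
    SparseControlNetOutput,
)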
class FlaxAttention(nn.Module):
    r"""
    A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
    Parameters:
        query_dim (:obj:`int`):
            Input hidden states dimension
        heads (:obj:`int`, *optional*, defaults to 8):
            Number of heads
        dim_head (:obj:`int`, *optional*, defaults to 64):
            Hidden states dimension inside each head
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
            Enable memory-efficient attention as described in https://arxiv.org/abs/2112.05682
        split_head_dim (`bool`, *optional*, defaults to `False`):
            Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
            enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
    query_dim: int
    heads: int = 8
    dim_head: int = 64
    dropout: float = 0.0
    use_memory_efficient_attention: bool = False
    split_head_dim: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        inner_dim = self.dim_head * self.heads
        self.scale = self.dim_head**-0.5

        # Weights were exported with old names {to_q, to_k, to_v, to_out}
        self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
        self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
        self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")

        self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")

        self.dropout_layer = nn.Dropout(rate=self.dropout)
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
    def reshape_heads_to_batch_dim(self, tensor):
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
        tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
        return tensor

    def reshape_batch_dim_to_heads(self, tensor):
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
        tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
        return tensor

    def __call__(self, hidden_states, context=None, deterministic=True):
        context = hidden_states if context is None else context

        query_proj = self.query(hidden_states)
        key_proj = self.key(context)
        value_proj = self.value(context)
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
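As a shape-only sketch (hypothetical values, not from the source): with the default heads=8 and dim_head=64, `reshape_heads_to_batch_dim` folds the head axis into the batch axis, and `reshape_batch_dim_to_heads` inverts the transformation.

import jax.numpy as jnp

batch, seq_len, heads, dim_head = 2, 4096, 8, 64
t = jnp.zeros((batch, seq_len, heads * dim_head))  # (2, 4096, 512)

# reshape_heads_to_batch_dim, step by step:
t = t.reshape(batch, seq_len, heads, dim_head)
t = jnp.transpose(t, (0, 2, 1, 3)).reshape(batch * heads, seq_len, dim_head)
print(t.shape)  # (16, 4096, 64)

# reshape_batch_dim_to_heads applies the inverse, recovering (2, 4096, 512).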
        if self.split_head_dim:
            b = hidden_states.shape[0]
            query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head))
            key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head))
            value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head))
        else:
            query_states = self.reshape_heads_to_batch_dim(query_proj)
            key_states = self.reshape_heads_to_batch_dim(key_proj)
            value_states = self.reshape_heads_to_batch_dim(value_proj)

        if self.use_memory_efficient_attention:
            query_states = query_states.transpose(1, 0, 2)
            key_states = key_states.transpose(1, 0, 2)
            value_states = value_states.transpose(1, 0, 2)

            # This if/elif chain creates a chunk size for each layer of the UNet; the chunk size is chosen
            # relative to the query_length dimension of the deepest layer of the UNet.
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
            flatten_latent_dim = query_states.shape[-3]
            if flatten_latent_dim % 64 == 0:
                query_chunk_size = int(flatten_latent_dim / 64)
            elif flatten_latent_dim % 16 == 0:
                query_chunk_size = int(flatten_latent_dim / 16)
            elif flatten_latent_dim % 4 == 0:
                query_chunk_size = int(flatten_latent_dim / 4)
            else:
                query_chunk_size = int(flatten_latent_dim)
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
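For intuition, a hedged standalone replica of the chunk-size heuristic above (illustrative only): it splits the query length by the largest divisor among 64, 16, and 4, and falls back to a single chunk otherwise.

def pick_query_chunk_size(flatten_latent_dim: int) -> int:
    # Mirrors the if/elif chain above: prefer finer chunking when the length divides evenly.
    for divisor in (64, 16, 4):
        if flatten_latent_dim % divisor == 0:
            return flatten_latent_dim // divisor
    return flatten_latent_dim

print(pick_query_chunk_size(4096))  # 64   (4096 is divisible by 64)
print(pick_query_chunk_size(1008))  # 63   (divisible by 16, not by 64)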
            hidden_states = jax_memory_efficient_attention(
                query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
            )

            hidden_states = hidden_states.transpose(1, 0, 2)
            hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        else:
            # compute attention scores
            if self.split_head_dim:
                attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states)
            else:
                attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)

            attention_scores = attention_scores * self.scale
            attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2)
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
            # attend to values
            if self.split_head_dim:
                hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states)
                b = hidden_states.shape[0]
                hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head))
            else:
                hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
                hidden_states = self.reshape_batch_dim_to_heads(hidden_states)

        hidden_states = self.proj_attn(hidden_states)
        return self.dropout_layer(hidden_states, deterministic=deterministic)
852
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
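A minimal Flax usage sketch (illustrative; it assumes `flax` and `jax` are installed and that `FlaxAttention` is importable from `diffusers.models.attention_flax` as the path above suggests). With `deterministic=True` (the default), no dropout RNG is needed.

import jax
import jax.numpy as jnp
from diffusers.models.attention_flax import FlaxAttention

# inner_dim = heads * dim_head = 320, projected back to query_dim at the output
attn = FlaxAttention(query_dim=320, heads=8, dim_head=40)
hidden_states = jnp.zeros((1, 4096, 320))  # (batch, seq_len, query_dim)

params = attn.init(jax.random.PRNGKey(0), hidden_states)
out = attn.apply(params, hidden_states)  # self-attention: context defaults to hidden_states
print(out.shape)  # (1, 4096, 320)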
class FlaxBasicTransformerBlock(nn.Module):
    r"""
    A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
    https://arxiv.org/abs/1706.03762
853
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
    Parameters:
        dim (:obj:`int`):
            Inner hidden states dimension
        n_heads (:obj:`int`):
            Number of heads
        d_head (:obj:`int`):
            Hidden states dimension inside each head
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        only_cross_attention (`bool`, defaults to `False`):
            Whether to only apply cross attention.
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
            Enable memory-efficient attention as described in https://arxiv.org/abs/2112.05682
        split_head_dim (`bool`, *optional*, defaults to `False`):
            Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
            enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
    """
853
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py
    dim: int
    n_heads: int
    d_head: int
    dropout: float = 0.0
    only_cross_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    use_memory_efficient_attention: bool = False
    split_head_dim: bool = False
853
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/attention_flax.py