Dataset columns:
  text: string (lengths 1 to 1.02k)
  class_index: int64 (values 0 to 1.38k)
  source: string (431 distinct values)
class TextToVideoZeroPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
703
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class TextToVideoZeroSDXLPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
704
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class UnCLIPImageVariationPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
705
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class UnCLIPPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
706
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class UniDiffuserModel(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
707
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class UniDiffuserPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
708
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class UniDiffuserTextDecoder(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
709
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
710
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
711
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VersatileDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
712
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
713
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VideoToVideoSDPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
714
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class VQDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
715
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class WuerstchenCombinedPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
716
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class WuerstchenDecoderPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
717
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class WuerstchenPriorPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])
718
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py
class PushToHubMixin:
    """
    A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub.
    """

    def _upload_folder(
        self,
        working_dir: Union[str, os.PathLike],
        repo_id: str,
        token: Optional[str] = None,
        commit_message: Optional[str] = None,
        create_pr: bool = False,
    ):
        """
        Uploads all files in `working_dir` to `repo_id`.
        """
        if commit_message is None:
            if "Model" in self.__class__.__name__:
                commit_message = "Upload model"
            elif "Scheduler" in self.__class__.__name__:
                commit_message = "Upload scheduler"
            else:
                commit_message = f"Upload {self.__class__.__name__}"

        logger.info(f"Uploading the files of {working_dir} to {repo_id}.")

        return upload_folder(
            repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr
        )
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
def push_to_hub(
    self,
    repo_id: str,
    commit_message: Optional[str] = None,
    private: Optional[bool] = None,
    token: Optional[str] = None,
    create_pr: bool = False,
    safe_serialization: bool = True,
    variant: Optional[str] = None,
) -> str:
    """
    Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub.
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
Parameters:
    repo_id (`str`):
        The name of the repository you want to push your model, scheduler, or pipeline files to. It should
        contain your organization name when pushing to an organization. `repo_id` can also be a path to a local
        directory.
    commit_message (`str`, *optional*):
        Message to commit while pushing. Defaults to `"Upload {object}"`.
    private (`bool`, *optional*):
        Whether to make the repo private. If `None` (default), the repo will be public unless the organization's
        default is private. This value is ignored if the repo already exists.
    token (`str`, *optional*):
        The token to use as HTTP bearer authorization for remote files. The token generated when running
        `huggingface-cli login` (stored in `~/.huggingface`).
    create_pr (`bool`, *optional*, defaults to `False`):
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
        Whether or not to create a PR with the uploaded files or directly commit.
    safe_serialization (`bool`, *optional*, defaults to `True`):
        Whether or not to convert the model weights to the `safetensors` format.
    variant (`str`, *optional*):
        If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
Examples:

```python
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet")

# Push the `unet` to your namespace with the name "my-finetuned-unet".
unet.push_to_hub("my-finetuned-unet")

# Push the `unet` to an organization with the name "my-finetuned-unet".
unet.push_to_hub("your-org/my-finetuned-unet")
```
"""
repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id

# Create a new empty model card and eventually tag it
model_card = load_or_create_model_card(repo_id, token=token)
model_card = populate_model_card(model_card)

# Save all files.
save_kwargs = {"safe_serialization": safe_serialization}
if "Scheduler" not in self.__class__.__name__:
    save_kwargs.update({"variant": variant})
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
with tempfile.TemporaryDirectory() as tmpdir:
    self.save_pretrained(tmpdir, **save_kwargs)

    # Update model card if needed:
    model_card.save(os.path.join(tmpdir, "README.md"))

    return self._upload_folder(
        tmpdir,
        repo_id,
        token=token,
        commit_message=commit_message,
        create_pr=create_pr,
    )
719
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/hub_utils.py
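The three `push_to_hub` rows above are easiest to read next to a usage sketch. The snippet below mirrors the docstring example from the same source file; the repository name is a placeholder, and the call assumes you are already authenticated (for example via `huggingface-cli login`).

```python
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet")

# Saves the weights to a temporary directory (safetensors by default), adds an
# auto-generated model card, and uploads everything to the Hub repository.
unet.push_to_hub("my-finetuned-unet", commit_message="Initial upload")
```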
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
720
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_note_seq_objects.py
class KolorsImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])
721
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py
class KolorsPAGPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])
722
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py
class KolorsPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])
723
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
724
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py
class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
725
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py
class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
726
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py
class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
727
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py
class FlaxStableDiffusionXLPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
728
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
729
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py
class DummyObject(type):
    """
    Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
    `requires_backends` each time a user tries to access any method of that class.
    """

    def __getattr__(cls, key):
        if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]:
            return super().__getattr__(cls, key)
        requires_backends(cls, cls._backends)
730
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
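A minimal sketch of the `DummyObject` pattern in the row above, as a simplified re-implementation rather than the exact diffusers code (`requires_backends` here is a stand-in): accessing any attribute of a dummy class raises an informative `ImportError` instead of an `AttributeError`.

```python
def requires_backends(obj, backends):
    # Stand-in for diffusers.utils.import_utils.requires_backends.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")

class DummyObject(type):
    def __getattr__(cls, key):
        # Called only when normal attribute lookup fails on the class.
        requires_backends(cls, cls._backends)

class FakePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

try:
    FakePipeline.from_pretrained("some/checkpoint")
except ImportError as err:
    print(err)  # FakePipeline requires the following backends: torch, transformers
```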
class OptionalDependencyNotAvailable(BaseException):
    """
    An error indicating that an optional dependency of Diffusers was not found in the environment.
    """
731
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
class _LazyModule(ModuleType):
    """
    Module class that surfaces all objects but only performs associated imports when the objects are requested.
    """
732
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
# Very heavily inspired by optuna.integration._IntegrationModule
# https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
    super().__init__(name)
    self._modules = set(import_structure.keys())
    self._class_to_module = {}
    for key, values in import_structure.items():
        for value in values:
            self._class_to_module[value] = key
    # Needed for autocompletion in an IDE
    self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
    self.__file__ = module_file
    self.__spec__ = module_spec
    self.__path__ = [os.path.dirname(module_file)]
    self._objects = {} if extra_objects is None else extra_objects
    self._name = name
    self._import_structure = import_structure
732
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
# Needed for autocompletion in an IDE
def __dir__(self):
    result = super().__dir__()
    # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on
    # whether they have been accessed or not. So we only add the elements of self.__all__ that are not already
    # in the dir.
    for attr in self.__all__:
        if attr not in result:
            result.append(attr)
    return result

def __getattr__(self, name: str) -> Any:
    if name in self._objects:
        return self._objects[name]
    if name in self._modules:
        value = self._get_module(name)
    elif name in self._class_to_module.keys():
        module = self._get_module(self._class_to_module[name])
        value = getattr(module, name)
    else:
        raise AttributeError(f"module {self.__name__} has no attribute {name}")

    setattr(self, name, value)
    return value
732
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
def _get_module(self, module_name: str):
    try:
        return importlib.import_module("." + module_name, self.__name__)
    except Exception as e:
        raise RuntimeError(
            f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
            f" traceback):\n{e}"
        ) from e

def __reduce__(self):
    return (self.__class__, (self._name, self.__file__, self._import_structure))
732
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/import_utils.py
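The three `_LazyModule` rows together describe the lazy top-level import that `diffusers` itself uses. A small sketch of the observable behavior (assumes `diffusers` and `torch` are installed; the printed module path is illustrative):

```python
import diffusers  # the package __init__ installs a _LazyModule in sys.modules

# The first attribute access resolves the name via _class_to_module, imports
# only the owning submodule, and caches the value with setattr for later use.
pipe_cls = diffusers.DiffusionPipeline
print(pipe_cls.__module__)
```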
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
733
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
734
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
735
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
736
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
737
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
738
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
739
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
740
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
741
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxEulerDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
742
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
743
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
744
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
745
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
746
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
747
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/dummy_flax_objects.py
class BaseOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
    Python dictionary.

    <Tip warning={true}>

    You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple
    first.

    </Tip>
    """

    def __init_subclass__(cls) -> None:
        """Register subclasses as pytree nodes.

        This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
        `static_graph=True` with modules that output `ModelOutput` subclasses.
        """
        if is_torch_available():
            import torch.utils._pytree
748
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/outputs.py
        if is_torch_version("<", "2.2"):
            torch.utils._pytree._register_pytree_node(
                cls,
                torch.utils._pytree._dict_flatten,
                lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
            )
        else:
            torch.utils._pytree.register_pytree_node(
                cls,
                torch.utils._pytree._dict_flatten,
                lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
            )

def __post_init__(self) -> None:
    class_fields = fields(self)

    # Safety and consistency checks
    if not len(class_fields):
        raise ValueError(f"{self.__class__.__name__} has no fields.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
748
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/outputs.py
    if other_fields_are_none and isinstance(first_field, dict):
        for key, value in first_field.items():
            self[key] = value
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v

def __delitem__(self, *args, **kwargs):
    raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

def setdefault(self, *args, **kwargs):
    raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

def pop(self, *args, **kwargs):
    raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

def update(self, *args, **kwargs):
    raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
748
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/outputs.py
def __getitem__(self, k: Any) -> Any:
    if isinstance(k, str):
        inner_dict = dict(self.items())
        return inner_dict[k]
    else:
        return self.to_tuple()[k]

def __setattr__(self, name: Any, value: Any) -> None:
    if name in self.keys() and value is not None:
        # Don't call self.__setitem__ to avoid recursion errors
        super().__setitem__(name, value)
    super().__setattr__(name, value)

def __setitem__(self, key, value):
    # Will raise a KeyError if needed
    super().__setitem__(key, value)
    # Don't call self.__setattr__ to avoid recursion errors
    super().__setattr__(key, value)

def __reduce__(self):
    if not is_dataclass(self):
        return super().__reduce__()
    callable, _args, *remaining = super().__reduce__()
    args = tuple(getattr(self, field.name) for field in fields(self))
    return callable, args, *remaining
748
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/outputs.py
def to_tuple(self) -> Tuple[Any, ...]:
    """
    Convert self to a tuple containing all the attributes/keys that are not `None`.
    """
    return tuple(self[k] for k in self.keys())
748
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/utils/outputs.py
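A minimal sketch of the `BaseOutput` semantics documented above, using a hypothetical subclass (`MyOutput` is not part of diffusers):

```python
from dataclasses import dataclass
from typing import Optional

import torch
from diffusers.utils import BaseOutput

@dataclass
class MyOutput(BaseOutput):
    sample: Optional[torch.Tensor] = None
    extra: Optional[torch.Tensor] = None

out = MyOutput(sample=torch.zeros(2, 3))
assert out["sample"] is out.sample  # dict-style access by key
assert out[0] is out.sample         # tuple-style access by index
assert len(out.to_tuple()) == 1     # `extra` is None, so it is skipped
```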
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    latent_dist: "DiagonalGaussianDistribution"  # noqa: F821
749
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_outputs.py
class Transformer2DModelOutput(BaseOutput):
    """
    The output of [`Transformer2DModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` or
        `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
            The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns
            probability distributions for the unnoised latent pixels.
    """

    sample: "torch.Tensor"  # noqa: F821
750
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/modeling_outputs.py
class Upsample1D(nn.Module):
    """A 1D upsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        use_conv_transpose (`bool`, default `False`):
            option to use a convolution transpose.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        name (`str`, default `conv`):
            name of the upsampling 1D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        use_conv_transpose: bool = False,
        out_channels: Optional[int] = None,
        name: str = "conv",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_conv_transpose = use_conv_transpose
        self.name = name
751
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
    self.conv = None
    if use_conv_transpose:
        self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1)
    elif use_conv:
        self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1)

def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    assert inputs.shape[1] == self.channels
    if self.use_conv_transpose:
        return self.conv(inputs)

    outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest")

    if self.use_conv:
        outputs = self.conv(outputs)

    return outputs
751
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
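A shape sketch for `Upsample1D` (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.upsampling import Upsample1D

up = Upsample1D(channels=8, use_conv=True)
x = torch.randn(1, 8, 16)  # (batch, channels, length)
print(up(x).shape)  # torch.Size([1, 8, 32]): nearest-neighbor 2x, then Conv1d
```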
class Upsample2D(nn.Module):
    """A 2D upsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        use_conv_transpose (`bool`, default `False`):
            option to use a convolution transpose.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        name (`str`, default `conv`):
            name of the upsampling 2D layer.
    """
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
def __init__(
    self,
    channels: int,
    use_conv: bool = False,
    use_conv_transpose: bool = False,
    out_channels: Optional[int] = None,
    name: str = "conv",
    kernel_size: Optional[int] = None,
    padding=1,
    norm_type=None,
    eps=None,
    elementwise_affine=None,
    bias=True,
    interpolate=True,
):
    super().__init__()
    self.channels = channels
    self.out_channels = out_channels or channels
    self.use_conv = use_conv
    self.use_conv_transpose = use_conv_transpose
    self.name = name
    self.interpolate = interpolate

    if norm_type == "ln_norm":
        self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
    elif norm_type == "rms_norm":
        self.norm = RMSNorm(channels, eps, elementwise_affine)
    elif norm_type is None:
        self.norm = None
    else:
        raise ValueError(f"unknown norm_type: {norm_type}")
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
conv = None
if use_conv_transpose:
    if kernel_size is None:
        kernel_size = 4
    conv = nn.ConvTranspose2d(
        channels, self.out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=bias
    )
elif use_conv:
    if kernel_size is None:
        kernel_size = 3
    conv = nn.Conv2d(self.channels, self.out_channels, kernel_size=kernel_size, padding=padding, bias=bias)

# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if name == "conv":
    self.conv = conv
else:
    self.Conv2d_0 = conv
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
def forward(self, hidden_states: torch.Tensor, output_size: Optional[int] = None, *args, **kwargs) -> torch.Tensor:
    if len(args) > 0 or kwargs.get("scale", None) is not None:
        deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
        deprecate("scale", "1.0.0", deprecation_message)

    assert hidden_states.shape[1] == self.channels

    if self.norm is not None:
        hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

    if self.use_conv_transpose:
        return self.conv(hidden_states)
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
# Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16 until PyTorch 2.1
# https://github.com/pytorch/pytorch/issues/86679#issuecomment-1783978767
dtype = hidden_states.dtype
if dtype == torch.bfloat16 and is_torch_version("<", "2.1"):
    hidden_states = hidden_states.to(torch.float32)

# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
    hidden_states = hidden_states.contiguous()
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
# if `output_size` is passed we force the interpolation output size and do not make use of `scale_factor=2`
if self.interpolate:
    # upsample_nearest_nhwc also fails when the number of output elements is large
    # https://github.com/pytorch/pytorch/issues/141831
    scale_factor = (
        2 if output_size is None else max([f / s for f, s in zip(output_size, hidden_states.shape[-2:])])
    )
    if hidden_states.numel() * scale_factor > pow(2, 31):
        hidden_states = hidden_states.contiguous()

    if output_size is None:
        hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
    else:
        hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")

# Cast back to original dtype
if dtype == torch.bfloat16 and is_torch_version("<", "2.1"):
    hidden_states = hidden_states.to(dtype)
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if self.use_conv:
    if self.name == "conv":
        hidden_states = self.conv(hidden_states)
    else:
        hidden_states = self.Conv2d_0(hidden_states)

return hidden_states
752
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
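A shape sketch for `Upsample2D`: by default the spatial dimensions double, and `output_size` overrides the interpolation target (assumes `diffusers` and `torch` are installed).

```python
import torch
from diffusers.models.upsampling import Upsample2D

up = Upsample2D(channels=4, use_conv=True)
x = torch.randn(2, 4, 8, 8)
print(up(x).shape)                        # torch.Size([2, 4, 16, 16])
print(up(x, output_size=(20, 20)).shape)  # torch.Size([2, 4, 20, 20])
```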
class FirUpsample2D(nn.Module):
    """A 2D FIR upsampling layer with an optional convolution.

    Parameters:
        channels (`int`, optional):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        fir_kernel (`tuple`, default `(1, 3, 3, 1)`):
            kernel for the FIR filter.
    """
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
def __init__(
    self,
    channels: Optional[int] = None,
    out_channels: Optional[int] = None,
    use_conv: bool = False,
    fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1),
):
    super().__init__()
    out_channels = out_channels if out_channels else channels
    if use_conv:
        self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
    self.use_conv = use_conv
    self.fir_kernel = fir_kernel
    self.out_channels = out_channels

def _upsample_2d(
    self,
    hidden_states: torch.Tensor,
    weight: Optional[torch.Tensor] = None,
    kernel: Optional[torch.Tensor] = None,
    factor: int = 2,
    gain: float = 1,
) -> torch.Tensor:
    """Fused `upsample_2d()` followed by `Conv2d()`.
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
efficient than performing the same calculation using standard PyTorch ops. It supports gradients of arbitrary
order.

Args:
    hidden_states (`torch.Tensor`):
        Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
    weight (`torch.Tensor`, *optional*):
        Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
        performed by `inChannels = x.shape[0] // numGroups`.
    kernel (`torch.Tensor`, *optional*):
        FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
        corresponds to nearest-neighbor upsampling.
    factor (`int`, *optional*): Integer upsampling factor (default: 2).
    gain (`float`, *optional*): Scaling factor for signal magnitude (default: 1.0).
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
    Returns:
        output (`torch.Tensor`):
            Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same
            datatype as `hidden_states`.
    """

    assert isinstance(factor, int) and factor >= 1

    # Setup filter kernel.
    if kernel is None:
        kernel = [1] * factor

    # setup kernel
    kernel = torch.tensor(kernel, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = torch.outer(kernel, kernel)
    kernel /= torch.sum(kernel)

    kernel = kernel * (gain * (factor**2))

    if self.use_conv:
        convH = weight.shape[2]
        convW = weight.shape[3]
        inC = weight.shape[1]

        pad_value = (kernel.shape[0] - factor) - (convW - 1)
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
stride = (factor, factor)
# Determine data dimensions.
output_shape = (
    (hidden_states.shape[2] - 1) * factor + convH,
    (hidden_states.shape[3] - 1) * factor + convW,
)
output_padding = (
    output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH,
    output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW,
)
assert output_padding[0] >= 0 and output_padding[1] >= 0
num_groups = hidden_states.shape[1] // inC

# Transpose weights.
weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4)
weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
    inverse_conv = F.conv_transpose2d(
        hidden_states,
        weight,
        stride=stride,
        output_padding=output_padding,
        padding=0,
    )

    output = upfirdn2d_native(
        inverse_conv,
        torch.tensor(kernel, device=inverse_conv.device),
        pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1),
    )
else:
    pad_value = kernel.shape[0] - factor
    output = upfirdn2d_native(
        hidden_states,
        torch.tensor(kernel, device=hidden_states.device),
        up=factor,
        pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
    )

return output
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    if self.use_conv:
        height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel)
        height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
    else:
        height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)

    return height
753
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
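A shape sketch for `FirUpsample2D` on the non-fused path (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.upsampling import FirUpsample2D

up = FirUpsample2D(channels=4, use_conv=False)
x = torch.randn(1, 4, 8, 8)
print(up(x).shape)  # torch.Size([1, 4, 16, 16]): upfirdn with the (1, 3, 3, 1) kernel
```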
class KUpsample2D(nn.Module):
    r"""A 2D K-upsampling layer.

    Parameters:
        pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use.
    """

    def __init__(self, pad_mode: str = "reflect"):
        super().__init__()
        self.pad_mode = pad_mode
        kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2
        self.pad = kernel_1d.shape[1] // 2 - 1
        self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False)
754
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode)
    weight = inputs.new_zeros(
        [
            inputs.shape[1],
            inputs.shape[1],
            self.kernel.shape[0],
            self.kernel.shape[1],
        ]
    )
    indices = torch.arange(inputs.shape[1], device=inputs.device)
    kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1)
    weight[indices, indices] = kernel
    return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1)
754
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
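A shape sketch for `KUpsample2D`; the transposed convolution with the fixed smoothing kernel doubles both spatial dimensions (assumes `diffusers` and `torch` are installed).

```python
import torch
from diffusers.models.upsampling import KUpsample2D

up = KUpsample2D()
x = torch.randn(1, 3, 8, 8)
print(up(x).shape)  # torch.Size([1, 3, 16, 16])
```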
class CogVideoXUpsample3D(nn.Module):
    r"""
    A 3D upsample layer used in CogVideoX by Tsinghua University & ZhipuAI.

    # TODO: wait for paper release.

    Args:
        in_channels (`int`):
            Number of channels in the input image.
        out_channels (`int`):
            Number of channels produced by the convolution.
        kernel_size (`int`, defaults to `3`):
            Size of the convolving kernel.
        stride (`int`, defaults to `1`):
            Stride of the convolution.
        padding (`int`, defaults to `1`):
            Padding added to all four sides of the input.
        compress_time (`bool`, defaults to `False`):
            Whether or not to compress the time dimension.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: int = 1,
        compress_time: bool = False,
    ) -> None:
        super().__init__()
755
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
    self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
    self.compress_time = compress_time

def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    if self.compress_time:
        if inputs.shape[2] > 1 and inputs.shape[2] % 2 == 1:
            # split first frame
            x_first, x_rest = inputs[:, :, 0], inputs[:, :, 1:]
755
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
        x_first = F.interpolate(x_first, scale_factor=2.0)
        x_rest = F.interpolate(x_rest, scale_factor=2.0)
        x_first = x_first[:, :, None, :, :]
        inputs = torch.cat([x_first, x_rest], dim=2)
    elif inputs.shape[2] > 1:
        inputs = F.interpolate(inputs, scale_factor=2.0)
    else:
        inputs = inputs.squeeze(2)
        inputs = F.interpolate(inputs, scale_factor=2.0)
        inputs = inputs[:, :, None, :, :]
else:
    # only interpolate 2D
    b, c, t, h, w = inputs.shape
    inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
    inputs = F.interpolate(inputs, scale_factor=2.0)
    inputs = inputs.reshape(b, t, c, *inputs.shape[2:]).permute(0, 2, 1, 3, 4)
755
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
b, c, t, h, w = inputs.shape
inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
inputs = self.conv(inputs)
inputs = inputs.reshape(b, t, *inputs.shape[1:]).permute(0, 2, 1, 3, 4)

return inputs
755
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/upsampling.py
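A shape sketch for `CogVideoXUpsample3D` with `compress_time=False`, where only the spatial dimensions are interpolated frame by frame (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.upsampling import CogVideoXUpsample3D

up = CogVideoXUpsample3D(in_channels=4, out_channels=4, compress_time=False)
x = torch.randn(1, 4, 5, 8, 8)  # (batch, channels, frames, height, width)
print(up(x).shape)  # torch.Size([1, 4, 5, 16, 16]): frame count unchanged
```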
class Downsample1D(nn.Module):
    """A 1D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 1D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        out_channels: Optional[int] = None,
        padding: int = 1,
        name: str = "conv",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.padding = padding
        stride = 2
        self.name = name
756
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
    if use_conv:
        self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
    else:
        assert self.channels == self.out_channels
        self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride)

def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    assert inputs.shape[1] == self.channels
    return self.conv(inputs)
756
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
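A shape sketch for `Downsample1D` (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.downsampling import Downsample1D

down = Downsample1D(channels=8, use_conv=True)
x = torch.randn(1, 8, 16)  # (batch, channels, length)
print(down(x).shape)  # torch.Size([1, 8, 8]): stride-2 Conv1d halves the length
```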
class Downsample2D(nn.Module):
    """A 2D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 2D layer.
    """
757
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def __init__(
    self,
    channels: int,
    use_conv: bool = False,
    out_channels: Optional[int] = None,
    padding: int = 1,
    name: str = "conv",
    kernel_size=3,
    norm_type=None,
    eps=None,
    elementwise_affine=None,
    bias=True,
):
    super().__init__()
    self.channels = channels
    self.out_channels = out_channels or channels
    self.use_conv = use_conv
    self.padding = padding
    stride = 2
    self.name = name

    if norm_type == "ln_norm":
        self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
    elif norm_type == "rms_norm":
        self.norm = RMSNorm(channels, eps, elementwise_affine)
    elif norm_type is None:
        self.norm = None
    else:
        raise ValueError(f"unknown norm_type: {norm_type}")
757
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
if use_conv:
    conv = nn.Conv2d(
        self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias
    )
else:
    assert self.channels == self.out_channels
    conv = nn.AvgPool2d(kernel_size=stride, stride=stride)

# TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
if name == "conv":
    self.Conv2d_0 = conv
    self.conv = conv
elif name == "Conv2d_0":
    self.conv = conv
else:
    self.conv = conv
757
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
    if len(args) > 0 or kwargs.get("scale", None) is not None:
        deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
        deprecate("scale", "1.0.0", deprecation_message)

    assert hidden_states.shape[1] == self.channels

    if self.norm is not None:
        hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

    if self.use_conv and self.padding == 0:
        pad = (0, 1, 0, 1)
        hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)

    assert hidden_states.shape[1] == self.channels

    hidden_states = self.conv(hidden_states)

    return hidden_states
757
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
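A shape sketch for `Downsample2D` (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.downsampling import Downsample2D

down = Downsample2D(channels=4, use_conv=True)
x = torch.randn(2, 4, 16, 16)
print(down(x).shape)  # torch.Size([2, 4, 8, 8]): stride-2 conv with padding 1
```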
class FirDownsample2D(nn.Module):
    """A 2D FIR downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        fir_kernel (`tuple`, default `(1, 3, 3, 1)`):
            kernel for the FIR filter.
    """
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def __init__(
    self,
    channels: Optional[int] = None,
    out_channels: Optional[int] = None,
    use_conv: bool = False,
    fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1),
):
    super().__init__()
    out_channels = out_channels if out_channels else channels
    if use_conv:
        self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
    self.fir_kernel = fir_kernel
    self.use_conv = use_conv
    self.out_channels = out_channels
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def _downsample_2d(
    self,
    hidden_states: torch.Tensor,
    weight: Optional[torch.Tensor] = None,
    kernel: Optional[torch.Tensor] = None,
    factor: int = 2,
    gain: float = 1,
) -> torch.Tensor:
    """Fused `Conv2d()` followed by `downsample_2d()`.

    Padding is performed only once at the beginning, not between the operations. The fused op is considerably
    more efficient than performing the same calculation using standard PyTorch ops. It supports gradients of
    arbitrary order.
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
Args:
    hidden_states (`torch.Tensor`):
        Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
    weight (`torch.Tensor`, *optional*):
        Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
        performed by `inChannels = x.shape[0] // numGroups`.
    kernel (`torch.Tensor`, *optional*):
        FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
        corresponds to average pooling.
    factor (`int`, *optional*, default to `2`):
        Integer downsampling factor.
    gain (`float`, *optional*, default to `1.0`):
        Scaling factor for signal magnitude.

Returns:
    output (`torch.Tensor`):
        Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same
        datatype as `hidden_states`.
"""
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
assert isinstance(factor, int) and factor >= 1
if kernel is None:
    kernel = [1] * factor

# setup kernel
kernel = torch.tensor(kernel, dtype=torch.float32)
if kernel.ndim == 1:
    kernel = torch.outer(kernel, kernel)
kernel /= torch.sum(kernel)

kernel = kernel * gain
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
if self.use_conv:
    _, _, convH, convW = weight.shape
    pad_value = (kernel.shape[0] - factor) + (convW - 1)
    stride_value = [factor, factor]
    upfirdn_input = upfirdn2d_native(
        hidden_states,
        torch.tensor(kernel, device=hidden_states.device),
        pad=((pad_value + 1) // 2, pad_value // 2),
    )
    output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0)
else:
    pad_value = kernel.shape[0] - factor
    output = upfirdn2d_native(
        hidden_states,
        torch.tensor(kernel, device=hidden_states.device),
        down=factor,
        pad=((pad_value + 1) // 2, pad_value // 2),
    )

return output
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    if self.use_conv:
        downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
        hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
    else:
        hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)

    return hidden_states
758
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
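A shape sketch for `FirDownsample2D` on the non-fused path (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.downsampling import FirDownsample2D

down = FirDownsample2D(channels=4, use_conv=False)
x = torch.randn(1, 4, 16, 16)
print(down(x).shape)  # torch.Size([1, 4, 8, 8]): upfirdn with down=2
```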
class KDownsample2D(nn.Module):
    r"""A 2D K-downsampling layer.

    Parameters:
        pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use.
    """

    def __init__(self, pad_mode: str = "reflect"):
        super().__init__()
        self.pad_mode = pad_mode
        kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]])
        self.pad = kernel_1d.shape[1] // 2 - 1
        self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False)
759
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode)
    weight = inputs.new_zeros(
        [
            inputs.shape[1],
            inputs.shape[1],
            self.kernel.shape[0],
            self.kernel.shape[1],
        ]
    )
    indices = torch.arange(inputs.shape[1], device=inputs.device)
    kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1)
    weight[indices, indices] = kernel
    return F.conv2d(inputs, weight, stride=2)
759
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
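A shape sketch for `KDownsample2D` (assumes `diffusers` and `torch` are installed):

```python
import torch
from diffusers.models.downsampling import KDownsample2D

down = KDownsample2D()
x = torch.randn(1, 3, 16, 16)
print(down(x).shape)  # torch.Size([1, 3, 8, 8])
```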
class CogVideoXDownsample3D(nn.Module):
    # TODO: wait for paper release.
    r"""
    A 3D downsampling layer used in [CogVideoX]() by Tsinghua University & ZhipuAI.

    Args:
        in_channels (`int`):
            Number of channels in the input image.
        out_channels (`int`):
            Number of channels produced by the convolution.
        kernel_size (`int`, defaults to `3`):
            Size of the convolving kernel.
        stride (`int`, defaults to `2`):
            Stride of the convolution.
        padding (`int`, defaults to `0`):
            Padding added to all four sides of the input.
        compress_time (`bool`, defaults to `False`):
            Whether or not to compress the time dimension.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 2,
        padding: int = 0,
        compress_time: bool = False,
    ):
        super().__init__()
760
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
    self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
    self.compress_time = compress_time

def forward(self, x: torch.Tensor) -> torch.Tensor:
    if self.compress_time:
        batch_size, channels, frames, height, width = x.shape

        # (batch_size, channels, frames, height, width) -> (batch_size, height, width, channels, frames)
        # -> (batch_size * height * width, channels, frames)
        x = x.permute(0, 3, 4, 1, 2).reshape(batch_size * height * width, channels, frames)

        if x.shape[-1] % 2 == 1:
            x_first, x_rest = x[..., 0], x[..., 1:]
            if x_rest.shape[-1] > 0:
                # (batch_size * height * width, channels, frames - 1)
                # -> (batch_size * height * width, channels, (frames - 1) // 2)
                x_rest = F.avg_pool1d(x_rest, kernel_size=2, stride=2)
760
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
    x = torch.cat([x_first[..., None], x_rest], dim=-1)
    # (batch_size * height * width, channels, (frames // 2) + 1)
    # -> (batch_size, height, width, channels, (frames // 2) + 1)
    # -> (batch_size, channels, (frames // 2) + 1, height, width)
    x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2)
else:
    # (batch_size * height * width, channels, frames) -> (batch_size * height * width, channels, frames // 2)
    x = F.avg_pool1d(x, kernel_size=2, stride=2)
    # (batch_size * height * width, channels, frames // 2)
    # -> (batch_size, height, width, channels, frames // 2)
    # -> (batch_size, channels, frames // 2, height, width)
    x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2)
760
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/models/downsampling.py
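A shape sketch for `CogVideoXDownsample3D`. Note the row above is truncated before the spatial convolution step of `forward`; the shapes below assume the full class as shipped in diffusers (and that `diffusers` and `torch` are installed).

```python
import torch
from diffusers.models.downsampling import CogVideoXDownsample3D

down = CogVideoXDownsample3D(in_channels=4, out_channels=4, compress_time=True)
x = torch.randn(1, 4, 9, 16, 16)  # odd frame count: the first frame is kept as-is
print(down(x).shape)  # torch.Size([1, 4, 5, 8, 8])
```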