# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py
            short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`).
        default_processing_resolution (`int`, *optional*):
            The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in
            the model config. When the pipeline is called without explicitly setting `processing_resolution`, the
            default value is used. This is required to ensure reasonable results with various model flavors trained
            with varying optimal processing resolution values.
    """
    model_cpu_offload_seq = "text_encoder->unet->vae"

    supported_prediction_types = ("depth", "disparity")

    def __init__(
        self,
        unet: UNet2DConditionModel,
        vae: AutoencoderKL,
        scheduler: Union[DDIMScheduler, LCMScheduler],
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        prediction_type: Optional[str] = None,
        scale_invariant: Optional[bool] = True,
        shift_invariant: Optional[bool] = True,
        default_denoising_steps: Optional[int] = None,
        default_processing_resolution: Optional[int] = None,
    ):
        super().__init__()

        if prediction_type not in self.supported_prediction_types:
            logger.warning(
                f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: "
                f"{self.supported_prediction_types}."
            )

        self.register_modules(
            unet=unet,
            vae=vae,
            scheduler=scheduler,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
        )
        self.register_to_config(
            prediction_type=prediction_type,
            scale_invariant=scale_invariant,
            shift_invariant=shift_invariant,
            default_denoising_steps=default_denoising_steps,
            default_processing_resolution=default_processing_resolution,
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8

        self.scale_invariant = scale_invariant
        self.shift_invariant = shift_invariant
        self.default_denoising_steps = default_denoising_steps
        self.default_processing_resolution = default_processing_resolution

        self.empty_text_embedding = None

        self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
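# Illustration (standalone sketch, not part of the pipeline source): for a
# typical Stable Diffusion VAE config with four `block_out_channels`, the
# spatial downscaling factor computed in `__init__` above resolves to 8.
block_out_channels = (128, 256, 512, 512)  # assumed config values
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
assert vae_scale_factor == 8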
    def check_inputs(
        self,
        image: PipelineImageInput,
        num_inference_steps: int,
        ensemble_size: int,
        processing_resolution: int,
        resample_method_input: str,
        resample_method_output: str,
        batch_size: int,
        ensembling_kwargs: Optional[Dict[str, Any]],
        latents: Optional[torch.Tensor],
        generator: Optional[Union[torch.Generator, List[torch.Generator]]],
        output_type: str,
        output_uncertainty: bool,
    ) -> int:
        if num_inference_steps is None:
            raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.")
        if num_inference_steps < 1:
            raise ValueError("`num_inference_steps` must be positive.")
        if ensemble_size < 1:
            raise ValueError("`ensemble_size` must be positive.")
        if ensemble_size == 2:
            logger.warning(
                "`ensemble_size` == 2 results are similar to no ensembling (1); "
                "consider increasing the value to at least 3."
            )
        if ensemble_size > 1 and (self.scale_invariant or self.shift_invariant) and not is_scipy_available():
            raise ImportError("Make sure to install scipy if you want to use ensembling.")
        if ensemble_size == 1 and output_uncertainty:
            raise ValueError(
                "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` "
                "greater than 1."
            )
        if processing_resolution is None:
            raise ValueError(
                "`processing_resolution` is not specified and could not be resolved from the model config."
            )
        if processing_resolution < 0:
            raise ValueError(
                "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
                "downsampled processing."
            )
        if processing_resolution % self.vae_scale_factor != 0:
            raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
        if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_input` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_output` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if batch_size < 1:
            raise ValueError("`batch_size` must be positive.")
        if output_type not in ["pt", "np"]:
            raise ValueError("`output_type` must be one of `pt` or `np`.")
        if latents is not None and generator is not None:
            raise ValueError("`latents` and `generator` cannot be used together.")
        if ensembling_kwargs is not None:
            if not isinstance(ensembling_kwargs, dict):
                raise ValueError("`ensembling_kwargs` must be a dictionary.")
            if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("mean", "median"):
                raise ValueError("`ensembling_kwargs['reduction']` can be either `'mean'` or `'median'`.")

        # image checks
        num_images = 0
        W, H = None, None
        if not isinstance(image, list):
            image = [image]
        for i, img in enumerate(image):
            if isinstance(img, np.ndarray) or torch.is_tensor(img):
                if img.ndim not in (2, 3, 4):
                    raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
                H_i, W_i = img.shape[-2:]
                N_i = 1
                if img.ndim == 4:
                    N_i = img.shape[0]
            elif isinstance(img, Image.Image):
                W_i, H_i = img.size
                N_i = 1
            else:
                raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
            if W is None:
                W, H = W_i, H_i
            elif (W, H) != (W_i, H_i):
                raise ValueError(
                    f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
                )
            num_images += N_i

        # latents checks
        if latents is not None:
            if not torch.is_tensor(latents):
                raise ValueError("`latents` must be a torch.Tensor.")
            if latents.dim() != 4:
                raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.")

            if processing_resolution > 0:
                max_orig = max(H, W)
                new_H = H * processing_resolution // max_orig
                new_W = W * processing_resolution // max_orig
                if new_H == 0 or new_W == 0:
                    raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")
                W, H = new_W, new_H
            w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor
            h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor
            shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w)

            if latents.shape != shape_expected:
                raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.")

        # generator checks
        if generator is not None:
            if isinstance(generator, list):
                if len(generator) != num_images * ensemble_size:
                    raise ValueError(
                        "The number of generators must match the total number of ensemble members for all input images."
                    )
                if not all(g.device.type == generator[0].device.type for g in generator):
                    raise ValueError("`generator` device placement is not consistent in the list.")
            elif not isinstance(generator, torch.Generator):
                raise ValueError(f"Unsupported generator type: {type(generator)}.")

        return num_images
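# Worked example of the latent shape arithmetic in `check_inputs` above
# (standalone sketch with assumed values): two 768x1024 images, ensemble_size=3,
# processing_resolution=768, vae_scale_factor=8, latent_channels=4.
W, H, processing_resolution, vae_scale_factor = 1024, 768, 768, 8
max_orig = max(H, W)
new_H = H * processing_resolution // max_orig  # 576
new_W = W * processing_resolution // max_orig  # 768
w = (new_W + vae_scale_factor - 1) // vae_scale_factor  # 96
h = (new_H + vae_scale_factor - 1) // vae_scale_factor  # 72
shape_expected = (2 * 3, 4, h, w)
print(shape_expected)  # (6, 4, 72, 96)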
    def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        progress_bar_config = dict(**self._progress_bar_config)
        progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
        progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
        if iterable is not None:
            return tqdm(iterable, **progress_bar_config)
        elif total is not None:
            return tqdm(total=total, **progress_bar_config)
        else:
            raise ValueError("Either `total` or `iterable` has to be defined.")
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        num_inference_steps: Optional[int] = None,
        ensemble_size: int = 1,
        processing_resolution: Optional[int] = None,
        match_input_resolution: bool = True,
        resample_method_input: str = "bilinear",
        resample_method_output: str = "bilinear",
        batch_size: int = 1,
        ensembling_kwargs: Optional[Dict[str, Any]] = None,
        latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: str = "np",
        output_uncertainty: bool = False,
        output_latent: bool = False,
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline.

        Args:
            image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
                `List[torch.Tensor]`):
                An input image or images used as an input for the depth estimation task. For arrays and tensors,
                the expected value range is between `[0, 1]`. Passing a batch of images is possible by providing a
                four-dimensional array or a tensor. Additionally, a list of images of two- or three-dimensional
                arrays or tensors can be passed. In the latter case, all list elements must have the same width and
                height.
            num_inference_steps (`int`, *optional*, defaults to `None`):
                Number of denoising diffusion steps during inference. The default value `None` results in automatic
                selection. The number of steps should be at least 10 with the full Marigold models, and between 1
                and 4 for Marigold-LCM models.
            ensemble_size (`int`, defaults to `1`):
                Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for
                faster inference.
            processing_resolution (`int`, *optional*, defaults to `None`):
                Effective processing resolution. When set to `0`, matches the larger input image dimension. This
                produces crisper predictions, but may also lead to the overall loss of global context. The default
                value `None` resolves to the optimal value from the model config.
            match_input_resolution (`bool`, *optional*, defaults to `True`):
                When enabled, the output prediction is resized to match the input dimensions. When disabled, the
                longer side of the output will equal `processing_resolution`.
            resample_method_input (`str`, *optional*, defaults to `"bilinear"`):
                Resampling method used to resize input images to `processing_resolution`. The accepted values are:
                `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
            resample_method_output (`str`, *optional*, defaults to `"bilinear"`):
                Resampling method used to resize output predictions to match the input resolution. The accepted
                values are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
            batch_size (`int`, *optional*, defaults to `1`):
                Batch size; only matters when setting `ensemble_size` or passing a tensor of images.
            ensembling_kwargs (`dict`, *optional*, defaults to `None`):
                Extra dictionary with arguments for precise ensembling control. The following options are
                available:
                - reduction (`str`, *optional*, defaults to `"median"`): Defines the ensembling function applied in
                  every pixel location, can be either `"median"` or `"mean"`.
                - regularizer_strength (`float`, *optional*, defaults to `0.02`): Strength of the regularizer that
                  pulls the aligned predictions to the unit range from 0 to 1.
                - max_iter (`int`, *optional*, defaults to `2`): Maximum number of the alignment solver steps.
                  Refer to `scipy.optimize.minimize` function, `options` argument.
                - tol (`float`, *optional*, defaults to `1e-3`): Alignment solver tolerance. The solver stops when
                  the tolerance is reached.
                - max_res (`int`, *optional*, defaults to `None`): Resolution at which the alignment is performed;
                  `None` matches the `processing_resolution`.
            latents (`torch.Tensor`, or `List[torch.Tensor]`, *optional*, defaults to `None`):
                Latent noise tensors to replace the random initialization. These can be taken from the previous
                function call's output.
            generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`):
                Random number generator object to ensure reproducibility.
            output_type (`str`, *optional*, defaults to `"np"`):
                Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted
                values are: `"np"` (numpy array) or `"pt"` (torch tensor).
            output_uncertainty (`bool`, *optional*, defaults to `False`):
                When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided
                that the `ensemble_size` argument is set to a value above 2.
            output_latent (`bool`, *optional*, defaults to `False`):
                When enabled, the output's `latent` field contains the latent codes corresponding to the
                predictions within the ensemble. These codes can be saved, modified, and used for subsequent calls
                with the `latents` argument.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.marigold.MarigoldDepthOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.marigold.MarigoldDepthOutput`] is returned, otherwise a
                `tuple` is returned where the first element is the prediction, the second element is the
                uncertainty (or `None`), and the third is the latent (or `None`).
        """

        # 0. Resolving variables.
        device = self._execution_device
        dtype = self.dtype

        # Model-specific optimal default values leading to fast and reasonable results.
        if num_inference_steps is None:
            num_inference_steps = self.default_denoising_steps
        if processing_resolution is None:
            processing_resolution = self.default_processing_resolution
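# Minimal usage sketch (standalone; the checkpoint name and URL are assumptions,
# not from this source): omitting `num_inference_steps` and
# `processing_resolution` lets the call fall back to the model-config defaults
# resolved in step 0 above.
import torch
from diffusers import MarigoldDepthPipeline
from diffusers.utils import load_image

pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", torch_dtype=torch.float16  # assumed checkpoint
).to("cuda")
image = load_image("https://example.com/input.jpg")  # placeholder URL
depth = pipe(image)  # defaults come from the model config
print(depth.prediction.shape)  # (1, H, W, 1)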
        # 1. Check inputs.
        num_images = self.check_inputs(
            image,
            num_inference_steps,
            ensemble_size,
            processing_resolution,
            resample_method_input,
            resample_method_output,
            batch_size,
            ensembling_kwargs,
            latents,
            generator,
            output_type,
            output_uncertainty,
        )

        # 2. Prepare empty text conditioning.
        # Model invocation: self.tokenizer, self.text_encoder.
        if self.empty_text_embedding is None:
            prompt = ""
            text_inputs = self.tokenizer(
                prompt,
                padding="do_not_pad",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids.to(device)
            self.empty_text_embedding = self.text_encoder(text_input_ids)[0]  # [1,2,1024]
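# Quick check of the `[1,2,1024]` shape noted above (standalone sketch; the
# tokenizer checkpoint is an assumption): an empty prompt tokenizes to just the
# BOS and EOS tokens when no padding is applied.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")  # assumed checkpoint
ids = tokenizer("", padding="do_not_pad", return_tensors="pt").input_ids
print(ids.shape)  # torch.Size([1, 2]): BOS + EOS only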
        # 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`,
        # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where
        # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are
        # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None`
        # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of
        # operation and leads to the most reasonable results. Using the native image resolution or any other processing
        # resolution can lead to loss of either fine details or global context in the output predictions.
        image, padding, original_resolution = self.image_processor.preprocess(
            image, processing_resolution, resample_method_input, device, dtype
        )  # [N,3,PPH,PPW]

        # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E`
        # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently.
        # Latents of each such prediction across all input images and all ensemble members are represented in the
        # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded
        # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure
        # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline
        # code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken
        # as a convex combination of the latents output of the pipeline for the previous frame and newly-sampled
        # noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space
        # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`.
        # Model invocation: self.vae.encoder.
        image_latent, pred_latent = self.prepare_latents(
            image, latents, generator, ensemble_size, batch_size
        )  # [N*E,4,h,w], [N*E,4,h,w]
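# Sketch of the video-frame trick described in the comment above (hypothetical
# tensors; `pipe` is the pipeline from the earlier usage sketch). The previous
# frame's output latent is blended with fresh noise and fed back via `latents`.
import torch

mix = 0.9  # assumed blending weight
init_latent = mix * prev_latent + (1.0 - mix) * torch.randn_like(prev_latent)  # prev_latent: hypothetical
out = pipe(next_frame, latents=init_latent, output_latent=True)  # next_frame: hypothetical
prev_latent = out.latent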
        del image

        batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat(
            batch_size, 1, 1
        )  # [B,2,1024]

        # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`.
        # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and
        # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by
        # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded
        # model.
        # Model invocation: self.unet.
        pred_latents = []
        for i in self.progress_bar(
            range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..."
        ):
            batch_image_latent = image_latent[i : i + batch_size]  # [B,4,h,w]
            batch_pred_latent = pred_latent[i : i + batch_size]  # [B,4,h,w]
            effective_batch_size = batch_image_latent.shape[0]
            text = batch_empty_text_embedding[:effective_batch_size]  # [B,2,1024]

            self.scheduler.set_timesteps(num_inference_steps, device=device)
            for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."):
                batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1)  # [B,8,h,w]
                noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0]  # [B,4,h,w]
                batch_pred_latent = self.scheduler.step(
                    noise, t, batch_pred_latent, generator=generator
                ).prev_sample  # [B,4,h,w]

                if XLA_AVAILABLE:
                    xm.mark_step()

            pred_latents.append(batch_pred_latent)

        pred_latent = torch.cat(pred_latents, dim=0)  # [N*E,4,h,w]

        del (
            pred_latents,
            image_latent,
            batch_empty_text_embedding,
            batch_image_latent,
            batch_pred_latent,
            text,
            batch_latent,
            noise,
        )

        # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`,
        # which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`.
        # Model invocation: self.vae.decoder.
        prediction = torch.cat(
            [
                self.decode_prediction(pred_latent[i : i + batch_size])
                for i in range(0, pred_latent.shape[0], batch_size)
            ],
            dim=0,
        )  # [N*E,1,PPH,PPW]

        if not output_latent:
            pred_latent = None

        # 7. Remove padding. The output shape is (PH, PW).
        prediction = self.image_processor.unpad_image(prediction, padding)  # [N*E,1,PH,PW]

        # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N`
        # groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of shape
        # `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for
        # each group independently, it stacks them respectively into batches of `N` almost final predictions and
        # uncertainty maps.
        uncertainty = None
        if ensemble_size > 1:
            prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:])  # [N,E,1,PH,PW]
            prediction = [
                self.ensemble_depth(
                    prediction[i],
                    self.scale_invariant,
                    self.shift_invariant,
                    output_uncertainty,
                    **(ensembling_kwargs or {}),
                )
                for i in range(num_images)
            ]  # [ [[1,1,PH,PW], [1,1,PH,PW]], ... ]
            prediction, uncertainty = zip(*prediction)  # [[1,1,PH,PW], ... ], [[1,1,PH,PW], ... ]
            prediction = torch.cat(prediction, dim=0)  # [N,1,PH,PW]
            if output_uncertainty:
                uncertainty = torch.cat(uncertainty, dim=0)  # [N,1,PH,PW]
            else:
                uncertainty = None

        # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the
        # input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled.
        # Depending on the downstream use-case, upsampling can also be chosen based on the tolerated artifacts by
        # setting the `resample_method_output` parameter (e.g., to `"nearest"`).
        if match_input_resolution:
            prediction = self.image_processor.resize_antialias(
                prediction, original_resolution, resample_method_output, is_aa=False
            )  # [N,1,H,W]
            if uncertainty is not None and output_uncertainty:
                uncertainty = self.image_processor.resize_antialias(
                    uncertainty, original_resolution, resample_method_output, is_aa=False
                )  # [N,1,H,W]

        # 10. Prepare the final outputs.
        if output_type == "np":
            prediction = self.image_processor.pt_to_numpy(prediction)  # [N,H,W,1]
            if uncertainty is not None and output_uncertainty:
                uncertainty = self.image_processor.pt_to_numpy(uncertainty)  # [N,H,W,1]

        # 11. Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (prediction, uncertainty, pred_latent)

        return MarigoldDepthOutput(
            prediction=prediction,
            uncertainty=uncertainty,
            latent=pred_latent,
        )

    def prepare_latents(
        self,
        image: torch.Tensor,
        latents: Optional[torch.Tensor],
        generator: Optional[torch.Generator],
        ensemble_size: int,
        batch_size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        def retrieve_latents(encoder_output):
            if hasattr(encoder_output, "latent_dist"):
                return encoder_output.latent_dist.mode()
            elif hasattr(encoder_output, "latents"):
                return encoder_output.latents
            else:
                raise AttributeError("Could not access latents of provided encoder_output")

        image_latent = torch.cat(
            [
                retrieve_latents(self.vae.encode(image[i : i + batch_size]))
                for i in range(0, image.shape[0], batch_size)
            ],
            dim=0,
        )  # [N,4,h,w]
        image_latent = image_latent * self.vae.config.scaling_factor
        image_latent = image_latent.repeat_interleave(ensemble_size, dim=0)  # [N*E,4,h,w]

        pred_latent = latents
        if pred_latent is None:
            pred_latent = randn_tensor(
                image_latent.shape,
                generator=generator,
                device=image_latent.device,
                dtype=image_latent.dtype,
            )  # [N*E,4,h,w]

        return image_latent, pred_latent
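# Illustration (standalone sketch): `repeat_interleave(E, dim=0)` keeps all
# ensemble members of the same image adjacent, which the batched denoising loop
# above relies on, e.g. two images with E=2 yield [img0, img0, img1, img1].
import torch

x = torch.tensor([10, 11])  # stand-ins for two encoded images
print(x.repeat_interleave(2, dim=0))  # tensor([10, 10, 11, 11])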
    def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
        if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
            raise ValueError(
                f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
            )

        prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0]  # [B,3,H,W]

        prediction = prediction.mean(dim=1, keepdim=True)  # [B,1,H,W]
        prediction = torch.clip(prediction, -1.0, 1.0)  # [B,1,H,W]
        prediction = (prediction + 1.0) / 2.0

        return prediction  # [B,1,H,W]
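# Quick check of the affine mapping in `decode_prediction` above (standalone
# sketch): decoded values in [-1, 1] are clipped and mapped into [0, 1].
import torch

x = torch.tensor([-1.0, 0.0, 1.0])
print((torch.clip(x, -1.0, 1.0) + 1.0) / 2.0)  # tensor([0.0000, 0.5000, 1.0000])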
    @staticmethod
    def ensemble_depth(
        depth: torch.Tensor,
        scale_invariant: bool = True,
        shift_invariant: bool = True,
        output_uncertainty: bool = False,
        reduction: str = "median",
        regularizer_strength: float = 0.02,
        max_iter: int = 2,
        tol: float = 1e-3,
        max_res: int = 1024,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Ensembles the depth maps represented by the `depth` tensor with expected shape `(B, 1, H, W)`, where B is
        the number of ensemble members for a given prediction of size `(H x W)`. Even though the function is
        designed for depth maps, it can also be used with disparity maps as long as the input tensor values are
        non-negative. The alignment happens when the predictions have one or more degrees of freedom, that is when
        they are either affine-invariant (`scale_invariant=True` and `shift_invariant=True`), or just
        scale-invariant (only `scale_invariant=True`). For absolute predictions (`scale_invariant=False` and
        `shift_invariant=False`) alignment is skipped and only ensembling is performed.

        Args:
            depth (`torch.Tensor`):
                Input ensemble depth maps.
            scale_invariant (`bool`, *optional*, defaults to `True`):
                Whether to treat predictions as scale-invariant.
            shift_invariant (`bool`, *optional*, defaults to `True`):
                Whether to treat predictions as shift-invariant.
            output_uncertainty (`bool`, *optional*, defaults to `False`):
                Whether to output uncertainty map.
            reduction (`str`, *optional*, defaults to `"median"`):
                Reduction method used to ensemble aligned predictions. The accepted values are: `"mean"` and
                `"median"`.
            regularizer_strength (`float`, *optional*, defaults to `0.02`):
                Strength of the regularizer that pulls the aligned predictions to the unit range from 0 to 1.
            max_iter (`int`, *optional*, defaults to `2`):
                Maximum number of the alignment solver steps. Refer to `scipy.optimize.minimize` function,
                `options` argument.
            tol (`float`, *optional*, defaults to `1e-3`):
                Alignment solver tolerance. The solver stops when the tolerance is reached.
            max_res (`int`, *optional*, defaults to `1024`):
                Resolution at which the alignment is performed; `None` matches the `processing_resolution`.

        Returns:
            A tensor of aligned and ensembled depth maps and optionally a tensor of uncertainties of the same
            shape: `(1, 1, H, W)`.
        """
        if depth.dim() != 4 or depth.shape[1] != 1:
            raise ValueError(f"Expecting 4D tensor of shape [B,1,H,W]; got {depth.shape}.")
        if reduction not in ("mean", "median"):
            raise ValueError(f"Unrecognized reduction method: {reduction}.")
        if not scale_invariant and shift_invariant:
            raise ValueError("Pure shift-invariant ensembling is not supported.")

        def init_param(depth: torch.Tensor):
            init_min = depth.reshape(ensemble_size, -1).min(dim=1).values
            init_max = depth.reshape(ensemble_size, -1).max(dim=1).values

            if scale_invariant and shift_invariant:
                init_s = 1.0 / (init_max - init_min).clamp(min=1e-6)
                init_t = -init_s * init_min
                param = torch.cat((init_s, init_t)).cpu().numpy()
            elif scale_invariant:
                init_s = 1.0 / init_max.clamp(min=1e-6)
                param = init_s.cpu().numpy()
            else:
                raise ValueError("Unrecognized alignment.")

            return param
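# Worked example of `init_param` above (standalone sketch with assumed values):
# a member with min 0.2 and max 0.7 gets s = 1 / (0.7 - 0.2) = 2 and
# t = -2 * 0.2 = -0.4, so the initial guess s * depth + t maps [0.2, 0.7] onto
# [0.0, 1.0] before the solver refines it.
s, t = 2.0, -0.4
print(round(s * 0.2 + t, 6), round(s * 0.7 + t, 6))  # 0.0 1.0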
        def align(depth: torch.Tensor, param: np.ndarray) -> torch.Tensor:
            if scale_invariant and shift_invariant:
                s, t = np.split(param, 2)
                s = torch.from_numpy(s).to(depth).view(ensemble_size, 1, 1, 1)
                t = torch.from_numpy(t).to(depth).view(ensemble_size, 1, 1, 1)
                out = depth * s + t
            elif scale_invariant:
                s = torch.from_numpy(param).to(depth).view(ensemble_size, 1, 1, 1)
                out = depth * s
            else:
                raise ValueError("Unrecognized alignment.")
            return out

        def ensemble(
            depth_aligned: torch.Tensor, return_uncertainty: bool = False
        ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
            uncertainty = None
            if reduction == "mean":
                prediction = torch.mean(depth_aligned, dim=0, keepdim=True)
                if return_uncertainty:
                    uncertainty = torch.std(depth_aligned, dim=0, keepdim=True)
            elif reduction == "median":
                prediction = torch.median(depth_aligned, dim=0, keepdim=True).values
                if return_uncertainty:
                    uncertainty = torch.median(torch.abs(depth_aligned - prediction), dim=0, keepdim=True).values
            else:
                raise ValueError(f"Unrecognized reduction method: {reduction}.")
            return prediction, uncertainty

        def cost_fn(param: np.ndarray, depth: torch.Tensor) -> float:
            cost = 0.0
            depth_aligned = align(depth, param)

            for i, j in torch.combinations(torch.arange(ensemble_size)):
                diff = depth_aligned[i] - depth_aligned[j]
                cost += (diff**2).mean().sqrt().item()

            if regularizer_strength > 0:
                prediction, _ = ensemble(depth_aligned, return_uncertainty=False)
                err_near = (0.0 - prediction.min()).abs().item()
                err_far = (1.0 - prediction.max()).abs().item()
                cost += (err_near + err_far) * regularizer_strength

            return cost
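# Illustration (standalone sketch): `torch.combinations` enumerates the
# unordered member pairs whose pairwise RMS differences `cost_fn` accumulates.
import torch

print(torch.combinations(torch.arange(3)))  # pairs (0,1), (0,2), (1,2)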
        def compute_param(depth: torch.Tensor):
            import scipy

            depth_to_align = depth.to(torch.float32)
            if max_res is not None and max(depth_to_align.shape[2:]) > max_res:
                depth_to_align = MarigoldImageProcessor.resize_to_max_edge(depth_to_align, max_res, "nearest-exact")

            param = init_param(depth_to_align)
            res = scipy.optimize.minimize(
                partial(cost_fn, depth=depth_to_align),
                param,
                method="BFGS",
                tol=tol,
                options={"maxiter": max_iter, "disp": False},
            )
            return res.x

        requires_aligning = scale_invariant or shift_invariant
        ensemble_size = depth.shape[0]

        if requires_aligning:
            param = compute_param(depth)
            depth = align(depth, param)

        depth, uncertainty = ensemble(depth, return_uncertainty=output_uncertainty)

        depth_max = depth.max()
        if scale_invariant and shift_invariant:
            depth_min = depth.min()
        elif scale_invariant:
            depth_min = 0
        else:
            raise ValueError("Unrecognized alignment.")
        depth_range = (depth_max - depth_min).clamp(min=1e-6)
        depth = (depth - depth_min) / depth_range
        if output_uncertainty:
            uncertainty /= depth_range

        return depth, uncertainty  # [1,1,H,W], [1,1,H,W]
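# Minimal usage sketch of the static `ensemble_depth` method (standalone;
# requires scipy for the affine alignment used by the default arguments):
# ensemble five random affine-invariant depth maps and request an uncertainty map.
import torch
from diffusers import MarigoldDepthPipeline

depth = torch.rand(5, 1, 32, 32)  # stand-in ensemble of five members
pred, unc = MarigoldDepthPipeline.ensemble_depth(depth, output_uncertainty=True)
print(pred.shape, unc.shape)  # torch.Size([1, 1, 32, 32]) twice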

# /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
class MarigoldNormalsOutput(BaseOutput):
    """
    Output class for Marigold monocular normals prediction pipeline.

    Args:
        prediction (`np.ndarray`, `torch.Tensor`):
            Predicted normals with values in the range [-1, 1]. The shape is always $numimages \times 3 \times
            height \times width$, regardless of whether the images were passed as a 4D array or a list.
        uncertainty (`None`, `np.ndarray`, `torch.Tensor`):
            Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages
            \times 1 \times height \times width$.
        latent (`None`, `torch.Tensor`):
            Latent features corresponding to the predictions, compatible with the `latents` argument of the
            pipeline. The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$.
    """

    prediction: Union[np.ndarray, torch.Tensor]
    uncertainty: Union[None, np.ndarray, torch.Tensor]
    latent: Union[None, torch.Tensor]
class MarigoldNormalsPipeline(DiffusionPipeline):
    """
    Pipeline for monocular normals estimation using the Marigold method: https://marigoldmonodepth.github.io.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        unet (`UNet2DConditionModel`):
            Conditional U-Net to denoise the normals latent, conditioned on image latent.
        vae (`AutoencoderKL`):
            Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent
            representations.
        scheduler (`DDIMScheduler` or `LCMScheduler`):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents.
        text_encoder (`CLIPTextModel`):
            Text-encoder, for empty text embedding.
        tokenizer (`CLIPTokenizer`):
            CLIP tokenizer.
        prediction_type (`str`, *optional*):
            Type of predictions made by the model.
        use_full_z_range (`bool`, *optional*):
            Whether the normals predicted by this model utilize the full range of the Z dimension, or only its
            positive half.
        default_denoising_steps (`int`, *optional*):
            The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable
            quality with the given model. This value must be set in the model config. When the pipeline is called
            without explicitly setting `num_inference_steps`, the default value is used. This is required to ensure
            reasonable results with various model flavors compatible with the pipeline, such as those relying on
            very short denoising schedules (`LCMScheduler`) and those with full diffusion schedules
            (`DDIMScheduler`).
        default_processing_resolution (`int`, *optional*):
            The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set
            in the model config. When the pipeline is called without explicitly setting `processing_resolution`,
            the default value is used. This is required to ensure reasonable results with various model flavors
            trained with varying optimal processing resolution values.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"

    supported_prediction_types = ("normals",)

    def __init__(
        self,
        unet: UNet2DConditionModel,
        vae: AutoencoderKL,
        scheduler: Union[DDIMScheduler, LCMScheduler],
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        prediction_type: Optional[str] = None,
        use_full_z_range: Optional[bool] = True,
        default_denoising_steps: Optional[int] = None,
        default_processing_resolution: Optional[int] = None,
    ):
        super().__init__()

        if prediction_type not in self.supported_prediction_types:
            logger.warning(
                f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: "
                f"{self.supported_prediction_types}."
            )

        self.register_modules(
            unet=unet,
            vae=vae,
            scheduler=scheduler,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
        )
        self.register_to_config(
            use_full_z_range=use_full_z_range,
            default_denoising_steps=default_denoising_steps,
            default_processing_resolution=default_processing_resolution,
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8

        self.use_full_z_range = use_full_z_range
        self.default_denoising_steps = default_denoising_steps
        self.default_processing_resolution = default_processing_resolution

        self.empty_text_embedding = None

        self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
    def check_inputs(
        self,
        image: PipelineImageInput,
        num_inference_steps: int,
        ensemble_size: int,
        processing_resolution: int,
        resample_method_input: str,
        resample_method_output: str,
        batch_size: int,
        ensembling_kwargs: Optional[Dict[str, Any]],
        latents: Optional[torch.Tensor],
        generator: Optional[Union[torch.Generator, List[torch.Generator]]],
        output_type: str,
        output_uncertainty: bool,
    ) -> int:
        if num_inference_steps is None:
            raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.")
        if num_inference_steps < 1:
            raise ValueError("`num_inference_steps` must be positive.")
        if ensemble_size < 1:
            raise ValueError("`ensemble_size` must be positive.")
        if ensemble_size == 2:
            logger.warning(
                "`ensemble_size` == 2 results are similar to no ensembling (1); "
                "consider increasing the value to at least 3."
            )
        if ensemble_size == 1 and output_uncertainty:
            raise ValueError(
                "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` "
                "greater than 1."
            )
        if processing_resolution is None:
            raise ValueError(
                "`processing_resolution` is not specified and could not be resolved from the model config."
            )
        if processing_resolution < 0:
            raise ValueError(
                "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
                "downsampled processing."
            )
        if processing_resolution % self.vae_scale_factor != 0:
            raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
        if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_input` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
            raise ValueError(
                "`resample_method_output` takes string values compatible with PIL library: "
                "nearest, nearest-exact, bilinear, bicubic, area."
            )
        if batch_size < 1:
            raise ValueError("`batch_size` must be positive.")
        if output_type not in ["pt", "np"]:
            raise ValueError("`output_type` must be one of `pt` or `np`.")
        if latents is not None and generator is not None:
            raise ValueError("`latents` and `generator` cannot be used together.")
        if ensembling_kwargs is not None:
            if not isinstance(ensembling_kwargs, dict):
                raise ValueError("`ensembling_kwargs` must be a dictionary.")
            if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("closest", "mean"):
                raise ValueError("`ensembling_kwargs['reduction']` can be either `'closest'` or `'mean'`.")

        # image checks
        num_images = 0
        W, H = None, None
        if not isinstance(image, list):
            image = [image]
        for i, img in enumerate(image):
            if isinstance(img, np.ndarray) or torch.is_tensor(img):
                if img.ndim not in (2, 3, 4):
                    raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
                H_i, W_i = img.shape[-2:]
                N_i = 1
                if img.ndim == 4:
                    N_i = img.shape[0]
            elif isinstance(img, Image.Image):
                W_i, H_i = img.size
                N_i = 1
            else:
                raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
            if W is None:
                W, H = W_i, H_i
            elif (W, H) != (W_i, H_i):
                raise ValueError(
                    f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
                )
            num_images += N_i

        # latents checks
        if latents is not None:
            if not torch.is_tensor(latents):
                raise ValueError("`latents` must be a torch.Tensor.")
            if latents.dim() != 4:
                raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.")

            if processing_resolution > 0:
                max_orig = max(H, W)
                new_H = H * processing_resolution // max_orig
                new_W = W * processing_resolution // max_orig
                if new_H == 0 or new_W == 0:
                    raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")
                W, H = new_W, new_H
            w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor
            h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor
            shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w)

            if latents.shape != shape_expected:
                raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.")

        # generator checks
        if generator is not None:
            if isinstance(generator, list):
                if len(generator) != num_images * ensemble_size:
                    raise ValueError(
                        "The number of generators must match the total number of ensemble members for all input images."
                    )
                if not all(g.device.type == generator[0].device.type for g in generator):
                    raise ValueError("`generator` device placement is not consistent in the list.")
            elif not isinstance(generator, torch.Generator):
                raise ValueError(f"Unsupported generator type: {type(generator)}.")

        return num_images
    def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        progress_bar_config = dict(**self._progress_bar_config)
        progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
        progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
        if iterable is not None:
            return tqdm(iterable, **progress_bar_config)
        elif total is not None:
            return tqdm(total=total, **progress_bar_config)
        else:
            raise ValueError("Either `total` or `iterable` has to be defined.")
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        num_inference_steps: Optional[int] = None,
        ensemble_size: int = 1,
        processing_resolution: Optional[int] = None,
        match_input_resolution: bool = True,
        resample_method_input: str = "bilinear",
        resample_method_output: str = "bilinear",
        batch_size: int = 1,
        ensembling_kwargs: Optional[Dict[str, Any]] = None,
        latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: str = "np",
        output_uncertainty: bool = False,
        output_latent: bool = False,
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline.

        Args:
            image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
                `List[torch.Tensor]`):
                An input image or images used as an input for the normals estimation task. For arrays and tensors,
                the expected value range is between `[0, 1]`. Passing a batch of images is possible by providing a
                four-dimensional array or a tensor. Additionally, a list of images of two- or three-dimensional
                arrays or tensors can be passed. In the latter case, all list elements must have the same width and
                height.
            num_inference_steps (`int`, *optional*, defaults to `None`):
                Number of denoising diffusion steps during inference. The default value `None` results in automatic
                selection. The number of steps should be at least 10 with the full Marigold models, and between 1
                and 4 for Marigold-LCM models.
            ensemble_size (`int`, defaults to `1`):
                Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for
                faster inference.
            processing_resolution (`int`, *optional*, defaults to `None`):
                Effective processing resolution. When set to `0`, matches the larger input image dimension. This
                produces crisper predictions, but may also lead to the overall loss of global context. The default
                value `None` resolves to the optimal value from the model config.
            match_input_resolution (`bool`, *optional*, defaults to `True`):
                When enabled, the output prediction is resized to match the input dimensions. When disabled, the
                longer side of the output will equal `processing_resolution`.
            resample_method_input (`str`, *optional*, defaults to `"bilinear"`):
                Resampling method used to resize input images to `processing_resolution`. The accepted values are:
                `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
            resample_method_output (`str`, *optional*, defaults to `"bilinear"`):
                Resampling method used to resize output predictions to match the input resolution. The accepted
                values are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
            batch_size (`int`, *optional*, defaults to `1`):
                Batch size; only matters when setting `ensemble_size` or passing a tensor of images.
            ensembling_kwargs (`dict`, *optional*, defaults to `None`):
                Extra dictionary with arguments for precise ensembling control. The following options are
                available:
                - reduction (`str`, *optional*, defaults to `"closest"`): Defines the ensembling function applied
                  in every pixel location, can be either `"closest"` or `"mean"`.
            latents (`torch.Tensor`, *optional*, defaults to `None`):
                Latent noise tensors to replace the random initialization. These can be taken from the previous
                function call's output.
            generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`):
                Random number generator object to ensure reproducibility.
            output_type (`str`, *optional*, defaults to `"np"`):
                Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted
                values are: `"np"` (numpy array) or `"pt"` (torch tensor).
            output_uncertainty (`bool`, *optional*, defaults to `False`):
                When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided
                that the `ensemble_size` argument is set to a value above 2.
            output_latent (`bool`, *optional*, defaults to `False`):
                When enabled, the output's `latent` field contains the latent codes corresponding to the
                predictions within the ensemble. These codes can be saved, modified, and used for subsequent calls
                with the `latents` argument.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.marigold.MarigoldNormalsOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.marigold.MarigoldNormalsOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.marigold.MarigoldNormalsOutput`] is returned, otherwise a
                `tuple` is returned where the first element is the prediction, the second element is the
                uncertainty (or `None`), and the third is the latent (or `None`).
        """

        # 0. Resolving variables.
        device = self._execution_device
        dtype = self.dtype

        # Model-specific optimal default values leading to fast and reasonable results.
        if num_inference_steps is None:
            num_inference_steps = self.default_denoising_steps
        if processing_resolution is None:
            processing_resolution = self.default_processing_resolution
        # 1. Check inputs.
        num_images = self.check_inputs(
            image,
            num_inference_steps,
            ensemble_size,
            processing_resolution,
            resample_method_input,
            resample_method_output,
            batch_size,
            ensembling_kwargs,
            latents,
            generator,
            output_type,
            output_uncertainty,
        )

        # 2. Prepare empty text conditioning.
        # Model invocation: self.tokenizer, self.text_encoder.
        if self.empty_text_embedding is None:
            prompt = ""
            text_inputs = self.tokenizer(
                prompt,
                padding="do_not_pad",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids.to(device)
            self.empty_text_embedding = self.text_encoder(text_input_ids)[0]  # [1,2,1024]

        # 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`,
        # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where
        # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are
        # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None`
        # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of
        # operation and leads to the most reasonable results. Using the native image resolution or any other processing
        # resolution can lead to loss of either fine details or global context in the output predictions.
        image, padding, original_resolution = self.image_processor.preprocess(
            image, processing_resolution, resample_method_input, device, dtype
        )  # [N,3,PPH,PPW]

        # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E`
        # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently.
        # Latents of each such prediction across all input images and all ensemble members are represented in the
        # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded
        # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure
        # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline
        # code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken
        # as a convex combination of the latents output of the pipeline for the previous frame and newly-sampled
        # noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space
        # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`.
        # Model invocation: self.vae.encoder.
        image_latent, pred_latent = self.prepare_latents(
            image, latents, generator, ensemble_size, batch_size
        )  # [N*E,4,h,w], [N*E,4,h,w]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
del image batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat( batch_size, 1, 1 ) # [B,1024,2] # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`. # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded # model. # Model invocation: self.unet. pred_latents = []
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
for i in self.progress_bar( range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..." ): batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w] batch_pred_latent = pred_latent[i : i + batch_size] # [B,4,h,w] effective_batch_size = batch_image_latent.shape[0] text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024] self.scheduler.set_timesteps(num_inference_steps, device=device) for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."): batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,8,h,w] noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,4,h,w] batch_pred_latent = self.scheduler.step( noise, t, batch_pred_latent, generator=generator ).prev_sample # [B,4,h,w]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
if XLA_AVAILABLE: xm.mark_step() pred_latents.append(batch_pred_latent) pred_latent = torch.cat(pred_latents, dim=0) # [N*E,4,h,w] del ( pred_latents, image_latent, batch_empty_text_embedding, batch_image_latent, batch_pred_latent, text, batch_latent, noise, ) # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`, # which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`. # Model invocation: self.vae.decoder. prediction = torch.cat( [ self.decode_prediction(pred_latent[i : i + batch_size]) for i in range(0, pred_latent.shape[0], batch_size) ], dim=0, ) # [N*E,3,PPH,PPW]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        if not output_latent:
            pred_latent = None

        # 7. Remove padding. The output shape is (PH, PW).
        prediction = self.image_processor.unpad_image(prediction, padding)  # [N*E,3,PH,PW]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
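# Step 7 consumes the `padding` computed when the input was padded before encoding (earlier in `__call__`).
# A minimal self-contained sketch of that pad/unpad bookkeeping, assuming right/bottom padding to a multiple of
# the VAE scale factor; the pipeline itself uses `self.image_processor`'s helpers rather than these functions.
import torch
import torch.nn.functional as F

def pad_to_multiple(x: torch.Tensor, multiple: int):
    h, w = x.shape[-2:]
    ph, pw = (-h) % multiple, (-w) % multiple
    # Pad right and bottom only, so unpadding is a simple crop.
    return F.pad(x, (0, pw, 0, ph), mode="replicate"), (ph, pw)

def unpad(x: torch.Tensor, padding):
    ph, pw = padding
    return x[..., : x.shape[-2] - ph, : x.shape[-1] - pw]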
        # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N`
        # groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of
        # shape `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of
        # outputs for each group independently, it stacks them respectively into batches of `N` almost final
        # predictions and uncertainty maps.
        uncertainty = None
        if ensemble_size > 1:
            prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:])  # [N,E,3,PH,PW]
            prediction = [
                self.ensemble_normals(prediction[i], output_uncertainty, **(ensembling_kwargs or {}))
                for i in range(num_images)
            ]  # [ [[1,3,PH,PW], [1,1,PH,PW]], ... ]
            prediction, uncertainty = zip(*prediction)  # [[1,3,PH,PW], ... ], [[1,1,PH,PW], ... ]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
            prediction = torch.cat(prediction, dim=0)  # [N,3,PH,PW]
            if output_uncertainty:
                uncertainty = torch.cat(uncertainty, dim=0)  # [N,1,PH,PW]
            else:
                uncertainty = None
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match
        # the input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be
        # disabled. After upsampling, the normal maps are renormalized to unit length to reduce the artifacts.
        # Depending on the downstream use case, the upsampling method can also be chosen based on the tolerated
        # artifacts by setting the `resample_method_output` parameter (e.g., to `"nearest"`).
        if match_input_resolution:
            prediction = self.image_processor.resize_antialias(
                prediction, original_resolution, resample_method_output, is_aa=False
            )  # [N,3,H,W]
            prediction = self.normalize_normals(prediction)  # [N,3,H,W]
            if uncertainty is not None and output_uncertainty:
                uncertainty = self.image_processor.resize_antialias(
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
                    uncertainty, original_resolution, resample_method_output, is_aa=False
                )  # [N,1,H,W]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
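# Why step 9 renormalizes after upsampling: interpolating unit vectors generally shortens them, most visibly
# where the normal direction changes. A small illustrative check (tensor values are arbitrary):
import torch
import torch.nn.functional as F

n = torch.tensor([[1.0, 0.0], [0.0, -1.0], [0.0, 0.0]]).reshape(1, 3, 1, 2)  # two orthogonal unit normals
up = F.interpolate(n, size=(1, 4), mode="bilinear", align_corners=False)
print(up.norm(dim=1))  # interior norms fall below 1.0; normalize_normals restores unit length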
        # 10. Prepare the final outputs.
        if output_type == "np":
            prediction = self.image_processor.pt_to_numpy(prediction)  # [N,H,W,3]
            if uncertainty is not None and output_uncertainty:
                uncertainty = self.image_processor.pt_to_numpy(uncertainty)  # [N,H,W,1]

        # 11. Offload all models.
        self.maybe_free_model_hooks()

        if not return_dict:
            return (prediction, uncertainty, pred_latent)

        return MarigoldNormalsOutput(
            prediction=prediction,
            uncertainty=uncertainty,
            latent=pred_latent,
        )
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
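# End-to-end usage sketch for the `__call__` method above. The checkpoint id follows the Marigold docs and the
# visualization helper lives on MarigoldImageProcessor; treat both as assumptions if your install differs.
import torch
from diffusers import MarigoldNormalsPipeline
from diffusers.utils import load_image

pipe = MarigoldNormalsPipeline.from_pretrained(
    "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://example.com/input.jpg")  # placeholder URL
out = pipe(image, ensemble_size=4, output_uncertainty=True)

vis = pipe.image_processor.visualize_normals(out.prediction)  # list of PIL images
vis[0].save("normals.png")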
    # Copied from diffusers.pipelines.marigold.pipeline_marigold_depth.MarigoldDepthPipeline.prepare_latents
    def prepare_latents(
        self,
        image: torch.Tensor,
        latents: Optional[torch.Tensor],
        generator: Optional[torch.Generator],
        ensemble_size: int,
        batch_size: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        def retrieve_latents(encoder_output):
            if hasattr(encoder_output, "latent_dist"):
                return encoder_output.latent_dist.mode()
            elif hasattr(encoder_output, "latents"):
                return encoder_output.latents
            else:
                raise AttributeError("Could not access latents of provided encoder_output")
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        image_latent = torch.cat(
            [
                retrieve_latents(self.vae.encode(image[i : i + batch_size]))
                for i in range(0, image.shape[0], batch_size)
            ],
            dim=0,
        )  # [N,4,h,w]
        image_latent = image_latent * self.vae.config.scaling_factor
        image_latent = image_latent.repeat_interleave(ensemble_size, dim=0)  # [N*E,4,h,w]

        pred_latent = latents
        if pred_latent is None:
            pred_latent = randn_tensor(
                image_latent.shape,
                generator=generator,
                device=image_latent.device,
                dtype=image_latent.dtype,
            )  # [N*E,4,h,w]

        return image_latent, pred_latent
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
    def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
        if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
            raise ValueError(
                f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
            )

        prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0]  # [B,3,H,W]

        prediction = torch.clip(prediction, -1.0, 1.0)

        if not self.use_full_z_range:
            prediction[:, 2, :, :] *= 0.5
            prediction[:, 2, :, :] += 0.5

        prediction = self.normalize_normals(prediction)  # [B,3,H,W]

        return prediction  # [B,3,H,W]

    @staticmethod
    def normalize_normals(normals: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
        if normals.dim() != 4 or normals.shape[1] != 3:
            raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        norm = torch.norm(normals, dim=1, keepdim=True)
        normals /= norm.clamp(min=eps)

        return normals

    @staticmethod
    def ensemble_normals(
        normals: torch.Tensor, output_uncertainty: bool, reduction: str = "closest"
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Ensembles the normals maps represented by the `normals` tensor with expected shape `(B, 3, H, W)`, where B
        is the number of ensemble members for a given prediction of size `(H x W)`.

        Args:
            normals (`torch.Tensor`):
                Input ensemble normals maps.
            output_uncertainty (`bool`, *optional*, defaults to `False`):
                Whether to output uncertainty map.
            reduction (`str`, *optional*, defaults to `"closest"`):
                Reduction method used to ensemble aligned predictions. The accepted values are: `"closest"` and
                `"mean"`.
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        Returns:
            A tensor of aligned and ensembled normals maps with shape `(1, 3, H, W)` and optionally a tensor of
            uncertainties of shape `(1, 1, H, W)`.
        """
        if normals.dim() != 4 or normals.shape[1] != 3:
            raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
        if reduction not in ("closest", "mean"):
            raise ValueError(f"Unrecognized reduction method: {reduction}.")

        mean_normals = normals.mean(dim=0, keepdim=True)  # [1,3,H,W]
        mean_normals = MarigoldNormalsPipeline.normalize_normals(mean_normals)  # [1,3,H,W]

        sim_cos = (mean_normals * normals).sum(dim=1, keepdim=True)  # [E,1,H,W]
        sim_cos = sim_cos.clamp(-1, 1)  # required to avoid NaN in uncertainty with fp16

        uncertainty = None
        if output_uncertainty:
            uncertainty = sim_cos.arccos()  # [E,1,H,W]
            uncertainty = uncertainty.mean(dim=0, keepdim=True) / np.pi  # [1,1,H,W]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
        if reduction == "mean":
            return mean_normals, uncertainty  # [1,3,H,W], [1,1,H,W]

        closest_indices = sim_cos.argmax(dim=0, keepdim=True)  # [1,1,H,W]
        closest_indices = closest_indices.repeat(1, 3, 1, 1)  # [1,3,H,W]
        closest_normals = torch.gather(normals, 0, closest_indices)  # [1,3,H,W]

        return closest_normals, uncertainty  # [1,3,H,W], [1,1,H,W]
120
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py
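# Toy check of `ensemble_normals` above (illustrative): three noisy copies of a constant upward-facing normal
# field. Only the two staticmethods shown in this file are used, via the class import.
import torch
from diffusers import MarigoldNormalsPipeline

E, H, W = 3, 4, 4
base = torch.tensor([0.0, 0.0, 1.0]).view(1, 3, 1, 1).expand(E, 3, H, W).clone()
noisy = MarigoldNormalsPipeline.normalize_normals(base + 0.05 * torch.randn(E, 3, H, W))

ensembled, unc = MarigoldNormalsPipeline.ensemble_normals(noisy, output_uncertainty=True, reduction="closest")
# ensembled: [1,3,4,4] unit normals close to (0,0,1); unc: [1,1,4,4] mean angular deviation scaled to [0,1].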
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is used to
    generate text from the UniDiffuser image-text embedding.
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
    Parameters:
        prefix_length (`int`):
            Max number of prefix tokens that will be supplied to the model.
        prefix_inner_dim (`int`):
            The hidden size of the incoming prefix embeddings. For UniDiffuser, this would be the hidden dim of the
            CLIP text encoder.
        prefix_hidden_dim (`int`, *optional*):
            Hidden dim of the MLP if we encode the prefix.
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*, defaults to `None`):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
            dot-product/softmax to float() when training with mixed precision.
    """
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
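# Shape sketch for the prefix bottleneck defined above (dims are illustrative, not the UniDiffuser defaults):
# prefix features of width `prefix_inner_dim` are compressed to `prefix_hidden_dim` by `encode_prefix`, then
# projected to the GPT-2 width `n_embd` by `decode_prefix` before being prepended to the token embeddings.
import torch
import torch.nn as nn

prefix_inner_dim, prefix_hidden_dim, n_embd = 768, 64, 768
encode_prefix = nn.Linear(prefix_inner_dim, prefix_hidden_dim)
decode_prefix = nn.Linear(prefix_hidden_dim, n_embd)

prefix = torch.randn(2, 77, prefix_inner_dim)  # [N, prefix_length, prefix_inner_dim]
hidden = encode_prefix(prefix)                 # [2, 77, 64]
prefix_embeds = decode_prefix(hidden)          # [2, 77, 768], ready to concatenate with wte(input_ids)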
        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """
        Args:
            input_ids (`torch.Tensor` of shape `(N, max_seq_len)`):
                Text tokens to use for inference.
            prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`):
                Prefix embedding to prepend to the embedded tokens.
            attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len)`, *optional*):
                Attention mask for the prefix embedding and the embedded tokens.
            labels (`torch.Tensor`, *optional*):
                Labels to use for language modeling.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """
        Generate captions given text embedding features. Returns list[L].
121
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py
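# Construction and forward-pass sketch for the decoder above; hyperparameters are small illustrative values,
# not UniDiffuser's released configuration. The import path is assumed from this file's location.
import torch
from diffusers.pipelines.unidiffuser.modeling_text_decoder import UniDiffuserTextDecoder

decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)

input_ids = torch.randint(0, 50257, (2, 10))  # [N, max_seq_len]
prefix_embeds = torch.randn(2, 77, 768)       # [N, prefix_length, prefix_inner_dim]

out, hidden = decoder(input_ids, prefix_embeds)  # `hidden` is returned because prefix_hidden_dim is set
print(out.logits.shape)  # [2, 77 + 10, 50257]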