Columns: text (string, lengths 1 to 1.02k) · class_index (int64, values 0 to 1.38k) · source (string, 431 distinct values)
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
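A minimal worked sketch of how `strength` truncates the schedule in `get_timesteps` above, using illustrative numbers (28 steps, strength 0.6) and assuming a first-order scheduler:

# Hypothetical values mirroring the arithmetic in get_timesteps above.
num_inference_steps = 28
strength = 0.6

init_timestep = min(num_inference_steps * strength, num_inference_steps)  # 16.8
t_start = int(max(num_inference_steps - init_timestep, 0))                # 11

# With scheduler.order == 1, the pipeline keeps timesteps[11:] and reports
# num_inference_steps - t_start == 17 remaining denoising steps.
print(t_start, num_inference_steps - t_start)  # 11 17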
# Copied from diffusers.pipelines.flux.pipeline_flux_img2img.FluxImg2ImgPipeline.check_inputs def check_inputs( self, prompt, prompt_2, strength, height, width, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}") if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
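A self-contained sketch (assuming only `torch`) that mirrors the `_pack_latents`/`_unpack_latents` pair above and checks that packing into 2x2 patches and unpacking again round-trips a latent tensor:

import torch

def pack_latents(latents, batch_size, num_channels_latents, height, width):
    # (B, C, H, W) -> (B, (H//2)*(W//2), C*4): every 2x2 spatial patch becomes one token
    latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)
    return latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)

def unpack_latents(latents, height, width, vae_scale_factor):
    # inverse of pack_latents; height/width are the pixel-space dimensions
    batch_size, num_patches, channels = latents.shape
    height = 2 * (int(height) // (vae_scale_factor * 2))
    width = 2 * (int(width) // (vae_scale_factor * 2))
    latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
    latents = latents.permute(0, 3, 1, 4, 2, 5)
    return latents.reshape(batch_size, channels // 4, height, width)

x = torch.randn(1, 16, 64, 64)                  # latents for a 512x512 image (vae_scale_factor = 8)
packed = pack_latents(x, 1, 16, 64, 64)         # shape (1, 1024, 64)
unpacked = unpack_latents(packed, 512, 512, 8)  # shape (1, 16, 64, 64)
assert torch.equal(unpacked, x)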
# VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (vae_scale_factor * 2)) width = 2 * (int(width) // (vae_scale_factor * 2)) latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height, width) return latents def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing()
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling()
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
def prepare_latents( self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) if latents is not None: return latents.to(device=device, dtype=dtype), latent_image_ids
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.scale_noise(image_latents, timestep, noise) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) return latents, noise, image_latents, latent_image_ids # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image def prepare_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0]
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image def prepare_mask_latents( self, image, mask_image, batch_size, num_channels_latents, num_images_per_prompt, height, width, dtype, device, generator, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. image = self.image_processor.preprocess(image, height=height, width=width) mask_image = self.mask_processor.preprocess(mask_image, height=height, width=width)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
masked_image = image * (1 - mask_image) masked_image = masked_image.to(device=device, dtype=dtype) height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask_image = torch.nn.functional.interpolate(mask_image, size=(height, width)) mask_image = mask_image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == num_channels_latents: masked_image_latents = masked_image else: masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask_image.shape[0] < batch_size: if not batch_size % mask_image.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask_image.shape[0]} mask_image were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated"
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) masked_image_latents = self._pack_latents( masked_image_latents, batch_size, num_channels_latents, height, width, ) mask_image = self._pack_latents( mask_image.repeat(1, num_channels_latents, 1, 1), batch_size, num_channels_latents, height, width, ) masked_image_latents = torch.cat((masked_image_latents, mask_image), dim=-1) return mask_image, masked_image_latents @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
@property def interrupt(self): return self._interrupt
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, control_image: PipelineImageInput = None, mask_image: PipelineImageInput = None, masked_image_latents: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.6, num_inference_steps: int = 28, sigmas: Optional[List[float]] = None, guidance_scale: float = 7.0, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
joint_attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, ): r""" Function invoked when calling the pipeline for generation.
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will be used instead. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image latents as `image`, but if passing latents directly they are not encoded again. control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one color channel (L) instead of 3, so the expected shape for a pytorch tensor would be `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)`, or `(H, W)`. For a numpy array it would be `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`. masked_image_latents (`torch.Tensor`, `List[torch.Tensor]`):
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
`Tensor` representing an image batch to mask `image`, generated by the VAE. If not provided, the mask latents tensor will be generated from `mask_image`. height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. strength (`float`, *optional*, defaults to 0.6): Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 28): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
`guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`.
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
Examples: Returns: [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, strength, height, width, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False device = self._execution_device # 3. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0]
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# 3. Prepare text embeddings lora_scale = ( self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None ) ( prompt_embeds, pooled_prompt_embeds, text_ids, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# 3. Preprocess mask and image num_channels_latents = self.vae.config.latent_channels if masked_image_latents is not None: # pre computed masked_image_latents and mask_image masked_image_latents = masked_image_latents.to(latents.device) mask = mask_image.to(latents.device) else: mask, masked_image_latents = self.prepare_mask_latents( image, mask_image, batch_size, num_channels_latents, num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, ) init_image = self.image_processor.preprocess(image, height=height, width=width) init_image = init_image.to(dtype=torch.float32)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# 4. Prepare timesteps sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas image_seq_len = (int(height) // self.vae_scale_factor // 2) * (int(width) // self.vae_scale_factor // 2) mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.16), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels // 8 control_image = self.prepare_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.vae.dtype, )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if control_image.ndim == 4: control_image = self.vae.encode(control_image).latent_dist.sample(generator=generator) control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor height_control_image, width_control_image = control_image.shape[2:] control_image = self._pack_latents( control_image, batch_size * num_images_per_prompt, num_channels_latents, height_control_image, width_control_image, ) latents, noise, image_latents, latent_image_ids = self.prepare_latents( init_image, latent_timestep, batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, )
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height_8 = 2 * (int(height) // (self.vae_scale_factor * 2)) width_8 = 2 * (int(width) // (self.vae_scale_factor * 2)) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # handle guidance if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None # 6. Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents, control_image], dim=2)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, )[0] # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
# for 64 channel transformer only. init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.scale_noise( image_latents, torch.tensor([noise_timestep]), noise ) else: init_latents_proper = image_latents init_latents_proper = self._pack_latents( init_latents_proper, batch_size * num_images_per_prompt, num_channels_latents, height_8, width_8 ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": image = latents
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
else: latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image)
160
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_control_inpaint.py
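A small sketch, with illustrative values for `shift_factor` and `scaling_factor`, showing that the normalization applied in `_encode_vae_image` and the inverse applied before `vae.decode` above cancel out:

import torch

shift_factor, scaling_factor = 0.1159, 0.3611   # illustrative config values
latents = torch.randn(1, 16, 64, 64)

encoded = (latents - shift_factor) * scaling_factor   # as in _encode_vae_image
decoded = (encoded / scaling_factor) + shift_factor   # as before vae.decode
assert torch.allclose(decoded, latents, atol=1e-6)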
class FluxControlNetInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): r""" The Flux controlnet pipeline for inpainting. Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
Args: transformer ([`FluxTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`):
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel, controlnet: Union[ FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel ], ): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = FluxMultiControlNetModel(controlnet)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
self.register_modules( scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, transformer=transformer, controlnet=controlnet, )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Flux latents are turned into 2x2 patches and packed. This means the latent width and height have to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16 self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor * 2, vae_latent_channels=latent_channels, do_normalize=False, do_binarize=True, do_convert_grayscale=True, ) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 ) self.default_sample_size = 128
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_images_per_prompt: int = 1, max_sequence_length: int = 512, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
text_inputs = self.tokenizer_2( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds def _get_clip_prompt_embeds( self, prompt: Union[str, List[str]], num_images_per_prompt: int = 1, device: Optional[torch.device] = None, ): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Use pooled output of CLIPTextModel prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device] = None, num_images_per_prompt: int = 1, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, max_sequence_length: int = 512, lora_scale: Optional[float] = None, ): r"""
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in all text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
If not provided, pooled text embeddings will be generated from `prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. """ device = device or self._execution_device
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# We only use the pooled prompt output from the CLIPTextModel pooled_prompt_embeds = self._get_clip_prompt_embeds( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, ) prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, ) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return prompt_embeds, pooled_prompt_embeds, text_ids
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
def check_inputs( self, prompt, prompt_2, image, mask_image, strength, height, width, output_type, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None, max_sequence_length=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}") if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError( f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." ) if not isinstance(mask_image, PIL.Image.Image): raise ValueError( f"The mask image should be a PIL image when inpainting mask crop, but is of type" f" {type(mask_image)}." ) if output_type != "pil": raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
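For intuition, a small sketch of what `_prepare_latent_image_ids` above produces for a 2x2 grid of packed latents (each row is `[0, row_index, col_index]`), assuming only `torch`:

import torch

height, width = 2, 2  # packed latent grid, i.e. height // 2 and width // 2 in the pipeline
ids = torch.zeros(height, width, 3)
ids[..., 1] = ids[..., 1] + torch.arange(height)[:, None]
ids[..., 2] = ids[..., 2] + torch.arange(width)[None, :]
ids = ids.reshape(height * width, 3)
print(ids)
# tensor([[0., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.],
#         [0., 1., 1.]])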
@staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (vae_scale_factor * 2)) width = 2 * (int(width) // (vae_scale_factor * 2))
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height, width) return latents # Copied from diffusers.pipelines.flux.pipeline_flux_inpaint.FluxInpaintPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.scale_noise(image_latents, timestep, noise) else: noise = latents.to(device) latents = noise
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width) image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) return latents, noise, image_latents, latent_image_ids
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Copied from diffusers.pipelines.flux.pipeline_flux_inpaint.FluxInpaintPipeline.prepare_mask_latents def prepare_mask_latents( self, mask, masked_image, batch_size, num_channels_latents, num_images_per_prompt, height, width, dtype, device, generator, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate(mask, size=(height, width)) mask = mask.to(device=device, dtype=dtype)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
batch_size = batch_size * num_images_per_prompt masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 16: masked_image_latents = masked_image else: masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated"
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) masked_image_latents = self._pack_latents( masked_image_latents, batch_size, num_channels_latents, height, width, ) mask = self._pack_latents( mask.repeat(1, num_channels_latents, 1, 1), batch_size, num_channels_latents, height, width, ) return mask, masked_image_latents
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image
def prepare_image(
    self,
    image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    do_classifier_free_guidance=False,
    guess_mode=False,
):
    if isinstance(image, torch.Tensor):
        pass
    else:
        image = self.image_processor.preprocess(image, height=height, width=width)

    image_batch_size = image.shape[0]

    if image_batch_size == 1:
        repeat_by = batch_size
    else:
        # image batch size is the same as prompt batch size
        repeat_by = num_images_per_prompt

    image = image.repeat_interleave(repeat_by, dim=0)

    image = image.to(device=device, dtype=dtype)

    if do_classifier_free_guidance and not guess_mode:
        image = torch.cat([image] * 2)

    return image
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
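prepare_image broadcasts a single conditioning image to the full batch with `repeat_interleave`; a tiny illustration of that behavior:

import torch

img = torch.randn(1, 3, 512, 512)          # a single preprocessed control image
batched = img.repeat_interleave(4, dim=0)  # repeat_by = batch_size when image_batch_size == 1
print(batched.shape)                       # torch.Size([4, 3, 512, 512])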
@property
def guidance_scale(self):
    return self._guidance_scale

@property
def joint_attention_kwargs(self):
    return self._joint_attention_kwargs

@property
def num_timesteps(self):
    return self._num_timesteps

@property
def interrupt(self):
    return self._interrupt
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    image: PipelineImageInput = None,
    mask_image: PipelineImageInput = None,
    masked_image_latents: PipelineImageInput = None,
    control_image: PipelineImageInput = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    strength: float = 0.6,
    padding_mask_crop: Optional[int] = None,
    sigmas: Optional[List[float]] = None,
    num_inference_steps: int = 28,
    guidance_scale: float = 7.0,
    control_guidance_start: Union[float, List[float]] = 0.0,
    control_guidance_end: Union[float, List[float]] = 1.0,
    control_mode: Optional[Union[int, List[int]]] = None,
    controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
    num_images_per_prompt: Optional[int] = 1,
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    max_sequence_length: int = 512,
):
    """
    Function invoked when calling the pipeline for generation.
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
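The full example docstring is injected via `replace_example_docstring(EXAMPLE_DOC_STRING)` and is not part of this dump, so here is a hedged usage sketch of the call signature above. The checkpoint names ("black-forest-labs/FLUX.1-dev", "InstantX/FLUX.1-dev-Controlnet-Canny") and the image URLs are illustrative assumptions, not values taken from the source.

import torch
from diffusers import FluxControlNetInpaintPipeline, FluxControlNetModel
from diffusers.utils import load_image

# assumption: any Flux ControlNet checkpoint compatible with the chosen base model works here
controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

image = load_image("https://example.com/input.png")          # hypothetical URL: image to inpaint
mask_image = load_image("https://example.com/mask.png")      # white = repaint, black = keep
control_image = load_image("https://example.com/canny.png")  # condition matching the ControlNet type

result = pipe(
    prompt="a futuristic cityscape at sunset",
    image=image,
    mask_image=mask_image,
    control_image=control_image,
    strength=0.6,
    num_inference_steps=28,
    guidance_scale=7.0,
    controlnet_conditioning_scale=0.7,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
result.save("flux_controlnet_inpaint.png")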
Args:
    prompt (`str` or `List[str]`, *optional*):
        The prompt or prompts to guide the image generation.
    prompt_2 (`str` or `List[str]`, *optional*):
        The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`.
    image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
        The image(s) to inpaint.
    mask_image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
        The mask image(s) to use for inpainting. White pixels in the mask will be repainted, while black
        pixels will be preserved.
    masked_image_latents (`torch.FloatTensor`, *optional*):
        Pre-generated masked image latents.
    control_image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
        The ControlNet input condition. Image to control the generation.
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
        The height in pixels of the generated image.
    width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
        The width in pixels of the generated image.
    strength (`float`, *optional*, defaults to 0.6):
        Indicates how much to transform the masked portion of the reference `image`. Must be between 0 and 1.
    padding_mask_crop (`int`, *optional*):
        The size of the margin, in pixels, to include around the masked region when cropping the image and
        mask before inpainting. If `None`, no crop is applied.
    num_inference_steps (`int`, *optional*, defaults to 28):
        The number of denoising steps. More denoising steps usually lead to a higher quality image at the
        expense of slower inference.
    sigmas (`List[float]`, *optional*):
        Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
        their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
        passed will be used.
    guidance_scale (`float`, *optional*, defaults to 7.0):
        Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
    control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
        The percentage of total steps at which the ControlNet starts applying.
    control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
        The percentage of total steps at which the ControlNet stops applying.
    control_mode (`int` or `List[int]`, *optional*):
        The mode for the ControlNet. If multiple ControlNets are used, this should be a list.
    controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
        The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
        to the residual in the original transformer.
    num_images_per_prompt (`int`, *optional*, defaults to 1):
        The number of images to generate per prompt.
    generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
        One or more [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to
        make generation deterministic.
    latents (`torch.FloatTensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts.
    prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
        Pre-generated pooled text embeddings.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generated image. Choose between `PIL.Image` or `np.array`.
    return_dict (`bool`, *optional*, defaults to `True`):
        Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
    joint_attention_kwargs (`dict`, *optional*):
        Additional keyword arguments to be passed to the joint attention mechanism.
    callback_on_step_end (`Callable`, *optional*):
        A function that is called at the end of each denoising step during inference.
    callback_on_step_end_tensor_inputs (`List[str]`, *optional*):
        The list of tensor inputs for the `callback_on_step_end` function.
    max_sequence_length (`int`, *optional*, defaults to 512):
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
        The maximum sequence length to use with the `prompt` when encoding it with `text_encoder_2`.
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
Examples:

Returns:
    [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if
    `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
    generated images.
"""

height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor

global_height = height
global_width = width
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
    control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
    control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
    mult = len(self.controlnet.nets) if isinstance(self.controlnet, FluxMultiControlNetModel) else 1
    control_guidance_start, control_guidance_end = (
        mult * [control_guidance_start],
        mult * [control_guidance_end],
    )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
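For intuition, the branch above just broadcasts scalar start/end values to one entry per ControlNet. A standalone sketch of the same normalization, with the ControlNet count passed in explicitly instead of read from `self.controlnet`:

def normalize_control_guidance(start, end, num_controlnets):
    # mirrors the broadcasting logic above
    if not isinstance(start, list) and isinstance(end, list):
        start = len(end) * [start]
    elif not isinstance(end, list) and isinstance(start, list):
        end = len(start) * [end]
    elif not isinstance(start, list) and not isinstance(end, list):
        start, end = num_controlnets * [start], num_controlnets * [end]
    return start, end

print(normalize_control_guidance(0.0, 0.8, num_controlnets=2))  # ([0.0, 0.0], [0.8, 0.8])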
# 1. Check inputs
self.check_inputs(
    prompt,
    prompt_2,
    image,
    mask_image,
    strength,
    height,
    width,
    output_type=output_type,
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
    padding_mask_crop=padding_mask_crop,
    max_sequence_length=max_sequence_length,
)

self._guidance_scale = guidance_scale
self._joint_attention_kwargs = joint_attention_kwargs
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
    batch_size = 1
elif prompt is not None and isinstance(prompt, list):
    batch_size = len(prompt)
else:
    batch_size = prompt_embeds.shape[0]
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
device = self._execution_device
dtype = self.transformer.dtype

# 3. Encode input prompt
lora_scale = (
    self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
)
prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
    prompt=prompt,
    prompt_2=prompt_2,
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    device=device,
    num_images_per_prompt=num_images_per_prompt,
    max_sequence_length=max_sequence_length,
    lora_scale=lora_scale,
)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# 4. Preprocess mask and image
if padding_mask_crop is not None:
    crops_coords = self.mask_processor.get_crop_region(
        mask_image, global_width, global_height, pad=padding_mask_crop
    )
    resize_mode = "fill"
else:
    crops_coords = None
    resize_mode = "default"

original_image = image
init_image = self.image_processor.preprocess(
    image, height=global_height, width=global_width, crops_coords=crops_coords, resize_mode=resize_mode
)
init_image = init_image.to(dtype=torch.float32)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
# 5. Prepare control image
num_channels_latents = self.transformer.config.in_channels // 4
if isinstance(self.controlnet, FluxControlNetModel):
    control_image = self.prepare_image(
        image=control_image,
        width=width,
        height=height,
        batch_size=batch_size * num_images_per_prompt,
        num_images_per_prompt=num_images_per_prompt,
        device=device,
        dtype=self.vae.dtype,
    )
    height, width = control_image.shape[-2:]
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    # xlab controlnet has an input_hint_block and instantx controlnet does not
    controlnet_blocks_repeat = False if self.controlnet.input_hint_block is None else True
    if self.controlnet.input_hint_block is None:
        # vae encode
        control_image = retrieve_latents(self.vae.encode(control_image), generator=generator)
        control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor

        # pack
        height_control_image, width_control_image = control_image.shape[2:]
        control_image = self._pack_latents(
            control_image,
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height_control_image,
            width_control_image,
        )
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    # set control mode
    if control_mode is not None:
        control_mode = torch.tensor(control_mode).to(device, dtype=torch.long)
        control_mode = control_mode.reshape([-1, 1])

elif isinstance(self.controlnet, FluxMultiControlNetModel):
    control_images = []
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    # xlab controlnet has an input_hint_block and instantx controlnet does not
    controlnet_blocks_repeat = False if self.controlnet.nets[0].input_hint_block is None else True
    for i, control_image_ in enumerate(control_image):
        control_image_ = self.prepare_image(
            image=control_image_,
            width=width,
            height=height,
            batch_size=batch_size * num_images_per_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            dtype=self.vae.dtype,
        )
        height, width = control_image_.shape[-2:]
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
        if self.controlnet.nets[0].input_hint_block is None:
            # vae encode
            control_image_ = retrieve_latents(self.vae.encode(control_image_), generator=generator)
            control_image_ = (control_image_ - self.vae.config.shift_factor) * self.vae.config.scaling_factor

            # pack
            height_control_image, width_control_image = control_image_.shape[2:]
            control_image_ = self._pack_latents(
                control_image_,
                batch_size * num_images_per_prompt,
                num_channels_latents,
                height_control_image,
                width_control_image,
            )

        control_images.append(control_image_)

    control_image = control_images
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
    # set control mode
    control_mode_ = []
    if isinstance(control_mode, list):
        for cmode in control_mode:
            if cmode is None:
                control_mode_.append(-1)
            else:
                control_mode_.append(cmode)
    control_mode = torch.tensor(control_mode_).to(device, dtype=torch.long)
    control_mode = control_mode.reshape([-1, 1])

# 6. Prepare timesteps
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
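The control-mode normalization above maps `None` entries to -1 and reshapes the result into a column vector, one row per ControlNet; a tiny illustration:

import torch

control_mode = [None, 2]  # e.g. one ControlNet without a mode, one in mode 2
control_mode_ = [-1 if cmode is None else cmode for cmode in control_mode]
print(torch.tensor(control_mode_, dtype=torch.long).reshape([-1, 1]))
# tensor([[-1],
#         [ 2]])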
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
image_seq_len = (int(global_height) // self.vae_scale_factor // 2) * (
    int(global_width) // self.vae_scale_factor // 2
)
mu = calculate_shift(
    image_seq_len,
    self.scheduler.config.get("base_image_seq_len", 256),
    self.scheduler.config.get("max_image_seq_len", 4096),
    self.scheduler.config.get("base_shift", 0.5),
    self.scheduler.config.get("max_shift", 1.16),
)
timesteps, num_inference_steps = retrieve_timesteps(
    self.scheduler,
    num_inference_steps,
    device,
    sigmas=sigmas,
    mu=mu,
)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
161
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py
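The `mu` shift above is a linear interpolation between `base_shift` and `max_shift` as a function of the image token count. A minimal sketch of the formula, mirroring the `calculate_shift` helper used by the Flux pipelines (the exact signature in your diffusers version may differ):

def calculate_shift_sketch(
    image_seq_len,
    base_seq_len=256,
    max_seq_len=4096,
    base_shift=0.5,
    max_shift=1.16,
):
    # linear map: base_seq_len -> base_shift, max_seq_len -> max_shift
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    return image_seq_len * m + b

# e.g. a 1024x1024 generation: (1024 // 8 // 2) ** 2 = 4096 tokens -> mu equals max_shift
print(calculate_shift_sketch(4096))  # 1.16

# strength then trims the schedule: with 28 steps and strength=0.6,
# get_timesteps keeps the last 17 of the 28 timesteps (t_start = int(28 - 16.8) = 11)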