Columns: text (string, lengths 1–1.02k), class_index (int64, 0–1.38k), source (string, 431 distinct values)
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
# scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_masked_condition( self, image, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, motion_scale=0, ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) _, _, _, scaled_height, scaled_width = shape image = self.video_processor.preprocess(image) image = image.to(device, dtype)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[scaled_height, scaled_width]) image_latent_padding = image_latent.clone() * self.vae.config.scaling_factor
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
mask = torch.zeros((batch_size, 1, num_frames, scaled_height, scaled_width)).to(device=device, dtype=dtype) mask_coef = prepare_mask_coef_by_statistics(num_frames, 0, motion_scale) masked_image = torch.zeros(batch_size, 4, num_frames, scaled_height, scaled_width).to( device=device, dtype=self.unet.dtype ) for f in range(num_frames): mask[:, :, f, :, :] = mask_coef[f] masked_image[:, :, f, :, :] = image_latent_padding.clone() mask = torch.cat([mask] * 2) if self.do_classifier_free_guidance else mask masked_image = torch.cat([masked_image] * 2) if self.do_classifier_free_guidance else masked_image return mask, masked_image
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
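The `strength`/`num_inference_steps` interaction in `get_timesteps` above can be checked with a small standalone sketch of the same arithmetic (the concrete values are chosen only for illustration, not taken from the pipeline):

    # With 50 scheduler steps and strength=0.6, only the last 30 timesteps are actually run.
    num_inference_steps = 50
    strength = 0.6
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
    t_start = max(num_inference_steps - init_timestep, 0)                          # 20
    remaining_steps = num_inference_steps - t_start                                # 30
    print(init_timestep, t_start, remaining_steps)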
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
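The comment above ties `guidance_scale` to the guidance weight `w`; in the denoising loop further down this corresponds to the usual classifier-free guidance combination (written here in generic CFG notation, not quoted from the paper):

    \hat{\epsilon} = \epsilon_\theta(x_t, \varnothing) + w \,\bigl(\epsilon_\theta(x_t, c) - \epsilon_\theta(x_t, \varnothing)\bigr)

with `w = guidance_scale`, so `w = 1` leaves only the text-conditioned prediction, i.e. no classifier-free guidance.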
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, prompt: Union[str, List[str]] = None, strength: float = 1.0, num_frames: Optional[int] = 16, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, motion_scale: int = 0, output_type: Optional[str] = "pil",
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ): r""" The call function to the pipeline for generation.
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
Args: image (`PipelineImageInput`): The input image to be used for video generation. prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. strength (`float`, *optional*, defaults to 1.0): Indicates the extent to transform the reference `image`. Must be between 0 and 1. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated video. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated video. num_frames (`int`, *optional*, defaults to 16): The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second amounts to 2 seconds of video.
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to higher quality videos at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. Latents should be of shape `(batch_size, num_channel, num_frames, height, width)`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument.
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
motion_scale: (`int`, *optional*, defaults to 0): Parameter that controls the amount and type of motion that is added to the image. Increasing the value increases the amount of motion, while specific ranges of values control the type of motion that is added. Must be between 0 and 8. Set between 0-2 to only increase the amount of motion. Set between 3-5 to create looping motion. Set between 6-8 to perform motion with image style transfer. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*):
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
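The `motion_scale` ranges documented above (0-2 more motion, 3-5 looping motion, 6-8 motion with style transfer) can be exercised with a minimal usage sketch of `PIAPipeline`; the checkpoint names, image URL, and prompt below are illustrative placeholders rather than values taken from this file:

    import torch
    from diffusers import PIAPipeline, MotionAdapter
    from diffusers.utils import export_to_gif, load_image

    # Checkpoint names are assumptions for illustration.
    adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter")
    pipe = PIAPipeline.from_pretrained(
        "SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16
    ).to("cuda")

    image = load_image("https://example.com/input.png").resize((512, 512))  # placeholder URL
    # motion_scale=1 adds a little motion; 4 would aim for looping motion; 7 for motion with style transfer
    output = pipe(image=image, prompt="a cat in a field, high quality", motion_scale=1)
    export_to_gif(output.frames[0], "pia.gif")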
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
Examples: Returns: [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, )
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
# 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) self._num_timesteps = len(timesteps)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
# 5. Prepare latent variables latents = self.prepare_latents( batch_size * num_videos_per_prompt, 4, num_frames, height, width, prompt_embeds.dtype, device, generator, latents=latents, ) mask, masked_image = self.prepare_masked_condition( image, batch_size * num_videos_per_prompt, 4, num_frames=num_frames, height=height, width=width, dtype=self.unet.dtype, device=device, generator=generator, motion_scale=motion_scale, ) if strength < 1.0: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) latents = self.scheduler.add_noise(masked_image[0], noise, latent_timestep)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 8. Denoising loop num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: latents, timesteps = self._apply_free_init( latents, free_init_iter, num_inference_steps, device, latents.dtype, generator ) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
with self.progress_bar(total=self._num_timesteps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
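The channel-wise concatenation in the denoising loop above stacks the noisy latents, the conditioning mask, and the encoded reference image before the UNet call. A standalone shape sketch of that step (sizes are illustrative; the channel counts 4 + 1 + 4 match the pipeline code):

    import torch

    latents      = torch.randn(2, 4, 16, 64, 64)   # noisy video latents
    mask         = torch.randn(2, 1, 16, 64, 64)   # per-frame conditioning coefficients
    masked_image = torch.randn(2, 4, 16, 64, 64)   # encoded reference image repeated per frame
    unet_input = torch.cat([latents, mask, masked_image], dim=1)
    print(unet_input.shape)                        # torch.Size([2, 9, 16, 64, 64]) -> a 9-channel UNet input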
# perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # 9. Post processing if output_type == "latent": video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) # 10. Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
return PIAPipelineOutput(frames=video)
48
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pia/pipeline_pia.py
class MochiPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin): r""" The Mochi pipeline for text-to-video generation. Reference: https://github.com/genmoai/models
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
Args: transformer ([`MochiTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. vae ([`AutoencoderKLMochi`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`T5TokenizerFast`): Tokenizer of class
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
[T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
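A minimal usage sketch of `MochiPipeline` as described above; the repo id is assumed to be the published "genmo/mochi-1-preview" checkpoint and the prompt is a placeholder, so adjust both to your setup:

    import torch
    from diffusers import MochiPipeline
    from diffusers.utils import export_to_video

    pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.bfloat16)
    pipe.enable_model_cpu_offload()   # the model is large; offloading keeps peak VRAM manageable
    pipe.enable_vae_tiling()          # uses the enable_vae_tiling helper defined later in this class

    frames = pipe(
        prompt="A close-up of a chameleon changing color, studio lighting",
        num_frames=19,
        num_inference_steps=64,
    ).frames[0]
    export_to_video(frames, "mochi.mp4", fps=30)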
model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLMochi, text_encoder: T5EncoderModel, tokenizer: T5TokenizerFast, transformer: MochiTransformer3DModel, force_zeros_for_empty_prompt: bool = False, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) # TODO: determine these scaling factors from model parameters self.vae_spatial_scale_factor = 8 self.vae_temporal_scale_factor = 6 self.patch_size = 2
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_scale_factor) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 256 ) self.default_height = 480 self.default_width = 848 self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 256, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.bool().to(device) # The original Mochi implementation zeros out empty negative prompts # but this can lead to overflow when placing the entire pipeline under the autocast context # adding this here so that we can enable zeroing prompts if necessary if self.config.force_zeros_for_empty_prompt and (prompt == "" or prompt[-1] == ""): text_input_ids = torch.zeros_like(text_input_ids, device=device) prompt_attention_mask = torch.zeros_like(prompt_attention_mask, dtype=torch.bool, device=device)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
prompt_attention_mask = prompt_attention_mask.view(batch_size, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1) return prompt_embeds, prompt_attention_mask # Adapted from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 256, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states.
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, )
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." )
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling()
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def prepare_latents( self, batch_size, num_channels_latents, height, width, num_frames, dtype, device, generator, latents=None, ): height = height // self.vae_spatial_scale_factor width = width // self.vae_spatial_scale_factor num_frames = (num_frames - 1) // self.vae_temporal_scale_factor + 1 shape = (batch_size, num_channels_latents, num_frames, height, width)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
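For the default Mochi resolution, the latent shape produced by `prepare_latents` above works out as follows. This is a standalone sketch of the same arithmetic; the 12 latent channels are taken from the `view(1, 12, 1, 1, 1)` denormalization later in this file, not from this method itself:

    height, width, num_frames = 480, 848, 19
    vae_spatial_scale_factor, vae_temporal_scale_factor = 8, 6
    latent_height = height // vae_spatial_scale_factor                   # 60
    latent_width = width // vae_spatial_scale_factor                     # 106
    latent_frames = (num_frames - 1) // vae_temporal_scale_factor + 1    # 4
    print((1, 12, latent_frames, latent_height, latent_width))           # (1, 12, 4, 60, 106)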
if latents is not None: return latents.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32) latents = latents.to(dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_frames: int = 19, num_inference_steps: int = 64, timesteps: List[int] = None, guidance_scale: float = 4.5, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None,
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 256, ): r""" Function invoked when calling the pipeline for generation.
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. height (`int`, *optional*, defaults to `self.default_height`): The height in pixels of the generated video. This is set to 480 by default for the best results. width (`int`, *optional*, defaults to `self.default_width`): The width in pixels of the generated video. This is set to 848 by default for the best results. num_frames (`int`, defaults to `19`): The number of video frames to generate. num_inference_steps (`int`, *optional*, defaults to 64): The number of denoising steps. More denoising steps usually lead to a higher quality video at the expense of slower inference. timesteps (`List[int]`, *optional*):
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. guidance_scale (`float`, defaults to `4.5`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt.
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for negative text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated video. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.mochi.MochiPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
`self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
`._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to `256`): Maximum sequence length to use with the `prompt`.
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
Examples: Returns: [`~pipelines.mochi.MochiPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.mochi.MochiPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_height width = width or self.default_width
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
# 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0]
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
device = self._execution_device # 3. Prepare text embeddings ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, device=device, ) # 4. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents,
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
height, width, num_frames, prompt_embeds.dtype, device, generator, latents, )
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) # 5. Prepare timestep # from https://github.com/genmoai/models/blob/075b6e36db58f1242921deff83a1066887b9c9e1/src/mochi_preview/infer.py#L77 threshold_noise = 0.025 sigmas = linear_quadratic_schedule(num_inference_steps, threshold_noise) sigmas = np.array(sigmas) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
# 6. Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]).to(latents.dtype) noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, encoder_attention_mask=prompt_attention_mask, attention_kwargs=attention_kwargs, return_dict=False, )[0] # Mochi CFG + Sampling runs in FP32 noise_pred = noise_pred.to(torch.float32)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents.to(torch.float32), return_dict=False)[0] latents = latents.to(latents_dtype) if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step()
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
if output_type == "latent": video = latents else: # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 12, 1, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 12, 1, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return MochiPipelineOutput(frames=video)
49
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_mochi.py
class MochiPipelineOutput(BaseOutput): r""" Output class for Mochi pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: torch.Tensor
50
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/mochi/pipeline_output.py
class KolorsImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin): r""" Pipeline for text-guided image-to-image generation using Kolors. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`ChatGLMModel`]): Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b). tokenizer (`ChatGLMTokenizer`): Tokenizer of class [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `False`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
`Kwai-Kolors/Kolors-diffusers`. """
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
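A minimal usage sketch of `KolorsImg2ImgPipeline`; the repo id follows the config note above, while the fp16 variant, image URL, and prompt are assumptions for illustration:

    import torch
    from diffusers import KolorsImg2ImgPipeline
    from diffusers.utils import load_image

    pipe = KolorsImg2ImgPipeline.from_pretrained(
        "Kwai-Kolors/Kolors-diffusers", torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")

    init_image = load_image("https://example.com/input.png")  # placeholder URL
    image = pipe(
        prompt="a photo of a castle at sunset, highly detailed",
        image=init_image,
        strength=0.6,          # how strongly to transform the reference image
        guidance_scale=6.5,
    ).images[0]
    image.save("kolors_img2img.png")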
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = [ "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = False, ): super().__init__()
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
# Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.encode_prompt def encode_prompt( self, prompt, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states.
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument.
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`. """ device = device or self._execution_device
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer] text_encoders = [self.text_encoder]
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if prompt_embeds is None: prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ).to(device) output = text_encoder( input_ids=text_inputs["input_ids"], attention_mask=text_inputs["attention_mask"], position_ids=text_inputs["position_ids"], output_hidden_states=True, )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
# [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] # clone to have a contiguous tensor prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = prompt_embeds_list[0] # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
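The permute-and-clone comments above describe how the ChatGLM encoder output is reshaped. A standalone shape sketch of that step (tensor sizes are illustrative; note that the pipeline actually takes `hidden_states[-2]` for the token embeddings and the last position of `hidden_states[-1]` for the pooled embedding, while this sketch uses a single tensor for simplicity):

    import torch

    hidden = torch.randn(256, 2, 4096)                 # (max_sequence_length, batch, hidden_size)
    prompt_embeds = hidden.permute(1, 0, 2).clone()    # -> (batch, max_sequence_length, hidden_size), contiguous
    pooled_prompt_embeds = hidden[-1, :, :].clone()    # -> (batch, hidden_size), last token position
    print(prompt_embeds.shape, pooled_prompt_embeds.shape)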
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
negative_prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( uncond_tokens, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ).to(device) output = text_encoder( input_ids=uncond_input["input_ids"], attention_mask=uncond_input["attention_mask"], position_ids=uncond_input["position_ids"], output_hidden_states=True, )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
# [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] # clone to have a contiguous tensor negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view( batch_size * num_images_per_prompt, seq_len, -1 ) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = negative_prompt_embeds_list[0] bs_embed = pooled_prompt_embeds.shape[0] pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
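A small sketch of the pooled-embedding tiling at the end of the method above (toy shapes, chosen only for illustration):

import torch

batch, hidden_size, num_images_per_prompt = 2, 16, 3
pooled_prompt_embeds = torch.randn(batch, hidden_size)

# repeat along the feature axis, then fold the copies back into the batch axis
bs_embed = pooled_prompt_embeds.shape[0]
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
    bs_embed * num_images_per_prompt, -1
)
print(pooled_prompt_embeds.shape)  # torch.Size([6, 16])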
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
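A minimal sketch of the repeat_interleave pattern used for the image embeddings above (the 768-dim embedding size is assumed for illustration):

import torch

num_images_per_prompt = 2
image_embeds = torch.randn(1, 768)                    # stand-in for image_encoder(image).image_embeds
uncond_image_embeds = torch.zeros_like(image_embeds)  # zeros act as the unconditional branch

image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = uncond_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
print(image_embeds.shape, uncond_image_embeds.shape)  # torch.Size([2, 768]) torch.Size([2, 768])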
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds)
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
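A sketch of what happens to precomputed `ip_adapter_image_embeds` under classifier-free guidance (all shapes are illustrative): the tensor is expected to hold the negative and positive halves stacked along dim 0, chunk(2) splits them, and they are concatenated back negative-first after tiling.

import torch

num_images_per_prompt = 2
# assumed layout: [negative, positive] stacked on dim 0
precomputed = torch.cat([torch.zeros(1, 4, 768), torch.ones(1, 4, 768)], dim=0)

negative, positive = precomputed.chunk(2)
negative = torch.cat([negative] * num_images_per_prompt, dim=0)
positive = torch.cat([positive] * num_images_per_prompt, dim=0)

single_image_embeds = torch.cat([negative, positive], dim=0)
print(single_image_embeds.shape)  # torch.Size([4, 4, 768]), negative half first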
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
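The signature probing above can be exercised standalone; the stand-in step() below is hypothetical and only illustrates which kwargs get forwarded:

import inspect

def step(model_output, timestep, sample, generator=None):  # stand-in for scheduler.step
    return sample

accepts_eta = "eta" in set(inspect.signature(step).parameters.keys())
accepts_generator = "generator" in set(inspect.signature(step).parameters.keys())

extra_step_kwargs = {}
if accepts_eta:
    extra_step_kwargs["eta"] = 0.0
if accepts_generator:
    extra_step_kwargs["generator"] = None

print(extra_step_kwargs)  # {'generator': None} -- eta is dropped for this stand-in scheduler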
    def check_inputs(
        self,
        prompt,
        strength,
        num_inference_steps,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
            raise ValueError(
                f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
                f" {type(num_inference_steps)}."
            )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}")
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
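A toy illustration of the first two validation rules in check_inputs (standalone helper, not the pipeline method):

def validate(strength, num_inference_steps):
    if strength < 0 or strength > 1:
        raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    if not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
        raise ValueError(f"`num_inference_steps` has to be a positive integer but is {num_inference_steps}")

validate(0.7, 25)   # passes
# validate(1.5, 25) # would raise ValueError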
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
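A numeric sketch of the strength-based slicing above (a first-order scheduler with 50 evenly spaced timesteps is assumed):

num_inference_steps, strength, order = 50, 0.3, 1

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 35

timesteps = list(range(999, -1, -20))     # stand-in for scheduler.timesteps, 50 values
timesteps = timesteps[t_start * order:]   # drop the 35 noisiest steps, keep the last 15

print(len(timesteps), num_inference_steps - t_start)  # 15 15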
else: # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
            num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item()

            if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                # if the scheduler is a 2nd order scheduler we might have to do +1
                # because `num_inference_steps` might be even given that every timestep
                # (except the highest one) is duplicated. If `num_inference_steps` is even it would
                # mean that we cut the timesteps in the middle of the denoising step
                # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
                # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
                num_inference_steps = num_inference_steps + 1
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
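A numeric sketch of the `denoising_start` cutoff (toy scheduler values; a first-order scheduler is assumed, so the even/odd correction above does not fire):

import torch

num_train_timesteps, denoising_start = 1000, 0.8
discrete_timestep_cutoff = int(
    round(num_train_timesteps - denoising_start * num_train_timesteps)
)  # 200

timesteps = torch.arange(999, -1, -20)   # stand-in for scheduler.timesteps
num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
print(discrete_timestep_cutoff, num_inference_steps)  # 200 10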
# because t_n+1 >= t_n, we slice the timesteps starting from the end t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start) return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
latents_mean = latents_std = None if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype)
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
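A sketch of the per-sample sampling with one generator per batch element (a plain randn stands in for the VAE's latent distribution):

import torch

batch_size = 2
generators = [torch.Generator().manual_seed(i) for i in range(batch_size)]

init_latents = [
    torch.randn(1, 4, 8, 8, generator=generators[i])  # one independent draw per generator
    for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
print(init_latents.shape)  # torch.Size([2, 4, 8, 8])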
init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents
51
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py
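A sketch of the two normalization branches above (the scaling factor and latent statistics are stand-in values, not Kolors' actual config):

import torch

scaling_factor = 0.13025
init_latents = torch.randn(1, 4, 64, 64)

latents_mean = torch.zeros(1, 4, 1, 1)  # stand-in for vae.config.latents_mean
latents_std = torch.ones(1, 4, 1, 1)    # stand-in for vae.config.latents_std

# with statistics: shift and scale, then apply the scaling factor
normalized = (init_latents - latents_mean) * scaling_factor / latents_std
# without statistics: only the scaling factor is applied
scaled = scaling_factor * init_latents

print(torch.allclose(normalized, scaled))  # True here because the stand-in stats are 0 and 1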