Dataset columns: text (string, lengths 1 to 1.02k characters), class_index (int64, values 0 to 1.38k), source (string, 431 distinct values).
if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) mask = callback_outputs.pop("mask", mask) masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
302
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
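For context on the row above: a minimal sketch of how a caller could hook into this `callback_on_step_end` mechanism, assuming the standard diffusers callback signature `(pipeline, step_index, timestep, callback_kwargs) -> dict`. The checkpoint name, prompt, and placeholder images below are illustrative, not part of the dataset row.

```py
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

def log_step(pipe, step_index, timestep, callback_kwargs):
    # Tensors requested via `callback_on_step_end_tensor_inputs` arrive in
    # `callback_kwargs`; returning them (optionally modified) feeds them back
    # into the denoising loop, which is exactly what the snippet above pops out again.
    print(f"step {step_index}, t={int(timestep)}, latents mean {callback_kwargs['latents'].mean():+.4f}")
    return callback_kwargs

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

init_image = Image.new("RGB", (512, 512), "gray")  # placeholder scene image
mask_image = Image.new("L", (512, 512), 255)       # placeholder mask: repaint everything

result = pipe(
    prompt="a red park bench in the snow",
    image=init_image,
    mask_image=mask_image,
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
)
result.images[0].save("inpaint_sample.png")
```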
# call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step()
302
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
if not output_type == "latent": condition_kwargs = {} if isinstance(self.vae, AsymmetricAutoencoderKL): init_image = init_image.to(device=device, dtype=masked_image_latents.dtype) init_image_condition = init_image.clone() init_image = self._encode_vae_image(init_image, generator=generator) mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype) condition_kwargs = {"image": init_image_condition, "mask": mask_condition} image = self.vae.decode( latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs )[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None
302
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
302
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
class I2VGenXLPipelineOutput(BaseOutput): r""" Output class for image-to-video pipeline. Args: frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]]
303
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
class I2VGenXLPipeline( DiffusionPipeline, StableDiffusionMixin, ): r""" Pipeline for image-to-video generation as proposed in [I2VGenXL](https://i2vgen-xl.github.io/). This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`I2VGenXLUNet`]): A [`I2VGenXLUNet`] to denoise the encoded video latents. scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
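As a usage reference for the I2VGenXL rows that follow, here is a minimal, hedged sketch of driving the pipeline end to end. It assumes the publicly hosted `ali-vilab/i2vgen-xl` checkpoint and uses a blank placeholder image; the prompt and parameter values are illustrative.

```py
import torch
from PIL import Image
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif

pipe = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # honors the model_cpu_offload_seq declared above

image = Image.new("RGB", (1280, 704), "white")  # placeholder; use a real conditioning image here

frames = pipe(
    prompt="a red panda strolling through a bamboo forest",
    image=image,
    num_frames=16,
    target_fps=16,
    guidance_scale=9.0,
    generator=torch.manual_seed(0),
).frames[0]

export_to_gif(frames, "i2vgen_xl_sample.gif")
```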
def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, image_encoder: CLIPVisionModelWithProjection, feature_extractor: CLIPImageProcessor, unet: I2VGenXLUNet, scheduler: DDIMScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, feature_extractor=feature_extractor, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # `do_resize=False` as we do custom resizing. self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=False) @property def guidance_scale(self): return self._guidance_scale
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 def encode_prompt( self, prompt, device, num_videos_per_prompt, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states.
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_videos_per_prompt (`int`): number of videos that should be generated per prompt negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument.
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0]
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer.
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
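A standalone illustration (toy shapes, not pipeline code) of the repeat-then-view pattern used just above to duplicate prompt embeddings `num_videos_per_prompt` times while keeping all copies of a given prompt adjacent in the batch.

```py
import torch

batch_size, seq_len, dim, num_videos_per_prompt = 2, 4, 8, 3
prompt_embeds = torch.randn(batch_size, seq_len, dim)

# Repeating along the sequence axis and then reshaping yields the order
# [p0, p0, p0, p1, p1, p1] along the batch dimension.
dup = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
dup = dup.view(batch_size * num_videos_per_prompt, seq_len, dim)

assert torch.equal(dup[0], dup[1]) and torch.equal(dup[0], dup[2])
assert torch.equal(dup[3], prompt_embeds[1])
print(dup.shape)  # torch.Size([6, 4, 8])
```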
# get unconditional embeddings for classifier free guidance if self.do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
" the batch size of `prompt`." ) else: uncond_tokens = negative_prompt
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# Apply clip_skip to negative prompt embeds if clip_skip is None: negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] else: negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. negative_prompt_embeds = negative_prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. negative_prompt_embeds = self.text_encoder.text_model.final_layer_norm(negative_prompt_embeds)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if self.do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds, negative_prompt_embeds def _encode_image(self, image, device, num_videos_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.video_processor.pil_to_numpy(image) image = self.video_processor.numpy_to_pt(image)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# Normalize the image with CLIP training stats. image = self.feature_extractor( images=image, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors="pt", ).pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if self.do_classifier_free_guidance: negative_image_embeddings = torch.zeros_like(image_embeddings) image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) return image_embeddings def decode_latents(self, latents, decode_chunk_size=None): latents = 1 / self.vae.config.scaling_factor * latents batch_size, channels, num_frames, height, width = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) if decode_chunk_size is not None: frames = [] for i in range(0, latents.shape[0], decode_chunk_size): frame = self.vae.decode(latents[i : i + decode_chunk_size]).sample frames.append(frame) image = torch.cat(frames, dim=0) else: image = self.vae.decode(latents).sample
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
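A self-contained sketch of the layout round-trip that `decode_latents` relies on: video latents of shape `(batch, channels, frames, h, w)` are folded into the batch dimension so the 2D VAE can decode frame by frame (optionally in chunks of `decode_chunk_size`), then unfolded back into a video tensor. Shapes are toy-sized and the actual `vae.decode` call is elided.

```py
import torch

batch, channels, frames, h, w = 1, 4, 16, 32, 32
latents = torch.randn(batch, channels, frames, h, w)

# Fold frames into the batch dimension for the 2D VAE.
flat = latents.permute(0, 2, 1, 3, 4).reshape(batch * frames, channels, h, w)
# ... each chunk of `flat` would be passed through vae.decode here ...

# Unfold back into a (batch, channels, frames, h, w) video tensor.
video = flat[None, :].reshape(batch, frames, channels, h, w).permute(0, 2, 1, 3, 4)

assert torch.equal(video, latents)
print(flat.shape, video.shape)
```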
decode_shape = (batch_size, num_frames, -1) + image.shape[2:] video = image[None, :].reshape(decode_shape).permute(0, 2, 1, 3, 4) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.float() return video # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, image, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
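The `prepare_extra_step_kwargs` helper above relies on probing the scheduler's `step` signature with `inspect`; here is the same pattern as a generic, standalone sketch (the function and names below are illustrative, not diffusers API).

```py
import inspect

def filter_kwargs_for(fn, **candidate_kwargs):
    # Keep only the keyword arguments that `fn` actually declares.
    accepted = set(inspect.signature(fn).parameters.keys())
    return {k: v for k, v in candidate_kwargs.items() if k in accepted}

def fake_scheduler_step(model_output, timestep, sample, generator=None):
    return sample

extra = filter_kwargs_for(fake_scheduler_step, eta=0.0, generator=None)
print(extra)  # {'generator': None} -- `eta` is dropped because this step() does not accept it
```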
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." )
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" )
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
def prepare_image_latents( self, image, device, num_frames, num_videos_per_prompt, ): image = image.to(device=device) image_latents = self.vae.encode(image).latent_dist.sample() image_latents = image_latents * self.vae.config.scaling_factor # Add frames dimension to image latents image_latents = image_latents.unsqueeze(2) # Append a position mask for each subsequent frame # after the initial image latent frame frame_position_mask = [] for frame_idx in range(num_frames - 1): scale = (frame_idx + 1) / (num_frames - 1) frame_position_mask.append(torch.ones_like(image_latents[:, :, :1]) * scale) if frame_position_mask: frame_position_mask = torch.cat(frame_position_mask, dim=2) image_latents = torch.cat([image_latents, frame_position_mask], dim=2)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
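A toy illustration of the frame-position ramp that `prepare_image_latents` builds above: frame 0 carries the encoded image, and each subsequent frame is a constant plane whose value increases linearly up to 1. Shapes here are arbitrary.

```py
import torch

num_frames = 5
image_latents = torch.randn(1, 4, 1, 8, 8)  # (batch, channels, 1 frame, h, w)

# One constant plane per remaining frame, scaled (i + 1) / (num_frames - 1).
ramp = [
    torch.ones_like(image_latents[:, :, :1]) * (i + 1) / (num_frames - 1)
    for i in range(num_frames - 1)
]
image_latents = torch.cat([image_latents] + ramp, dim=2)

print(image_latents.shape)  # torch.Size([1, 4, 5, 8, 8])
print([image_latents[0, 0, f, 0, 0].item() for f in range(1, num_frames)])  # [0.25, 0.5, 0.75, 1.0]
```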
# duplicate image_latents for each generation per prompt, using mps friendly method image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1, 1) if self.do_classifier_free_guidance: image_latents = torch.cat([image_latents] * 2) return image_latents
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, height: Optional[int] = 704, width: Optional[int] = 1280, target_fps: Optional[int] = 16, num_frames: int = 16, num_inference_steps: int = 50, guidance_scale: float = 9.0, negative_prompt: Optional[Union[str, List[str]]] = None, eta: float = 0.0, num_videos_per_prompt: Optional[int] = 1, decode_chunk_size: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None,
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
clip_skip: Optional[int] = 1, ): r""" The call function to the pipeline for image-to-video generation with [`I2VGenXLPipeline`].
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`): Image or images to guide image generation. If you provide a tensor, it needs to be compatible with [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). height (`int`, *optional*, defaults to `704`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `1280`): The width in pixels of the generated image. target_fps (`int`, *optional*):
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Frames per second. The rate at which the generated frames are exported to a video after generation. This is also used as a "micro-condition" during generation. num_frames (`int`, *optional*): The number of video frames to generate. num_inference_steps (`int`, *optional*): The number of denoising steps. guidance_scale (`float`, *optional*, defaults to 9.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). eta (`float`, *optional*):
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. num_videos_per_prompt (`int`, *optional*): The number of videos to generate per prompt. decode_chunk_size (`int`, *optional*, defaults to 1): The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency between frames, but also the higher the memory consumption. By default (`decode_chunk_size=1`), frames are decoded one at a time to keep memory usage low. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*):
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`):
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
Examples: Returns: [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, image, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. self._guidance_scale = guidance_scale
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 3.1 Encode input text prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_videos_per_prompt, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 3.2 Encode image prompt # 3.2.1 Image encodings. # https://github.com/ali-vilab/i2vgen-xl/blob/2539c9262ff8a2a22fa9daecbfd13f0a2dbc32d0/tools/inferences/inference_i2vgen_entrance.py#L114 cropped_image = _center_crop_wide(image, (width, width)) cropped_image = _resize_bilinear( cropped_image, (self.feature_extractor.crop_size["width"], self.feature_extractor.crop_size["height"]) ) image_embeddings = self._encode_image(cropped_image, device, num_videos_per_prompt) # 3.2.2 Image latents. resized_image = _center_crop_wide(image, (width, height)) image = self.video_processor.preprocess(resized_image).to(device=device, dtype=image_embeddings.dtype) image_latents = self.prepare_image_latents( image, device=device, num_frames=num_frames, num_videos_per_prompt=num_videos_per_prompt, )
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 3.3 Prepare additional conditions for the UNet. if self.do_classifier_free_guidance: fps_tensor = torch.tensor([target_fps, target_fps]).to(device) else: fps_tensor = torch.tensor([target_fps]).to(device) fps_tensor = fps_tensor.repeat(batch_size * num_videos_per_prompt, 1).ravel() # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents, )
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, fps=fps_tensor, image_latents=image_latents, image_embeddings=image_embeddings, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# reshape latents batch_size, channel, frames, width, height = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # reshape latents back latents = latents[None, :].reshape(batch_size, frames, channel, width, height).permute(0, 2, 1, 3, 4) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step()
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
# 8. Post processing if output_type == "latent": video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size=decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) # 9. Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return I2VGenXLPipelineOutput(frames=video)
304
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py
class DiTPipeline(DiffusionPipeline): r""" Pipeline for image generation based on a Transformer backbone instead of a UNet. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: transformer ([`DiTTransformer2DModel`]): A class conditioned `DiTTransformer2DModel` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ model_cpu_offload_seq = "transformer->vae"
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
def __init__( self, transformer: DiTTransformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None, ): super().__init__() self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler) # create an ImageNet -> id dictionary for easier use self.labels = {} if id2label is not None: for key, value in id2label.items(): for label in value.split(","): self.labels[label.lstrip().rstrip()] = int(key) self.labels = dict(sorted(self.labels.items())) def get_label_ids(self, label: Union[str, List[str]]) -> List[int]: r""" Map label strings from ImageNet to corresponding class ids. Parameters: label (`str` or `list` of `str`): Label strings to be mapped to class ids.
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
Returns: `list` of `int`: Class ids to be processed by pipeline. """ if not isinstance(label, list): label = list(label) for l in label: if l not in self.labels: raise ValueError( f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" The call function to the pipeline for generation.
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
Args: class_labels (List[int]): List of ImageNet class labels for the images to be generated. guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the provided `class_labels` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`):
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
Examples: ```py >>> from diffusers import DiTPipeline, DPMSolverMultistepScheduler >>> import torch >>> pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16) >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe = pipe.to("cuda") >>> # pick words from Imagenet class labels >>> pipe.labels # to print all available words >>> # pick words that exist in ImageNet >>> words = ["white shark", "umbrella"] >>> class_ids = pipe.get_label_ids(words) >>> generator = torch.manual_seed(33) >>> output = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator) >>> image = output.images[0] # label 'white shark' ```
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ batch_size = len(class_labels) latent_size = self.transformer.config.sample_size latent_channels = self.transformer.config.in_channels latents = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self._execution_device, dtype=self.transformer.dtype, ) latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
class_labels = torch.tensor(class_labels, device=self._execution_device).reshape(-1) class_null = torch.tensor([1000] * batch_size, device=self._execution_device) class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: half = latent_model_input[: len(latent_model_input) // 2] latent_model_input = torch.cat([half, half], dim=0) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
timesteps = t if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = latent_model_input.device.type == "mps" if isinstance(timesteps, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(latent_model_input.shape[0]) # predict noise model_output noise_pred = self.transformer(
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
latent_model_input, timestep=timesteps, class_labels=class_labels_input ).sample
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
# perform guidance if guidance_scale > 1: eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) eps = torch.cat([half_eps, half_eps], dim=0) noise_pred = torch.cat([eps, rest], dim=1) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: model_output, _ = torch.split(noise_pred, latent_channels, dim=1) else: model_output = noise_pred # compute previous image: x_t -> x_t-1 latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample if XLA_AVAILABLE: xm.mark_step()
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
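A toy illustration of the learned-sigma handling in the DiT guidance step above: the transformer predicts `2 * latent_channels` channels, classifier-free guidance is applied only to the first half (the noise estimate), and the variance half is passed through untouched. Shapes and the guidance scale are arbitrary.

```py
import torch

latent_channels, guidance_scale = 4, 4.0
noise_pred = torch.randn(2, 2 * latent_channels, 32, 32)  # [conditional, null-class] stacked in the batch

eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

# Guide only the noise estimate, then re-attach the untouched variance channels.
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
guided = torch.cat([torch.cat([half_eps, half_eps], dim=0), rest], dim=1)
print(guided.shape)  # torch.Size([2, 8, 32, 32])
```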
if guidance_scale > 1: latents, _ = latent_model_input.chunk(2, dim=0) else: latents = latent_model_input latents = 1 / self.vae.config.scaling_factor * latents samples = self.vae.decode(latents).sample samples = (samples / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 samples = samples.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": samples = self.numpy_to_pil(samples) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (samples,) return ImagePipelineOutput(images=samples)
305
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dit/pipeline_dit.py
class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler image_noising_scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile( r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}" ) # noqa _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"] model_cpu_offload_seq = "text_encoder->unet" _exclude_from_cpu_offload = ["watermarker"]
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, image_noising_scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool = True, ): super().__init__()
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide by the conditions of the IF license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if safety_checker is not None and feature_extractor is None: raise ValueError( f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) if unet is not None and unet.config.in_channels != 6: logger.warning( f"It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." )
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, image_noising_scheduler=image_noising_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker, ) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text]
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", "person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) #######################################################
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # normalize quotation marks to a single standard caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # IP addresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption))
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip()
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() @torch.no_grad() # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool = True, num_images_per_prompt: int = 1, device: Optional[torch.device] = None, negative_prompt: Optional[Union[str, List[str]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, clean_caption: bool = False, ): r""" Encodes the prompt into text encoder hidden states.
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
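To make the intent of the `_clean_caption` rows above concrete, here is a standalone sketch that reproduces a small subset of the normalization rules (URLs, @-handles, stray alphanumeric ids, whitespace); it is an illustration only, not the full cleaning pipeline.

```py
import re
import urllib.parse as ul

def clean_caption_demo(caption: str) -> str:
    # Decode URL-encoded characters, lowercase, then strip a few noise patterns.
    caption = ul.unquote_plus(str(caption)).strip().lower()
    caption = re.sub(r"\b((?:https?://|www\.)\S+)", "", caption)  # urls (simplified)
    caption = re.sub(r"@[\w\d]+\b", "", caption)                  # @<nickname>
    caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)   # stray ids like jc6640vc
    caption = re.sub(r"\s+", " ", caption)                        # collapse whitespace
    return caption.strip()

print(clean_caption_demo("Cute cat photo by @user123 https://example.com/img.png IMG4821x"))
# -> "cute cat photo by"
```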
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*):
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. clean_caption (bool, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. """ if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." )
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {max_length} tokens: {removed_text}" ) attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1]
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes else: negative_prompt_embeds = None return prompt_embeds, negative_prompt_embeds
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
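The conditional and unconditional embeddings returned above are later consumed by the denoising loop, where two noise predictions are typically combined via classifier-free guidance. A minimal, self-contained sketch of that combination (shapes are arbitrary stand-ins, not the pipeline's actual tensors):

import torch

guidance_scale = 4.0
noise_pred_uncond = torch.randn(2, 3, 64, 64)  # prediction from the negative / empty prompt
noise_pred_text = torch.randn(2, 3, 64, 64)    # prediction from the actual prompt

# guidance_scale > 1 pushes the sample toward the text-conditioned prediction.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)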
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, nsfw_detected, watermark_detected = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype), ) else: nsfw_detected = None watermark_detected = None return image, nsfw_detected, watermark_detected
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
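The signature probing above generalizes to any callable: build the kwargs dict only from parameters the target actually declares. A hypothetical helper (not part of the pipeline) sketching the same idea:

import inspect

def filter_accepted_kwargs(fn, **candidate_kwargs):
    # Keep only the keyword arguments that fn's signature actually accepts.
    accepted = set(inspect.signature(fn).parameters.keys())
    return {k: v for k, v in candidate_kwargs.items() if k in accepted}

# e.g. schedulers whose step() has no `eta` parameter simply never receive it:
# extra_step_kwargs = filter_accepted_kwargs(scheduler.step, eta=0.0, generator=generator)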
def check_inputs( self, prompt, image, original_image, batch_size, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." )
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." )
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # image if isinstance(image, list): check_image_type = image[0] else: check_image_type = image
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if ( not isinstance(check_image_type, torch.Tensor) and not isinstance(check_image_type, PIL.Image.Image) and not isinstance(check_image_type, np.ndarray) ): raise ValueError( "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" f" {type(check_image_type)}" ) if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") # original_image
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if isinstance(original_image, list): check_image_type = original_image[0] else: check_image_type = original_image if ( not isinstance(check_image_type, torch.Tensor) and not isinstance(check_image_type, PIL.Image.Image) and not isinstance(check_image_type, np.ndarray) ): raise ValueError( "`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" f" {type(check_image_type)}" ) if isinstance(original_image, list): image_batch_size = len(original_image) elif isinstance(original_image, torch.Tensor): image_batch_size = original_image.shape[0] elif isinstance(original_image, PIL.Image.Image): image_batch_size = 1 elif isinstance(original_image, np.ndarray): image_batch_size = original_image.shape[0] else: assert False
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if batch_size != image_batch_size: raise ValueError( f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" ) # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: if not isinstance(image, list): image = [image] def numpy_to_pt(images): if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images if isinstance(image[0], PIL.Image.Image): new_image = []
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
for image_ in image: image_ = image_.convert("RGB") image_ = resize(image_, self.unet.config.sample_size) image_ = np.array(image_) image_ = image_.astype(np.float32) image_ = image_ / 127.5 - 1 new_image.append(image_) image = new_image image = np.stack(image, axis=0) # to np image = numpy_to_pt(image) # to pt elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) return image
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
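The per-image conversion above boils down to: convert to RGB, resize to the UNet's sample size, scale uint8 [0, 255] to float [-1, 1], and move to channels-first. A simplified standalone sketch; unlike the aspect-ratio-preserving resize helper used above, this hypothetical function resizes to a square:

import numpy as np
import PIL.Image
import torch

def pil_to_model_input(img: PIL.Image.Image, sample_size: int) -> torch.Tensor:
    img = img.convert("RGB").resize((sample_size, sample_size))
    arr = np.array(img).astype(np.float32) / 127.5 - 1.0  # uint8 [0, 255] -> float [-1, 1]
    return torch.from_numpy(arr.transpose(2, 0, 1))[None]  # HWC -> 1xCxHxW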
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: if not isinstance(image, torch.Tensor) and not isinstance(image, list): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] image = np.stack(image, axis=0) # to np image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image[0], np.ndarray): image = np.stack(image, axis=0) # to np if image.ndim == 5: image = image[0] image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image, list) and isinstance(image[0], torch.Tensor): dims = image[0].ndim
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
if dims == 3: image = torch.stack(image, dim=0) elif dims == 4: image = torch.concat(image, dim=0) else: raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") image = image.to(device=device, dtype=self.unet.dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images def prepare_intermediate_images( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None ): _, channels, height, width = image.shape batch_size = batch_size * num_images_per_prompt shape = (batch_size, channels, height, width)
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
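A worked example of the strength-to-timesteps arithmetic above, assuming a first-order scheduler (scheduler.order == 1):

num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# The denoising loop then runs over scheduler.timesteps[10:], i.e. the last 40
# of the 50 scheduled steps, and num_inference_steps - t_start == 40 is returned.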
if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) image = self.scheduler.add_noise(image, noise, timestep) return image
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
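The intermediate-image preparation above is a standard forward-diffusion step: draw Gaussian noise and let the scheduler mix it into the (repeated) input at the chosen timestep. A minimal sketch with a stand-in scheduler and random data; the actual pipeline uses its own configured scheduler and real image batches:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)  # stand-in scheduler
image = torch.randn(1, 3, 64, 64)                    # stand-in for a preprocessed image batch
num_images_per_prompt = 2

image = image.repeat_interleave(num_images_per_prompt, dim=0)
noise = torch.randn_like(image)
timesteps = torch.tensor([600] * image.shape[0])     # e.g. the first timestep from get_timesteps
noisy_image = scheduler.add_noise(image, noise, timesteps)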
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], original_image: Union[ PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] ] = None, strength: float = 0.8, prompt: Union[str, List[str]] = None, num_inference_steps: int = 50, timesteps: List[int] = None, guidance_scale: float = 4.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, noise_level: int = 250, clean_caption: bool = True, ): """ Function invoked when calling the pipeline for generation.
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
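A hedged end-to-end usage sketch for this pipeline: checkpoint names, the input URL, and the prompt are illustrative placeholders. Stage 1 produces the low-resolution image-to-image variation that this super-resolution stage then refines against original_image.

import torch
from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline
from diffusers.utils import load_image

original_image = load_image("https://example.com/input.png")  # placeholder URL

stage_1 = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()
stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0", torch_dtype=torch.float16)
stage_2.enable_model_cpu_offload()

prompt = "a fantasy landscape, highly detailed"
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)

# Stage 1: image-to-image at base resolution, kept as tensors for stage 2.
low_res = stage_1(
    image=original_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"
).images

# Stage 2 (this pipeline): upscale the stage-1 output, conditioned on the original image.
image = stage_2(
    image=low_res, original_image=original_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds
).images[0]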
Args: image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. original_image (`torch.Tensor` or `PIL.Image.Image`): The original image that `image` was varied from. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. prompt (`str` or `List[str]`, *optional*):
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equally spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`]; it is ignored for other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.deepfloyd_if.IFPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in
306
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py