Columns: text (string, lengths 1 to 1.02k); class_index (int64, values 0 to 1.38k); source (string, 431 distinct values)
class UnCLIPImageVariationPipeline(DiffusionPipeline): """ Pipeline to generate image variations from an input image using UnCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
Args: text_encoder ([`~transformers.CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. feature_extractor ([`~transformers.CLIPImageProcessor`]): Model that extracts features from the input image to be used as inputs for the `image_encoder`. image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution UNet. Used in all but the last step of the super resolution diffusion process.
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
super_res_last ([`UNet2DModel`]): Super resolution UNet. Used in the last step of the super resolution diffusion process. decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). """
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
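A minimal usage sketch for this image-variation pipeline (not part of the file above). The checkpoint id and the input image URL are assumptions for illustration; substitute the karlo image-variation checkpoint you actually use.

import torch
from diffusers import UnCLIPImageVariationPipeline
from diffusers.utils import load_image

# Assumed checkpoint id for the karlo image-variation weights.
pipe = UnCLIPImageVariationPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

init_image = load_image("https://example.com/cat.png")  # any RGB PIL image
variations = pipe(image=init_image, num_images_per_prompt=2).images
variations[0].save("variation_0.png")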
decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection super_res_first: UNet2DModel super_res_last: UNet2DModel decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = "text_encoder->image_encoder->text_proj->decoder->super_res_first->super_res_last" def __init__( self, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, super_res_first: UNet2DModel, super_res_last: UNet2DModel, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__()
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
self.register_modules( decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=super_res_first, super_res_last=super_res_last, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device)
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
# done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor] = None): dtype = next(self.image_encoder.parameters()).dtype if image_embeddings is None: if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.repeat_interleave(num_images_per_prompt, dim=0) return image_embeddings @torch.no_grad() def __call__( self, image: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor]] = None, num_images_per_prompt: int = 1, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[torch.Generator] = None, decoder_latents: Optional[torch.Tensor] = None, super_res_latents: Optional[torch.Tensor] = None, image_embeddings: Optional[torch.Tensor] = None, decoder_guidance_scale: float = 8.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ The call function to the pipeline for generation.
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
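If `image_embeddings` are passed directly (for example to interpolate between two inputs, as the docstring below mentions), they can be precomputed with the pipeline's own feature extractor and image encoder. A hedged sketch, assuming `pipe` is an already loaded `UnCLIPImageVariationPipeline` and `image_a`/`image_b` are PIL images:

import torch

@torch.no_grad()
def clip_image_embeds(pipe, pil_image):
    # Preprocess with the pipeline's CLIP feature extractor, then project with the image encoder.
    pixel_values = pipe.feature_extractor(images=pil_image, return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device=pipe.device, dtype=pipe.image_encoder.dtype)
    return pipe.image_encoder(pixel_values).image_embeds  # (1, 768) for CLIP ViT-L/14

emb_a = clip_image_embeds(pipe, image_a)
emb_b = clip_image_embeds(pipe, image_b)
mixed = 0.5 * emb_a + 0.5 * emb_b  # plain linear blend; slerp is another common choice
out = pipe(image=None, image_embeddings=mixed).images[0]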
Args: image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`): `Image` or tensor representing an image batch to be used as the starting point. If you provide a tensor, it needs to be compatible with the [`CLIPImageProcessor`] [configuration](https://huggingface.co/fusing/karlo-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). Can be left as `None` only when `image_embeddings` are passed. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7):
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. decoder_latents (`torch.Tensor` of shape (batch size, channels, height, width), *optional*): Pre-generated noisy latents to be used as inputs for the decoder. super_res_latents (`torch.Tensor` of shape (batch size, channels, super res height, super res width), *optional*): Pre-generated noisy latents to be used as inputs for the super resolution UNet. decoder_guidance_scale (`float`, *optional*, defaults to 8.0): A higher guidance scale value encourages the model to generate images closely linked to the text
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
`prompt` at the expense of lower image quality. Guidance is enabled when `decoder_guidance_scale > 1`. image_embeddings (`torch.Tensor`, *optional*): Pre-defined image embeddings that can be derived from the image encoder. Pre-defined image embeddings can be passed for tasks like image interpolations. In that case, `image` can be left as `None`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if image is not None: if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] else: batch_size = image_embeddings.shape[0] prompt = [""] * batch_size device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = decoder_guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance )
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
image_embeddings = self._encode_image(image, device, num_images_per_prompt, image_embeddings) # decoder text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_encoder_hidden_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True)
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size if decoder_latents is None: decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, decoder_latents, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1]
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
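For reference, the guidance step above is the standard classifier-free guidance combination, \hat{\epsilon} = \epsilon_{\text{uncond}} + g \cdot (\epsilon_{\text{text}} - \epsilon_{\text{uncond}}) with g = decoder_guidance_scale; the extra channels predicted by the decoder hold the learned variance and are split off before the combination, then concatenated back.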
# compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
if super_res_latents is None: super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample if XLA_AVAILABLE: xm.mark_step() image = super_res_latents # done super res self.maybe_free_model_hooks() # post processing
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
336
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py
class UnCLIPTextProjModel(ModelMixin, ConfigMixin): """ Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the decoder. For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1 """ @register_to_config def __init__( self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ): super().__init__() self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) # parameters for additional clip time embeddings self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
337
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/text_proj.py
# parameters for encoder hidden states self.clip_extra_context_tokens = clip_extra_context_tokens self.clip_extra_context_tokens_proj = nn.Linear( clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim ) self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
337
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/text_proj.py
def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings image_embeddings_batch_size = image_embeddings.shape[0] classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand( image_embeddings_batch_size, -1 ) image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0]
337
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/text_proj.py
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
337
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/text_proj.py
text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) return text_encoder_hidden_states, additive_clip_time_embeddings
337
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/text_proj.py
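A small shape-check sketch for UnCLIPTextProjModel.forward. The time_embed_dim and cross_attention_dim values below are illustrative assumptions, not the karlo checkpoint's exact configuration:

import torch
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,       # assumed for illustration
    cross_attention_dim=1280,  # assumed for illustration
)

batch, seq = 2, 77
hidden, time_emb = proj(
    image_embeddings=torch.randn(batch, 768),
    prompt_embeds=torch.randn(batch, 768),
    text_encoder_hidden_states=torch.randn(batch, seq, 768),
    do_classifier_free_guidance=False,
)
print(hidden.shape)    # torch.Size([2, 81, 1280]): 4 extra CLIP context tokens prepended to the 77 text tokens
print(time_emb.shape)  # torch.Size([2, 1536]): added to the decoder's timestep embedding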
class UnCLIPPipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
Args: text_encoder ([`~transformers.CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution UNet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution UNet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]):
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]).
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
""" _exclude_from_cpu_offload = ["prior"] prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = "text_encoder->text_proj->decoder->super_res_first->super_res_last" def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__()
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_enc_hid_states = text_encoder_output.last_hidden_state
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
# For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
@torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_latents: Optional[torch.Tensor] = None, decoder_latents: Optional[torch.Tensor] = None, super_res_latents: Optional[torch.Tensor] = None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ The call function to the pipeline for generation.
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` and `text_attention_mask` are passed. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7):
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_latents (`torch.Tensor` of shape (batch size, embeddings dimension), *optional*): Pre-generated noisy latents to be used as inputs for the prior. decoder_latents (`torch.Tensor` of shape (batch size, channels, height, width), *optional*): Pre-generated noisy latents to be used as inputs for the decoder. super_res_latents (`torch.Tensor` of shape (batch size, channels, super res height, super res width), *optional*): Pre-generated noisy latents to be used as inputs for the super resolution UNet.
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance is enabled when `prior_guidance_scale > 1`. decoder_guidance_scale (`float`, *optional*, defaults to 8.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance is enabled when `decoder_guidance_scale > 1`. text_model_output (`CLIPTextModelOutput`, *optional*): Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text outputs can be passed for tasks like text embedding interpolations. Make sure to also pass `text_attention_mask` in this case. `prompt` can then be left as `None`.
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
text_attention_mask (`torch.Tensor`, *optional*): Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention masks are necessary when passing `text_model_output`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_enc_hid_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1]
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_enc_hid_states, do_classifier_free_guidance=do_classifier_free_guidance, )
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_enc_hid_states.dtype, device, generator, decoder_latents, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_enc_hid_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1)
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler, )
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample if XLA_AVAILABLE: xm.mark_step() image = super_res_latents # done super res self.maybe_free_model_hooks() # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
338
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
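A minimal text-to-image usage sketch for UnCLIPPipeline. The checkpoint id below is the commonly used karlo checkpoint; treat it (and the prompt) as an assumption for illustration:

import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe(
    "a photo of a corgi wearing sunglasses",
    prior_num_inference_steps=25,
    decoder_num_inference_steps=25,
    super_res_num_inference_steps=7,
).images[0]
image.save("corgi.png")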
class AllegroPipelineOutput(BaseOutput): r""" Output class for Allegro pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or `List[List[PIL.Image.Image]]`): List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]]
339
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_output.py
class AllegroPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Allegro. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
Args: vae ([`AutoencoderKLAllegro`]): Variational Auto-Encoder (VAE) model to encode and decode videos to and from latent representations. text_encoder ([`T5EncoderModel`]): Frozen text-encoder. Allegro uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. tokenizer (`T5Tokenizer`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). transformer ([`AllegroTransformer3DModel`]): A text-conditioned `AllegroTransformer3DModel` to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. """
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
bad_punct_regex = re.compile( r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}" ) # noqa _optional_components = [] model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKLAllegro, transformer: AllegroTransformer3DModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__()
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor_spatial = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 ) self.vae_scale_factor_temporal = ( self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.encode_prompt with 120->512, num_images_per_prompt->num_videos_per_prompt def encode_prompt( self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool = True, negative_prompt: str = "", num_videos_per_prompt: int = 1, device: Optional[torch.device] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, clean_caption: bool = False, max_sequence_length: int = 512, **kwargs, ): r""" Encodes the prompt into text encoder hidden states.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For PixArt-Alpha, this should be "". do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_videos_per_prompt (`int`, *optional*, defaults to 1): number of videos that should be generated per prompt device (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*):
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Alpha, it should be the embeddings of the "" string. clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 512): Maximum sequence length to use for the prompt. """
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if "mask_feature" in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) if device is None: device = self._execution_device # See Section 3.1. of the paper. max_length = max_sequence_length if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because T5 can only handle sequences up to" f" {max_length} tokens: {removed_text}" ) prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt) prompt_attention_mask = prompt_attention_mask.view(bs_embed * num_videos_per_prompt, -1)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * bs_embed if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) negative_prompt_attention_mask = uncond_input.attention_mask negative_prompt_attention_mask = negative_prompt_attention_mask.to(device)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(1, num_videos_per_prompt) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed * num_videos_per_prompt, -1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1]
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
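The signature introspection above is a generic pattern for forwarding only the kwargs a scheduler's step() accepts; a minimal standalone sketch:

import inspect

def scheduler_step_kwargs(scheduler, generator=None, eta=0.0):
    # Only forward kwargs that scheduler.step() actually declares.
    accepted = set(inspect.signature(scheduler.step).parameters)
    extra = {}
    if "eta" in accepted:
        extra["eta"] = eta
    if "generator" in accepted:
        extra["generator"] = generator
    return extra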
def check_inputs( self, prompt, num_frames, height, width, callback_on_step_end_tensor_inputs, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if num_frames <= 0: raise ValueError(f"`num_frames` have to be positive but is {num_frames}.") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." )
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." )
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text]
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", "person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) #######################################################
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# все виды тире / all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # кавычки к одному стандарту caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # ip adresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
# "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption))
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip()
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if num_frames % 2 == 0: num_frames = math.ceil(num_frames / self.vae_scale_factor_temporal) else: num_frames = math.ceil((num_frames - 1) / self.vae_scale_factor_temporal) + 1
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
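The even/odd branching above maps pixel-space frame counts to latent frame counts. A small arithmetic sketch, assuming a temporal VAE scale factor of 4 (an illustrative assumption; the real value comes from the loaded VAE config):

import math

vae_scale_factor_temporal = 4  # assumed for illustration

def latent_num_frames(num_frames):
    # mirrors the branching in prepare_latents above
    if num_frames % 2 == 0:
        return math.ceil(num_frames / vae_scale_factor_temporal)
    return math.ceil((num_frames - 1) / vae_scale_factor_temporal) + 1

print(latent_num_frames(88))  # 22
print(latent_num_frames(89))  # 23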
        shape = (
            batch_size,
            num_channels_latents,
            num_frames,
            height // self.vae_scale_factor_spatial,
            width // self.vae_scale_factor_spatial,
        )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        latents = 1 / self.vae.config.scaling_factor * latents
        frames = self.vae.decode(latents).sample
        frames = frames.permute(0, 2, 1, 3, 4)  # [batch_size, channels, num_frames, height, width]
        return frames
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
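Putting the temporal and spatial factors together, the latent tensor handed to the transformer has shape (batch, channels, latent_frames, height // spatial_factor, width // spatial_factor). A sketch with assumed values (spatial factor 8, 4 latent channels; the real numbers come from the model configs):

import torch

batch_size, num_channels_latents = 1, 4       # assumed for illustration
latent_frames, height, width = 22, 720, 1280  # latent_frames as computed in the sketch above
vae_scale_factor_spatial = 8                  # assumed for illustration

shape = (
    batch_size,
    num_channels_latents,
    latent_frames,
    height // vae_scale_factor_spatial,
    width // vae_scale_factor_spatial,
)
latents = torch.randn(shape)
print(latents.shape)  # torch.Size([1, 4, 22, 90, 160])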
    def _prepare_rotary_positional_embeddings(
        self,
        batch_size: int,
        height: int,
        width: int,
        num_frames: int,
        device: torch.device,
    ):
        grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
        grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)

        start, stop = (0, 0), (grid_height, grid_width)
        freqs_t, freqs_h, freqs_w, grid_t, grid_h, grid_w = get_3d_rotary_pos_embed_allegro(
            embed_dim=self.transformer.config.attention_head_dim,
            crops_coords=(start, stop),
            grid_size=(grid_height, grid_width),
            temporal_size=num_frames,
            interpolation_scale=(
                self.transformer.config.interpolation_scale_t,
                self.transformer.config.interpolation_scale_h,
                self.transformer.config.interpolation_scale_w,
            ),
            device=device,
        )
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
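The rotary-embedding grid is the latent spatial grid divided once more by the transformer patch size. A back-of-the-envelope sketch with assumed values (spatial VAE factor 8, patch size 2; both are assumptions, the real values come from the configs):

height, width = 720, 1280
vae_scale_factor_spatial = 8  # assumed
patch_size = 2                # assumed

grid_height = height // (vae_scale_factor_spatial * patch_size)
grid_width = width // (vae_scale_factor_spatial * patch_size)
print(grid_height, grid_width)  # 45 80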
        grid_t = grid_t.to(dtype=torch.long)
        grid_h = grid_h.to(dtype=torch.long)
        grid_w = grid_w.to(dtype=torch.long)

        pos = torch.cartesian_prod(grid_t, grid_h, grid_w)
        pos = pos.reshape(-1, 3).transpose(0, 1).reshape(3, 1, -1).contiguous()
        grid_t, grid_h, grid_w = pos

        return (freqs_t, freqs_h, freqs_w), (grid_t, grid_h, grid_w)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
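The slicing and tiling toggles above simply forward to the VAE. A hedged usage sketch (the checkpoint id `rhymes-ai/Allegro` is assumed here for illustration):

import torch
from diffusers import AllegroPipeline

pipe = AllegroPipeline.from_pretrained("rhymes-ai/Allegro", torch_dtype=torch.bfloat16)
# Trade speed for memory when decoding large or long videos.
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()
# ... run inference ...
# Switch back to single-pass decoding afterwards if desired.
pipe.disable_vae_slicing()
pipe.disable_vae_tiling()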
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        negative_prompt: str = "",
        num_inference_steps: int = 100,
        timesteps: List[int] = None,
        guidance_scale: float = 7.5,
        num_frames: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_videos_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        clean_caption: bool = True,
        max_sequence_length: int = 512,
    ) -> Union[AllegroPipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equally spaced
                `num_inference_steps` timesteps are used. Must be in descending order.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate videos that are closely linked to the text
                `prompt`, usually at the expense of lower video quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            num_frames (`int`, *optional*, defaults to 88):
                The number of frames in the generated video.
            height (`int`, *optional*, defaults to self.unet.config.sample_size):
                The height in pixels of the generated video.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
            width (`int`, *optional*, defaults to self.unet.config.sample_size):
                The width in pixels of the generated video.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return an [`~pipelines.allegro.pipeline_output.AllegroPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
                be installed. If the dependencies are not installed, the embeddings will be created from the raw
                prompt.
            max_sequence_length (`int`, defaults to `512`):
                Maximum sequence length to use with the `prompt`.
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
        Examples:

        Returns:
            [`~pipelines.allegro.pipeline_output.AllegroPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.allegro.pipeline_output.AllegroPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated videos.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        num_frames = num_frames or self.transformer.config.sample_frames * self.vae_scale_factor_temporal
        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
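The `Examples:` placeholder in the docstring above is filled in at import time via `replace_example_docstring(EXAMPLE_DOC_STRING)`. A hedged end-to-end sketch of a typical call, assuming the `rhymes-ai/Allegro` checkpoint and the `export_to_video` utility from `diffusers.utils`:

import torch
from diffusers import AllegroPipeline
from diffusers.utils import export_to_video

pipe = AllegroPipeline.from_pretrained("rhymes-ai/Allegro", torch_dtype=torch.bfloat16)
pipe.to("cuda")
pipe.enable_vae_tiling()  # helps keep VAE decoding memory manageable at 720p

prompt = "A seaside carnival at dusk, with a Ferris wheel lighting up."
video = pipe(prompt, guidance_scale=7.5, num_inference_steps=100).frames[0]
export_to_video(video, "allegro.mp4", fps=15)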
        self.check_inputs(
            prompt,
            num_frames,
            height,
            width,
            callback_on_step_end_tensor_inputs,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        )
        self._guidance_scale = guidance_scale
        self._interrupt = False

        # 2. Determine batch size
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
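Classifier-free guidance, as applied later in the denoising loop, combines the unconditional and text-conditioned noise predictions with the weight `w` mentioned in the comment above. A self-contained sketch on dummy tensors:

import torch

guidance_scale = 7.5
# stand-ins for the two halves of the batched transformer output
noise_pred_uncond = torch.zeros(1, 4, 22, 90, 160)
noise_pred_text = torch.ones(1, 4, 22, 90, 160)

# w = 1 reduces to the unconditional prediction; w > 1 pushes along the text-conditioned direction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.mean().item())  # 7.5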
        # 3. Encode input prompt
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            clean_caption=clean_caption,
            max_sequence_length=max_sequence_length,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
        if prompt_embeds.ndim == 3:
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
prompt_embeds = prompt_embeds.unsqueeze(1) # b l d -> b 1 l d
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
        self.scheduler.set_timesteps(num_inference_steps, device=device)

        # 5. Prepare latents
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            latent_channels,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare rotary embeddings
        image_rotary_emb = self._prepare_rotary_positional_embeddings(
            batch_size, height, width, latents.size(2), device
        )
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
                # predict noise model_output
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    encoder_attention_mask=prompt_attention_mask,
                    timestep=timestep,
                    image_rotary_emb=image_rotary_emb,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute previous sample: x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
if not output_type == "latent": latents = latents.to(self.vae.dtype) video = self.decode_latents(latents) video = video[:, :, :num_frames, :height, :width] video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return AllegroPipelineOutput(frames=video)
340
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/allegro/pipeline_allegro.py
class KolorsPAGPipeline(
    DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, PAGMixin
):
    r"""
    Pipeline for text-to-image generation using Kolors.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    The pipeline also inherits the following loading methods:
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
341
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/pag/pipeline_pag_kolors.py