Columns: text (string, lengths 1–1.02k), class_index (int64, 0–1.38k), source (string, 431 distinct values)
class LDMBertAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, head_dim: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = False, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = head_dim self.inner_dim = head_dim * num_heads self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.out_proj = nn.Linear(self.inner_dim, embed_dim)
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
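The attention module in the row above is easier to follow with the tensor shapes spelled out. Below is a minimal, self-contained sketch (plain PyTorch, made-up dimensions; not part of the dataset row) of the same projection, per-head reshape, scaled dot-product, and merge-heads sequence that LDMBertAttention performs.

```py
# Shape sketch of the LDMBertAttention computation (illustrative dimensions, bias-free projections).
import torch
import torch.nn as nn

bsz, seq_len, embed_dim, num_heads, head_dim = 2, 16, 768, 8, 64
inner_dim = num_heads * head_dim  # 512

q_proj = nn.Linear(embed_dim, inner_dim, bias=False)
k_proj = nn.Linear(embed_dim, inner_dim, bias=False)
v_proj = nn.Linear(embed_dim, inner_dim, bias=False)
out_proj = nn.Linear(inner_dim, embed_dim)

hidden_states = torch.randn(bsz, seq_len, embed_dim)

def split_heads(t):
    # (bsz, seq, inner_dim) -> (bsz * num_heads, seq, head_dim)
    return t.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).reshape(bsz * num_heads, seq_len, head_dim)

q = split_heads(q_proj(hidden_states)) * head_dim**-0.5  # query is pre-scaled, as in the module
k = split_heads(k_proj(hidden_states))
v = split_heads(v_proj(hidden_states))

attn = torch.softmax(q @ k.transpose(1, 2), dim=-1)      # (bsz * num_heads, seq, seq)
out = (attn @ v).view(bsz, num_heads, seq_len, head_dim)  # split batch and heads back out
out = out_proj(out.transpose(1, 2).reshape(bsz, seq_len, inner_dim))
print(out.shape)  # torch.Size([2, 16, 768])
```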
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size()
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
# get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape)
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1)
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" )
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value
267
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
class LDMBertEncoderLayer(nn.Module): def __init__(self, config: LDMBertConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = LDMBertAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, head_dim=config.head_dim, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim)
268
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Args: hidden_states (`torch.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states
268
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states
268
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
268
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
class LDMBertPreTrainedModel(PreTrainedModel): config_class = LDMBertConfig base_model_prefix = "model" _supports_gradient_checkpointing = True _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LDMBertEncoder,)): module.gradient_checkpointing = value
269
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
@property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs
269
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
class LDMBertEncoder(LDMBertPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`LDMBertEncoderLayer`]. Args: config: LDMBertConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: LDMBertConfig): super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init()
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**.
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple. """
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
# retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_len = input_shape[1] if position_ids is None: position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
# expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if torch.is_grad_enabled() and self.gradient_checkpointing:
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states)
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
270
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
class LDMBertModel(LDMBertPreTrainedModel): _no_split_modules = [] def __init__(self, config: LDMBertConfig): super().__init__(config) self.model = LDMBertEncoder(config) self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs
271
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
class HunyuanVideoPipelineOutput(BaseOutput): r""" Output class for HunyuanVideo pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: torch.Tensor
272
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_output.py
class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): r""" Pipeline for text-to-video generation using HunyuanVideo. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
Args: text_encoder ([`LlamaModel`]): [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). tokenizer (`LlamaTokenizer`): Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). transformer ([`HunyuanVideoTransformer3DModel`]): Conditional Transformer to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLHunyuanVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder_2 ([`CLIPTextModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
tokenizer_2 (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). """
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
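For orientation, the components documented above are usually wired together and driven through a single call. The snippet below is only a hedged sketch: the checkpoint id `hunyuanvideo-community/HunyuanVideo`, the resolution, and the frame count are assumptions for illustration, not values taken from the code above.

```py
# Hedged usage sketch for HunyuanVideoPipeline; checkpoint id and settings are assumptions.
import torch
from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
from diffusers.utils import export_to_video

model_id = "hunyuanvideo-community/HunyuanVideo"  # assumed checkpoint id
transformer = HunyuanVideoTransformer3DModel.from_pretrained(
    model_id, subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16)
pipe.vae.enable_tiling()  # tiled VAE decoding (see enable_vae_tiling below) reduces decode memory
pipe.to("cuda")

output = pipe(
    prompt="A cat walks on the grass, realistic style.",
    height=320,
    width=512,
    num_frames=61,
    num_inference_steps=30,
).frames[0]
export_to_video(output, "output.mp4", fps=15)
```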
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, text_encoder: LlamaModel, tokenizer: LlamaTokenizerFast, transformer: HunyuanVideoTransformer3DModel, vae: AutoencoderKLHunyuanVideo, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: CLIPTextModel, tokenizer_2: CLIPTokenizer, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_llama_prompt_embeds( self, prompt: Union[str, List[str]], prompt_template: Dict[str, Any], num_videos_per_prompt: int = 1, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 256, num_hidden_layers_to_skip: int = 2, ) -> Tuple[torch.Tensor, torch.Tensor]: device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) prompt = [prompt_template["template"].format(p) for p in prompt]
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
crop_start = prompt_template.get("crop_start", None) if crop_start is None: prompt_template_input = self.tokenizer( prompt_template["template"], padding="max_length", return_tensors="pt", return_length=False, return_overflowing_tokens=False, return_attention_mask=False, ) crop_start = prompt_template_input["input_ids"].shape[-1] # Remove <|eot_id|> token and placeholder {} crop_start -= 2
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
max_sequence_length += crop_start text_inputs = self.tokenizer( prompt, max_length=max_sequence_length, padding="max_length", truncation=True, return_tensors="pt", return_length=False, return_overflowing_tokens=False, return_attention_mask=True, ) text_input_ids = text_inputs.input_ids.to(device=device) prompt_attention_mask = text_inputs.attention_mask.to(device=device) prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True, ).hidden_states[-(num_hidden_layers_to_skip + 1)] prompt_embeds = prompt_embeds.to(dtype=dtype) if crop_start is not None and crop_start > 0: prompt_embeds = prompt_embeds[:, crop_start:] prompt_attention_mask = prompt_attention_mask[:, crop_start:]
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
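A tiny illustration (made-up layer names) of the `hidden_states[-(num_hidden_layers_to_skip + 1)]` indexing above: with `output_hidden_states=True` the encoder returns the embedding output followed by one entry per layer, so the negative index selects the layer that sits `num_hidden_layers_to_skip` blocks before the final one.

```py
# Made-up 4-layer encoder output, just to show which entry the negative index picks.
hidden_states = ("embeddings", "layer_1", "layer_2", "layer_3", "layer_4")
num_hidden_layers_to_skip = 2
print(hidden_states[-(num_hidden_layers_to_skip + 1)])  # "layer_2"
```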
# duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt) prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len) return prompt_embeds, prompt_attention_mask def _get_clip_prompt_embeds( self, prompt: Union[str, List[str]], num_videos_per_prompt: int = 1, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 77, ) -> torch.Tensor: device = device or self._execution_device dtype = dtype or self.text_encoder_2.dtype
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer_2( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
# duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1) return prompt_embeds
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
def encode_prompt( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]] = None, prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 256, ): if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds( prompt, prompt_template, num_videos_per_prompt, device=device, dtype=dtype, max_sequence_length=max_sequence_length, )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if pooled_prompt_embeds is None: if prompt_2 is None and pooled_prompt_embeds is None: prompt_2 = prompt pooled_prompt_embeds = self._get_clip_prompt_embeds( prompt, num_videos_per_prompt, device=device, dtype=dtype, max_sequence_length=77, ) return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask def check_inputs( self, prompt, prompt_2, height, width, prompt_embeds=None, callback_on_step_end_tensor_inputs=None, prompt_template=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if prompt_template is not None: if not isinstance(prompt_template, dict): raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}") if "template" not in prompt_template: raise ValueError( f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}" ) def prepare_latents( self, batch_size: int, num_channels_latents: int = 32, height: int = 720, width: int = 1280, num_frames: int = 129, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype)
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
shape = ( batch_size, num_channels_latents, num_frames, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing()
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
@property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Union[str, List[str]] = None, height: int = 720, width: int = 1280, num_frames: int = 129, num_inference_steps: int = 50, sigmas: List[float] = None, guidance_scale: float = 6.0, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, max_sequence_length: int = 256, ): r""" The call function to the pipeline for generation.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds` instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will be used instead. height (`int`, defaults to `720`): The height in pixels of the generated image. width (`int`, defaults to `1280`): The width in pixels of the generated image. num_frames (`int`, defaults to `129`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*):
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, defaults to `6.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. Note that the only available HunyuanVideo model is CFG-distilled, which means that traditional guidance between the unconditional and conditional latents is not applied.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during inference with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
Examples: Returns: [`~HunyuanVideoPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, prompt_embeds, callback_on_step_end_tensor_inputs, prompt_template, )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode input prompt prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_template=prompt_template, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, prompt_attention_mask=prompt_attention_mask, device=device, max_sequence_length=max_sequence_length, )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) prompt_attention_mask = prompt_attention_mask.to(transformer_dtype) if pooled_prompt_embeds is not None: pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, )
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
# 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_latent_frames, torch.float32, device, generator, latents, ) # 6. Prepare guidance condition guidance = torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0 # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
latent_model_input = latents.to(transformer_dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, pooled_projections=pooled_prompt_embeds, guidance=guidance, attention_kwargs=attention_kwargs, return_dict=False, )[0] # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step()
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
if not output_type == "latent": latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return HunyuanVideoPipelineOutput(frames=video)
273
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py
class ConsistencyModelPipeline(DiffusionPipeline): r""" Pipeline for unconditional or class-conditional image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only compatible with [`CMStochasticIterativeScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: super().__init__() self.register_modules( unet=unet, scheduler=scheduler, ) self.safety_checker = None
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
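A hedged usage sketch for the pipeline above; the checkpoint id `openai/diffusers-cd_imagenet64_l2` and the class label are assumptions for illustration, not values stated in the code.

```py
# Hedged usage sketch for ConsistencyModelPipeline; checkpoint id and class label are assumptions.
import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained(
    "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
).to("cuda")

# Distilled one-step sampling; `class_labels` only matters for class-conditional checkpoints.
image = pipe(num_inference_steps=1, class_labels=145).images[0]
image.save("consistency_model_sample.png")
```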
def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
# Follows diffusers.VaeImageProcessor.postprocess def postprocess_image(self, sample: torch.Tensor, output_type: str = "pil"): if output_type not in ["pt", "np", "pil"]: raise ValueError( f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" ) # Equivalent to diffusers.VaeImageProcessor.denormalize sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == "pt": return sample # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "np": return sample # Output_type must be 'pil' sample = self.numpy_to_pil(sample) return sample
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
def prepare_class_labels(self, batch_size, device, class_labels=None): if self.unet.config.num_class_embeds is not None: if isinstance(class_labels, list): class_labels = torch.tensor(class_labels, dtype=torch.int) elif isinstance(class_labels, int): assert batch_size == 1, "Batch size must be 1 if classes is an int" class_labels = torch.tensor([class_labels], dtype=torch.int) elif class_labels is None: # Randomly generate batch_size class labels # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) class_labels = class_labels.to(device) else: class_labels = None return class_labels
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): if num_inference_steps is None and timesteps is None: raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") if num_inference_steps is not None and timesteps is not None: logger.warning( f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" " `timesteps` will be used over `num_inference_steps`." ) if latents is not None: expected_shape = (batch_size, 3, img_size, img_size) if latents.shape != expected_shape: raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.")
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." )
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, batch_size: int = 1, class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, num_inference_steps: int = 1, timesteps: List[int] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): Optional class labels for conditioning class-conditional consistency models. Not used if the model is not class-conditional.
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
num_inference_steps (`int`, *optional*, defaults to 1): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that is called every `callback_steps` steps during inference, with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step.
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Prepare call parameters img_size = self.unet.config.sample_size device = self._execution_device # 1. Check inputs self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) # 2. Prepare image latents # Sample image latents x_0 ~ N(0, sigma_0^2 * I) sample = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents, )
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
# 3. Handle class_labels for class-conditional models class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) # 4. Prepare timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps # 5. Denoising loop # Multistep sampling: implements Algorithm 1 in the paper with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): scaled_sample = self.scheduler.scale_model_input(sample, t) model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] sample = self.scheduler.step(model_output, t, sample, generator=generator)[0]
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
# call the callback, if provided progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, sample) if XLA_AVAILABLE: xm.mark_step() # 6. Post-process image sample image = self.postprocess_image(sample, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
274
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py
class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler)
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
@torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation.
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example:
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.config.sample_rate, audio.transpose()) # To display in Google Colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.config.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
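The chunk above converts the requested audio length into a sample count and rounds it up to a multiple of the UNet's downscale factor. A standalone sketch of that arithmetic, with an assumed sample rate and an assumed number of up blocks (they are not read from a real UNet config here):

```py
sample_rate = 44100          # assumed sample rate in Hz
num_up_blocks = 4            # assumed number of UNet up blocks
audio_length_in_s = 1.0

down_scale_factor = 2 ** num_up_blocks                  # 16
sample_size = audio_length_in_s * sample_rate           # 44100.0
original_sample_size = int(sample_size)

if sample_size % down_scale_factor != 0:
    # round up to the next multiple of the downscale factor, as the pipeline does
    sample_size = ((audio_length_in_s * sample_rate) // down_scale_factor + 1) * down_scale_factor

print(original_sample_size, int(sample_size))           # 44100 44112
```

The pipeline keeps `original_sample_size` around so the padded tail can be trimmed off again after denoising.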
        dtype = next(self.unet.parameters()).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

            if XLA_AVAILABLE:
                xm.mark_step()

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
275
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
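The loop above follows the usual diffusers pattern: set the scheduler timesteps, predict with the model, and step the scheduler, then clamp and trim the result. A small self-contained sketch of the same loop shape, using a `DDPMScheduler` and a stand-in denoiser function instead of the trained `UNet1DModel`; the shapes and the scheduler choice are assumptions for illustration only:

```py
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)

batch_size, channels, sample_size = 1, 2, 65536   # assumed shapes
original_sample_size = 65000                       # pretend the requested length was shorter

def fake_denoiser(sample, t):
    # stand-in for unet(sample, t).sample; a real model predicts the noise residual
    return torch.zeros_like(sample)

audio = torch.randn(batch_size, channels, sample_size)
for t in scheduler.timesteps:
    model_output = fake_denoiser(audio, t)
    audio = scheduler.step(model_output, t, audio).prev_sample

audio = audio.clamp(-1, 1).float().cpu().numpy()
audio = audio[:, :, :original_sample_size]         # cut back to the requested length
print(audio.shape)                                 # (1, 2, 65000)
```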
class Kandinsky3Pipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
    model_cpu_offload_seq = "text_encoder->unet->movq"
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "negative_attention_mask",
        "attention_mask",
    ]

    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        unet: Kandinsky3UNet,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq
        )
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
    def process_embeds(self, embeddings, attention_mask, cut_context):
        if cut_context:
            embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0])
            max_seq_length = attention_mask.sum(-1).max() + 1
            embeddings = embeddings[:, :max_seq_length]
            attention_mask = attention_mask[:, :max_seq_length]
        return embeddings, attention_mask

    @torch.no_grad()
    def encode_prompt(
        self,
        prompt,
        do_classifier_free_guidance=True,
        num_images_per_prompt=1,
        device=None,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        _cut_context=False,
        attention_mask: Optional[torch.Tensor] = None,
        negative_attention_mask: Optional[torch.Tensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
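`process_embeds` above zeroes out embeddings at padded positions and trims both tensors to the longest attended length plus one. A toy sketch of that behavior with made-up tensor sizes:

```py
import torch

batch, seq_len, dim = 2, 8, 4                      # assumed sizes
embeddings = torch.randn(batch, seq_len, dim)
attention_mask = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0],
                               [1, 1, 1, 1, 1, 0, 0, 0]])

embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0])
max_seq_length = attention_mask.sum(-1).max() + 1   # longest prompt plus one padding token
embeddings = embeddings[:, :max_seq_length]
attention_mask = attention_mask[:, :max_seq_length]
print(embeddings.shape, attention_mask.shape)       # torch.Size([2, 6, 4]) torch.Size([2, 6])
```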
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask. Must be provided if passing `prompt_embeds` directly.
            negative_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated negative attention mask. Must be provided if passing `negative_prompt_embeds` directly.
        """
        if prompt is not None and negative_prompt is not None:
            if type(prompt) is not type(negative_prompt):
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if device is None:
            device = self._execution_device

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        max_length = 128
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids.to(device)
            attention_mask = text_inputs.attention_mask.to(device)
            prompt_embeds = self.text_encoder(
                text_input_ids,
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]
            prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context)
            prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2)

        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
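The encoding chunk above follows the usual tokenize -> encode -> mask pattern. The sketch below shows that pattern in isolation with a small public T5 checkpoint; the checkpoint id and `max_length` are assumptions for illustration, not the Kandinsky 3 text encoder or the pipeline's own values:

```py
import torch
from transformers import T5EncoderModel, T5Tokenizer

model_id = "t5-small"        # assumed small checkpoint, chosen only to keep the sketch lightweight
tokenizer = T5Tokenizer.from_pretrained(model_id)
text_encoder = T5EncoderModel.from_pretrained(model_id)

text_inputs = tokenizer(
    "a photo of a cat",
    padding="max_length",
    max_length=32,           # the pipeline uses 128
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    prompt_embeds = text_encoder(
        text_inputs.input_ids, attention_mask=text_inputs.attention_mask
    )[0]

# zero out embeddings at padded positions, as the pipeline does
prompt_embeds = prompt_embeds * text_inputs.attention_mask.unsqueeze(2)
print(prompt_embeds.shape)
```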
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        attention_mask = attention_mask.repeat(num_images_per_prompt, 1)
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            if negative_prompt is not None:
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=128,
                    truncation=True,
                    return_attention_mask=True,
                    return_tensors="pt",
                )
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
                text_input_ids = uncond_input.input_ids.to(device)
                negative_attention_mask = uncond_input.attention_mask.to(device)
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
                negative_prompt_embeds = self.text_encoder(
                    text_input_ids,
                    attention_mask=negative_attention_mask,
                )
                negative_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]]
                negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]]
                negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2)
            else:
                negative_prompt_embeds = torch.zeros_like(prompt_embeds)
                negative_attention_mask = torch.zeros_like(attention_mask)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            if negative_prompt_embeds.shape != prompt_embeds.shape:
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
                negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
        else:
            negative_prompt_embeds = None
            negative_attention_mask = None

        return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
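The `repeat`/`view` pair used in `encode_prompt` is the "mps friendly" way to duplicate each prompt's embeddings `num_images_per_prompt` times. A toy sketch with made-up sizes, showing it is equivalent to `repeat_interleave` along the batch dimension:

```py
import torch

batch_size, seq_len, dim = 2, 5, 3
num_images_per_prompt = 4
prompt_embeds = torch.randn(batch_size, seq_len, dim)

# tile along the sequence axis, then fold the copies back into the batch axis
duplicated = prompt_embeds.repeat(1, num_images_per_prompt, 1)
duplicated = duplicated.view(batch_size * num_images_per_prompt, seq_len, -1)

assert torch.equal(duplicated, prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0))
print(duplicated.shape)   # torch.Size([8, 5, 3])
```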
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
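`prepare_latents` follows the usual convention: sample Gaussian noise in the target shape and scale it by the scheduler's `init_noise_sigma`. A minimal sketch of that convention, with plain `torch.randn` standing in for `randn_tensor` and an assumed latent shape and scheduler:

```py
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler()
shape = (1, 4, 32, 32)                           # assumed latent shape
generator = torch.Generator().manual_seed(0)

latents = torch.randn(shape, generator=generator)
latents = latents * scheduler.init_noise_sigma    # 1.0 for DDPM; larger for some other schedulers
print(latents.shape, float(scheduler.init_noise_sigma))
```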
    def check_inputs(
        self,
        prompt,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        attention_mask=None,
        negative_attention_mask=None,
    ):
        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if negative_prompt_embeds is not None and negative_attention_mask is None:
            raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`")
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if negative_prompt_embeds is not None and negative_attention_mask is not None:
            if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape:
                raise ValueError(
                    "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but"
                    f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`"
                    f" {negative_attention_mask.shape}."
                )

        if prompt_embeds is not None and attention_mask is None:
            raise ValueError("Please provide `attention_mask` along with `prompt_embeds`")
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        if prompt_embeds is not None and attention_mask is not None:
            if prompt_embeds.shape[:2] != attention_mask.shape:
                raise ValueError(
                    "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`"
                    f" {attention_mask.shape}."
                )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
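The `do_classifier_free_guidance` property above simply gates guidance on `guidance_scale > 1`. The sketch below shows the standard classifier-free guidance blend used throughout diffusers, with random tensors standing in for the UNet's conditional and unconditional predictions; the exact combination Kandinsky 3 applies lives in the rest of its `__call__`, which is not part of this chunk:

```py
import torch

guidance_scale = 3.0
do_classifier_free_guidance = guidance_scale > 1

# stand-ins for one denoising step's UNet predictions
noise_pred_uncond = torch.randn(1, 4, 32, 32)
noise_pred_text = torch.randn(1, 4, 32, 32)

if do_classifier_free_guidance:
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
else:
    noise_pred = noise_pred_text
print(noise_pred.shape)
```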
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 25,
        guidance_scale: float = 3.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = 1024,
        width: Optional[int] = 1024,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        negative_attention_mask: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        latents=None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        """
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
Function invoked when calling the pipeline for generation.
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equally spaced
                `num_inference_steps` timesteps are used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 3.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
276
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
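Since this chunk ends mid-docstring, here is a minimal usage sketch for the `__call__` signature shown above. The checkpoint id, dtype, and offloading choice are assumptions for illustration; substitute the Kandinsky 3 weights you actually use:

```py
import torch
from diffusers import AutoPipelineForText2Image

# assumed checkpoint id and dtype; the real weights are large
pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(
    "A photograph of the inside of a subway train, 4k photo",
    num_inference_steps=25,
    guidance_scale=3.0,
    generator=generator,
).images[0]
image.save("kandinsky3.png")
```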