prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards compatibility
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
return prompt_embeds
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
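Example:
A minimal sketch, not part of the original docstring; the checkpoint name and device are assumptions:
```py
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9")  # illustrative checkpoint
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="a photograph of an astronaut riding a horse",
    device=torch.device("cpu"),
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="low quality",
)
```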
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
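# Illustrative note (not in the original file): with `clip_skip=1` the `-(clip_skip + 1)`
# indexing above selects `hidden_states[-2]` (the pre-final encoder layer), with
# `clip_skip=2` it selects `hidden_states[-3]`, and so on; `final_layer_norm` is then
# applied so the skipped-layer output is normalized the same way `last_hidden_state`
# would have been.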
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" | 368 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py |
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_enc_hidden_states = self.image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
num_images_per_prompt, dim=0
)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
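# Illustrative note (not in the original file): for a scheduler such as `DDIMScheduler`,
# whose `step` accepts both arguments, the dict returned above is
# `{"eta": eta, "generator": generator}`; for schedulers whose `step` accepts neither,
# it stays empty and the extra kwargs are effectively a no-op.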
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") | 368 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py |
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (
batch_size,
num_channels_latents,
int(height) // self.vae_scale_factor,
int(width) // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
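# Illustrative note (not in the original file): with the usual Stable-Diffusion-style setup
# (4 latent channels, `vae_scale_factor` of 8), a 512x512 request yields latents of shape
# `(batch_size, 4, 64, 64)`, already scaled by `init_noise_sigma`.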
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
w (`torch.Tensor`):
Guidance scale values at which to generate the embedding vectors (one value per sample in the batch)
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
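Example:
A minimal sketch, not part of the original docstring; it assumes `pipe` is an instance of this pipeline:
```py
import torch

w = torch.tensor([7.5, 5.0])  # one guidance scale per sample in the batch
emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
assert emb.shape == (2, 256)
```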
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
sigmas: List[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that is called at the end of each denoising step during inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
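A minimal illustrative sketch, not the text injected via `EXAMPLE_DOC_STRING`; the checkpoint name is an assumption:
```py
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16)  # illustrative checkpoint
pipe = pipe.to("cuda")
image = pipe("a painting of a fox in the style of starry night").images[0]
image.save("fox.png")
```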
Returns:
[`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
if ip_adapter_image is not None:
output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
image_embeds, negative_image_embeds = self.encode_image(
ip_adapter_image, device, num_images_per_prompt, output_hidden_state
)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas
)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
0
]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
class TransformationModelOutput(ModelOutput):
"""
Base class for text model's outputs that also contains a pooling of the last hidden states.
Args:
text_embeds (`torch.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The text embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one
for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
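Example:
A minimal sketch, not part of the original docstring; the model is randomly initialised and the tokenizer name is an assumption:
```py
from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")  # illustrative tokenizer
config = RobertaSeriesConfig(vocab_size=tokenizer.vocab_size, project_dim=512)
model = RobertaSeriesModelWithTransformation(config)  # randomly initialised weights
inputs = tokenizer("a photograph of an astronaut riding a horse", return_tensors="pt")
outputs = model(**inputs)
print(outputs.projection_state.shape)  # (1, sequence_length, project_dim)
```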
"""
projection_state: Optional[torch.Tensor] = None
last_hidden_state: torch.Tensor = None
hidden_states: Optional[Tuple[torch.Tensor]] = None
attentions: Optional[Tuple[torch.Tensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
def __init__(
self,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
project_dim=512,
pooler_fn="cls",
learn_encoder=False,
use_attention_mask=True,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.project_dim = project_dim
self.pooler_fn = pooler_fn
self.learn_encoder = learn_encoder
self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
base_model_prefix = "roberta"
config_class = RobertaSeriesConfig
def __init__(self, config):
super().__init__(config)
self.roberta = XLMRobertaModel(config)
self.transformation = nn.Linear(config.hidden_size, config.project_dim)
self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
if self.has_pre_transformation:
self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
):
r""" """
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
return_dict=return_dict,
)
if self.has_pre_transformation:
sequence_output2 = outputs["hidden_states"][-2]
sequence_output2 = self.pre_LN(sequence_output2)
projection_state2 = self.transformation_pre(sequence_output2)
return TransformationModelOutput(
projection_state=projection_state2,
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
else:
projection_state = self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=projection_state,
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class AudioDiffusionPipeline(DiffusionPipeline):
"""
Pipeline for audio diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
vqvae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
mel ([`Mel`]):
Transform audio into a spectrogram.
scheduler ([`DDIMScheduler`] or [`DDPMScheduler`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`] or [`DDPMScheduler`].
"""
_optional_components = ["vqvae"]
def __init__(
self,
vqvae: AutoencoderKL,
unet: UNet2DConditionModel,
mel: Mel,
scheduler: Union[DDIMScheduler, DDPMScheduler],
):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
def get_default_steps(self) -> int:
"""Returns default number of steps recommended for inference.
Returns:
`int`:
The number of steps.
"""
return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
audio_file: str = None,
raw_audio: np.ndarray = None,
slice: int = 0,
start_step: int = 0,
steps: int = None,
generator: torch.Generator = None,
mask_start_secs: float = 0,
mask_end_secs: float = 0,
step_generator: torch.Generator = None,
eta: float = 0,
noise: torch.Tensor = None,
encoding: torch.Tensor = None,
return_dict=True,
) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""
The call function to the pipeline for generation.
Args:
batch_size (`int`):
Number of samples to generate.
audio_file (`str`):
An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation.
raw_audio (`np.ndarray`):
The raw audio file as a NumPy array.
slice (`int`):
Slice number of audio to convert.
start_step (int):
Step to start diffusion from.
steps (`int`):
Number of denoising steps (defaults to `50` for DDIM and `1000` for DDPM).
generator (`torch.Generator`):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
mask_start_secs (`float`):
Number of seconds of audio to mask (not generate) at start.
mask_end_secs (`float`):
Number of seconds of audio to mask (not generate) at end.
step_generator (`torch.Generator`):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) used during denoising; defaults to `generator` if not provided.
eta (`float`):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
noise (`torch.Tensor`):
A noise tensor of shape `(batch_size, 1, height, width)` or `None`.
encoding (`torch.Tensor`):
A tensor for [`UNet2DConditionModel`] of shape `(batch_size, seq_length, cross_attention_dim)`.
return_dict (`bool`):
Whether or not to return a [`AudioPipelineOutput`], [`ImagePipelineOutput`] or a plain tuple.
Examples:
For audio diffusion:
```py
import torch
from IPython.display import Audio
from diffusers import DiffusionPipeline
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device)
output = pipe()
display(output.images[0])
display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))
```
For latent audio diffusion:
```py
import torch
from IPython.display import Audio
from diffusers import DiffusionPipeline
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("teticio/latent-audio-diffusion-256").to(device)
output = pipe()
display(output.images[0])
display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))
```
For other tasks like variation, inpainting, outpainting, etc:
```py
output = pipe(
raw_audio=output.audios[0, 0],
start_step=int(pipe.get_default_steps() / 2),
mask_start_secs=1,
mask_end_secs=1,
)
display(output.images[0])
display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))
```
Returns:
`List[PIL Image]`:
A list of Mel spectrogram images and a tuple of the sample rate (`int`) and the corresponding raw audio (`List[np.ndarray]`).
""" | 372 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py |
steps = steps or self.get_default_steps()
self.scheduler.set_timesteps(steps)
step_generator = step_generator or generator
# For backwards compatibility
if isinstance(self.unet.config.sample_size, int):
self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
noise = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
),
generator=generator,
device=self.device,
)
images = noise
mask = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(audio_file, raw_audio)
input_image = self.mel.audio_slice_to_image(slice)
input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
(input_image.height, input_image.width)
)
input_image = (input_image / 255) * 2 - 1
input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
if self.vqvae is not None:
input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
generator=generator
)[0]
input_images = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
pixels_per_second = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
mask_start = int(mask_start_secs * pixels_per_second)
mask_end = int(mask_end_secs * pixels_per_second)
mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet, UNet2DConditionModel):
model_output = self.unet(images, t, encoding)["sample"]
else:
model_output = self.unet(images, t)["sample"]
if isinstance(self.scheduler, DDIMScheduler):
images = self.scheduler.step(
model_output=model_output,
timestep=t,
sample=images,
eta=eta,
generator=step_generator,
)["prev_sample"]
else:
images = self.scheduler.step(
model_output=model_output,
timestep=t,
sample=images,
generator=step_generator,
)["prev_sample"]
if mask is not None:
if mask_start > 0:
images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
if mask_end > 0:
images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
images = 1 / self.vqvae.config.scaling_factor * images
images = self.vqvae.decode(images)["sample"]
images = (images / 2 + 0.5).clamp(0, 1)
images = images.cpu().permute(0, 2, 3, 1).numpy()
images = (images * 255).round().astype("uint8")
images = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
)
audios = [self.mel.image_to_audio(_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
"""
Reverse the denoising step process to recover a noisy image from the generated image.
Args:
images (`List[PIL Image]`):
List of images to encode.
steps (`int`):
Number of encoding steps to perform (defaults to `50`).
Returns:
`np.ndarray`:
A noise tensor of shape `(batch_size, 1, height, width)`.
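Example:
A minimal sketch, not part of the original docstring; the checkpoint name is an assumption and the pipeline must use a DDIM scheduler:
```py
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")  # illustrative checkpoint
output = pipe(steps=50)
noise = pipe.encode(output.images, steps=50)  # recover the noise that generated these spectrograms
```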
"""
# Only works with DDIM as this method is deterministic
assert isinstance(self.scheduler, DDIMScheduler)
self.scheduler.set_timesteps(steps)
sample = np.array(
[np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
)
sample = (sample / 255) * 2 - 1
sample = torch.Tensor(sample).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
model_output = self.unet(sample, t)["sample"]
pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output
sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output
return sample
@staticmethod
def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
"""Spherical Linear intERPolation. | 372 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py |
Args:
x0 (`torch.Tensor`):
The first tensor to interpolate between.
x1 (`torch.Tensor`):
Second tensor to interpolate between.
alpha (`float`):
Interpolation factor between 0 and 1.
Returns:
`torch.Tensor`:
The interpolated tensor.
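Example:
A minimal sketch, not part of the original docstring:
```py
import torch
from diffusers import AudioDiffusionPipeline

x0 = torch.randn(1, 1, 256, 256)
x1 = torch.randn(1, 1, 256, 256)
halfway = AudioDiffusionPipeline.slerp(x0, x1, alpha=0.5)  # interpolate between two noise tensors
```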
"""
theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
class Mel(ConfigMixin, SchedulerMixin):
"""
Parameters:
x_res (`int`):
x resolution of spectrogram (time).
y_res (`int`):
y resolution of spectrogram (frequency bins).
sample_rate (`int`):
Sample rate of audio.
n_fft (`int`):
Size of the Fast Fourier Transform (FFT) window.
hop_length (`int`):
Hop length (a higher number is recommended if `y_res` < 256).
top_db (`int`):
Loudest decibel value.
n_iter (`int`):
Number of iterations for Griffin-Lim Mel inversion.
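Example:
A minimal round-trip sketch, not part of the original docstring; it uses synthetic audio and assumes librosa is installed:
```py
import numpy as np
from diffusers import Mel  # import path assumed from the top-level package

mel = Mel(x_res=256, y_res=256, sample_rate=22050)
mel.load_audio(raw_audio=np.random.uniform(-1, 1, 22050 * 10).astype(np.float32))
image = mel.audio_slice_to_image(0)  # PIL grayscale spectrogram of the first slice
audio = mel.image_to_audio(image)    # Griffin-Lim reconstruction of that slice
print(mel.get_number_of_slices(), image.size, audio.shape)
```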
"""
config_name = "mel_config.json"
@register_to_config
def __init__(
self,
x_res: int = 256,
y_res: int = 256,
sample_rate: int = 22050,
n_fft: int = 2048,
hop_length: int = 512,
top_db: int = 80,
n_iter: int = 32,
):
self.hop_length = hop_length
self.sr = sample_rate
self.n_fft = n_fft
self.top_db = top_db
self.n_iter = n_iter
self.set_resolution(x_res, y_res)
self.audio = None
if not _librosa_can_be_imported:
raise ValueError(_import_error)
def set_resolution(self, x_res: int, y_res: int):
"""Set resolution.
Args:
x_res (`int`):
x resolution of spectrogram (time).
y_res (`int`):
y resolution of spectrogram (frequency bins).
"""
self.x_res = x_res
self.y_res = y_res
self.n_mels = self.y_res
self.slice_size = self.x_res * self.hop_length - 1
def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None):
"""Load audio.
Args:
audio_file (`str`):
An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation.
raw_audio (`np.ndarray`):
The raw audio file as a NumPy array.
"""
if audio_file is not None:
self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr)
else:
self.audio = raw_audio
# Pad with silence if necessary.
if len(self.audio) < self.x_res * self.hop_length:
self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))])
def get_number_of_slices(self) -> int:
"""Get number of slices in audio.
Returns:
`int`:
Number of spectrograms the audio can be sliced into.
"""
return len(self.audio) // self.slice_size
def get_audio_slice(self, slice: int = 0) -> np.ndarray:
"""Get slice of audio.
Args:
slice (`int`):
Slice number of audio (out of `get_number_of_slices()`).
Returns:
`np.ndarray`:
The audio slice as a NumPy array.
"""
return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)]
def get_sample_rate(self) -> int:
"""Get sample rate.
Returns:
`int`:
Sample rate of audio.
"""
return self.sr
def audio_slice_to_image(self, slice: int) -> Image.Image:
"""Convert slice of audio to spectrogram.
Args:
slice (`int`):
Slice number of audio to convert (out of `get_number_of_slices()`).
Returns:
`PIL Image`:
A grayscale image of `x_res x y_res`.
"""
S = librosa.feature.melspectrogram(
y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels
)
log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db)
bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8)
image = Image.fromarray(bytedata)
return image
def image_to_audio(self, image: Image.Image) -> np.ndarray:
"""Converts spectrogram to audio.
Args:
image (`PIL Image`):
A grayscale image of `x_res x y_res`.
Returns:
audio (`np.ndarray`):
The audio as a NumPy array.
"""
bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width))
log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db
S = librosa.db_to_power(log_S)
audio = librosa.feature.inverse.mel_to_audio(
S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter
)
return audio
class LDMPipeline(DiffusionPipeline):
r"""
Pipeline for unconditional image generation using latent diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
vqvae ([`VQModel`]):
Vector-quantized (VQ) model to encode and decode images to and from latent representations.
unet ([`UNet2DModel`]):
A `UNet2DModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
[`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents.
"""
def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
super().__init__()
self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
eta: float = 0.0,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[Tuple, ImagePipelineOutput]:
r"""
The call function to the pipeline for generation.
Args:
batch_size (`int`, *optional*, defaults to 1):
Number of images to generate.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Example:
```py
>>> from diffusers import LDMPipeline | 374 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py |
>>> # load model and scheduler
>>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
>>> # run pipeline in inference (sample random noise and denoise)
>>> image = pipe().images[0]
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
latents = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
latents = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(num_inference_steps) | 374 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py |
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_kwargs = {}
if accepts_eta:
extra_kwargs["eta"] = eta
for t in self.progress_bar(self.scheduler.timesteps):
latent_model_input = self.scheduler.scale_model_input(latents, t)
# predict the noise residual
noise_prediction = self.unet(latent_model_input, t).sample
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
# adjust latents with inverse of vae scale
latents = latents / self.vqvae.config.scaling_factor
# decode the image latents with the VAE
image = self.vqvae.decode(latents).sample | 374 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py |
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image) | 374 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py |
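Beyond the short docstring example, here is a slightly fuller usage sketch; running on CUDA and saving to PNG are illustrative choices, not requirements:

```py
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
pipe = pipe.to("cuda")  # optional; the pipeline also runs on CPU, just slower

# the initial noise is drawn on CPU in this pipeline (randn_tensor is called without a
# device), so a CPU generator is the safe choice for reproducibility
generator = torch.Generator().manual_seed(0)

result = pipe(batch_size=2, num_inference_steps=50, eta=0.0, generator=generator)
for i, image in enumerate(result.images):
    image.save(f"ldm_sample_{i}.png")
```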
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
def __init__(
self,
max_length: int,
vocab_size: int,
d_model: int,
dropout_rate: float,
num_layers: int,
num_heads: int,
d_kv: int,
d_ff: int,
feed_forward_proj: str,
is_decoder: bool = False,
):
super().__init__()
self.token_embedder = nn.Embedding(vocab_size, d_model)
self.position_encoding = nn.Embedding(max_length, d_model)
self.position_encoding.weight.requires_grad = False
self.dropout_pre = nn.Dropout(p=dropout_rate)
t5config = T5Config(
vocab_size=vocab_size,
d_model=d_model,
num_heads=num_heads,
d_kv=d_kv,
d_ff=d_ff,
dropout_rate=dropout_rate,
feed_forward_proj=feed_forward_proj,
is_decoder=is_decoder,
is_encoder_decoder=False,
) | 375 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py |
self.encoders = nn.ModuleList()
for lyr_num in range(num_layers):
lyr = T5Block(t5config)
self.encoders.append(lyr)
self.layer_norm = T5LayerNorm(d_model)
self.dropout_post = nn.Dropout(p=dropout_rate)
def forward(self, encoder_input_tokens, encoder_inputs_mask):
x = self.token_embedder(encoder_input_tokens)
seq_length = encoder_input_tokens.shape[1]
inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
x += self.position_encoding(inputs_positions)
x = self.dropout_pre(x)
# build the inverted (extended) attention mask expected by the T5 blocks
input_shape = encoder_input_tokens.size()
extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
for lyr in self.encoders:
x = lyr(x, extended_attention_mask)[0]
x = self.layer_norm(x)
return self.dropout_post(x), encoder_inputs_mask | 375 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py |
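A minimal smoke test of the notes encoder, assuming the class defined above is in scope and using small, hypothetical dimensions (real checkpoints use much larger ones); it only checks that shapes flow through as expected:

```py
import torch

# hypothetical, tiny configuration purely for illustration
enc = SpectrogramNotesEncoder(
    max_length=64,
    vocab_size=128,
    d_model=32,
    dropout_rate=0.1,
    num_layers=2,
    num_heads=2,
    d_kv=16,
    d_ff=64,
    feed_forward_proj="gated-gelu",
)

tokens = torch.randint(1, 128, (1, 64))  # dummy note tokens; 0 is treated as padding
mask = tokens > 0                        # same masking rule the pipeline uses
hidden, out_mask = enc(tokens, mask)
print(hidden.shape)  # torch.Size([1, 64, 32])
```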
class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
def __init__(
self,
input_dims: int,
targets_context_length: int,
d_model: int,
dropout_rate: float,
num_layers: int,
num_heads: int,
d_kv: int,
d_ff: int,
feed_forward_proj: str,
is_decoder: bool = False,
):
super().__init__()
self.input_proj = nn.Linear(input_dims, d_model, bias=False)
self.position_encoding = nn.Embedding(targets_context_length, d_model)
self.position_encoding.weight.requires_grad = False
self.dropout_pre = nn.Dropout(p=dropout_rate) | 376 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py |
t5config = T5Config(
d_model=d_model,
num_heads=num_heads,
d_kv=d_kv,
d_ff=d_ff,
feed_forward_proj=feed_forward_proj,
dropout_rate=dropout_rate,
is_decoder=is_decoder,
is_encoder_decoder=False,
)
self.encoders = nn.ModuleList()
for lyr_num in range(num_layers):
lyr = T5Block(t5config)
self.encoders.append(lyr)
self.layer_norm = T5LayerNorm(d_model)
self.dropout_post = nn.Dropout(p=dropout_rate)
def forward(self, encoder_inputs, encoder_inputs_mask):
x = self.input_proj(encoder_inputs)
# terminal relative positional encodings
max_positions = encoder_inputs.shape[1]
input_positions = torch.arange(max_positions, device=encoder_inputs.device) | 376 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py |
seq_lens = encoder_inputs_mask.sum(-1)
input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0)
x += self.position_encoding(input_positions)
x = self.dropout_pre(x)
# build the inverted (extended) attention mask expected by the T5 blocks
input_shape = encoder_inputs.size()
extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
for lyr in self.encoders:
x = lyr(x, extended_attention_mask)[0]
x = self.layer_norm(x)
return self.dropout_post(x), encoder_inputs_mask | 376 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py |
class SpectrogramDiffusionPipeline(DiffusionPipeline):
r"""
Pipeline for generating audio spectrograms from encoded MIDI note tokens (spectrogram diffusion).
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Args:
notes_encoder ([`SpectrogramNotesEncoder`]):
A T5-style encoder for the MIDI note tokens.
continuous_encoder ([`SpectrogramContEncoder`]):
A T5-style encoder for the previously generated spectrogram segment (the continuous context).
decoder ([`T5FilmDecoder`]):
A [`T5FilmDecoder`] to denoise the encoded audio latents.
scheduler ([`DDPMScheduler`]):
A scheduler to be used in combination with `decoder` to denoise the encoded audio latents.
melgan ([`OnnxRuntimeModel`]):
A MelGAN vocoder, run via ONNX Runtime, that converts the generated mel spectrogram into a waveform.
"""
_optional_components = ["melgan"] | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
def __init__(
self,
notes_encoder: SpectrogramNotesEncoder,
continuous_encoder: SpectrogramContEncoder,
decoder: T5FilmDecoder,
scheduler: DDPMScheduler,
melgan: OnnxRuntimeModel if is_onnx_available() else Any,
) -> None:
super().__init__()
# From MELGAN
self.min_value = math.log(1e-5) # Matches MelGAN training.
self.max_value = 4.0 # Largest value for most examples
self.n_dims = 128
self.register_modules(
notes_encoder=notes_encoder,
continuous_encoder=continuous_encoder,
decoder=decoder,
scheduler=scheduler,
melgan=melgan,
) | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
"""Linearly scale features to network outputs range."""
min_out, max_out = output_range
if clip:
features = torch.clip(features, self.min_value, self.max_value)
# Scale to [0, 1].
zero_one = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
"""Invert by linearly scaling network outputs to features range."""
min_out, max_out = input_range
outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
# Scale to [0, 1].
zero_one = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
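A small worked example of the two linear maps, using the same constants set in `__init__` (`min_value = log(1e-5) ≈ -11.51`, `max_value = 4.0`); apart from clipping, `scale_to_features` exactly undoes `scale_features`:

```py
import math
import torch

min_value, max_value = math.log(1e-5), 4.0  # the constants set in __init__ above

features = torch.tensor([min_value, 0.0, max_value])
zero_one = (features - min_value) / (max_value - min_value)
scaled = zero_one * 2.0 - 1.0                 # scale_features(..., output_range=(-1, 1))
print(scaled)                                  # tensor([-1.0000,  0.4843,  1.0000])

back = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value
print(torch.allclose(back, features))          # True: the inverse map recovers the features
```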
def encode(self, input_tokens, continuous_inputs, continuous_mask):
tokens_mask = input_tokens > 0
tokens_encoded, tokens_mask = self.notes_encoder(
encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
)
continuous_encoded, continuous_mask = self.continuous_encoder(
encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def decode(self, encodings_and_masks, input_tokens, noise_time):
timesteps = noise_time
if not torch.is_tensor(timesteps):
timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(input_tokens.device) | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
logits = self.decoder(
encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
)
return logits | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
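The timestep handling above deliberately avoids `Tensor.expand`, which the comment notes is problematic for ONNX/Core ML export. In isolation, the broadcast trick looks like this (values are illustrative):

```py
import torch

t = torch.tensor([37], dtype=torch.long)  # a single, scalar-like timestep
batch_size = 4
# multiply by a ones-vector instead of calling .expand(), keeping the graph export-friendly
t_batched = t * torch.ones(batch_size, dtype=t.dtype, device=t.device)
print(t_batched)  # tensor([37, 37, 37, 37])
```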
@torch.no_grad()
def __call__(
self,
input_tokens: List[List[int]],
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 100,
return_dict: bool = True,
output_type: str = "np",
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
) -> Union[AudioPipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
input_tokens (`List[List[int]]`):
Batches of encoded MIDI note tokens to condition the generated spectrogram segments on.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality audio at the
expense of slower inference.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
output_type (`str`, *optional*, defaults to `"np"`):
The output format of the generated audio. Choose `"np"` for a waveform (requires the `melgan`
component and ONNX Runtime) or `"mel"` for the raw mel spectrogram.
callback (`Callable`, *optional*):
A function that is called every `callback_steps` segments during inference. The function is called
with the following arguments: `callback(segment: int, mel: np.ndarray)`, where `mel` is the mel
spectrogram generated so far.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every segment.
Example:
```py
>>> from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
>>> pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
>>> pipe = pipe.to("cuda")
>>> processor = MidiProcessor()
>>> # Download MIDI from: wget http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_2.mid
>>> output = pipe(processor("beethoven_hammerklavier_2.mid"))
>>> audio = output.audios[0]
```
Returns:
[`pipelines.AudioPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated audio.
"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
for i, encoder_input_tokens in enumerate(input_tokens):
if i == 0:
encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device, dtype=self.decoder.dtype
)
# The first chunk has no previous context.
encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
encoder_continuous_mask = ones | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
encoder_continuous_inputs = self.scale_features(
encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
)
encodings_and_masks = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
continuous_inputs=encoder_continuous_inputs,
continuous_mask=encoder_continuous_mask,
)
# Sample Gaussian noise with the same shape as encoder_continuous_inputs to begin the denoising loop
x = randn_tensor(
shape=encoder_continuous_inputs.shape,
generator=generator,
device=self.device,
dtype=self.decoder.dtype,
)
# set step values
self.scheduler.set_timesteps(num_inference_steps) | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
output = self.decode(
encodings_and_masks=encodings_and_masks,
input_tokens=x,
noise_time=t / self.scheduler.config.num_train_timesteps, # rescale to [0, 1)
)
# Compute previous output: x_t -> x_t-1
x = self.scheduler.step(output, t, x, generator=generator).prev_sample
mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
encoder_continuous_inputs = mel[:1]
pred_mel = mel.cpu().float().numpy()
full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, full_pred_mel)
logger.info("Generated segment %d", i)
if output_type == "np" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
)
elif output_type == "np" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
)
if output_type == "np":
output = self.melgan(input_features=full_pred_mel.astype(np.float32))
else:
output = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=output) | 377 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py |
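A hedged sketch of turning the pipeline output into a WAV file. The 16 kHz sample rate is an assumption based on the MelGAN vocoder used by this family of checkpoints; the pipeline itself does not expose it:

```py
import numpy as np
import scipy.io.wavfile

# `output` is the AudioPipelineOutput returned above (output_type="np")
# 16000 Hz is assumed here: the rate the MelGAN vocoder was trained at
audio = np.asarray(output.audios[0], dtype=np.float32).squeeze()
scipy.io.wavfile.write("generated.wav", rate=16000, data=audio)
```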
@dataclasses.dataclass
class NoteRepresentationConfig:
"""Configuration for note representations."""
onsets_only: bool
include_ties: bool | 378 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py |
@dataclasses.dataclass
class NoteEventData:
pitch: int
velocity: Optional[int] = None
program: Optional[int] = None
is_drum: Optional[bool] = None
instrument: Optional[int] = None | 379 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py |
@dataclasses.dataclass
class NoteEncodingState:
"""Encoding state for note transcription, keeping track of active pitches."""
# velocity bin for active pitches and programs
active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) | 380 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py |
@dataclasses.dataclass
class EventRange:
type: str
min_value: int
max_value: int | 381 | /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py |
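For illustration, a few hypothetical instances of these containers; the values are made up, and in the real pipeline they are produced by the MIDI tokenizer:

```py
# a piano note at MIDI pitch 60 (middle C) with an illustrative velocity bin
note = NoteEventData(pitch=60, velocity=100, program=0, is_drum=False)

# the range of values a "pitch" event may take in the event vocabulary (piano range here)
pitch_range = EventRange(type="pitch", min_value=21, max_value=108)

# encoding state with one currently active note, keyed (illustratively) by a (pitch, program) pair
state = NoteEncodingState(active_pitches={(60, 0): 100})
```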