Dataset columns:
text: string, lengths 1 to 1.02k characters (a flattened code snippet)
class_index: int64, values 0 to 1.38k
source: string, 431 distinct values (the file path the snippet was taken from)

Each record below gives the text snippet first, followed by its class_index and source.
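As a small illustrative sketch (not part of the dataset itself), records following this three-field schema could be grouped by source file with plain Python; the records list here is a hypothetical stand-in for however the data is actually loaded:

from collections import defaultdict

# Hypothetical stand-in records following the schema above: each has
# "text" (a code snippet), "class_index" (int), and "source" (a file path).
records = [
    {"text": "def example(): ...", "class_index": 218, "source": "pipelines/aura_flow/pipeline_aura_flow.py"},
    {"text": "class Example: ...", "class_index": 219, "source": "pipelines/wuerstchen/modeling_wuerstchen_diffnext.py"},
]

# Group the snippet texts by their source file.
snippets_by_source = defaultdict(list)
for record in records:
    snippets_by_source[record["source"]].append(record["text"])

for source, snippets in snippets_by_source.items():
    print(f"{source}: {len(snippets)} snippet(s)")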
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.reshape(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1)
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, truncation=True, max_length=max_length, padding="max_length", return_tensors="pt", ) uncond_input = {k: v.to(device) for k, v in uncond_input.items()} negative_prompt_embeds = self.text_encoder(**uncond_input)[0] negative_prompt_attention_mask = ( uncond_input["attention_mask"].unsqueeze(-1).expand(negative_prompt_embeds.shape) ) negative_prompt_embeds = negative_prompt_embeds * negative_prompt_attention_mask
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.reshape(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype)
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
@torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Union[str, List[str]] = None, num_inference_steps: int = 50, sigmas: List[float] = None, guidance_scale: float = 3.5, num_images_per_prompt: Optional[int] = 1, height: Optional[int] = 1024, width: Optional[int] = 1024, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 256, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r"""
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
Function invoked when calling the pipeline for generation.
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for best results.
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for negative text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`):
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 1. Check inputs. Raise error if not correct height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, )
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# 2. Determine batch size. if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# 3. Encode input prompt ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) # 4. Prepare timesteps
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# aura use timestep value between 0 and 1, with t=1 as noise and t=0 as the image # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = torch.tensor([t / 1000]).expand(latent_model_input.shape[0]) timestep = timestep.to(latents.device, dtype=latents.dtype) # predict noise model_output noise_pred = self.transformer( latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
# compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step()
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
if output_type == "latent": image = latents else: # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
class_index: 218
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden)
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
# CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1), WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), )
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): if block_type == "C": return ResBlockStageB(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) elif block_type == "A": return AttnBlock(c_hidden, c_cond, nhead, self_attn=True, dropout=dropout) elif block_type == "T": return TimestepBlock(c_hidden, c_r) else: raise ValueError(f"Block type {block_type} not supported")
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
# BLOCKS # -- down blocks self.down_blocks = nn.ModuleList() for i in range(len(c_hidden)): down_block = nn.ModuleList() if i > 0: down_block.append( nn.Sequential( WuerstchenLayerNorm(c_hidden[i - 1], elementwise_affine=False, eps=1e-6), nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2), ) ) for _ in range(blocks[i]): for block_type in level_config[i]: c_skip = c_cond if inject_effnet[i] else 0 down_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) self.down_blocks.append(down_block)
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
# -- up blocks self.up_blocks = nn.ModuleList() for i in reversed(range(len(c_hidden))): up_block = nn.ModuleList() for j in range(blocks[i]): for k, block_type in enumerate(level_config[i]): c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0 c_skip += c_cond if inject_effnet[i] else 0 up_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) if i > 0: up_block.append( nn.Sequential( WuerstchenLayerNorm(c_hidden[i], elementwise_affine=False, eps=1e-6), nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2), ) ) self.up_blocks.append(up_block)
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
# OUTPUT self.clf = nn.Sequential( WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), nn.Conv2d(c_hidden[0], 2 * c_out * (patch_size**2), kernel_size=1), nn.PixelShuffle(patch_size), ) # --- WEIGHT INIT --- self.apply(self._init_weights) def _init_weights(self, m): # General init if isinstance(m, (nn.Conv2d, nn.Linear)): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) for mapper in self.effnet_mappers: if mapper is not None: nn.init.normal_(mapper.weight, std=0.02) # conditionings nn.init.normal_(self.clip_mapper.weight, std=0.02) # conditionings nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs nn.init.constant_(self.clf[1].weight, 0) # outputs
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
# blocks for level_block in self.down_blocks + self.up_blocks: for block in level_block: if isinstance(block, ResBlockStageB): block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks)) elif isinstance(block, TimestepBlock): nn.init.constant_(block.mapper.weight, 0) def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: # zero pad emb = nn.functional.pad(emb, (0, 1), mode="constant") return emb.to(dtype=r.dtype) def gen_c_embeddings(self, clip): clip = self.clip_mapper(clip) clip = self.seq_norm(clip) return clip
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
def _down_encode(self, x, r_embed, effnet, clip=None): level_outputs = [] for i, down_block in enumerate(self.down_blocks): effnet_c = None for block in down_block: if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[i]( nn.functional.interpolate( effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True ).to(dtype) ) skip = effnet_c if self.effnet_mappers[i] is not None else None x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else:
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
x = block(x) level_outputs.insert(0, x) return level_outputs
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
def _up_decode(self, level_outputs, r_embed, effnet, clip=None): x = level_outputs[0] for i, up_block in enumerate(self.up_blocks): effnet_c = None for j, block in enumerate(up_block): if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[len(self.down_blocks) + i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[len(self.down_blocks) + i]( nn.functional.interpolate( effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True ).to(dtype) ) skip = level_outputs[i] if j == 0 and i > 0 else None if effnet_c is not None: if skip is not None: skip = torch.cat([skip, effnet_c], dim=1) else:
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
skip = effnet_c x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) return x
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
def forward(self, x, r, effnet, clip=None, x_cat=None, eps=1e-3, return_noise=True): if x_cat is not None: x = torch.cat([x, x_cat], dim=1) # Process the conditioning embeddings r_embed = self.gen_r_embedding(r) if clip is not None: clip = self.gen_c_embeddings(clip) # Model Blocks x_in = x x = self.embedding(x) level_outputs = self._down_encode(x, r_embed, effnet, clip) x = self._up_decode(level_outputs, r_embed, effnet, clip) a, b = self.clf(x).chunk(2, dim=1) b = b.sigmoid() * (1 - eps * 2) + eps if return_noise: return (x_in - a) / b else: return a, b
class_index: 219
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
class ResBlockStageB(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(c + c_skip, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c), ) def forward(self, x, x_skip=None): x_res = x x = self.norm(self.depthwise(x)) if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x + x_res
class_index: 220
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
class MixingResidualBlock(nn.Module): """ Residual block with mixing used by Paella's VQ-VAE. """ def __init__(self, inp_channels, embed_dim): super().__init__() # depthwise self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.depthwise = nn.Sequential( nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels) ) # channelwise self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(inp_channels, embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels) ) self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True)
class_index: 221
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
def forward(self, x): mods = self.gammas x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] x = x + self.depthwise(x_temp) * mods[2] x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] return x
class_index: 221
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
class PaellaVQModel(ModelMixin, ConfigMixin): r"""VQ-VAE model from Paella model. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the model (such as downloading or saving, etc.)
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. up_down_scale_factor (int, *optional*, defaults to 2): Up and Downscale factor of the input image. levels (int, *optional*, defaults to 2): Number of levels in the model. bottleneck_blocks (int, *optional*, defaults to 12): Number of bottleneck blocks in the model. embed_dim (int, *optional*, defaults to 384): Number of hidden channels in the model. latent_channels (int, *optional*, defaults to 4): Number of latent channels in the VQ-VAE model. num_vq_embeddings (int, *optional*, defaults to 8192): Number of codebook vectors in the VQ-VAE. scale_factor (float, *optional*, defaults to 0.3764): Scaling factor of the latent space. """
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
@register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, up_down_scale_factor: int = 2, levels: int = 2, bottleneck_blocks: int = 12, embed_dim: int = 384, latent_channels: int = 4, num_vq_embeddings: int = 8192, scale_factor: float = 0.3764, ): super().__init__()
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
c_levels = [embed_dim // (2**i) for i in reversed(range(levels))] # Encoder blocks self.in_block = nn.Sequential( nn.PixelUnshuffle(up_down_scale_factor), nn.Conv2d(in_channels * up_down_scale_factor**2, c_levels[0], kernel_size=1), ) down_blocks = [] for i in range(levels): if i > 0: down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) down_blocks.append(block) down_blocks.append( nn.Sequential( nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), nn.BatchNorm2d(latent_channels), # then normalize them to have mean 0 and std 1 ) ) self.down_blocks = nn.Sequential(*down_blocks)
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
# Vector Quantizer self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) # Decoder blocks up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] for i in range(levels): for j in range(bottleneck_blocks if i == 0 else 1): block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) up_blocks.append(block) if i < levels - 1: up_blocks.append( nn.ConvTranspose2d( c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1 ) ) self.up_blocks = nn.Sequential(*up_blocks) self.out_block = nn.Sequential( nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor**2, kernel_size=1), nn.PixelShuffle(up_down_scale_factor), )
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
@apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool = True) -> VQEncoderOutput: h = self.in_block(x) h = self.down_blocks(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode( self, h: torch.Tensor, force_not_quantize: bool = True, return_dict: bool = True ) -> Union[DecoderOutput, torch.Tensor]: if not force_not_quantize: quant, _, _ = self.vquantizer(h) else: quant = h x = self.up_blocks(quant) dec = self.out_block(x) if not return_dict: return (dec,) return DecoderOutput(sample=dec)
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
def forward(self, sample: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample h = self.encode(x).latents dec = self.decode(h).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
class_index: 222
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
class WuerstchenLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, x): x = x.permute(0, 2, 3, 1) x = super().forward(x) return x.permute(0, 3, 1, 2)
class_index: 223
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py
class TimestepBlock(nn.Module): def __init__(self, c, c_timestep): super().__init__() self.mapper = nn.Linear(c_timestep, c * 2) def forward(self, x, t): a, b = self.mapper(t)[:, :, None, None].chunk(2, dim=1) return x * (1 + a) + b
class_index: 224
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py
class ResBlock(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c + c_skip, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(c, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c) ) def forward(self, x, x_skip=None): x_res = x if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) x = self.channelwise(x).permute(0, 3, 1, 2) return x + x_res
class_index: 225
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py
class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6) return self.gamma * (x * stand_div_norm) + self.beta + x
class_index: 226
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py
class AttnBlock(nn.Module): def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): super().__init__() self.self_attn = self_attn self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) self.kv_mapper = nn.Sequential(nn.SiLU(), nn.Linear(c_cond, c)) def forward(self, x, kv): kv = self.kv_mapper(kv) norm_x = self.norm(x) if self.self_attn: batch_size, channel, _, _ = x.shape kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) x = x + self.attention(norm_x, encoder_hidden_states=kv) return x
class_index: 227
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py
class WuerstchenPrior(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): unet_name = "prior" _supports_gradient_checkpointing = True @register_to_config def __init__(self, c_in=16, c=1280, c_cond=1024, c_r=64, depth=16, nhead=16, dropout=0.1): super().__init__() self.c_r = c_r self.projection = nn.Conv2d(c_in, c, kernel_size=1) self.cond_mapper = nn.Sequential( nn.Linear(c_cond, c), nn.LeakyReLU(0.2), nn.Linear(c, c), ) self.blocks = nn.ModuleList() for _ in range(depth): self.blocks.append(ResBlock(c, dropout=dropout)) self.blocks.append(TimestepBlock(c, c_r)) self.blocks.append(AttnBlock(c, c, nhead, self_attn=True, dropout=dropout)) self.out = nn.Sequential( WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6), nn.Conv2d(c, c_in * 2, kernel_size=1), )
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
self.gradient_checkpointing = False self.set_default_attn_processor() @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys())
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor)
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: # zero pad emb = nn.functional.pad(emb, (0, 1), mode="constant") return emb.to(dtype=r.dtype) def forward(self, x, r, c): x_in = x x = self.projection(x) c_embed = self.cond_mapper(c) r_embed = self.gen_r_embedding(r) if torch.is_grad_enabled() and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
if is_torch_version(">=", "1.11.0"): for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, c_embed, use_reentrant=False ) elif isinstance(block, TimestepBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, r_embed, use_reentrant=False ) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) else: for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed) elif isinstance(block, TimestepBlock):
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x) else: for block in self.blocks: if isinstance(block, AttnBlock): x = block(x, c_embed) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) a, b = self.out(x).chunk(2, dim=1) return (x_in - a) / ((1 - b).abs() + 1e-5)
class_index: 228
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py
class WuerstchenDecoderPipeline(DiffusionPipeline): """ Pipeline for generating images from the Wuerstchen model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
Args: tokenizer (`CLIPTokenizer`): The CLIP tokenizer. text_encoder (`CLIPTextModel`): The CLIP text encoder. decoder ([`WuerstchenDiffNeXt`]): The WuerstchenDiffNeXt unet decoder. vqgan ([`PaellaVQModel`]): The VQGAN model. scheduler ([`DDPMWuerstchenScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. latent_dim_scale (float, `optional`, defaults to 10.67): Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and width=int(24*10.67)=256 in order to match the training conditions. """
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
model_cpu_offload_seq = "text_encoder->decoder->vqgan" _callback_tensor_inputs = [ "latents", "text_encoder_hidden_states", "negative_prompt_embeds", "image_embeddings", ] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float = 10.67, ) -> None: super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, decoder=decoder, scheduler=scheduler, vqgan=vqgan, ) self.register_to_config(latent_dim_scale=latent_dim_scale)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] attention_mask = attention_mask[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) text_encoder_hidden_states = text_encoder_output.last_hidden_state text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
uncond_text_encoder_hidden_states = None if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else:
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
uncond_tokens = negative_prompt
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds_text_encoder_output = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) ) uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes return text_encoder_hidden_states, uncond_text_encoder_hidden_states @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
@property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image_embeddings: Union[torch.Tensor, List[torch.Tensor]], prompt: Union[str, List[str]] = None, num_inference_steps: int = 12, timesteps: Optional[List[float]] = None, guidance_scale: float = 0.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation.
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
Args: image_embedding (`torch.Tensor` or `List[torch.Tensor]`): Image Embeddings either extracted from an image or generated by a Prior Model. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. num_inference_steps (`int`, *optional*, defaults to 12): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 0.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `decoder_guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*):
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image embeddings. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", )
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) # 0. Define commonly used variables device = self._execution_device dtype = self.decoder.dtype self._guidance_scale = guidance_scale # 1. Check inputs. Raise error if not correct if not isinstance(prompt, list): if isinstance(prompt, str): prompt = [prompt] else: raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.")
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
if self.do_classifier_free_guidance: if negative_prompt is not None and not isinstance(negative_prompt, list): if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] else: raise TypeError( f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." ) if isinstance(image_embeddings, list): image_embeddings = torch.cat(image_embeddings, dim=0) if isinstance(image_embeddings, np.ndarray): image_embeddings = torch.Tensor(image_embeddings, device=device).to(dtype=dtype) if not isinstance(image_embeddings, torch.Tensor): raise TypeError( f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}." )
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
if not isinstance(num_inference_steps, int): raise TypeError( f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ In Case you want to provide explicit timesteps, please use the 'timesteps' argument." ) # 2. Encode caption prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, image_embeddings.size(0) * num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, ) text_encoder_hidden_states = ( torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds ) effnet = ( torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) if self.do_classifier_free_guidance else image_embeddings )
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# 3. Determine latent shape of latents latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) # 4. Prepare and set timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latents latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# 6. Run denoising loop self._num_timesteps = len(timesteps[:-1]) for i, t in enumerate(self.progress_bar(timesteps[:-1])): ratio = t.expand(latents.size(0)).to(dtype) # 7. Denoise latents predicted_latents = self.decoder( torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, effnet=effnet, clip=text_encoder_hidden_states, ) # 8. Check for classifier free guidance and apply it if self.do_classifier_free_guidance: predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# 9. Renoise latents to next timestep latents = self.scheduler.step( model_output=predicted_latents, timestep=ratio, sample=latents, generator=generator, ).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings) text_encoder_hidden_states = callback_outputs.pop( "text_encoder_hidden_states", text_encoder_hidden_states )
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": # 10. Scale and decode the image latents with vq-vae latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == "np": images = images.permute(0, 2, 3, 1).cpu().float().numpy() elif output_type == "pil": images = images.permute(0, 2, 3, 1).cpu().float().numpy() images = self.numpy_to_pil(images) else: images = latents
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
# Offload all models self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images)
class_index: 229
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
class WuerstchenPriorPipelineOutput(BaseOutput): """ Output class for WuerstchenPriorPipeline. Args: image_embeddings (`torch.Tensor` or `np.ndarray`) Prior image embeddings for text prompt """ image_embeddings: Union[torch.Tensor, np.ndarray]
class_index: 230
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): """ Pipeline for generating image prior for Wuerstchen. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
class_index: 231
source: /Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
Args: prior ([`Prior`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). scheduler ([`DDPMWuerstchenScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. latent_mean ('float', *optional*, defaults to 42.0): Mean value for latent diffusers. latent_std ('float', *optional*, defaults to 1.0): Standard value for latent diffusers. resolution_multiple ('float', *optional*, defaults to 42.67): Default resolution for multiple images generated. """
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
unet_name = "prior"
text_encoder_name = "text_encoder"
model_cpu_offload_seq = "text_encoder->prior"
_callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"]
_lora_loadable_modules = ["prior", "text_encoder"]

def __init__(
    self,
    tokenizer: CLIPTokenizer,
    text_encoder: CLIPTextModel,
    prior: WuerstchenPrior,
    scheduler: DDPMWuerstchenScheduler,
    latent_mean: float = 42.0,
    latent_std: float = 1.0,
    resolution_multiple: float = 42.67,
) -> None:
    super().__init__()
    self.register_modules(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        prior=prior,
        scheduler=scheduler,
    )
    self.register_to_config(
        latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple
    )
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
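Since `latent_mean`, `latent_std`, and `resolution_multiple` go through `register_to_config`, they are serialized with the pipeline and can be read back from its config. A small sketch (checkpoint name assumed for illustration):

from diffusers import WuerstchenPriorPipeline

prior = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior")  # name assumed for illustration
# Values registered in __init__ above are exposed on the config object.
print(prior.config.latent_mean, prior.config.latent_std, prior.config.resolution_multiple)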
# Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
    if latents is None:
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
    else:
        if latents.shape != shape:
            raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
        latents = latents.to(device)

    latents = latents * scheduler.init_noise_sigma
    return latents
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
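The `prepare_latents` helper either draws fresh Gaussian noise or validates caller-supplied latents, then scales by the scheduler's `init_noise_sigma`. A self-contained sketch of the same logic in plain `torch` (the latent shape below is an illustrative assumption):

import torch

def prepare_latents_sketch(shape, generator, init_noise_sigma, latents=None, device="cpu", dtype=torch.float32):
    # Draw noise if no latents were supplied, otherwise validate the shape.
    if latents is None:
        latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
    elif tuple(latents.shape) != tuple(shape):
        raise ValueError(f"Unexpected latents shape, got {tuple(latents.shape)}, expected {tuple(shape)}")
    # Scale to the scheduler's initial noise level.
    return latents * init_noise_sigma

noise = prepare_latents_sketch((1, 16, 24, 24), torch.Generator().manual_seed(0), init_noise_sigma=1.0)
print(noise.shape)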
def encode_prompt(
    self,
    device,
    num_images_per_prompt,
    do_classifier_free_guidance,
    prompt=None,
    negative_prompt=None,
    prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
):
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    if prompt_embeds is None:
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        attention_mask = text_inputs.attention_mask
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
    text_input_ids, untruncated_ids
):
    removed_text = self.tokenizer.batch_decode(
        untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
    )
    logger.warning(
        "The following part of your input was truncated because CLIP can only handle sequences up to"
        f" {self.tokenizer.model_max_length} tokens: {removed_text}"
    )
    text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
    attention_mask = attention_mask[:, : self.tokenizer.model_max_length]
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
text_encoder_output = self.text_encoder(
    text_input_ids.to(device), attention_mask=attention_mask.to(device)
)
prompt_embeds = text_encoder_output.last_hidden_state

prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
if negative_prompt_embeds is None and do_classifier_free_guidance:
    uncond_tokens: List[str]
    if negative_prompt is None:
        uncond_tokens = [""] * batch_size
    elif type(prompt) is not type(negative_prompt):
        raise TypeError(
            f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
            f" {type(prompt)}."
        )
    elif isinstance(negative_prompt, str):
        uncond_tokens = [negative_prompt]
    elif batch_size != len(negative_prompt):
        raise ValueError(
            f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
            f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
            " the batch size of `prompt`."
        )
    else:
        uncond_tokens = negative_prompt
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
uncond_input = self.tokenizer(
    uncond_tokens,
    padding="max_length",
    max_length=self.tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
)
negative_prompt_embeds_text_encoder_output = self.text_encoder(
    uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device)
)

negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.last_hidden_state
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
if do_classifier_free_guidance:
    # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
    seq_len = negative_prompt_embeds.shape[1]
    negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
    negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
    negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
    # done duplicates

return prompt_embeds, negative_prompt_embeds
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
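`encode_prompt` returns the conditional and, when guidance is on, the unconditional CLIP hidden states. A hedged sketch of how a caller would typically combine them for classifier-free guidance (the `prior` object and the cond-then-uncond concatenation order follow the usual diffusers convention and are not quoted from this record):

import torch

# `prior` is a loaded WuerstchenPriorPipeline (see the earlier sketches).
prompt_embeds, negative_prompt_embeds = prior.encode_prompt(
    device="cuda",
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    prompt="a red fox in the snow",
    negative_prompt="blurry, low quality",
)
# Stack both batches so the prior can process cond and uncond in a single forward pass.
text_encoder_hidden_states = torch.cat([prompt_embeds, negative_prompt_embeds])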
def check_inputs(
    self,
    prompt,
    negative_prompt,
    num_inference_steps,
    do_classifier_free_guidance,
    prompt_embeds=None,
    negative_prompt_embeds=None,
):
    if prompt is not None and prompt_embeds is not None:
        raise ValueError(
            f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
            " only forward one of the two."
        )
    elif prompt is None and prompt_embeds is None:
        raise ValueError(
            "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
        )
    elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
        raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
if negative_prompt is not None and negative_prompt_embeds is not None:
    raise ValueError(
        f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
        f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
    )

if prompt_embeds is not None and negative_prompt_embeds is not None:
    if prompt_embeds.shape != negative_prompt_embeds.shape:
        raise ValueError(
            "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
            f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
            f" {negative_prompt_embeds.shape}."
        )
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
if not isinstance(num_inference_steps, int):
    raise TypeError(
        f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}."
        " In case you want to provide explicit timesteps, please use the 'timesteps' argument."
    )

@property
def guidance_scale(self):
    return self._guidance_scale

@property
def do_classifier_free_guidance(self):
    return self._guidance_scale > 1

@property
def num_timesteps(self):
    return self._num_timesteps
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
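`do_classifier_free_guidance` simply checks `self._guidance_scale > 1`. When it is active, the denoised prediction is usually formed with the standard classifier-free guidance combination; the function below is a generic sketch of that formula, not a line quoted from this pipeline, and the tensor shapes are illustrative:

import torch

def apply_cfg(pred_text: torch.Tensor, pred_uncond: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # guidance_scale <= 1 effectively disables guidance; larger values follow the prompt more closely.
    return pred_uncond + guidance_scale * (pred_text - pred_uncond)

pred_text, pred_uncond = torch.randn(2, 1, 16, 24, 24)  # illustrative shapes
guided = apply_cfg(pred_text, pred_uncond, guidance_scale=8.0)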
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    height: int = 1024,
    width: int = 1024,
    num_inference_steps: int = 60,
    timesteps: List[float] = None,
    guidance_scale: float = 8.0,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.Tensor] = None,
    output_type: Optional[str] = "pt",
    return_dict: bool = True,
    callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    **kwargs,
):
    """
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
Function invoked when calling the pipeline for generation.
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
Args:
    prompt (`str` or `List[str]`):
        The prompt or prompts to guide the image generation.
    height (`int`, *optional*, defaults to 1024):
        The height in pixels of the generated image.
    width (`int`, *optional*, defaults to 1024):
        The width in pixels of the generated image.
    num_inference_steps (`int`, *optional*, defaults to 60):
        The number of denoising steps. More denoising steps usually lead to a higher quality image at the
        expense of slower inference.
    timesteps (`List[int]`, *optional*):
        Custom timesteps to use for the denoising process. If not defined, equally spaced `num_inference_steps`
        timesteps are used. Must be in descending order.
    guidance_scale (`float`, *optional*, defaults to 8.0):
        Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
        `guidance_scale` is defined as `w` of equation 2 of the [Imagen
        paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`.
        A higher guidance scale encourages the model to generate images that are closely linked to the text
        `prompt`, usually at the expense of lower image quality.
    negative_prompt (`str` or `List[str]`, *optional*):
        The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
        if `guidance_scale` is less than `1`).
    prompt_embeds (`torch.Tensor`, *optional*):
        Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
        provided, text embeddings will be generated from the `prompt` input argument.
    negative_prompt_embeds (`torch.Tensor`, *optional*):
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
        Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
        weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input
        argument.
    num_images_per_prompt (`int`, *optional*, defaults to 1):
        The number of images to generate per prompt.
    generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
        One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to
        make generation deterministic.
    latents (`torch.Tensor`, *optional*):
        Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
        generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
        tensor will be generated by sampling using the supplied random `generator`.
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
    output_type (`str`, *optional*, defaults to `"pt"`):
        The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
        (`np.array`) or `"pt"` (`torch.Tensor`).
    return_dict (`bool`, *optional*, defaults to `True`):
        Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
    callback_on_step_end (`Callable`, *optional*):
        A function that is called at the end of each denoising step during inference. The function is called
        with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
        callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
        `callback_on_step_end_tensor_inputs`.
    callback_on_step_end_tensor_inputs (`List`, *optional*):
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
        The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
        will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
        the `._callback_tensor_inputs` attribute of your pipeline class.
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
    Examples:

    Returns:
        [`~pipelines.WuerstchenPriorPipelineOutput`] or `tuple`:
            [`~pipelines.WuerstchenPriorPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is a list with the generated image embeddings.
    """
    callback = kwargs.pop("callback", None)
    callback_steps = kwargs.pop("callback_steps", None)

    if callback is not None:
        deprecate(
            "callback",
            "1.0.0",
            "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
        )
    if callback_steps is not None:
        deprecate(
            "callback_steps",
            "1.0.0",
            "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
        )
231
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
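Because `callback`/`callback_steps` are deprecated in favour of `callback_on_step_end`, a hedged end-to-end sketch of the modern calling convention may be useful; the checkpoint name and prompt are illustrative assumptions:

import torch
from diffusers import WuerstchenPriorPipeline

prior = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16  # name assumed for illustration
).to("cuda")

def log_step(pipe, step, timestep, callback_kwargs):
    # Only tensors listed in callback_on_step_end_tensor_inputs are available here.
    print(f"step {step}: latents std {callback_kwargs['latents'].std().item():.3f}")
    return callback_kwargs

out = prior(
    prompt="an impressionist painting of a lighthouse",
    guidance_scale=8.0,
    num_inference_steps=60,
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
)
image_embeddings = out.image_embeddings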