/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/shap_e/renderer.py

# Create bitmasks between 0 and 255 (inclusive) indicating the state
# of the eight corners of each cube.
bitmasks = (field > 0).to(torch.uint8)
bitmasks = bitmasks[:-1, :, :] | (bitmasks[1:, :, :] << 1)
bitmasks = bitmasks[:, :-1, :] | (bitmasks[:, 1:, :] << 2)
bitmasks = bitmasks[:, :, :-1] | (bitmasks[:, :, 1:] << 4)

# Compute corner coordinates across the entire grid.
corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype)
corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[
    :, None, None
]
corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[
    :, None
]
corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype)
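
# (Editor's note on the bit layout above: after the three OR/shift passes,
# entry [i, j, k] holds an 8-bit code whose bit b is set exactly when the
# corner at (i + (b & 1), j + ((b >> 1) & 1), k + ((b >> 2) & 1)) lies inside
# the surface -- bits 0-1 encode the x pair, bits 2-3 the y-shifted pair, and
# bits 4-7 the four corners of the z-shifted face. This is the standard
# marching-cubes case index.)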

# Compute all vertices across all edges in the grid, even though we will
# throw some out later. We have (X-1)*Y*Z + X*(Y-1)*Z + X*Y*(Z-1) vertices.
# These are all midpoints, and don't account for interpolation (which is
# done later based on the used edge midpoints).
edge_midpoints = torch.cat(
    [
        ((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3),
        ((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3),
        ((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3),
    ],
    dim=0,
)

# Create a flat array of [X, Y, Z] indices for each cube.
cube_indices = torch.zeros(
    grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long
)
cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None]
cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None]
cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev)
flat_cube_indices = cube_indices.reshape(-1, 3)

# Create a flat array mapping each cube to 12 global edge indices.
edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size)

# Apply the LUT to figure out the triangles.
flat_bitmasks = bitmasks.reshape(-1).long()  # cast to long so indexing treats this as indices, not a boolean mask
local_tris = cases[flat_bitmasks]
local_masks = masks[flat_bitmasks]
# Compute the global edge indices for the triangles.
global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape(
    local_tris.shape
)
# Select the used triangles for each cube.
selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)]

# Now we have a bunch of indices into the full list of possible vertices,
# but we want to reduce this list to only the used vertices.
used_vertex_indices = torch.unique(selected_tris.view(-1))
used_edge_midpoints = edge_midpoints[used_vertex_indices]
old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long)
old_index_to_new_index[used_vertex_indices] = torch.arange(
    len(used_vertex_indices), device=dev, dtype=torch.long
)

# Rewrite the triangles to use the new indices
faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape)

# Compute the actual interpolated coordinates corresponding to edge midpoints.
v1 = torch.floor(used_edge_midpoints).to(torch.long)
v2 = torch.ceil(used_edge_midpoints).to(torch.long)
s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]]
s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]]
p1 = (v1.float() / (grid_size_tensor - 1)) * size + min_point
p2 = (v2.float() / (grid_size_tensor - 1)) * size + min_point

# The signs of s1 and s2 should be different. We want to find
# t such that t*s2 + (1-t)*s1 = 0.
t = (s1 / (s1 - s2))[:, None]
verts = t * p2 + (1 - t) * p1

return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None)
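
The interpolation step deserves a quick sanity check. A minimal, self-contained
sketch (editor's illustration, not part of the source file): for SDF samples s1
and s2 of opposite sign at the two ends of an edge, t = s1 / (s1 - s2) is the
root of the linear interpolant, so the emitted vertex sits where the field
crosses zero.

import torch

s1 = torch.tensor([-0.25, 0.5])  # toy SDF values at one edge endpoint
s2 = torch.tensor([0.75, -0.5])  # opposite-sign values at the other endpoint
t = s1 / (s1 - s2)
# t * s2 + (1 - t) * s1 == 0 at the zero crossing
assert torch.allclose(t * s2 + (1 - t) * s1, torch.zeros(2))
print(t)  # tensor([0.2500, 0.5000])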

class MLPNeRFModelOutput(BaseOutput):
    density: torch.Tensor
    signed_distance: torch.Tensor
    channels: torch.Tensor
    ts: torch.Tensor

class MLPNeRSTFModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        d_hidden: int = 256,
        n_output: int = 12,
        n_hidden_layers: int = 6,
        act_fn: str = "swish",
        insert_direction_at: int = 4,
    ):
        super().__init__()

        # Instantiate the MLP
        # Find out the dimension of encoded position and direction
        dummy = torch.eye(1, 3)
        d_posenc_pos = encode_position(position=dummy).shape[-1]
        d_posenc_dir = encode_direction(position=dummy).shape[-1]

        mlp_widths = [d_hidden] * n_hidden_layers
        input_widths = [d_posenc_pos] + mlp_widths
        output_widths = mlp_widths + [n_output]

        if insert_direction_at is not None:
            input_widths[insert_direction_at] += d_posenc_dir

        self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(input_widths, output_widths)])

        if act_fn == "swish":
            # self.activation = swish
            # yiyi testing:
            self.activation = lambda x: F.silu(x)
        else:
            raise ValueError(f"Unsupported activation function {act_fn}")

        self.sdf_activation = torch.tanh
        self.density_activation = torch.nn.functional.relu
        self.channel_activation = torch.sigmoid

    def map_indices_to_keys(self, output):
        h_map = {
            "sdf": (0, 1),
            "density_coarse": (1, 2),
            "density_fine": (2, 3),
            "stf": (3, 6),
            "nerf_coarse": (6, 9),
            "nerf_fine": (9, 12),
        }
        mapped_output = {k: output[..., start:end] for k, (start, end) in h_map.items()}
        return mapped_output

    def forward(self, *, position, direction, ts, nerf_level="coarse", rendering_mode="nerf"):
        h = encode_position(position)

        h_preact = h
        h_directionless = None
        for i, layer in enumerate(self.mlp):
            if i == self.config.insert_direction_at:  # 4 in the config
                h_directionless = h_preact
                h_direction = encode_direction(position, direction=direction)
                h = torch.cat([h, h_direction], dim=-1)

            h = layer(h)
            h_preact = h

            if i < len(self.mlp) - 1:
                h = self.activation(h)

        h_final = h
        if h_directionless is None:
            h_directionless = h_preact

        activation = self.map_indices_to_keys(h_final)

        if nerf_level == "coarse":
            h_density = activation["density_coarse"]
        else:
            h_density = activation["density_fine"]

        if rendering_mode == "nerf":
            if nerf_level == "coarse":
                h_channels = activation["nerf_coarse"]
            else:
                h_channels = activation["nerf_fine"]
        elif rendering_mode == "stf":
            h_channels = activation["stf"]

        density = self.density_activation(h_density)
        signed_distance = self.sdf_activation(activation["sdf"])
        channels = self.channel_activation(h_channels)

        # yiyi notes: signed_distance is not used by the NeRF rendering path (it is consumed by decode_to_mesh)
        return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts)
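
The slicing performed by map_indices_to_keys is easiest to see on a toy tensor.
A small sketch (editor's illustration) of the fixed 12-channel head layout: one
SDF channel, two density channels (coarse/fine), and three RGB triplets for STF
and the two NeRF levels.

import torch

out = torch.arange(12.0)  # stand-in for one MLP output vector
h_map = {
    "sdf": (0, 1), "density_coarse": (1, 2), "density_fine": (2, 3),
    "stf": (3, 6), "nerf_coarse": (6, 9), "nerf_fine": (9, 12),
}
print({k: out[start:end].tolist() for k, (start, end) in h_map.items()})
# {'sdf': [0.0], 'density_coarse': [1.0], 'density_fine': [2.0],
#  'stf': [3.0, 4.0, 5.0], 'nerf_coarse': [6.0, 7.0, 8.0], 'nerf_fine': [9.0, 10.0, 11.0]}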

class ChannelsProj(nn.Module):
    def __init__(
        self,
        *,
        vectors: int,
        channels: int,
        d_latent: int,
    ):
        super().__init__()
        self.proj = nn.Linear(d_latent, vectors * channels)
        self.norm = nn.LayerNorm(channels)
        self.d_latent = d_latent
        self.vectors = vectors
        self.channels = channels

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_bvd = x
        w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent)
        b_vc = self.proj.bias.view(1, self.vectors, self.channels)
        h = torch.einsum("bvd,vcd->bvc", x_bvd, w_vcd)
        h = self.norm(h)
        h = h + b_vc
        return h
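
The einsum in ChannelsProj.forward applies a different (channels x d_latent)
slice of the shared nn.Linear weight to each latent vector. A quick equivalence
check with toy shapes (editor's illustration):

import torch

b, v, c, d = 2, 4, 3, 8  # toy batch, vectors, channels, d_latent
x = torch.randn(b, v, d)
w = torch.randn(v * c, d)  # plays the role of self.proj.weight
w_vcd = w.view(v, c, d)
h_einsum = torch.einsum("bvd,vcd->bvc", x, w_vcd)
# Same result, one latent vector at a time:
h_loop = torch.stack([x[:, i] @ w_vcd[i].T for i in range(v)], dim=1)
assert torch.allclose(h_einsum, h_loop, atol=1e-5)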

class ShapEParamsProjModel(ModelMixin, ConfigMixin):
    """
    Project the latent representation of a 3D asset to obtain weights of a multi-layer perceptron (MLP).

    For more details, see the original paper:
    """

    @register_to_config
    def __init__(
        self,
        *,
        param_names: Tuple[str] = (
            "nerstf.mlp.0.weight",
            "nerstf.mlp.1.weight",
            "nerstf.mlp.2.weight",
            "nerstf.mlp.3.weight",
        ),
        param_shapes: Tuple[Tuple[int]] = (
            (256, 93),
            (256, 256),
            (256, 256),
            (256, 256),
        ),
        d_latent: int = 1024,
    ):
        super().__init__()

        # check inputs
        if len(param_names) != len(param_shapes):
            raise ValueError("Must provide same number of `param_names` as `param_shapes`")
        self.projections = nn.ModuleDict({})
        for k, (vectors, channels) in zip(param_names, param_shapes):
            self.projections[_sanitize_name(k)] = ChannelsProj(
                vectors=vectors,
                channels=channels,
                d_latent=d_latent,
            )

    def forward(self, x: torch.Tensor):
        out = {}
        start = 0
        for k, shape in zip(self.config.param_names, self.config.param_shapes):
            vectors, _ = shape
            end = start + vectors
            x_bvd = x[:, start:end]
            out[k] = self.projections[_sanitize_name(k)](x_bvd).reshape(len(x), *shape)
            start = end
        return out
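
forward walks the latent row by row: each projected parameter consumes
`vectors` rows, and with the default config the four weight matrices consume
exactly the 1024 latent rows. A tiny sketch (editor's illustration) of the
resulting offsets:

param_shapes = [(256, 93), (256, 256), (256, 256), (256, 256)]
offsets, start = [], 0
for vectors, _ in param_shapes:
    offsets.append((start, start + vectors))
    start += vectors
print(offsets)  # [(0, 256), (256, 512), (512, 768), (768, 1024)]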

class ShapERenderer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        param_names: Tuple[str] = (
            "nerstf.mlp.0.weight",
            "nerstf.mlp.1.weight",
            "nerstf.mlp.2.weight",
            "nerstf.mlp.3.weight",
        ),
        param_shapes: Tuple[Tuple[int]] = (
            (256, 93),
            (256, 256),
            (256, 256),
            (256, 256),
        ),
        d_latent: int = 1024,
        d_hidden: int = 256,
        n_output: int = 12,
        n_hidden_layers: int = 6,
        act_fn: str = "swish",
        insert_direction_at: int = 4,
        background: Tuple[float] = (
            255.0,
            255.0,
            255.0,
        ),
    ):
        super().__init__()

        self.params_proj = ShapEParamsProjModel(
            param_names=param_names,
            param_shapes=param_shapes,
            d_latent=d_latent,
        )
        self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at)
        self.void = VoidNeRFModel(background=background, channel_scale=255.0)
        self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0])
        self.mesh_decoder = MeshDecoder()

    @torch.no_grad()
    def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False):
        """
        Perform volumetric rendering over a partition of possible t's in the union of rendering volumes (written
        below with some abuse of notation)

        C(r) := sum(
            transmittance(t[i]) * integrate(
                lambda t: density(t) * channels(t) * transmittance(t),
                [t[i], t[i + 1]],
            )
            for i in range(len(parts))
        ) + transmittance(t[-1]) * void_model(t[-1]).channels

        where

        1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the probability of light passing
           through the volume specified by [t[0], s]. (transmittance of 1 means light can pass freely)
        2) density and channels are obtained by evaluating the appropriate part.model at time t.
        3) [t[i], t[i + 1]] is defined as the range of t where the ray intersects
           (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface of the shell (if bounded).
           If the ray does not intersect, the integral over this segment is evaluated as 0 and
           transmittance(t[i + 1]) := transmittance(t[i]).
        4) The last term is integration to infinity (e.g. [t[-1], math.inf]) that is evaluated by the void_model
           (i.e. we consider this space to be empty).

        Args:
            rays: [batch_size x ... x 2 x 3] origin and direction.
            sampler: the ray sampler used for the disjoint volume integrals.
            n_samples: number of ts to sample.
            prev_model_out: model outputs from the previous rendering step, including the ts already sampled
                along each ray.

        :return: A tuple of
            - `channels`
            - an importance sampler for additional fine-grained rendering
            - raw model output
        """
        origin, direction = rays[..., 0, :], rays[..., 1, :]

        # Integrate over [t[i], t[i + 1]]

        # 1. Intersect the rays with the current volume and sample ts to integrate along.
        vrange = self.volume.intersect(origin, direction, t0_lower=None)
        ts = sampler.sample(vrange.t0, vrange.t1, n_samples)
        ts = ts.to(rays.dtype)

        if prev_model_out is not None:
            # Append the previous ts now before fprop because previous
            # rendering used a different model and we can't reuse the output.
            ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values

        batch_size, *_shape, _t0_dim = vrange.t0.shape
        _, *ts_shape, _ts_dim = ts.shape

        # 2. Get the points along the ray and query the model
        directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3])
        positions = origin.unsqueeze(-2) + ts * directions

        directions = directions.to(self.mlp.dtype)
        positions = positions.to(self.mlp.dtype)

        optional_directions = directions if render_with_direction else None

        model_out = self.mlp(
            position=positions,
            direction=optional_directions,
            ts=ts,
            nerf_level="coarse" if prev_model_out is None else "fine",
        )

        # 3. Integrate the model results
        channels, weights, transmittance = integrate_samples(
            vrange, model_out.ts, model_out.density, model_out.channels
        )

        # 4. Clean up results that do not intersect with the volume.
        transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance))
        channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels))
        # 5. Integration to infinity (e.g. [t[-1], math.inf]) that is evaluated by the void_model
        # (i.e. we consider this space to be empty).
        channels = channels + transmittance * self.void(origin)

        weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights)

        return channels, weighted_sampler, model_out

    @torch.no_grad()
    def decode_to_image(
        self,
        latents,
        device,
        size: int = 64,
        ray_batch_size: int = 4096,
        n_coarse_samples=64,
        n_fine_samples=128,
    ):
        # project the parameters from the generated latents
        projected_params = self.params_proj(latents)

        # update the mlp layers of the renderer
        for name, param in self.mlp.state_dict().items():
            if f"nerstf.{name}" in projected_params.keys():
                param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))

        # create cameras object
        camera = create_pan_cameras(size)
        rays = camera.camera_rays
        rays = rays.to(device)

        n_batches = rays.shape[1] // ray_batch_size

        coarse_sampler = StratifiedRaySampler()

        images = []

        for idx in range(n_batches):
            rays_batch = rays[:, idx * ray_batch_size : (idx + 1) * ray_batch_size]

            # render rays with coarse, stratified samples.
            _, fine_sampler, coarse_model_out = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples)
            # Then, render with additional importance-weighted ray samples.
            channels, _, _ = self.render_rays(
                rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out
            )

            images.append(channels)

        images = torch.cat(images, dim=1)
        images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0)

        return images

    @torch.no_grad()
    def decode_to_mesh(
        self,
        latents,
        device,
        grid_size: int = 128,
        query_batch_size: int = 4096,
        texture_channels: Tuple = ("R", "G", "B"),
    ):
        # 1. project the parameters from the generated latents
        projected_params = self.params_proj(latents)

        # 2. update the mlp layers of the renderer
        for name, param in self.mlp.state_dict().items():
            if f"nerstf.{name}" in projected_params.keys():
                param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))

        # 3. decoding with STF rendering
        # 3.1 query the SDF values at vertices along a regular 128**3 grid
        query_points = volume_query_points(self.volume, grid_size)
        query_positions = query_points[None].repeat(1, 1, 1).to(device=device, dtype=self.mlp.dtype)

        fields = []

        for idx in range(0, query_positions.shape[1], query_batch_size):
            query_batch = query_positions[:, idx : idx + query_batch_size]
            model_out = self.mlp(
                position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
            )
            fields.append(model_out.signed_distance)

        # predicted SDF values
        fields = torch.cat(fields, dim=1)
        fields = fields.float()

        assert (
            len(fields.shape) == 3 and fields.shape[-1] == 1
        ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"

        fields = fields.reshape(1, *([grid_size] * 3))

        # create grid 128 x 128 x 128
        # - force a negative border around the SDFs to close off all the models.
        full_grid = torch.zeros(
            1,
            grid_size + 2,
            grid_size + 2,
            grid_size + 2,
            device=fields.device,
            dtype=fields.dtype,
        )
        full_grid.fill_(-1.0)
        full_grid[:, 1:-1, 1:-1, 1:-1] = fields
        fields = full_grid

        # apply a differentiable implementation of Marching Cubes to construct meshes
        raw_meshes = []
        mesh_mask = []

        for field in fields:
            raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min)
            mesh_mask.append(True)
            raw_meshes.append(raw_mesh)

        mesh_mask = torch.tensor(mesh_mask, device=fields.device)

        max_vertices = max(len(m.verts) for m in raw_meshes)

        # 3.2. query the texture color head at each vertex of the resulting mesh.
        texture_query_positions = torch.stack(
            [m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes],
            dim=0,
        )
        texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype)

        textures = []

        for idx in range(0, texture_query_positions.shape[1], query_batch_size):
            query_batch = texture_query_positions[:, idx : idx + query_batch_size]
            texture_model_out = self.mlp(
                position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
            )
            textures.append(texture_model_out.channels)

        # predict texture color
        textures = torch.cat(textures, dim=1)

        textures = _convert_srgb_to_linear(textures)
        textures = textures.float()

        # 3.3 augment the mesh with texture data
        assert len(textures.shape) == 3 and textures.shape[-1] == len(
            texture_channels
        ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}"

        for m, texture in zip(raw_meshes, textures):
            texture = texture[: len(m.verts)]
            m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1)))

        return raw_meshes[0]
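
Step 3 of render_rays above delegates to integrate_samples. The discrete
quadrature behind it is the standard one; a minimal sketch for a single ray
(editor's illustration, not the library's implementation): per-interval opacity
alpha_i = 1 - exp(-density_i * dt_i), transmittance T_i = prod_{j<i} (1 - alpha_j),
color = sum_i T_i * alpha_i * channels_i, with the leftover transmittance handed
to the void/background model.

import torch

density = torch.tensor([0.1, 2.0, 5.0, 0.3])    # hypothetical samples along one ray
channels = torch.tensor([[1.0, 0.0, 0.0]] * 4)  # constant red for clarity
dt = torch.full((4,), 0.25)                     # uniform t spacing

alpha = 1 - torch.exp(-density * dt)
trans = torch.cumprod(torch.cat([torch.ones(1), 1 - alpha[:-1]]), dim=0)
weights = trans * alpha
color = (weights[:, None] * channels).sum(dim=0)
background_weight = trans[-1] * (1 - alpha[-1])  # what the void model fills in
print(color, background_weight)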

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e.py

class ShapEPipelineOutput(BaseOutput):
    """
    Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`].

    Args:
        images (`List[List[PIL.Image.Image]]` or `List[List[np.ndarray]]`)
            A list of images for 3D rendering.
    """

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]

class ShapEPipeline(DiffusionPipeline):
    """
    Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        scheduler ([`HeunDiscreteScheduler`]):
            A scheduler to be used in combination with the `prior` model to generate image embedding.
        shap_e_renderer ([`ShapERenderer`]):
            The Shap-E renderer projects the generated latents into parameters of an MLP to create 3D objects with
            the NeRF rendering method.
    """

    model_cpu_offload_seq = "text_encoder->prior"
    _exclude_from_cpu_offload = ["shap_e_renderer"]

    def __init__(
        self,
        prior: PriorTransformer,
        text_encoder: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        scheduler: HeunDiscreteScheduler,
        shap_e_renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            shap_e_renderer=shap_e_renderer,
        )

    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
    ):
        len(prompt) if isinstance(prompt, list) else 1

        # YiYi Notes: set pad_token_id to be 0, not sure why I can't set in the config file
        self.tokenizer.pad_token_id = 0
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_encoder_output = self.text_encoder(text_input_ids.to(device))
        prompt_embeds = text_encoder_output.text_embeds
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        # Shap-E normalizes the prompt_embeds here and rescales them further below
        prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True)

        if do_classifier_free_guidance:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        # Rescale the features to have unit variance
        prompt_embeds = math.sqrt(prompt_embeds.shape[1]) * prompt_embeds

        return prompt_embeds
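
    # Editor's note: the normalize-then-rescale above gives each embedding unit
    # RMS -- dividing by the L2 norm makes ||e|| = 1, and multiplying by sqrt(d)
    # makes sum(e_i**2) = d, i.e. mean(e_i**2) = 1.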

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent, mesh
        return_dict: bool = True,
    ):
        """
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            frame_size (`int`, *optional*, defaults to 64):
                The width and height of each image frame of the generated 3D output.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain
                tuple.

        Examples:

        Returns:
            [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images.
        """

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=prompt_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

            if XLA_AVAILABLE:
                xm.mark_step()

        # Offload all models
        self.maybe_free_model_hooks()

        if output_type not in ["np", "pil", "latent", "mesh"]:
            raise ValueError(
                f"Only the output types `pil`, `np`, `latent` and `mesh` are supported, not output_type={output_type}"
            )

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        if output_type == "mesh":
            for i, latent in enumerate(latents):
                mesh = self.shap_e_renderer.decode_to_mesh(
                    latent[None, :],
                    device,
                )
                images.append(mesh)

        else:
            # np, pil
            for i, latent in enumerate(latents):
                image = self.shap_e_renderer.decode_to_image(
                    latent[None, :],
                    device,
                    size=frame_size,
                )
                images.append(image)

            images = torch.stack(images)
            images = images.cpu().numpy()

            if output_type == "pil":
                images = [self.numpy_to_pil(image) for image in images]

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
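
End-to-end, the pipeline reads as: encode the prompt, denoise the latent with
the prior, then hand each latent to the renderer. A hypothetical usage sketch
(the checkpoint name is assumed for illustration, not taken from this file):

import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
frames = pipe("a donut", guidance_scale=15.0, frame_size=64).images[0]
frames[0].save("donut_frame0.png")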

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/shap_e/camera.py

class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
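
A small sketch (editor's illustration) of the pixel-index to (x, y) mapping
used in get_image_coords, for a hypothetical 3x2 image:

import torch

width, height = 3, 2
idx = torch.arange(height * width)
coords = torch.stack(
    [idx % width, torch.div(idx, width, rounding_mode="trunc")], dim=1
)
print(coords.tolist())  # [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]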

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py

class ShapEPipelineOutput(BaseOutput):
    """
    Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`].

    Args:
        images (`PIL.Image.Image` or `np.ndarray`)
            A list of images for 3D rendering.
    """

    images: Union[PIL.Image.Image, np.ndarray]

class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method from an image.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        image_encoder ([`~transformers.CLIPVisionModel`]):
            Frozen image-encoder.
        image_processor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to process images.
        scheduler ([`HeunDiscreteScheduler`]):
            A scheduler to be used in combination with the `prior` model to generate image embedding.
        shap_e_renderer ([`ShapERenderer`]):
            The Shap-E renderer projects the generated latents into parameters of an MLP to create 3D objects with
            the NeRF rendering method.
    """

    model_cpu_offload_seq = "image_encoder->prior"
    _exclude_from_cpu_offload = ["shap_e_renderer"]

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        shap_e_renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            shap_e_renderer=shap_e_renderer,
        )

    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_image(
        self,
        image,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
    ):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # (batch_size, 256, dim): drop the CLS token

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent, mesh
        return_dict: bool = True,
    ):
        """
        The call function to the pipeline for generation.

        Args:
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or
                `List[np.ndarray]`):
                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
                latents as image, but if passing latents directly it is not encoded again.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            frame_size (`int`, *optional*, defaults to 64):
                The width and height of each image frame of the generated 3D output.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain
                tuple.

        Examples:

        Returns:
            [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images.
        """

        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        if latents is None:
            latents = self.prepare_latents(
                (batch_size, num_embeddings * embedding_dim),
                image_embeds.dtype,
                device,
                generator,
                latents,
                self.scheduler,
            )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

            if XLA_AVAILABLE:
                xm.mark_step()

        if output_type not in ["np", "pil", "latent", "mesh"]:
            raise ValueError(
                f"Only the output types `pil`, `np`, `latent` and `mesh` are supported, not output_type={output_type}"
            )

        # Offload all models
        self.maybe_free_model_hooks()

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        if output_type == "mesh":
            for i, latent in enumerate(latents):
                mesh = self.shap_e_renderer.decode_to_mesh(
                    latent[None, :],
                    device,
                )
                images.append(mesh)

        else:
            # np, pil
            for i, latent in enumerate(latents):
                image = self.shap_e_renderer.decode_to_image(
                    latent[None, :],
                    device,
                    size=frame_size,
                )
                images.append(image)

            images = torch.stack(images)
            images = images.cpu().numpy()

            if output_type == "pil":
                images = [self.numpy_to_pil(image) for image in images]

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
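
A hypothetical usage sketch of the image-conditioned variant (the checkpoint
name and image URL are assumptions for illustration):

import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = load_image("https://example.com/corgi.png")  # any RGB input image
frames = pipe(image, guidance_scale=3.0, frame_size=64).images[0]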

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py

class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`)
            List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or
            `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]

/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py

class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion with latent editing.

    This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass
    documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular
    device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`Q16SafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs
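
    # Editor's note: the signature introspection above is what lets one call site
    # serve schedulers with different `step` signatures -- e.g. DDIMScheduler.step
    # accepts `eta` and `generator`, while schedulers whose `step` lacks those
    # parameters simply receive an empty kwargs dict here.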

    # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found"
                f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
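
    # Editor's note: `init_noise_sigma` is 1.0 for most plain schedulers and
    # roughly the largest noise sigma for Karras-style schedulers, so this one
    # multiplication lets the same pipeline code serve both families.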

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        editing_prompt: Optional[Union[str, List[str]]] = None,
        editing_prompt_embeddings: Optional[torch.Tensor] = None,
        reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
        edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
        edit_warmup_steps: Optional[Union[int, List[int]]] = 10,
        edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
        edit_threshold: Optional[Union[float, List[float]]] = 0.9,
        edit_momentum_scale: Optional[float] = 0.1,
        edit_mom_beta: Optional[float] = 0.4,
        edit_weights: Optional[List[float]] = None,
        sem_guidance: Optional[List[torch.Tensor]] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide image generation.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide what to not include in image generation. If not defined, you need to
            pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale <= 1`).
        num_images_per_prompt (`int`, *optional*, defaults to 1):
            The number of images to generate per prompt.
        eta (`float`, *optional*, defaults to 0.0):
            Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
            to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        latents (`torch.Tensor`, *optional*):
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
            generation. Can be used to tweak the same generation with different prompts. If not provided, a
            latents tensor is generated by sampling using the supplied random `generator`.
        output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
            plain tuple.
        callback (`Callable`, *optional*):
            A function that is called every `callback_steps` steps during inference. The function is called with
            the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
        callback_steps (`int`, *optional*, defaults to 1):
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            The frequency at which the `callback` function is called. If not specified, the callback is called at
            every step.
        editing_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to use for semantic guidance. Semantic guidance is disabled by setting
            `editing_prompt = None`. The guidance direction of a prompt should be specified via
            `reverse_editing_direction`.
        editing_prompt_embeddings (`torch.Tensor`, *optional*):
            Pre-computed embeddings to use for semantic guidance. The guidance direction of an embedding should
            be specified via `reverse_editing_direction`.
        reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
            Whether the corresponding prompt in `editing_prompt` should be increased or decreased.
        edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            Guidance scale for semantic guidance. If provided as a list, values should correspond to
            `editing_prompt`.
        edit_warmup_steps (`int` or `List[int]`, *optional*, defaults to 10):
            Number of diffusion steps (for each prompt) for which semantic guidance is not applied. Momentum is
            calculated for those steps and applied once all warmup periods are over.
        edit_cooldown_steps (`int` or `List[int]`, *optional*, defaults to `None`):
            Number of diffusion steps (for each prompt) after which semantic guidance is no longer applied.
        edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
            Threshold of semantic guidance.
        edit_momentum_scale (`float`, *optional*, defaults to 0.1):
            Scale of the momentum to be added to the semantic guidance at each diffusion step. If set to 0.0,
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            momentum is disabled. Momentum is already built up during warmup (for diffusion steps smaller than
            `edit_warmup_steps`). Momentum is only added to the latent guidance once all warmup periods are
            finished.
        edit_mom_beta (`float`, *optional*, defaults to 0.4):
            Defines how semantic guidance momentum builds up. `edit_mom_beta` indicates how much of the previous
            momentum is kept. Momentum is already built up during warmup (for diffusion steps smaller than
            `edit_warmup_steps`).
        edit_weights (`List[float]`, *optional*, defaults to `None`):
            Indicates how much each individual concept should influence the overall guidance. If no weights are
            provided, all concepts are applied equally.
        sem_guidance (`List[torch.Tensor]`, *optional*):
            List of pre-generated guidance vectors to be applied at generation. The length of the list has to
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
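To make the warmup/momentum interplay described above concrete, here is a minimal sketch with hypothetical names (`update_momentum` is not a pipeline function; the pipeline's exact update may differ): `edit_mom_beta` controls how much of the previous momentum survives each step, and `edit_momentum_scale` controls how strongly the accumulated momentum feeds back into the guidance.

```py
# Minimal sketch of the momentum update the docstring describes; names and the
# exact blend are assumptions, not the pipeline's verbatim code.
import torch

def update_momentum(momentum, guidance, edit_mom_beta=0.4, edit_momentum_scale=0.1):
    # Keep a fraction of the old momentum, mix in the new guidance direction...
    momentum = edit_mom_beta * momentum + (1 - edit_mom_beta) * guidance
    # ...and return the scaled contribution added to the semantic guidance term.
    return momentum, edit_momentum_scale * momentum

momentum = torch.zeros(1, 4, 64, 64)
guidance = torch.randn(1, 4, 64, 64)
momentum, contribution = update_momentum(momentum, guidance)
```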
            correspond to `num_inference_steps`.
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    Examples:

    ```py
    >>> import torch
    >>> from diffusers import SemanticStableDiffusionPipeline

    >>> pipe = SemanticStableDiffusionPipeline.from_pretrained(
    ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ... )
    >>> pipe = pipe.to("cuda")
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    >>> out = pipe(
    ...     prompt="a photo of the face of a woman",
    ...     num_images_per_prompt=1,
    ...     guidance_scale=7,
    ...     editing_prompt=[
    ...         "smiling, smile",  # Concepts to apply
    ...         "glasses, wearing glasses",
    ...         "curls, wavy hair, curly hair",
    ...         "beard, full beard, mustache",
    ...     ],
    ...     reverse_editing_direction=[
    ...         False,
    ...         False,
    ...         False,
    ...         False,
    ...     ],  # Direction of guidance, i.e. increase all concepts
    ...     edit_warmup_steps=[10, 10, 10, 10],  # Warmup period for each concept
    ...     edit_guidance_scale=[4, 5, 5, 5.4],  # Guidance scale for each concept
    ...     edit_threshold=[
    ...         0.99,
    ...         0.975,
    ...         0.925,
    ...         0.96,
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    ...     ],  # Threshold for each concept: the percentile of the latent space that is discarded, i.e. threshold=0.99 uses only 1% of the latent dimensions
    ...     edit_momentum_scale=0.3,  # Momentum scale that will be added to the latent guidance
    ...     edit_mom_beta=0.6,  # Momentum beta
    ...     edit_weights=[1, 1, 1, 1],  # Weights of the individual concepts against each other
    ... )
    >>> image = out.images[0]
    ```
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
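One plausible reading of the percentile comment in the example above, as a standalone sketch (not the pipeline's exact code): a threshold of 0.99 keeps only the top 1% of guidance entries by magnitude and zeroes out the rest.

```py
# Sketch of percentile thresholding as described in the example comment:
# compute the magnitude quantile and mask out everything below it.
import torch

guidance = torch.randn(1, 4, 64, 64)
edit_threshold = 0.99
cutoff = torch.quantile(guidance.abs().flatten(), edit_threshold)
masked = torch.where(guidance.abs() >= cutoff, guidance, torch.zeros_like(guidance))
```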
    Returns:
        [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] or `tuple`:
            If `return_dict` is `True`,
            [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] is returned, otherwise
            a `tuple` is returned where the first element is a list with the generated images and the second
            element is a list of `bool`s indicating whether the corresponding generated image contains
            "not-safe-for-work" (nsfw) content.
    """
    # 0. Default height and width to unet
    height = height or self.unet.config.sample_size * self.vae_scale_factor
    width = width or self.unet.config.sample_size * self.vae_scale_factor

    # 1. Check inputs. Raise error if not correct
    self.check_inputs(prompt, height, width, callback_steps)
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    # 2. Define call parameters
    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    if editing_prompt:
        enable_edit_guidance = True
        if isinstance(editing_prompt, str):
            editing_prompt = [editing_prompt]
        enabled_editing_prompts = len(editing_prompt)
    elif editing_prompt_embeddings is not None:
        enable_edit_guidance = True
        enabled_editing_prompts = editing_prompt_embeddings.shape[0]
    else:
        enabled_editing_prompts = 0
        enable_edit_guidance = False

    # get prompt text embeddings
    text_inputs = self.tokenizer(
        prompt,
        padding="max_length",
        max_length=self.tokenizer.model_max_length,
        return_tensors="pt",
    )
    text_input_ids = text_inputs.input_ids
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
        removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
        logger.warning(
            "The following part of your input was truncated because CLIP can only handle sequences up to"
            f" {self.tokenizer.model_max_length} tokens: {removed_text}"
        )
        text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
    text_embeddings = self.text_encoder(text_input_ids.to(device))[0]

    # duplicate text embeddings for each generation per prompt, using mps friendly method
    bs_embed, seq_len, _ = text_embeddings.shape
    text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
    text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
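For context on the "mps friendly" duplication above, here is a standalone demo (hypothetical sizes): repeating along the sequence dimension and reshaping reproduces `repeat_interleave` along the batch dimension, an op that was historically problematic on the mps backend.

```py
# Demo: repeat along dim=1 then view is equivalent to repeat_interleave on dim=0.
import torch

text_embeddings = torch.randn(2, 77, 768)  # (batch, seq_len, hidden)
num_images_per_prompt = 3
bs_embed, seq_len, _ = text_embeddings.shape
dup = text_embeddings.repeat(1, num_images_per_prompt, 1)
dup = dup.view(bs_embed * num_images_per_prompt, seq_len, -1)
assert torch.equal(dup, text_embeddings.repeat_interleave(num_images_per_prompt, dim=0))
```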
    if enable_edit_guidance:
        # get editing-prompt text embeddings
        if editing_prompt_embeddings is None:
            edit_concepts_input = self.tokenizer(
                [x for item in editing_prompt for x in repeat(item, batch_size)],
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                return_tensors="pt",
            )

            edit_concepts_input_ids = edit_concepts_input.input_ids
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length:
                removed_text = self.tokenizer.batch_decode(
                    edit_concepts_input_ids[:, self.tokenizer.model_max_length :]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )
                edit_concepts_input_ids = edit_concepts_input_ids[:, : self.tokenizer.model_max_length]
            edit_concepts = self.text_encoder(edit_concepts_input_ids.to(device))[0]
        else:
            edit_concepts = editing_prompt_embeddings.to(device).repeat(batch_size, 1, 1)
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed_edit, seq_len_edit, _ = edit_concepts.shape
        edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1)
        edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1)

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0
    # get unconditional embeddings for classifier free guidance
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
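A worked sketch of the Imagen-style guidance weight referenced above (random tensors stand in for real predictions): the final noise estimate extrapolates from the unconditional prediction toward the conditional one, and `guidance_scale = 1` collapses to the purely conditional prediction.

```py
# Classifier-free guidance, Imagen Eq. (2): e = e_uncond + w * (e_text - e_uncond)
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```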
    if do_classifier_free_guidance:
        uncond_tokens: List[str]
        if negative_prompt is None:
            uncond_tokens = [""] * batch_size
        elif type(prompt) is not type(negative_prompt):
            raise TypeError(
                f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                f" {type(prompt)}."
            )
        elif isinstance(negative_prompt, str):
            uncond_tokens = [negative_prompt]
        elif batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt`"
                " matches the batch size of `prompt`."
            )
        else:
            uncond_tokens = negative_prompt
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
        max_length = text_input_ids.shape[-1]
        uncond_input = self.tokenizer(
            uncond_tokens,
            padding="max_length",
            max_length=max_length,
            truncation=True,
            return_tensors="pt",
        )
        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0]

        # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
        seq_len = uncond_embeddings.shape[1]
        uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
        uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if enable_edit_guidance:
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts])
        else:
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
    # get the initial random noise unless the user supplied it

    # 4. Prepare timesteps
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = self.scheduler.timesteps
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
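The single-batch trick above sets up a fixed row layout that the denoising loop later relies on. A shape sketch with hypothetical sizes: one unconditional row, one text row, and one row per enabled editing prompt, so the UNet output can be split back apart with `.chunk(2 + enabled_editing_prompts)`.

```py
# Shape check for the concatenated embedding batch (hypothetical sizes).
import torch

enabled_editing_prompts = 3
uncond_embeddings = torch.randn(1, 77, 768)
text_embeddings = torch.randn(1, 77, 768)
edit_concepts = torch.randn(enabled_editing_prompts, 77, 768)
batch = torch.cat([uncond_embeddings, text_embeddings, edit_concepts])
assert batch.shape[0] == 2 + enabled_editing_prompts  # matches the later chunking
```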
    # 5. Prepare latent variables
    num_channels_latents = self.unet.config.in_channels
    latents = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        text_embeddings.dtype,
        device,
        generator,
        latents,
    )

    # 6. Prepare extra step kwargs.
    extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

    # Initialize edit_momentum to None
    edit_momentum = None

    self.uncond_estimates = None
    self.text_estimates = None
    self.edit_estimates = None
    self.sem_guidance = None
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
    for i, t in enumerate(self.progress_bar(timesteps)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = (
            torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents
        )
        latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # perform guidance
        if do_classifier_free_guidance:
            noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts)  # [b, 4, 64, 64]
            noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]
            noise_pred_edit_concepts = noise_pred_out[2:]
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py
            # default text guidance
            noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond)
            # noise_guidance = (noise_pred_text - noise_pred_edit_concepts[0])

            if self.uncond_estimates is None:
                self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape))
            self.uncond_estimates[i] = noise_pred_uncond.detach().cpu()

            if self.text_estimates is None:
                self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape))
            self.text_estimates[i] = noise_pred_text.detach().cpu()

            if self.edit_estimates is None and enable_edit_guidance:
                self.edit_estimates = torch.zeros(
                    (num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape)
                )
208
/Users/nielsrogge/Documents/python_projecten/diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py