```py
)
mask_image = mask_image.resize((512, 512))

make_image_grid([init_image, mask_image], rows=1, cols=2)
```

Create a function to prepare the control image from the initial and mask images. This'll create a tensor to mark the pixels in `init_image` as masked if the corresponding pixel in `mask_image` is over a certain threshold.

```py
import numpy as np
import torch

def make_inpaint_condition(image, image_mask):
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

    assert image.shape[0:1] == image_mask.shape[0:1]
    image[image_mask > 0.5] = -1.0  # set as masked pixel
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return image

control_image = make_inpaint_condition(init_image, mask_image)
```
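As a quick sanity check (a hedged aside, assuming the 512x512 images prepared above), the control image is a batched, channels-first tensor in which masked pixels are set to `-1.0`:

```py
print(control_image.shape)         # torch.Size([1, 3, 512, 512])
print(control_image.min().item())  # -1.0 inside the masked region
```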
*original image* / *mask image*

Load a ControlNet model conditioned on inpainting and pass it to the StableDiffusionControlNetInpaintPipeline. Use the faster UniPCMultistepScheduler and enable model offloading to speed up inference and reduce memory usage.

```py
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler

controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, use_safetensors=True)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
```

Now pass your prompt, initial image, mask image, and control image to the pipeline:

```py
output = pipe(
    "corgi face with large ears, detailed, pixar, animated, disney",
    num_inference_steps=20,
    eta=1.0,
    image=init_image,
    mask_image=mask_image,
    control_image=control_image,
).images[0]
make_image_grid([init_image, mask_image, output], rows=1, cols=3)
```

## Guess mode

Guess mode does not require supplying a prompt to a ControlNet at all! This forces the ControlNet encoder to do its best to "guess" the contents of the input control map (depth map, pose estimation, canny edge, etc.).

Guess mode adjusts the scale of the output residuals from a ControlNet by a fixed ratio depending on the block depth. The shallowest DownBlock corresponds to 0.1, and as the blocks get deeper, the scale increases exponentially such that the scale of the MidBlock output becomes 1.0.
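To make that schedule concrete, here is a minimal sketch (an illustration of the exponential ramp described above, not the library internals) assuming the 12 down-block residuals plus 1 mid-block residual of a Stable Diffusion v1.5 ControlNet:

```py
import torch

# 12 down-block residuals + 1 mid-block residual (SD v1.5 UNet)
num_residuals = 13
scales = torch.logspace(-1, 0, num_residuals)  # geometric ramp from 0.1 to 1.0
print(scales)
# tensor([0.1000, 0.1212, 0.1468, 0.1778, 0.2154, 0.2610, 0.3162, 0.3831,
#         0.4642, 0.5623, 0.6813, 0.8254, 1.0000])
```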
Guess mode does not have any impact on prompt conditioning, and you can still provide a prompt if you want. Set `guess_mode=True` in the pipeline, and it is recommended to set the `guidance_scale` value between 3.0 and 5.0.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers.utils import load_image, make_image_grid
import numpy as np
import torch
from PIL import Image
import cv2

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", use_safetensors=True)
pipe = StableDiffusionControlNetPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", controlnet=controlnet, use_safetensors=True).to("cuda")

original_image = load_image("https://huggingface.co/takuma104/controlnet_dev/resolve/main/bird_512x512.png")

image = np.array(original_image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

image = pipe("", image=canny_image, guess_mode=True, guidance_scale=3.0).images[0]
make_image_grid([original_image, canny_image, image], rows=1, cols=3)
```

*regular mode with prompt* / *guess mode without prompt*
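Because guess mode leaves prompt conditioning untouched, you can also combine it with a prompt; here is a minimal variant of the call above (the prompt text is only an illustrative example):

```py
image = pipe(
    "a colorful bird perched on a branch", image=canny_image, guess_mode=True, guidance_scale=3.0
).images[0]
```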
## ControlNet with Stable Diffusion XL

There aren't too many ControlNet models compatible with Stable Diffusion XL (SDXL) at the moment, but we've trained two full-sized ControlNet models for SDXL conditioned on canny edge detection and depth maps. We're also experimenting with creating smaller versions of these SDXL-compatible ControlNet models so it is easier to run on resource-constrained hardware. You can find these checkpoints on the 🤗 Diffusers Hub organization!

Let's use an SDXL ControlNet conditioned on canny images to generate an image. Start by loading an image and preparing the canny image:

```py
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
from diffusers.utils import load_image, make_image_grid
from PIL import Image
import cv2
import numpy as np
import torch

original_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
)

image = np.array(original_image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)
make_image_grid([original_image, canny_image], rows=1, cols=2)
```

*original image* / *canny image*

Load an SDXL ControlNet model conditioned on canny edge detection and pass it to the StableDiffusionXLControlNetPipeline. The madebyollin/sdxl-vae-fp16-fix VAE is swapped in here to avoid numerical issues when running the SDXL VAE in float16. You can also enable model offloading to reduce memory usage.

```py
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True
)
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True
)
pipe.enable_model_cpu_offload()
```

Now pass your prompt (and optionally a negative prompt if you're using one) and the canny image to the pipeline:

The `controlnet_conditioning_scale` parameter determines how much weight to assign to the conditioning inputs. A value of 0.5 is recommended for good generalization, but feel free to experiment with this number!

```py
prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
negative_prompt = 'low quality, bad quality, sketches'

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    image=canny_image,
    controlnet_conditioning_scale=0.5,
).images[0]
make_image_grid([original_image, canny_image, image], rows=1, cols=3)
```
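If you're not sure which value works best, a quick sweep makes the trade-off easy to compare. This is a hedged sketch that reuses the pipeline, prompt, and canny image from above (the specific scale values are arbitrary):

```py
# Compare several conditioning strengths side by side: lower values follow the
# prompt more freely, higher values stick more closely to the canny edges.
scale_images = []
for scale in [0.3, 0.5, 0.7]:
    scale_images.append(
        pipe(
            prompt,
            negative_prompt=negative_prompt,
            image=canny_image,
            controlnet_conditioning_scale=scale,
        ).images[0]
    )
make_image_grid(scale_images, rows=1, cols=3)
```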
You can use StableDiffusionXLControlNetPipeline in guess mode as well by setting `guess_mode=True`:

```py
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
from diffusers.utils import load_image, make_image_grid
import numpy as np
import torch
```