import cv2
import numpy as np
import torch
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
from diffusers.utils import load_image, make_image_grid
from PIL import Image

prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
negative_prompt = "low quality, bad quality, sketches"

original_image = load_image(
    "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
)

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True
)
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True
)
pipe.enable_model_cpu_offload()

# create the canny edge conditioning image
image = np.array(original_image)
image = cv2.Canny(image, 100, 200)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

image = pipe(
    prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=0.5, image=canny_image, guess_mode=True,
).images[0]
make_image_grid([original_image, canny_image, image], rows=1, cols=3)

MultiControlNet

Replace the SDXL model with a model like runwayml/stable-diffusion-v1-5 to use multiple conditioning inputs with Stable Diffusion models.
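If you take that route, the loading step could look like the sketch below. Treat it as an illustration only: StableDiffusionControlNetPipeline is the Stable Diffusion counterpart of the SDXL pipeline used in this guide, but the ControlNet checkpoint names shown here are assumptions, not checkpoints this guide loads.

from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import torch

# assumed SD 1.5 ControlNet checkpoints -- swap in the ones you actually want to use
controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16, use_safetensors=True
)
pipe.enable_model_cpu_offload()

The rest of the workflow below (preparing the conditioning images and passing them to the pipeline as a list) stays the same.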
You can compose multiple ControlNet conditionings from different image inputs to create a MultiControlNet. To get better results, it is often helpful to:

- mask conditionings such that they don’t overlap (for example, mask the area of a canny image where the pose conditioning is located)
- experiment with the controlnet_conditioning_scale parameter to determine how much weight to assign to each conditioning input

In this example, you’ll combine a canny image and a human pose estimation image to generate a new image.

Prepare the canny image conditioning:

from diffusers.utils import load_image, make_image_grid
from PIL import Image
import numpy as np
import cv2

original_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png"
)
image = np.array(original_image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)

# zero out middle columns of image where pose will be overlaid
zero_start = image.shape[1] // 4
zero_end = zero_start + image.shape[1] // 2
image[:, zero_start:zero_end] = 0

image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)
make_image_grid([original_image, canny_image], rows=1, cols=2)

(original image, canny image)
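If you want to experiment with where the masked region sits, it can help to wrap the steps above in a small helper. The function below is a sketch of one way to do that; its name and parameters are illustrative and not part of the guide.

def make_canny_conditioning(pil_image, low_threshold=100, high_threshold=200, mask_middle=True):
    # detect edges, optionally blank out the middle half where the pose conditioning will sit,
    # then convert back to a 3-channel PIL image
    edges = cv2.Canny(np.array(pil_image), low_threshold, high_threshold)
    if mask_middle:
        zero_start = edges.shape[1] // 4
        edges[:, zero_start:zero_start + edges.shape[1] // 2] = 0
    edges = np.concatenate([edges[:, :, None]] * 3, axis=2)
    return Image.fromarray(edges)

# equivalent to the step-by-step version above
canny_image = make_canny_conditioning(original_image)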
For human pose estimation, install controlnet_aux:

# uncomment to install the necessary library in Colab
#!pip install -q controlnet-aux

Prepare the human pose estimation conditioning:

from controlnet_aux import OpenposeDetector

# the OpenPose detector extracts a stick-figure pose image from the input photo
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
original_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
)
openpose_image = openpose(original_image)
make_image_grid([original_image, openpose_image], rows=1, cols=2)

(original image, human pose image)

Load a list of ControlNet models that correspond to each conditioning, and pass them to the StableDiffusionXLControlNetPipeline. Use the faster UniPCMultistepScheduler and enable model offloading to reduce memory usage.

from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL, UniPCMultistepScheduler
import torch

controlnets = [
    ControlNetModel.from_pretrained(
        "thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16
    ),
    ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True
    ),
]

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnets, vae=vae, torch_dtype=torch.float16, use_safetensors=True
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
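Model CPU offloading lowers peak GPU memory by keeping each submodule on the CPU until it is needed, at some cost in speed. If your GPU comfortably fits the whole SDXL pipeline, an alternative (a choice on your side, not something this guide requires) is to keep everything on the GPU instead:

# use instead of pipe.enable_model_cpu_offload() when memory is not a constraint
pipe.to("cuda")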
Now you can pass your prompt (and an optional negative prompt if you’re using one), the canny image, and the pose image to the pipeline:

prompt = "a giant standing in a fantasy landscape, best quality"
negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"

generator = torch.manual_seed(1)

# resize both conditioning images to the SDXL resolution and pass them as a list,
# in the same order as the ControlNet models above
images = [openpose_image.resize((1024, 1024)), canny_image.resize((1024, 1024))]

images = pipe(
    prompt,
    image=images,
    num_inference_steps=25,
    generator=generator,
    negative_prompt=negative_prompt,
    num_images_per_prompt=3,
    controlnet_conditioning_scale=[1.0, 0.8],
).images

make_image_grid([original_image, canny_image, openpose_image,
                 images[0].resize((512, 512)), images[1].resize((512, 512)), images[2].resize((512, 512))], rows=2, cols=3)
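The pipeline returns standard PIL images, so you can save the results directly. A minimal sketch (the file names are illustrative):

# save each generated image to disk
for i, generated in enumerate(images):
    generated.save(f"multicontrolnet_{i}.png")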
Using Diffusers with other modalities

Diffusers is in the process of expanding to modalities other than images.

Example type | Colab | Pipeline