|
|
|
import logging |
|
import torch |
|
import diffusers |
|
|
|
# Configure the root logger once: timestamped, verbose DEBUG output.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s')

# Script-local logger, kept at DEBUG so every record passes through.
log = logging.getLogger("test")
log.setLevel(logging.DEBUG)

# Record the library versions this run is executing against.
log.info(f'loaded: torch={torch.__version__} diffusers={diffusers.__version__}')
|
|
|
# --- generation inputs ---------------------------------------------------
prompt_positive = 'futuristic city'
prompt_negative = 'grass'
seeds = [42]

# --- model locations -----------------------------------------------------
# Hugging Face model id plus a local textual-inversion embedding file.
model_path = "runwayml/stable-diffusion-v1-5"
embedding_path_ok = "sd15_text_inv.pt"

# Target device for the seeded generators.
device = 'cuda:0'

# Keyword arguments forwarded to StableDiffusionPipeline.from_pretrained:
# fp16 weights, reduced CPU memory during load, safety checker disabled.
load_args = dict(
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
    variant='fp16',
    safety_checker=None,
    load_safety_checker=False,
)
|
# Instantiate the SD 1.5 pipeline with the fp16 loading options above.
pipe = diffusers.StableDiffusionPipeline.from_pretrained(model_path, **load_args)

# Custom tqdm progress bar; the trailing ANSI escape tints subsequent output.
bar_fmt = 'Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m'
pipe.set_progress_bar_config(bar_format=bar_fmt, ncols=80, colour='#327fba')

# Keep weights in CPU RAM and move each submodule to the GPU only while it runs.
pipe.enable_model_cpu_offload()

# Call arguments for the pipeline: one prompt pair, one seeded generator,
# 512x512 PIL output in 10 steps.
args = dict(
    prompt=[prompt_positive],
    negative_prompt=[prompt_negative],
    guidance_scale=6,
    generator=[torch.Generator(device).manual_seed(s) for s in seeds],
    output_type='pil',
    num_inference_steps=10,
    eta=0.0,
    guidance_rescale=0.7,
    height=512,
    width=512,
)
|
|
|
|
|
# Load the textual-inversion embedding ("sd15_text_inv.pt").
# BUGFIX: the original called pipe.load_lora_weights(embedding_path_ok,
# **load_args) — but load_args holds from_pretrained() kwargs
# (torch_dtype, variant, safety_checker, load_safety_checker) that
# load_lora_weights() rejects, so the call always raised and the broad
# except silently dropped the embedding. The file is a textual-inversion
# embedding (see its name and the error message), so load_textual_inversion
# is the correct loader, called without the from_pretrained kwargs.
try:
    pipe.load_textual_inversion(embedding_path_ok)
except Exception as e:
    # Best-effort: generation still proceeds without the embedding.
    log.error('failed to load embeddings: %s', e)

# Run the pipeline with the prepared arguments and report the result.
output = pipe(**args)
log.info('output: %s', output)
|
|
|
|