Upload dc.py

dc.py (CHANGED)
@@ -552,7 +552,7 @@ def dynamic_gpu_duration(func, duration, *args):
     @torch.inference_mode()
     @spaces.GPU(duration=duration)
     def wrapped_func():
-        return func(*args)
+        yield from func(*args)
 
     return wrapped_func()
 
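This hunk turns the wrapped GPU call into a generator: yield from func(*args) re-yields every intermediate result that func produces, so callers can stream updates while the GPU-decorated block is still running, instead of receiving a single value at the end. A minimal, self-contained sketch of the pattern; the torch and spaces decorators are omitted so it runs anywhere, and fake_pipeline is a hypothetical stand-in for the real diffusion call:

# Sketch only: the real wrapped_func also carries @torch.inference_mode()
# and @spaces.GPU(duration=duration).
def dynamic_gpu_duration(func, duration, *args):
    def wrapped_func():
        # Re-yield every item func produces so the caller sees each
        # intermediate result as soon as it is available.
        yield from func(*args)

    return wrapped_func()

def fake_pipeline(n):  # hypothetical stand-in for the diffusion call
    for i in range(n):
        yield f"step {i + 1}/{n}"

for update in dynamic_gpu_duration(fake_pipeline, 59, 3):
    print(update)  # step 1/3, step 2/3, step 3/3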
@@ -578,8 +578,8 @@ def sd_gen_generate_pipeline(*args):
     if load_lora_cpu:
         msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
 
-
-
+    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
+        yield msg_load_lora, gr.update(), gr.update()
 
     # Load lora in CPU
     if load_lora_cpu:
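The two added lines yield a status tuple as soon as a LoRA refresh is actually needed: one value per Gradio output component, with gr.update() as a no-op placeholder for the outputs that are not ready yet. A minimal sketch of that streaming-handler pattern, with hypothetical component names:

import time

import gradio as gr

def generate(prompt):
    # Intermediate yield: update only the status output, leave the rest.
    yield "Updating LoRAs in CPU (Slow but saves GPU usage)...", gr.update(), gr.update()
    time.sleep(1)  # stand-in for the actual LoRA/model work
    yield "Done.", f"result for: {prompt}", gr.update()

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    status = gr.Markdown()
    result = gr.Textbox(label="Result")
    extra = gr.Textbox(label="Info")
    prompt.submit(generate, inputs=prompt, outputs=[status, result, extra])

demo.launch()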
@@ -609,7 +609,7 @@ def sd_gen_generate_pipeline(*args):
     if verbose_arg:
         gr.Info(msg_request)
         print(msg_request)
-
+    yield msg_request.replace("\n", "<br>"), gr.update(), gr.update()
 
     start_time = time.time()
 
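The added yield pushes the request summary to the UI before inference starts. The replace("\n", "<br>") is there because the status output renders Markdown/HTML, where single newlines collapse into spaces, so the line breaks must be made explicit. For example:

msg_request = "Model: example-model\nPrompt: a cat\nSteps: 28"
html_msg = msg_request.replace("\n", "<br>")
print(html_msg)
# Model: example-model<br>Prompt: a cat<br>Steps: 28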
@@ -686,7 +686,6 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     gpu_duration = 59
 
     images: list[tuple[PIL.Image.Image, str | None]] = []
-    info_state = info_images = ""
     progress(0, desc="Preparing...")
 
     if randomize_seed:
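The deleted initialization appears redundant now, presumably because info_state and info_images are bound by the streaming loop further down. The annotation on images documents the gallery shape: each entry pairs a PIL image with an optional caption string. A small illustration with hypothetical captions:

import PIL.Image

images: list[tuple[PIL.Image.Image, str | None]] = []
img = PIL.Image.new("RGB", (64, 64))
images.append((img, "seed 1234"))
images.append((img, None))  # the caption slot may be empty
output_image = images[0][0] if images else None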
@@ -710,8 +709,8 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     progress(1, desc="Preparation completed. Starting inference...")
 
     progress(0, desc="Loading model...")
-    for
-
+    for m in sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]):
+        progress(0.5, desc=m)
     progress(1, desc="Model loaded.")
     progress(0, desc="Starting Inference...")
     images = None
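The new loop consumes sd_gen.load_new_model(...) as a generator and forwards each status string it yields to the progress bar, holding the bar at the halfway mark while the message changes. A sketch of the pattern with a hypothetical stand-in loader and a print-based stand-in for gr.Progress():

def load_new_model(model_name):  # hypothetical stand-in loader
    yield f"Downloading {model_name}..."
    yield f"Loading {model_name} weights..."
    yield "Moving pipeline to device..."

def progress(fraction, desc=""):
    print(f"[{fraction:>4.0%}] {desc}")  # stand-in for gr.Progress()

progress(0, desc="Loading model...")
for m in load_new_model("example-model"):
    progress(0.5, desc=m)  # mid-bar: message updates, fraction does not
progress(1, desc="Model loaded.")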
@@ -727,6 +726,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
         load_lora_cpu, verbose_info, gpu_duration
     ):
+        progress(0.5, desc=info_state)
         images = stream_images
         progress(1, desc="Inference completed.")
     output_image = images[0][0] if images else None
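Here the inference stream is drained in a loop: each iteration surfaces the latest status via progress(0.5, desc=info_state) and keeps only the newest image batch, and the final image is picked after the generator is exhausted. A self-contained sketch; the tuple order and the stand-in generator are assumptions, the real call is sd_gen_generate_pipeline(...) with many arguments:

def fake_generate_pipeline():
    # Assumed yield shape: (info_state, stream_images, info_images).
    yield "Loading LoRAs...", None, ""
    yield "Inference finished.", [("image-0", "seed 1234")], "28 steps"

images = None
for info_state, stream_images, info_images in fake_generate_pipeline():
    print(f"status: {info_state}")  # in the app: progress(0.5, desc=info_state)
    if stream_images is not None:
        images = stream_images      # keep only the newest batch
output_image = images[0][0] if images else None
print(output_image)  # image-0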