Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import torch
|
3 |
+
import json
|
4 |
+
import time
|
5 |
+
import subprocess
|
6 |
+
import os
|
7 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
8 |
+
|
9 |
+
# Initialize DeepSeek
# Hub model id — per the id this is the small 1.3B instruct-tuned coder model,
# used here only to rewrite user text into Stable Diffusion prompts.
model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Pin inference to CPU (no GPU assumed on the host).
# NOTE(review): nn.Module.to() moves parameters in place and returns the
# module, so discarding the return value here is fine.
model.to("cpu")
|
14 |
+
|
15 |
+
def generate_prompts_and_image(input_text):
    """Turn *input_text* into SD prompts via DeepSeek, then render an image via ComfyUI.

    Returns a dict with keys ``positive``, ``negative``, ``image`` and
    ``time_taken`` (plus ``error`` when workflow.json is missing).
    ``image`` is either a path to the rendered PNG or an error string.
    """
    start_time = time.time()

    # --- 1. Generate prompts with DeepSeek ---------------------------------
    prompt = f"""
Input: "{input_text}"
Task: Generate concise 'Positive' and 'Negative' AI image prompts for Stable Diffusion based on the input above. Output the prompts directly, no extra text or examples.
"""
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    outputs = model.generate(**inputs, max_new_tokens=50, temperature=0.7, top_p=0.9, do_sample=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    # decode() yields prompt + completion; strip the echoed prompt prefix.
    if response.startswith(prompt):
        response = response[len(prompt):].strip()

    # Heuristic split: first non-empty line = positive, second = negative.
    lines = [line.strip() for line in response.split("\n") if line.strip()]
    positive = lines[0] if lines else "No positive prompt generated"
    negative = lines[1] if len(lines) > 1 else "No negative prompt generated"

    # --- 2. Load and modify the ComfyUI workflow ---------------------------
    workflow_path = "workflow.json"
    temp_workflow_path = "temp_workflow.json"
    if not os.path.exists(workflow_path):
        # BUG FIX: the original early return carried only "error" and
        # "time_taken", so gradio_interface crashed with a KeyError on
        # result["image"] instead of surfacing the problem to the user.
        # Keep the full key set so every return path has the same shape.
        error_msg = "workflow.json not found"
        return {
            "error": error_msg,
            "positive": positive,
            "negative": negative,
            "image": error_msg,
            "time_taken": f"{time.time() - start_time:.2f} seconds",
        }

    with open(workflow_path, "r") as f:
        workflow = json.load(f)

    # Inject prompts into nodes 6 (positive) and 7 (negative) of workflow.json
    workflow["6"]["widgets_values"][0] = positive  # Positive prompt (CLIPTextEncode)
    workflow["7"]["widgets_values"][0] = negative  # Negative prompt (CLIPTextEncode)

    # Save temporary workflow
    with open(temp_workflow_path, "w") as f:
        json.dump(workflow, f)

    # --- 3. Run ComfyUI via subprocess -------------------------------------
    comfyui_dir = os.path.join(os.path.dirname(__file__), "ComfyUI")
    comfyui_main = os.path.join(comfyui_dir, "main.py")
    output_dir = os.path.join(comfyui_dir, "output")  # ComfyUI default output dir
    os.makedirs(output_dir, exist_ok=True)

    try:
        # NOTE(review): temp_workflow_path is written above but never passed
        # to this command, and ComfyUI's main.py normally starts a server
        # rather than executing a workflow file — it looks like the injected
        # prompts never reach ComfyUI. A proper fix would POST the workflow
        # to the ComfyUI /prompt API; flagged rather than changed here.
        result = subprocess.run(
            ["python", comfyui_main, "--input-directory", comfyui_dir, "--output-directory", output_dir],
            cwd=comfyui_dir,
            capture_output=True,
            text=True,
            check=True,
        )
        # ComfyUI saves images as ComfyUI_<counter>.png in output/
        image_files = [f for f in os.listdir(output_dir) if f.startswith("ComfyUI") and f.endswith(".png")]
        if image_files:
            # BUG FIX: os.listdir order is arbitrary, so image_files[-1] was
            # not necessarily the newest render; pick by modification time.
            image = max(
                (os.path.join(output_dir, name) for name in image_files),
                key=os.path.getmtime,
            )
        else:
            image = f"ComfyUI ran but no image found: {result.stdout}\n{result.stderr}"
    except subprocess.CalledProcessError as e:
        # check=True raised: report ComfyUI's own output instead of a path.
        image = f"ComfyUI failed: {e.stdout}\n{e.stderr}"

    elapsed_time = time.time() - start_time
    return {
        "positive": positive,
        "negative": negative,
        "image": image,
        "time_taken": f"{elapsed_time:.2f} seconds",
    }
|
82 |
+
|
83 |
+
def gradio_interface(input_text):
    """Gradio adapter: unpack the pipeline result into (image, JSON summary).

    BUG FIX: uses dict.get() with fallbacks instead of direct indexing —
    the pipeline's missing-workflow return path does not carry every key,
    and the original code crashed with a KeyError instead of showing the
    error in the UI.
    """
    result = generate_prompts_and_image(input_text)
    summary = {
        "positive": result.get("positive", ""),
        "negative": result.get("negative", ""),
        "time_taken": result.get("time_taken", ""),
    }
    # Surface a pipeline-level error in the JSON panel when present.
    if result.get("error"):
        summary["error"] = result["error"]
    # gr.Image accepts None, so a missing image degrades gracefully.
    return result.get("image"), json.dumps(summary, indent=2)
|
90 |
+
|
91 |
+
# Wire the pipeline into a single-function Gradio UI:
# one text input in, an image plus a JSON prompt summary out.
demo = gr.Interface(
    fn=gradio_interface,
    # Multi-line textbox for the source text the prompts are derived from.
    inputs=gr.Textbox(label="Input Text", lines=10, placeholder="Paste your input text here..."),
    outputs=[
        gr.Image(label="Generated Image"),
        gr.Textbox(label="Generated Prompts (JSON)")
    ],
    title="Prompt and Image Generator"
)
|
100 |
+
|
101 |
+
# Launch the UI only when run as a script (not on import).
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the conventional setup for
    # containerized hosting such as Hugging Face Spaces.
    demo.launch(server_name="0.0.0.0", server_port=7860)
|