"""
MOUSE Workflow - Visual Workflow Builder with UI Execution
@Powered by VIDraft
✓ Visual workflow designer with drag-and-drop
✓ Import/Export JSON with copy-paste support
✓ Auto-generate UI from workflow for end-user execution
"""
import os, json, typing, tempfile, traceback
import gradio as gr
from gradio_workflowbuilder import WorkflowBuilder
# Optional imports for LLM APIs
try:
from openai import OpenAI
OPENAI_AVAILABLE = True
except ImportError:
OPENAI_AVAILABLE = False
print("OpenAI library not available. Install with: pip install openai")
# Anthropic support is disabled (code kept below for reference)
# try:
# import anthropic
# ANTHROPIC_AVAILABLE = True
# except ImportError:
# ANTHROPIC_AVAILABLE = False
# print("Anthropic library not available. Install with: pip install anthropic")
ANTHROPIC_AVAILABLE = False
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
print("Requests library not available. Install with: pip install requests")
# -------------------------------------------------------------------
# 🛠️ Helper functions
# -------------------------------------------------------------------
def export_pretty(data: typing.Dict[str, typing.Any]) -> str:
return json.dumps(data, indent=2, ensure_ascii=False) if data else "No workflow to export"
def export_file(data: typing.Dict[str, typing.Any]) -> typing.Optional[str]:
"""워크플로우를 JSON 파일로 내보내기"""
if not data:
return None
fd, path = tempfile.mkstemp(suffix=".json", prefix="workflow_")
try:
with os.fdopen(fd, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
return path
except Exception as e:
print(f"Error exporting file: {e}")
return None
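# Usage sketch: export_file writes the workflow to a temp path such as
# /tmp/workflow_XXXXXXXX.json (via tempfile.mkstemp) and returns that path,
# which is the file target gr.DownloadButton expects.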
def load_json_from_text_or_file(json_text: str, file_obj) -> typing.Tuple[typing.Optional[typing.Dict[str, typing.Any]], str]:
    """Load workflow JSON from pasted text or an uploaded file."""
    # If a file is provided, it takes priority over the text box
if file_obj is not None:
try:
with open(file_obj.name, "r", encoding="utf-8") as f:
json_text = f.read()
except Exception as e:
return None, f"❌ Error reading file: {str(e)}"
    # Nothing to parse if the JSON text is missing or blank
    if not json_text or not json_text.strip():
        return None, "No JSON data provided"
try:
        # Parse the JSON
        data = json.loads(json_text.strip())
        # Validate the payload
        if not isinstance(data, dict):
            return None, "Invalid format: not a dictionary"
        # Ensure required top-level fields exist
        if 'nodes' not in data:
            data['nodes'] = []
        if 'edges' not in data:
            data['edges'] = []
nodes_count = len(data.get('nodes', []))
edges_count = len(data.get('edges', []))
return data, f"✅ Loaded: {nodes_count} nodes, {edges_count} edges"
except json.JSONDecodeError as e:
return None, f"❌ JSON parsing error: {str(e)}"
except Exception as e:
return None, f"❌ Error: {str(e)}"
def create_sample_workflow(example_type="basic"):
"""샘플 워크플로우 생성"""
if example_type == "basic":
        # Basic example: simple Q&A
return {
"nodes": [
{
"id": "input_1",
"type": "ChatInput",
"position": {"x": 100, "y": 200},
"data": {
"label": "User Question",
"template": {
"input_value": {"value": "What is the capital of Korea?"}
}
}
},
{
"id": "llm_1",
"type": "llmNode",
"position": {"x": 400, "y": 200},
"data": {
"label": "AI Processing",
"template": {
"provider": {"value": "OpenAI"},
"model": {"value": "gpt-4.1-mini"},
"temperature": {"value": 0.7},
"system_prompt": {"value": "You are a helpful assistant."}
}
}
},
{
"id": "output_1",
"type": "ChatOutput",
"position": {"x": 700, "y": 200},
"data": {"label": "Answer"}
}
],
"edges": [
{"id": "e1", "source": "input_1", "target": "llm_1"},
{"id": "e2", "source": "llm_1", "target": "output_1"}
]
}
elif example_type == "vidraft":
        # VIDraft example (Korean-language prompts for the Gemma model)
return {
"nodes": [
{
"id": "input_1",
"type": "ChatInput",
"position": {"x": 100, "y": 200},
"data": {
"label": "User Input",
"template": {
"input_value": {"value": "AI와 머신러닝의 차이점을 설명해주세요."}
}
}
},
{
"id": "llm_1",
"type": "llmNode",
"position": {"x": 400, "y": 200},
"data": {
"label": "VIDraft AI (Gemma)",
"template": {
"provider": {"value": "VIDraft"},
"model": {"value": "Gemma-3-r1984-27B"},
"temperature": {"value": 0.8},
"system_prompt": {"value": "당신은 전문적이고 친절한 AI 교육자입니다. 복잡한 개념을 쉽게 설명해주세요."}
}
}
},
{
"id": "output_1",
"type": "ChatOutput",
"position": {"x": 700, "y": 200},
"data": {"label": "AI Explanation"}
}
],
"edges": [
{"id": "e1", "source": "input_1", "target": "llm_1"},
{"id": "e2", "source": "llm_1", "target": "output_1"}
]
}
elif example_type == "multi_input":
        # Multi-input example
return {
"nodes": [
{
"id": "name_input",
"type": "textInput",
"position": {"x": 100, "y": 100},
"data": {
"label": "Your Name",
"template": {
"input_value": {"value": "John"}
}
}
},
{
"id": "topic_input",
"type": "textInput",
"position": {"x": 100, "y": 250},
"data": {
"label": "Topic",
"template": {
"input_value": {"value": "Python programming"}
}
}
},
{
"id": "level_input",
"type": "textInput",
"position": {"x": 100, "y": 400},
"data": {
"label": "Skill Level",
"template": {
"input_value": {"value": "beginner"}
}
}
},
{
"id": "combiner",
"type": "textNode",
"position": {"x": 350, "y": 250},
"data": {
"label": "Combine Inputs",
"template": {
"text": {"value": "Create a personalized learning plan"}
}
}
},
{
"id": "llm_1",
"type": "llmNode",
"position": {"x": 600, "y": 250},
"data": {
"label": "Generate Learning Plan",
"template": {
"provider": {"value": "OpenAI"},
"model": {"value": "gpt-4.1-mini"},
"temperature": {"value": 0.7},
"system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
}
}
},
{
"id": "output_1",
"type": "ChatOutput",
"position": {"x": 900, "y": 250},
"data": {"label": "Your Learning Plan"}
}
],
"edges": [
{"id": "e1", "source": "name_input", "target": "combiner"},
{"id": "e2", "source": "topic_input", "target": "combiner"},
{"id": "e3", "source": "level_input", "target": "combiner"},
{"id": "e4", "source": "combiner", "target": "llm_1"},
{"id": "e5", "source": "llm_1", "target": "output_1"}
]
}
elif example_type == "chain":
        # Chained-processing example
return {
"nodes": [
{
"id": "input_1",
"type": "ChatInput",
"position": {"x": 50, "y": 200},
"data": {
"label": "Original Text",
"template": {
"input_value": {"value": "The quick brown fox jumps over the lazy dog."}
}
}
},
{
"id": "translator",
"type": "llmNode",
"position": {"x": 300, "y": 200},
"data": {
"label": "Translate to Korean",
"template": {
"provider": {"value": "VIDraft"},
"model": {"value": "Gemma-3-r1984-27B"},
"temperature": {"value": 0.3},
"system_prompt": {"value": "You are a professional translator. Translate the given English text to Korean accurately."}
}
}
},
{
"id": "analyzer",
"type": "llmNode",
"position": {"x": 600, "y": 200},
"data": {
"label": "Analyze Translation",
"template": {
"provider": {"value": "OpenAI"},
"model": {"value": "gpt-4.1-mini"},
"temperature": {"value": 0.5},
"system_prompt": {"value": "You are a linguistic expert. Analyze the Korean translation and explain its nuances and cultural context."}
}
}
},
{
"id": "output_translation",
"type": "ChatOutput",
"position": {"x": 450, "y": 350},
"data": {"label": "Korean Translation"}
},
{
"id": "output_analysis",
"type": "ChatOutput",
"position": {"x": 900, "y": 200},
"data": {"label": "Translation Analysis"}
}
],
"edges": [
{"id": "e1", "source": "input_1", "target": "translator"},
{"id": "e2", "source": "translator", "target": "analyzer"},
{"id": "e3", "source": "translator", "target": "output_translation"},
{"id": "e4", "source": "analyzer", "target": "output_analysis"}
]
}
    # Fall back to the basic example
return create_sample_workflow("basic")
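# Shape of a single node, as used by the samples above and consumed by
# execute_workflow_simple below (an illustrative sketch, not a full schema):
#   {
#     "id": "llm_1",                     # unique id referenced by edges
#     "type": "llmNode",                 # ChatInput / textNode / llmNode / ChatOutput ...
#     "position": {"x": 400, "y": 200},  # canvas coordinates
#     "data": {
#       "label": "AI Processing",
#       "template": {"provider": {"value": "OpenAI"}}
#     }
#   }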
# Actual workflow execution used by the generated UI
def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
"""워크플로우 실제 실행"""
import traceback
# API 키 확인
vidraft_token = os.getenv("FRIENDLI_TOKEN") # VIDraft/Friendli token
openai_key = os.getenv("OPENAI_API_KEY")
# anthropic_key = os.getenv("ANTHROPIC_API_KEY") # 주석 처리
    # Check that the OpenAI library is importable
try:
from openai import OpenAI
openai_available = True
except ImportError:
openai_available = False
print("OpenAI library not available")
    # Anthropic library check (disabled)
# try:
# import anthropic
# anthropic_available = True
# except ImportError:
# anthropic_available = False
# print("Anthropic library not available")
anthropic_available = False
results = {}
nodes = workflow_data.get("nodes", [])
edges = workflow_data.get("edges", [])
    # Process nodes in list order (assumes the node list is already in execution order)
for node in nodes:
node_id = node.get("id")
node_type = node.get("type", "")
node_data = node.get("data", {})
try:
if node_type in ["ChatInput", "textInput", "Input"]:
                # Use the value supplied from the UI, if any
if node_id in input_values:
results[node_id] = input_values[node_id]
else:
                    # Fall back to the node's default value
template = node_data.get("template", {})
default_value = template.get("input_value", {}).get("value", "")
results[node_id] = default_value
elif node_type == "textNode":
                # A text node combines all of its connected inputs
template = node_data.get("template", {})
base_text = template.get("text", {}).get("value", "")
                # Collect the results of all incoming edges
connected_inputs = []
for edge in edges:
if edge.get("target") == node_id:
source_id = edge.get("source")
if source_id in results:
connected_inputs.append(f"{source_id}: {results[source_id]}")
                # Build the combined text
if connected_inputs:
combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
results[node_id] = combined_text
else:
results[node_id] = base_text
elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
                # LLM node handling
template = node_data.get("template", {})
                # Extract the provider - only VIDraft and OpenAI are allowed
                provider_info = template.get("provider", {})
                provider = provider_info.get("value", "OpenAI") if isinstance(provider_info, dict) else "OpenAI"
                # Any other provider falls back to OpenAI
                if provider not in ["VIDraft", "OpenAI"]:
                    provider = "OpenAI"
                # Each provider is pinned to a single model
                if provider == "OpenAI":
                    model = "gpt-4.1-mini"
                elif provider == "VIDraft":
                    model = "Gemma-3-r1984-27B"
                else:
                    model = "gpt-4.1-mini"  # default
                # Extract the temperature
                temp_info = template.get("temperature", {})
                temperature = temp_info.get("value", 0.7) if isinstance(temp_info, dict) else 0.7
                # Extract the system prompt
                prompt_info = template.get("system_prompt", {})
                system_prompt = prompt_info.get("value", "") if isinstance(prompt_info, dict) else ""
                # Find the input text from the first incoming edge
input_text = ""
for edge in edges:
if edge.get("target") == node_id:
source_id = edge.get("source")
if source_id in results:
input_text = results[source_id]
break
                # Call the actual API
if provider == "OpenAI" and openai_key and openai_available:
try:
client = OpenAI(api_key=openai_key)
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": input_text})
response = client.chat.completions.create(
model="gpt-4.1-mini", # 고정된 모델명
messages=messages,
temperature=temperature,
max_tokens=1000
)
results[node_id] = response.choices[0].message.content
except Exception as e:
results[node_id] = f"[OpenAI Error: {str(e)}]"
                # Anthropic branch (disabled)
# elif provider == "Anthropic" and anthropic_key and anthropic_available:
# try:
# client = anthropic.Anthropic(api_key=anthropic_key)
#
# message = client.messages.create(
# model="claude-3-haiku-20240307",
# max_tokens=1000,
# temperature=temperature,
# system=system_prompt if system_prompt else None,
# messages=[{"role": "user", "content": input_text}]
# )
#
# results[node_id] = message.content[0].text
#
# except Exception as e:
# results[node_id] = f"[Anthropic Error: {str(e)}]"
elif provider == "VIDraft" and vidraft_token:
try:
import requests
headers = {
"Authorization": f"Bearer {vidraft_token}",
"Content-Type": "application/json"
}
                        # Build the chat messages
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": input_text})
payload = {
"model": "dep89a2fld32mcm", # VIDraft 모델 ID
"messages": messages,
"max_tokens": 16384,
"temperature": temperature,
"top_p": 0.8,
"stream": False # 동기 실행을 위해 False로 설정
}
# VIDraft API endpoint
response = requests.post(
"https://api.friendli.ai/dedicated/v1/chat/completions",
headers=headers,
json=payload,
timeout=30
)
if response.status_code == 200:
response_json = response.json()
results[node_id] = response_json["choices"][0]["message"]["content"]
else:
results[node_id] = f"[VIDraft API Error: {response.status_code} - {response.text}]"
except Exception as e:
results[node_id] = f"[VIDraft Error: {str(e)}]"
else:
                    # No API key available: return a simulated response
results[node_id] = f"[Simulated {provider} Response to: {input_text[:50]}...]"
elif node_type in ["ChatOutput", "textOutput", "Output"]:
                # An output node takes the result of its connected source node
for edge in edges:
if edge.get("target") == node_id:
source_id = edge.get("source")
if source_id in results:
results[node_id] = results[source_id]
break
except Exception as e:
results[node_id] = f"[Node Error: {str(e)}]"
print(f"Error processing node {node_id}: {traceback.format_exc()}")
return results
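# Usage sketch: run the basic sample end-to-end. Without API keys the LLM
# node returns a simulated string, so this is safe to try offline.
#   wf = create_sample_workflow("basic")
#   out = execute_workflow_simple(wf, {"input_1": "What is the capital of Korea?"})
#   print(out["output_1"])  # real answer if OPENAI_API_KEY is set, else "[Simulated ...]"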
# -------------------------------------------------------------------
# 🎨 CSS
# -------------------------------------------------------------------
CSS = """
.main-container{max-width:1600px;margin:0 auto;}
.workflow-section{margin-bottom:2rem;min-height:500px;}
.button-row{display:flex;gap:1rem;justify-content:center;margin:1rem 0;}
.status-box{
padding:10px;border-radius:5px;margin-top:10px;
background:#f0f9ff;border:1px solid #3b82f6;color:#1e40af;
}
.component-description{
padding:24px;background:linear-gradient(135deg,#f8fafc 0%,#e2e8f0 100%);
border-left:4px solid #3b82f6;border-radius:12px;
box-shadow:0 2px 8px rgba(0,0,0,.05);margin:16px 0;
}
.workflow-container{position:relative;}
.ui-execution-section{
background:linear-gradient(135deg,#f0fdf4 0%,#dcfce7 100%);
padding:24px;border-radius:12px;margin:24px 0;
border:1px solid #86efac;
}
.powered-by{
text-align:center;color:#64748b;font-size:14px;
margin-top:8px;font-style:italic;
}
.sample-buttons{
display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
margin-top:0.5rem;
}
"""
# -------------------------------------------------------------------
# 🖥️ Gradio app
# -------------------------------------------------------------------
with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as demo:
with gr.Column(elem_classes=["main-container"]):
gr.Markdown("# 🐭 MOUSE Workflow")
gr.Markdown("**Visual Workflow Builder with Interactive UI Execution**")
gr.HTML('<p class="powered-by">@Powered by VIDraft & Huggingface gradio</p>')
gr.HTML(
"""
<div class="component-description">
<p style="font-size:16px;margin:0;">Build sophisticated workflows visually • Import/Export JSON • Generate interactive UI for end-users</p>
</div>
"""
)
# API Status Display
with gr.Accordion("🔌 API Status", open=False):
gr.Markdown(f"""
**Available APIs:**
- FRIENDLI_TOKEN (VIDraft): {'✅ Connected' if os.getenv("FRIENDLI_TOKEN") else '❌ Not found'}
- OPENAI_API_KEY: {'✅ Connected' if os.getenv("OPENAI_API_KEY") else '❌ Not found'}
**Libraries:**
- OpenAI: {'✅ Installed' if OPENAI_AVAILABLE else '❌ Not installed'}
- Requests: {'✅ Installed' if REQUESTS_AVAILABLE else '❌ Not installed'}
**Available Models:**
- OpenAI: gpt-4.1-mini (fixed)
- VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
**Sample Workflows:**
- Basic Q&A: Simple question-answer flow
- VIDraft: Korean language example with Gemma model
- Multi-Input: Combine multiple inputs for personalized output
- Chain: Sequential processing with multiple outputs
*Note: Without API keys, the UI will simulate AI responses.*
""")
# State for storing workflow data
loaded_data = gr.State(None)
trigger_update = gr.State(False)
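        # Flipping this boolean State re-runs the @gr.render block below,
        # which redraws the WorkflowBuilder with the newly loaded data.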
# ─── Dynamic Workflow Container ───
with gr.Column(elem_classes=["workflow-container"]):
@gr.render(inputs=[loaded_data, trigger_update])
def render_workflow(data, trigger):
"""동적으로 WorkflowBuilder 렌더링"""
workflow_value = data if data else {"nodes": [], "edges": []}
return WorkflowBuilder(
label="🎨 Visual Workflow Designer",
info="Drag from sidebar → Connect nodes → Edit properties",
value=workflow_value,
elem_id="main_workflow"
)
# ─── Import Section ───
with gr.Accordion("📥 Import Workflow", open=True):
with gr.Row():
with gr.Column(scale=2):
import_json_text = gr.Code(
language="json",
label="Paste JSON here",
lines=8,
value='{\n "nodes": [],\n "edges": []\n}'
)
with gr.Column(scale=1):
file_upload = gr.File(
label="Or upload JSON file",
file_types=[".json"],
type="filepath"
)
btn_load = gr.Button("📥 Load Workflow", variant="primary", size="lg")
# Sample buttons
gr.Markdown("**Sample Workflows:**")
with gr.Row():
btn_sample_basic = gr.Button("🎯 Basic Q&A", variant="secondary", scale=1)
btn_sample_vidraft = gr.Button("🤖 VIDraft", variant="secondary", scale=1)
with gr.Row():
btn_sample_multi = gr.Button("📝 Multi-Input", variant="secondary", scale=1)
btn_sample_chain = gr.Button("🔗 Chain", variant="secondary", scale=1)
# Status
status_text = gr.Textbox(
label="Status",
value="Ready",
elem_classes=["status-box"],
interactive=False
)
# ─── Export Section ───
gr.Markdown("## 💾 Export")
with gr.Row():
with gr.Column(scale=3):
export_preview = gr.Code(
language="json",
label="Current Workflow JSON",
lines=8
)
with gr.Column(scale=1):
btn_preview = gr.Button("👁️ Preview JSON", size="lg")
btn_download = gr.DownloadButton("💾 Download JSON", size="lg")
# ─── UI Execution Section ───
with gr.Column(elem_classes=["ui-execution-section"]):
gr.Markdown("## 🚀 UI Execution")
gr.Markdown("Generate an interactive UI from your workflow for end-users")
btn_execute_ui = gr.Button("▶️ Generate & Run UI", variant="primary", size="lg")
# UI execution state
ui_workflow_data = gr.State(None)
# Dynamic UI container
@gr.render(inputs=[ui_workflow_data])
def render_execution_ui(workflow_data):
if not workflow_data or not workflow_data.get("nodes"):
gr.Markdown("*Load a workflow first, then click 'Generate & Run UI'*")
return
gr.Markdown("### 📋 Generated UI")
# Extract input and output nodes
input_nodes = []
output_nodes = []
for node in workflow_data.get("nodes", []):
node_type = node.get("type", "")
if node_type in ["ChatInput", "textInput", "Input", "numberInput"]:
input_nodes.append(node)
elif node_type in ["ChatOutput", "textOutput", "Output"]:
output_nodes.append(node)
elif node_type == "textNode":
                        # textNode is an intermediate processing node; it gets no UI control
pass
# Create input components
input_components = {}
if input_nodes:
gr.Markdown("#### 📥 Inputs")
for node in input_nodes:
node_id = node.get("id")
label = node.get("data", {}).get("label", node_id)
node_type = node.get("type")
# Get default value
template = node.get("data", {}).get("template", {})
default_value = template.get("input_value", {}).get("value", "")
if node_type == "numberInput":
input_components[node_id] = gr.Number(
label=label,
value=float(default_value) if default_value else 0
)
else:
input_components[node_id] = gr.Textbox(
label=label,
value=default_value,
lines=2,
placeholder="Enter your input..."
)
# Execute button
execute_btn = gr.Button("🎯 Execute", variant="primary")
# Create output components
output_components = {}
if output_nodes:
gr.Markdown("#### 📤 Outputs")
for node in output_nodes:
node_id = node.get("id")
label = node.get("data", {}).get("label", node_id)
output_components[node_id] = gr.Textbox(
label=label,
interactive=False,
lines=3
)
# Execution log
gr.Markdown("#### 📊 Execution Log")
log_output = gr.Textbox(
label="Log",
interactive=False,
lines=5
)
# Define execution handler
def execute_ui_workflow(*input_values):
# Create input dictionary
inputs_dict = {}
input_keys = list(input_components.keys())
for i, key in enumerate(input_keys):
if i < len(input_values):
inputs_dict[key] = input_values[i]
# Check API status
log = "=== Workflow Execution Started ===\n"
log += f"Inputs provided: {len(inputs_dict)}\n"
                    # Check API status
vidraft_token = os.getenv("FRIENDLI_TOKEN")
openai_key = os.getenv("OPENAI_API_KEY")
log += "\nAPI Status:\n"
log += f"- FRIENDLI_TOKEN (VIDraft): {'✅ Found' if vidraft_token else '❌ Not found'}\n"
log += f"- OPENAI_API_KEY: {'✅ Found' if openai_key else '❌ Not found'}\n"
if not vidraft_token and not openai_key:
log += "\n⚠️ No API keys found. Results will be simulated.\n"
log += "To get real AI responses, set API keys in environment variables.\n"
log += "\n--- Processing Nodes ---\n"
try:
results = execute_workflow_simple(workflow_data, inputs_dict)
# Prepare outputs
output_values = []
for node_id in output_components.keys():
value = results.get(node_id, "No output")
output_values.append(value)
                            # Truncate long values for the log
display_value = value[:100] + "..." if len(str(value)) > 100 else value
log += f"\nOutput [{node_id}]: {display_value}\n"
log += "\n=== Execution Completed Successfully! ===\n"
output_values.append(log)
return output_values
except Exception as e:
error_msg = f"❌ Error: {str(e)}"
log += f"\n{error_msg}\n"
log += "=== Execution Failed ===\n"
return [error_msg] * len(output_components) + [log]
# Connect execution
all_inputs = list(input_components.values())
all_outputs = list(output_components.values()) + [log_output]
execute_btn.click(
fn=execute_ui_workflow,
inputs=all_inputs,
outputs=all_outputs
)
# ─── Event Handlers ───
# Load workflow (from text or file)
def load_workflow(json_text, file_obj):
data, status = load_json_from_text_or_file(json_text, file_obj)
if data:
return data, status, json_text if not file_obj else export_pretty(data)
else:
return None, status, gr.update()
btn_load.click(
fn=load_workflow,
inputs=[import_json_text, file_upload],
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
# Auto-load when file is uploaded
file_upload.change(
fn=load_workflow,
inputs=[import_json_text, file_upload],
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
# Load samples
btn_sample_basic.click(
fn=lambda: (create_sample_workflow("basic"), "✅ Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic"))),
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
btn_sample_vidraft.click(
fn=lambda: (create_sample_workflow("vidraft"), "✅ VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft"))),
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
btn_sample_multi.click(
fn=lambda: (create_sample_workflow("multi_input"), "✅ Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input"))),
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
btn_sample_chain.click(
fn=lambda: (create_sample_workflow("chain"), "✅ Chain processing sample loaded", export_pretty(create_sample_workflow("chain"))),
outputs=[loaded_data, status_text, import_json_text]
).then(
fn=lambda current_trigger: not current_trigger,
inputs=trigger_update,
outputs=trigger_update
)
# Preview current workflow
btn_preview.click(
fn=export_pretty,
inputs=loaded_data,
outputs=export_preview
)
# Download workflow
        # Route the returned file path back into the DownloadButton so it
        # becomes the button's download target
        btn_download.click(
            fn=export_file,
            inputs=loaded_data,
            outputs=btn_download
        )
# Generate UI execution
btn_execute_ui.click(
fn=lambda data: data,
inputs=loaded_data,
outputs=ui_workflow_data
)
# Auto-update export preview when workflow changes
loaded_data.change(
fn=export_pretty,
inputs=loaded_data,
outputs=export_preview
)
# -------------------------------------------------------------------
# 🚀 Launch
# -------------------------------------------------------------------
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", show_error=True)