openfree committed
Commit ce12e70 · verified · 1 Parent(s): b6039e9

Update app-BACKUP-LAST.py

Files changed (1):
    app-BACKUP-LAST.py +298 -40
app-BACKUP-LAST.py CHANGED
@@ -91,47 +91,237 @@ def load_json_from_text_or_file(json_text: str, file_obj) -> typing.Tuple[typing
    except Exception as e:
        return None, f"❌ Error: {str(e)}"

-def create_sample_workflow():
+def create_sample_workflow(example_type="basic"):
    """Create a sample workflow"""
-    return {
-        "nodes": [
-            {
-                "id": "input_1",
-                "type": "ChatInput",
-                "position": {"x": 100, "y": 200},
-                "data": {
-                    "label": "User Question",
-                    "template": {
-                        "input_value": {"value": "What is the capital of Korea?"}
-                    }
-                }
-            },
-            {
-                "id": "llm_1",
-                "type": "llmNode",
-                "position": {"x": 400, "y": 200},
-                "data": {
-                    "label": "AI Processing",
-                    "template": {
-                        "provider": {"value": "OpenAI"},  # OpenAI or VIDraft
-                        "model": {"value": "gpt-4.1-mini"},  # Fixed to gpt-4.1-mini for OpenAI
-                        "temperature": {"value": 0.7},
-                        "system_prompt": {"value": "You are a helpful assistant."}
-                    }
-                }
-            },
-            {
-                "id": "output_1",
-                "type": "ChatOutput",
-                "position": {"x": 700, "y": 200},
-                "data": {"label": "Answer"}
-            }
-        ],
-        "edges": [
-            {"id": "e1", "source": "input_1", "target": "llm_1"},
-            {"id": "e2", "source": "llm_1", "target": "output_1"}
-        ]
-    }
+
+    if example_type == "basic":
+        # Basic example: simple Q&A
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 100, "y": 200},
+                    "data": {
+                        "label": "User Question",
+                        "template": {
+                            "input_value": {"value": "What is the capital of Korea?"}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 400, "y": 200},
+                    "data": {
+                        "label": "AI Processing",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.7},
+                            "system_prompt": {"value": "You are a helpful assistant."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 700, "y": 200},
+                    "data": {"label": "Answer"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "llm_1"},
+                {"id": "e2", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "vidraft":
+        # VIDraft example
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 100, "y": 200},
+                    "data": {
+                        "label": "User Input",
+                        "template": {
+                            "input_value": {"value": "AI와 머신러닝의 차이점을 설명해주세요."}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 400, "y": 200},
+                    "data": {
+                        "label": "VIDraft AI (Gemma)",
+                        "template": {
+                            "provider": {"value": "VIDraft"},
+                            "model": {"value": "Gemma-3-r1984-27B"},
+                            "temperature": {"value": 0.8},
+                            "system_prompt": {"value": "당신은 전문적이고 친절한 AI 교육자입니다. 복잡한 개념을 쉽게 설명해주세요."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 700, "y": 200},
+                    "data": {"label": "AI Explanation"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "llm_1"},
+                {"id": "e2", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "multi_input":
+        # Multi-input example
+        return {
+            "nodes": [
+                {
+                    "id": "name_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 100},
+                    "data": {
+                        "label": "Your Name",
+                        "template": {
+                            "input_value": {"value": "John"}
+                        }
+                    }
+                },
+                {
+                    "id": "topic_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 250},
+                    "data": {
+                        "label": "Topic",
+                        "template": {
+                            "input_value": {"value": "Python programming"}
+                        }
+                    }
+                },
+                {
+                    "id": "level_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 400},
+                    "data": {
+                        "label": "Skill Level",
+                        "template": {
+                            "input_value": {"value": "beginner"}
+                        }
+                    }
+                },
+                {
+                    "id": "combiner",
+                    "type": "textNode",
+                    "position": {"x": 350, "y": 250},
+                    "data": {
+                        "label": "Combine Inputs",
+                        "template": {
+                            "text": {"value": "Create a personalized learning plan"}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 600, "y": 250},
+                    "data": {
+                        "label": "Generate Learning Plan",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.7},
+                            "system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 900, "y": 250},
+                    "data": {"label": "Your Learning Plan"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "name_input", "target": "combiner"},
+                {"id": "e2", "source": "topic_input", "target": "combiner"},
+                {"id": "e3", "source": "level_input", "target": "combiner"},
+                {"id": "e4", "source": "combiner", "target": "llm_1"},
+                {"id": "e5", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "chain":
+        # Chain-processing example
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 50, "y": 200},
+                    "data": {
+                        "label": "Original Text",
+                        "template": {
+                            "input_value": {"value": "The quick brown fox jumps over the lazy dog."}
+                        }
+                    }
+                },
+                {
+                    "id": "translator",
+                    "type": "llmNode",
+                    "position": {"x": 300, "y": 200},
+                    "data": {
+                        "label": "Translate to Korean",
+                        "template": {
+                            "provider": {"value": "VIDraft"},
+                            "model": {"value": "Gemma-3-r1984-27B"},
+                            "temperature": {"value": 0.3},
+                            "system_prompt": {"value": "You are a professional translator. Translate the given English text to Korean accurately."}
+                        }
+                    }
+                },
+                {
+                    "id": "analyzer",
+                    "type": "llmNode",
+                    "position": {"x": 600, "y": 200},
+                    "data": {
+                        "label": "Analyze Translation",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.5},
+                            "system_prompt": {"value": "You are a linguistic expert. Analyze the Korean translation and explain its nuances and cultural context."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_translation",
+                    "type": "ChatOutput",
+                    "position": {"x": 450, "y": 350},
+                    "data": {"label": "Korean Translation"}
+                },
+                {
+                    "id": "output_analysis",
+                    "type": "ChatOutput",
+                    "position": {"x": 900, "y": 200},
+                    "data": {"label": "Translation Analysis"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "translator"},
+                {"id": "e2", "source": "translator", "target": "analyzer"},
+                {"id": "e3", "source": "translator", "target": "output_translation"},
+                {"id": "e4", "source": "analyzer", "target": "output_analysis"}
+            ]
+        }
+
+    # Default to basic
+    return create_sample_workflow("basic")

# Actual workflow execution function used when running the UI
def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
@@ -181,6 +371,26 @@ def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
            default_value = template.get("input_value", {}).get("value", "")
            results[node_id] = default_value

+        elif node_type == "textNode":
+            # A text node combines every connected input
+            template = node_data.get("template", {})
+            base_text = template.get("text", {}).get("value", "")
+
+            # Collect the connected inputs
+            connected_inputs = []
+            for edge in edges:
+                if edge.get("target") == node_id:
+                    source_id = edge.get("source")
+                    if source_id in results:
+                        connected_inputs.append(f"{source_id}: {results[source_id]}")
+
+            # Build the combined text
+            if connected_inputs:
+                combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
+                results[node_id] = combined_text
+            else:
+                results[node_id] = base_text
+
        elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
            # Process an LLM node
            template = node_data.get("template", {})
@@ -346,6 +556,10 @@ CSS = """
    text-align:center;color:#64748b;font-size:14px;
    margin-top:8px;font-style:italic;
}
+.sample-buttons{
+    display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
+    margin-top:0.5rem;
+}
"""

# -------------------------------------------------------------------
@@ -381,6 +595,12 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
    - OpenAI: gpt-4.1-mini (fixed)
    - VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)

+    **Sample Workflows:**
+    - Basic Q&A: Simple question-answer flow
+    - VIDraft: Korean-language example using the Gemma model
+    - Multi-Input: Combine multiple inputs for personalized output
+    - Chain: Sequential processing with multiple outputs
+
    *Note: Without API keys, the UI will simulate AI responses.*
    """)
@@ -419,7 +639,15 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
                type="filepath"
            )
            btn_load = gr.Button("📥 Load Workflow", variant="primary", size="lg")
-            btn_sample = gr.Button("🎯 Load Sample", variant="secondary")
+
+            # Sample buttons
+            gr.Markdown("**Sample Workflows:**")
+            with gr.Row():
+                btn_sample_basic = gr.Button("🎯 Basic Q&A", variant="secondary", scale=1)
+                btn_sample_vidraft = gr.Button("🤖 VIDraft", variant="secondary", scale=1)
+            with gr.Row():
+                btn_sample_multi = gr.Button("📝 Multi-Input", variant="secondary", scale=1)
+                btn_sample_chain = gr.Button("🔗 Chain", variant="secondary", scale=1)

            # Status
            status_text = gr.Textbox(
@@ -472,6 +700,9 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
                input_nodes.append(node)
            elif node_type in ["ChatOutput", "textOutput", "Output"]:
                output_nodes.append(node)
+            elif node_type == "textNode":
+                # textNode is an intermediate processing node; it is not shown in the UI
+                pass

        # Create input components
        input_components = {}
@@ -618,9 +849,36 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
        outputs=trigger_update
    )

-    # Load sample
-    btn_sample.click(
-        fn=lambda: (create_sample_workflow(), "✅ Sample loaded", export_pretty(create_sample_workflow())),
+    # Load samples
+    btn_sample_basic.click(
+        fn=lambda: (create_sample_workflow("basic"), "✅ Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_vidraft.click(
+        fn=lambda: (create_sample_workflow("vidraft"), "✅ VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_multi.click(
+        fn=lambda: (create_sample_workflow("multi_input"), "✅ Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_chain.click(
+        fn=lambda: (create_sample_workflow("chain"), "✅ Chain processing sample loaded", export_pretty(create_sample_workflow("chain"))),
        outputs=[loaded_data, status_text, import_json_text]
    ).then(
        fn=lambda current_trigger: not current_trigger,
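For quick verification of the new samples outside the Gradio UI, a minimal sketch is below. It assumes the script has been copied to an importable name (`app_backup_last.py` here, a hypothetical name, since `app-BACKUP-LAST.py` is not a valid module name) and that `execute_workflow_simple` keys `input_values` and its returned `results` by node id, as the per-node `results[node_id]` assignments in the diff suggest; without OpenAI/VIDraft API keys, the LLM nodes fall back to simulated responses per the app's own note.

# Hypothetical smoke test for this commit's sample workflows (not part of the app).
# Assumes the script was copied to app_backup_last.py so it can be imported.
from app_backup_last import create_sample_workflow, execute_workflow_simple

# Every example type should produce a well-formed graph with nodes and edges.
for example in ("basic", "vidraft", "multi_input", "chain"):
    wf = create_sample_workflow(example)
    assert {"nodes", "edges"} <= wf.keys()
    print(example, "->", [n["id"] for n in wf["nodes"]])

# Run the multi-input sample; keying input_values by node id is an assumption
# drawn from how execute_workflow_simple stores per-node results.
wf = create_sample_workflow("multi_input")
results = execute_workflow_simple(
    wf, {"name_input": "Jane", "topic_input": "Rust", "level_input": "intermediate"}
)
print(results)

If the keying assumption is wrong, the defaults baked into each input node ("John", "Python programming", "beginner") should still flow through, since the executor falls back to each node's `input_value`.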