fantaxy commited on
Commit
f0fedc9
·
verified ·
1 Parent(s): d92b784

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +355 -0
app.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 나의 첫번째 챗봇 AI
3
+ A workflow application created with MOUSE Workflow builder.
4
+ Generated by MOUSE Workflow
5
+ """
6
+
7
+ import os
8
+ import json
9
+ import gradio as gr
10
+ import requests
11
+
12
+ # Workflow configuration
13
+ WORKFLOW_DATA = {
14
+ "nodes": [
15
+ {
16
+ "id": "name_input",
17
+ "type": "textInput",
18
+ "position": {
19
+ "x": 100,
20
+ "y": 100
21
+ },
22
+ "data": {
23
+ "label": "Your Name",
24
+ "template": {
25
+ "input_value": {
26
+ "value": "John"
27
+ }
28
+ }
29
+ }
30
+ },
31
+ {
32
+ "id": "topic_input",
33
+ "type": "textInput",
34
+ "position": {
35
+ "x": 100,
36
+ "y": 250
37
+ },
38
+ "data": {
39
+ "label": "Topic",
40
+ "template": {
41
+ "input_value": {
42
+ "value": "Python programming"
43
+ }
44
+ }
45
+ }
46
+ },
47
+ {
48
+ "id": "level_input",
49
+ "type": "textInput",
50
+ "position": {
51
+ "x": 100,
52
+ "y": 400
53
+ },
54
+ "data": {
55
+ "label": "Skill Level",
56
+ "template": {
57
+ "input_value": {
58
+ "value": "beginner"
59
+ }
60
+ }
61
+ }
62
+ },
63
+ {
64
+ "id": "combiner",
65
+ "type": "textNode",
66
+ "position": {
67
+ "x": 350,
68
+ "y": 250
69
+ },
70
+ "data": {
71
+ "label": "Combine Inputs",
72
+ "template": {
73
+ "text": {
74
+ "value": "Create a personalized learning plan"
75
+ }
76
+ }
77
+ }
78
+ },
79
+ {
80
+ "id": "llm_1",
81
+ "type": "llmNode",
82
+ "position": {
83
+ "x": 600,
84
+ "y": 250
85
+ },
86
+ "data": {
87
+ "label": "Generate Learning Plan",
88
+ "template": {
89
+ "provider": {
90
+ "value": "VIDraft"
91
+ },
92
+ "model": {
93
+ "value": "Gemma-3-r1984-27B"
94
+ },
95
+ "temperature": {
96
+ "value": 0.7
97
+ },
98
+ "system_prompt": {
99
+ "value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."
100
+ }
101
+ }
102
+ }
103
+ },
104
+ {
105
+ "id": "output_1",
106
+ "type": "ChatOutput",
107
+ "position": {
108
+ "x": 900,
109
+ "y": 250
110
+ },
111
+ "data": {
112
+ "label": "Your Learning Plan"
113
+ }
114
+ }
115
+ ],
116
+ "edges": [
117
+ {
118
+ "id": "e1",
119
+ "source": "name_input",
120
+ "target": "combiner"
121
+ },
122
+ {
123
+ "id": "e2",
124
+ "source": "topic_input",
125
+ "target": "combiner"
126
+ },
127
+ {
128
+ "id": "e3",
129
+ "source": "level_input",
130
+ "target": "combiner"
131
+ },
132
+ {
133
+ "id": "e4",
134
+ "source": "combiner",
135
+ "target": "llm_1"
136
+ },
137
+ {
138
+ "id": "e5",
139
+ "source": "llm_1",
140
+ "target": "output_1"
141
+ }
142
+ ]
143
+ }
144
+
145
+ def execute_workflow(*input_values):
146
+ """Execute the workflow with given inputs"""
147
+
148
+ # API keys from environment
149
+ vidraft_token = os.getenv("FRIENDLI_TOKEN")
150
+ openai_key = os.getenv("OPENAI_API_KEY")
151
+
152
+ nodes = WORKFLOW_DATA.get("nodes", [])
153
+ edges = WORKFLOW_DATA.get("edges", [])
154
+
155
+ results = {}
156
+
157
+ # Get input nodes
158
+ input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
159
+
160
+ # Map inputs to node IDs
161
+ for i, node in enumerate(input_nodes):
162
+ if i < len(input_values):
163
+ results[node["id"]] = input_values[i]
164
+
165
+ # Process nodes
166
+ for node in nodes:
167
+ node_id = node.get("id")
168
+ node_type = node.get("type", "")
169
+ node_data = node.get("data", {})
170
+ template = node_data.get("template", {})
171
+
172
+ if node_type == "textNode":
173
+ # Combine connected inputs
174
+ base_text = template.get("text", {}).get("value", "")
175
+ connected_inputs = []
176
+
177
+ for edge in edges:
178
+ if edge.get("target") == node_id:
179
+ source_id = edge.get("source")
180
+ if source_id in results:
181
+ connected_inputs.append(f"{source_id}: {results[source_id]}")
182
+
183
+ if connected_inputs:
184
+ results[node_id] = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
185
+ else:
186
+ results[node_id] = base_text
187
+
188
+ elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
189
+ # Get provider and model - VIDraft as default
190
+ provider = template.get("provider", {}).get("value", "VIDraft")
191
+ if provider not in ["VIDraft", "OpenAI"]:
192
+ provider = "VIDraft" # Default to VIDraft
193
+ temperature = template.get("temperature", {}).get("value", 0.7)
194
+ system_prompt = template.get("system_prompt", {}).get("value", "")
195
+
196
+ # Get input text
197
+ input_text = ""
198
+ for edge in edges:
199
+ if edge.get("target") == node_id:
200
+ source_id = edge.get("source")
201
+ if source_id in results:
202
+ input_text = results[source_id]
203
+ break
204
+
205
+ # Call API
206
+ if provider == "OpenAI" and openai_key:
207
+ try:
208
+ from openai import OpenAI
209
+ client = OpenAI(api_key=openai_key)
210
+
211
+ messages = []
212
+ if system_prompt:
213
+ messages.append({"role": "system", "content": system_prompt})
214
+ messages.append({"role": "user", "content": input_text})
215
+
216
+ response = client.chat.completions.create(
217
+ model="gpt-4.1-mini",
218
+ messages=messages,
219
+ temperature=temperature,
220
+ max_tokens=1000
221
+ )
222
+
223
+ results[node_id] = response.choices[0].message.content
224
+ except Exception as e:
225
+ results[node_id] = f"[OpenAI Error: {str(e)}]"
226
+
227
+ elif provider == "VIDraft" and vidraft_token:
228
+ try:
229
+ headers = {
230
+ "Authorization": f"Bearer {vidraft_token}",
231
+ "Content-Type": "application/json"
232
+ }
233
+
234
+ messages = []
235
+ if system_prompt:
236
+ messages.append({"role": "system", "content": system_prompt})
237
+ messages.append({"role": "user", "content": input_text})
238
+
239
+ payload = {
240
+ "model": "dep89a2fld32mcm",
241
+ "messages": messages,
242
+ "max_tokens": 16384,
243
+ "temperature": temperature,
244
+ "top_p": 0.8,
245
+ "stream": False
246
+ }
247
+
248
+ response = requests.post(
249
+ "https://api.friendli.ai/dedicated/v1/chat/completions",
250
+ headers=headers,
251
+ json=payload,
252
+ timeout=30
253
+ )
254
+
255
+ if response.status_code == 200:
256
+ results[node_id] = response.json()["choices"][0]["message"]["content"]
257
+ else:
258
+ results[node_id] = f"[VIDraft Error: {response.status_code}]"
259
+ except Exception as e:
260
+ results[node_id] = f"[VIDraft Error: {str(e)}]"
261
+ else:
262
+ # Show which API key is missing
263
+ if provider == "OpenAI":
264
+ results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]"
265
+ elif provider == "VIDraft":
266
+ results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
267
+ else:
268
+ results[node_id] = f"[No API key found for {provider}. Using simulated response: {input_text[:50]}...]"
269
+
270
+ elif node_type in ["ChatOutput", "textOutput", "Output"]:
271
+ # Get connected result
272
+ for edge in edges:
273
+ if edge.get("target") == node_id:
274
+ source_id = edge.get("source")
275
+ if source_id in results:
276
+ results[node_id] = results[source_id]
277
+ break
278
+
279
+ # Return outputs
280
+ output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
281
+ return [results.get(n["id"], "") for n in output_nodes]
282
+
283
# ----------------------------------------------------------------- UI -----
# Gradio front-end: one widget per workflow input node, an execute button,
# and one read-only textbox per workflow output node.
with gr.Blocks(title="나의 첫번째 챗봇 AI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 나의 첫번째 챗봇 AI")
    gr.Markdown("A workflow application created with MOUSE Workflow builder.")

    # Probe which API keys are configured so the user gets immediate feedback.
    friendli_key = os.getenv("FRIENDLI_TOKEN")
    oai_key = os.getenv("OPENAI_API_KEY")

    with gr.Accordion("🔑 API Status", open=False):
        gr.Markdown(
            "✅ **VIDraft API**: Connected (Gemma-3-r1984-27B)"
            if friendli_key
            else "❌ **VIDraft API**: Not configured"
        )
        gr.Markdown(
            "✅ **OpenAI API**: Connected (gpt-4.1-mini)"
            if oai_key
            else "⚠️ **OpenAI API**: Not configured (optional)"
        )

        # Summary banner: VIDraft is mandatory, OpenAI is optional.
        if not friendli_key:
            gr.Markdown("""
**⚠️ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.

Go to: Space settings → Repository secrets → Add secret
""")
        elif not oai_key:
            gr.Markdown("""
**💡 Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
""")
        else:
            gr.Markdown("**✨ All APIs configured! Your app is fully functional.**")

    # Split the workflow graph into its UI-facing node groups.
    all_nodes = WORKFLOW_DATA.get("nodes", [])
    in_nodes = [n for n in all_nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
    out_nodes = [n for n in all_nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]

    # Input widgets: Number for numeric nodes, Textbox for everything else.
    inputs = []
    if in_nodes:
        gr.Markdown("### 📥 Inputs")
        for node in in_nodes:
            node_data = node.get("data", {})
            label = node_data.get("label", node.get("id"))
            default_value = node_data.get("template", {}).get("input_value", {}).get("value", "")
            if node.get("type") == "numberInput":
                widget = gr.Number(label=label, value=float(default_value) if default_value else 0)
            else:
                widget = gr.Textbox(label=label, value=default_value, lines=2)
            inputs.append(widget)

    btn = gr.Button("🚀 Execute Workflow", variant="primary")

    # Output widgets: read-only textboxes mirroring each output node.
    outputs = []
    if out_nodes:
        gr.Markdown("### 📤 Outputs")
        for node in out_nodes:
            outputs.append(
                gr.Textbox(
                    label=node.get("data", {}).get("label", node.get("id")),
                    interactive=False,
                    lines=3,
                )
            )

    # Wire the button to the workflow executor.
    btn.click(fn=execute_workflow, inputs=inputs, outputs=outputs)

    gr.Markdown("---")
    gr.Markdown("*Powered by MOUSE Workflow*")

if __name__ == "__main__":
    demo.launch()