Javier-Jimenez99 committed on
Commit
7b42080
·
1 Parent(s): 4a379a8

Agregar seguimiento de historial de ejecución en la función de chat y mejorar la interfaz de usuario con un historial detallado.

Browse files
Files changed (2) hide show
  1. app.py +300 -9
  2. requirements.txt +1 -1
app.py CHANGED
@@ -8,9 +8,71 @@ import gradio as gr
8
  import re
9
  from dotenv import load_dotenv
10
  import os
 
 
 
11
 
12
  load_dotenv()
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  async def initialize_tools():
15
  """
16
  Initializes the SSE connection and loads the MCP tools.
@@ -49,10 +111,16 @@ tools = asyncio.get_event_loop().run_until_complete(initialize_tools())
49
 
50
  async def chat(history: list, tab_id: str=None, anthropic_api_key: str=None):
51
  """
 
52
  history: list of messages [{"role": "user"/"assistant", "content": "..."}]
53
  tab_id: a string that the client wants to correlate
54
  anthropic_api_key: the key sent by the client in each request
55
  """
 
 
 
 
 
56
  if tab_id:
57
  history[-1]["content"] += f"\nThis is your tab_id: {tab_id}"
58
 
@@ -64,19 +132,234 @@ async def chat(history: list, tab_id: str=None, anthropic_api_key: str=None):
64
 
65
  try:
66
  agent = await create_agent_with_llm(llm_provider, anthropic_api_key, ollama_model, tools)
 
67
  except ValueError as e:
68
- return str(e)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
- result = await agent.ainvoke({"messages": history})
71
- output = result["messages"][-1].content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- cleaned = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()
 
74
 
75
- print(f"Processed output: {cleaned}")
76
- return cleaned
77
 
78
- # 6) Final interface definition, adding the API key textbox
79
- demo = gr.Interface(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  fn=chat,
81
  inputs=[
82
  gr.JSON(label="history"),
@@ -84,7 +367,15 @@ demo = gr.Interface(
84
  gr.Textbox(label="anthropic_api_key"),
85
  ],
86
  outputs="text",
 
 
 
 
 
 
 
 
87
  )
88
 
89
  if __name__ == "__main__":
90
- demo.launch(server_port=os.getenv("GRADIO_PORT", 7860))
 
8
  import re
9
  from dotenv import load_dotenv
10
  import os
11
+ import json
12
+ from datetime import datetime
13
+ from typing import Dict, List, Any
14
 
15
  load_dotenv()
16
 
17
# Shared in-memory log of every execution step (user inputs, tool calls,
# agent responses, errors). Rendered by format_execution_history() and
# appended to by add_to_execution_history().
execution_history = []
19
+
20
def format_message_for_display(message):
    """Return the displayable text for a chat message.

    Accepts either a message object exposing a ``content`` attribute
    (and optionally ``tool_calls``, LangChain-style) or any plain value,
    which is stringified. Tool calls, when present, are appended to the
    text as markdown bullet lines.
    """
    content = message.content if hasattr(message, 'content') else str(message)

    calls = getattr(message, 'tool_calls', None)
    if calls:
        lines = []
        for call in calls:
            lines.append(f"🔧 **Tool Call**: {call['name']}")
            if 'args' in call:
                lines.append(f" **Args**: {json.dumps(call['args'], indent=2)}")
        content += "\n\n" + "\n".join(lines)

    return content
36
+
37
def add_to_execution_history(step_type: str, data: Any, tab_id: str = None):
    """Record one timestamped step in the shared execution log.

    step_type: category tag, e.g. "user_input", "agent_response",
        "tool_call", "tool_result", "intermediate_step" or "error".
    data: payload for the step (text, or a dict for tool calls).
    tab_id: optional client tab identifier the step belongs to.
    """
    entry = {
        "timestamp": datetime.now().strftime("%H:%M:%S"),
        "type": step_type,
        "data": data,
        "tab_id": tab_id,
    }
    execution_history.append(entry)
46
+
47
def format_execution_history():
    """Render the global execution log as a single markdown document.

    Each entry becomes a timestamped section followed by a ``---``
    separator; entries of an unrecognized type contribute only the
    separator.
    """
    if not execution_history:
        return "No hay historial de ejecución aún."

    sections = []
    for entry in execution_history:
        ts = entry["timestamp"]
        kind = entry["type"]
        tab = entry.get("tab_id", "N/A")
        data = entry.get("data")

        if kind == "user_input":
            sections.append(f"**[{ts}] 👤 Usuario (Tab: {tab})**\n{data}\n")
        elif kind == "agent_response":
            sections.append(f"**[{ts}] 🤖 Agente**\n{data}\n")
        elif kind == "tool_call":
            args_json = json.dumps(data.get('args', {}), indent=2)
            sections.append(
                f"**[{ts}] 🔧 Llamada a Herramienta**\n"
                f"**Herramienta**: {data['name']}\n"
                f"**Argumentos**: ```json\n{args_json}\n```\n"
            )
        elif kind == "tool_result":
            sections.append(f"**[{ts}] ✅ Resultado de Herramienta**\n```\n{data}\n```\n")
        elif kind == "intermediate_step":
            sections.append(f"**[{ts}] 🧠 Paso Intermedio**\n{data}\n")
        elif kind == "error":
            sections.append(f"**[{ts}] ❌ Error**\n{data}\n")

        sections.append("---\n")

    return "\n".join(sections)
75
+
76
  async def initialize_tools():
77
  """
78
  Initializes the SSE connection and loads the MCP tools.
 
111
 
112
  async def chat(history: list, tab_id: str=None, anthropic_api_key: str=None):
113
  """
114
+ Original API function for compatibility - now with history tracking
115
  history: list of messages [{"role": "user"/"assistant", "content": "..."}]
116
  tab_id: a string that the client wants to correlate
117
  anthropic_api_key: the key sent by the client in each request
118
  """
119
+ # Extract the last message to add to execution history
120
+ if history:
121
+ last_message = history[-1]["content"]
122
+ add_to_execution_history("user_input", last_message, tab_id)
123
+
124
  if tab_id:
125
  history[-1]["content"] += f"\nThis is your tab_id: {tab_id}"
126
 
 
132
 
133
  try:
134
  agent = await create_agent_with_llm(llm_provider, anthropic_api_key, ollama_model, tools)
135
+ add_to_execution_history("intermediate_step", f"Agente creado con provider: {llm_provider}", tab_id)
136
  except ValueError as e:
137
+ error_msg = str(e)
138
+ add_to_execution_history("error", error_msg, tab_id)
139
+ return error_msg
140
+
141
+ try:
142
+ result = await agent.ainvoke({"messages": history})
143
+
144
+ # Process all messages in the result to track tool calls
145
+ all_messages = result["messages"]
146
+
147
+ # Track tool calls and responses
148
+ for msg in all_messages:
149
+ if hasattr(msg, 'tool_calls') and msg.tool_calls:
150
+ for tool_call in msg.tool_calls:
151
+ add_to_execution_history("tool_call", {
152
+ "name": tool_call.get("name", "unknown"),
153
+ "args": tool_call.get("args", {})
154
+ }, tab_id)
155
+
156
+ # Check if it's a tool message (result of tool execution)
157
+ if hasattr(msg, 'name') and msg.name:
158
+ add_to_execution_history("tool_result", msg.content, tab_id)
159
+
160
+ output = all_messages[-1].content
161
+ cleaned = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()
162
+
163
+ add_to_execution_history("agent_response", cleaned, tab_id)
164
+
165
+ print(f"Processed output: {cleaned}")
166
+ return cleaned
167
+
168
+ except Exception as e:
169
+ error_msg = f"Error durante la ejecución: {str(e)}"
170
+ add_to_execution_history("error", error_msg, tab_id)
171
+ return error_msg
172
 
173
async def chat_with_history_tracking(message: str, history: List, tab_id: str = None, anthropic_api_key: str = None):
    """
    Enhanced chat function that tracks all execution steps.

    message: the new user message.
    history: prior conversation, either as role/content dicts or as
        gr.Chatbot-style ``[user_text, assistant_text]`` pairs.
    tab_id: optional client correlation id appended to the prompt.
    anthropic_api_key: per-request API key forwarded to the agent factory.

    Returns a tuple of (updated history, formatted execution history).
    """
    # Add user input to execution history
    add_to_execution_history("user_input", message, tab_id)

    # Normalize the incoming history to the role/content dict format
    # expected by the LangGraph agent.
    messages = []
    for h in history:
        if isinstance(h, dict):
            messages.append(h)
        else:
            # BUGFIX: the previous code compared h[0] (the *user's text*)
            # against the literal "user", so Chatbot pairs were almost
            # always mislabeled as a single assistant message and the
            # user's turn was dropped. A pair is [user_text, bot_text];
            # emit both turns, skipping missing halves.
            user_text, assistant_text = h[0], h[1]
            if user_text is not None:
                messages.append({"role": "user", "content": user_text})
            if assistant_text is not None:
                messages.append({"role": "assistant", "content": assistant_text})

    # Add current message
    messages.append({"role": "user", "content": message})

    if tab_id:
        messages[-1]["content"] += f"\nThis is your tab_id: {tab_id}"

    print(f"Received message: {message}")
    print(f"Complete chat history: {messages}")

    llm_provider = os.getenv("LLM_PROVIDER", "ollama").lower()
    ollama_model = os.getenv("OLLAMA_MODEL", "qwen3:8b")

    try:
        agent = await create_agent_with_llm(llm_provider, anthropic_api_key, ollama_model, tools)
        add_to_execution_history("intermediate_step", f"Agente creado con provider: {llm_provider}", tab_id)
    except ValueError as e:
        error_msg = str(e)
        add_to_execution_history("error", error_msg, tab_id)
        history.append([message, error_msg])
        return history, format_execution_history()

    try:
        result = await agent.ainvoke({"messages": messages})

        # Process all messages in the result
        all_messages = result["messages"]

        # Record every tool call and tool result so the UI can show the
        # full reasoning trace, not just the final answer.
        for msg in all_messages:
            if hasattr(msg, 'tool_calls') and msg.tool_calls:
                for tool_call in msg.tool_calls:
                    add_to_execution_history("tool_call", {
                        "name": tool_call.get("name", "unknown"),
                        "args": tool_call.get("args", {}),
                    }, tab_id)

            # Messages carrying a non-empty `name` are treated as tool
            # results (ToolMessage-style) — presumably; confirm against
            # the message types the agent actually emits.
            if hasattr(msg, 'name') and msg.name:
                add_to_execution_history("tool_result", msg.content, tab_id)

        # Get the final output; strip <think>...</think> reasoning blocks
        # emitted by some models before showing it to the user.
        output = all_messages[-1].content
        cleaned = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()

        add_to_execution_history("agent_response", cleaned, tab_id)

        print(f"Processed output: {cleaned}")
        history.append([message, cleaned])

        return history, format_execution_history()

    except Exception as e:
        error_msg = f"Error durante la ejecución: {str(e)}"
        add_to_execution_history("error", error_msg, tab_id)
        history.append([message, error_msg])
        return history, format_execution_history()
247
+
248
def clear_history():
    """Reset the shared execution log.

    Returns an empty chat history for the chatbot component plus a
    confirmation message for the execution-history panel.
    """
    global execution_history
    execution_history = []
    return [], "Historial de ejecución limpiado."
253
+
254
# Create the enhanced Gradio interface: chat on the left, a live view of
# the agent's execution trace (tool calls, intermediate steps) on the right.
with gr.Blocks(title="OwlBear Agent - Historial Completo", theme=gr.themes.Default()) as demo:
    gr.Markdown("# 🦉 OwlBear Agent - Vista Completa de Ejecución")
    gr.Markdown("Esta interfaz muestra todo el proceso de ejecución del agente, incluyendo llamadas a herramientas y pasos intermedios.")
    gr.Markdown("**Nota:** Todos los mensajes enviados a la API original también aparecen aquí automáticamente.")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## 💬 Chat")
            chatbot = gr.Chatbot(
                label="Conversación",
                height=400,
                show_label=True,
                container=True,
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Mensaje",
                    placeholder="Escribe tu mensaje aquí...",
                    lines=2,
                    scale=4,
                )
                send_btn = gr.Button("Enviar", variant="primary", scale=1)

            with gr.Row():
                tab_id = gr.Textbox(
                    label="Tab ID",
                    placeholder="ID de pestaña (opcional)",
                    value="main",
                    scale=1,
                )
                anthropic_key = gr.Textbox(
                    label="Anthropic API Key",
                    placeholder="Clave API de Anthropic (opcional)",
                    type="password",
                    scale=2,
                )

            clear_btn = gr.Button("Limpiar Chat", variant="secondary")

        with gr.Column(scale=1):
            gr.Markdown("## 📊 Historial de Ejecución Detallado")
            gr.Markdown("*Se actualiza automáticamente cada 2 segundos*")
            execution_display = gr.Markdown(
                value="No hay historial de ejecución aún.",
                label="Historial Completo",
                height=600,
                container=True,
            )

            refresh_btn = gr.Button("Actualizar Historial", variant="secondary")
            clear_history_btn = gr.Button("Limpiar Historial", variant="secondary")

    # Auto-refresh timer so the execution log updates while the agent runs.
    timer = gr.Timer(value=2)  # refresh every 2 seconds
    timer.tick(format_execution_history, outputs=[execution_display], show_api=False)

    # Event handlers
    def send_message(message, history, tab_id, anthropic_key):
        """Bridge Gradio's synchronous callback to the async chat coroutine."""
        if not message.strip():
            return history, "", format_execution_history()

        # FIX: use asyncio.run() instead of manually creating, installing
        # and closing an event loop. asyncio.run() guarantees the loop is
        # shut down and does not leave a stale loop registered via
        # set_event_loop(); the redundant local `import asyncio` is gone
        # (the module already imports asyncio at the top of the file).
        new_history, execution_history_display = asyncio.run(
            chat_with_history_tracking(message, history, tab_id, anthropic_key)
        )
        return new_history, "", execution_history_display

    send_btn.click(
        send_message,
        inputs=[msg, chatbot, tab_id, anthropic_key],
        outputs=[chatbot, msg, execution_display],
        show_api=False,
    )

    msg.submit(
        send_message,
        inputs=[msg, chatbot, tab_id, anthropic_key],
        outputs=[chatbot, msg, execution_display],
        show_api=False,
    )

    clear_btn.click(
        lambda: ([], ""),
        outputs=[chatbot, msg],
        show_api=False,
    )

    refresh_btn.click(
        format_execution_history,
        outputs=[execution_display],
        show_api=False,
    )

    clear_history_btn.click(
        clear_history,
        outputs=[chatbot, execution_display],
        show_api=False,
    )
360
+
361
+ # Original API interface for backward compatibility
362
+ api_demo = gr.Interface(
363
  fn=chat,
364
  inputs=[
365
  gr.JSON(label="history"),
 
367
  gr.Textbox(label="anthropic_api_key"),
368
  ],
369
  outputs="text",
370
+ title="OwlBear Agent - API Original"
371
+ )
372
+
373
# Combined interface exposing both UIs as tabs: the rich execution-history
# view and the original API-compatible interface.
_tab_interfaces = [demo, api_demo]
_tab_names = ["Vista Completa con Historial", "API Original"]
combined_demo = gr.TabbedInterface(
    _tab_interfaces,
    _tab_names,
    title="🦉 OwlBear Agent - Interfaz Completa",
)
379
 
380
if __name__ == "__main__":
    # GRADIO_PORT arrives from the environment as a string; launch()
    # requires an integer port.
    port = int(os.getenv("GRADIO_PORT", 7860))
    combined_demo.launch(server_port=port)
requirements.txt CHANGED
@@ -5,4 +5,4 @@ langchain-ollama
5
  langchain-mcp-adapters
6
  langgraph
7
  mcp
8
- asyncio
 
5
  langchain-mcp-adapters
6
  langgraph
7
  mcp
8
+ python-dotenv