Commit 0b1cfe4 · Parent(s): 86a7b55

Remove intermediate steps from the execution history and clean up debug messages in the chat function.
app.py CHANGED
````diff
@@ -10,7 +10,7 @@ from dotenv import load_dotenv
 import os
 import json
 from datetime import datetime
-from typing import
+from typing import List, Any
 
 load_dotenv()
 
@@ -64,8 +64,6 @@ def format_execution_history():
             formatted_history.append(f"**[{timestamp}] 🔧 Llamada a Herramienta**\n\n**Herramienta**: {tool_data['name']}\n\n**Argumentos**: \n\n```json\n{json.dumps(tool_data.get('args', {}), indent=2)}\n```\n\n")
         elif step_type == "tool_result":
             formatted_history.append(f"**[{timestamp}] ✅ Resultado de Herramienta**\n\n```\n{entry['data']}\n```\n\n")
-        elif step_type == "intermediate_step":
-            formatted_history.append(f"**[{timestamp}] 🧠 Paso Intermedio**\n\n{entry['data']}\n\n")
         elif step_type == "error":
             formatted_history.append(f"**[{timestamp}] ❌ Error**\n\n{entry['data']}\n\n")
 
@@ -124,15 +122,11 @@ async def chat(history: list, tab_id: str=None, anthropic_api_key: str=None):
     if tab_id:
         history[-1]["content"] += f"\nThis is your tab_id: {tab_id}"
 
-    print(f"Received message: {history[-1]['content']}")
-    print(f"Complete chat history: {history}")
-
     llm_provider = os.getenv("LLM_PROVIDER", "ollama").lower()
     ollama_model = os.getenv("OLLAMA_MODEL", "qwen3:8b")
 
     try:
         agent = await create_agent_with_llm(llm_provider, anthropic_api_key, ollama_model, tools)
-        add_to_execution_history("intermediate_step", f"Agente creado con provider: {llm_provider}", tab_id)
     except ValueError as e:
         error_msg = str(e)
         add_to_execution_history("error", error_msg, tab_id)
@@ -161,8 +155,6 @@ async def chat(history: list, tab_id: str=None, anthropic_api_key: str=None):
         cleaned = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()
 
         add_to_execution_history("agent_response", cleaned, tab_id)
-
-        print(f"Processed output: {cleaned}")
         return cleaned
 
     except Exception as e:
@@ -193,15 +185,11 @@ async def chat_with_history_tracking(message: str, history: List, tab_id: str =
     if tab_id:
         messages[-1]["content"] += f"\nThis is your tab_id: {tab_id}"
 
-    print(f"Received message: {message}")
-    print(f"Complete chat history: {messages}")
-
     llm_provider = os.getenv("LLM_PROVIDER", "ollama").lower()
     ollama_model = os.getenv("OLLAMA_MODEL", "qwen3:8b")
 
     try:
         agent = await create_agent_with_llm(llm_provider, anthropic_api_key, ollama_model, tools)
-        add_to_execution_history("intermediate_step", f"Agente creado con provider: {llm_provider}", tab_id)
     except ValueError as e:
         error_msg = str(e)
         add_to_execution_history("error", error_msg, tab_id)
@@ -233,8 +221,6 @@ async def chat_with_history_tracking(message: str, history: List, tab_id: str =
         cleaned = re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()
 
         add_to_execution_history("agent_response", cleaned, tab_id)
-
-        print(f"Processed output: {cleaned}")
         history.append([message, cleaned])
 
         return history, format_execution_history()
````
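The hunks lean on two helpers defined elsewhere in app.py, `add_to_execution_history` and `format_execution_history`, whose bodies fall mostly outside this diff. Below is a minimal sketch of the entry shape they imply, inferred from how `format_execution_history()` reads `entry['data']`, a timestamp, and the step types seen above; the store name, field layout, and timestamp format are assumptions, not code from this commit:

```python
from datetime import datetime

# Hypothetical module-level store; the real app.py may scope entries per tab_id.
execution_history = []

def add_to_execution_history(step_type, data, tab_id=None):
    # Step types appearing in the diff: "tool_call", "tool_result",
    # "error", "agent_response" (and, before this commit, "intermediate_step").
    execution_history.append({
        "type": step_type,
        "data": data,
        "tab_id": tab_id,
        "timestamp": datetime.now().strftime("%H:%M:%S"),  # assumed format
    })
```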
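The `re.sub` call that both functions keep strips Qwen3-style reasoning blocks before the answer is stored and returned. The pattern below is taken verbatim from the diff; the wrapper function and sample input are illustrative only:

```python
import re

def strip_think_tags(output: str) -> str:
    # re.DOTALL lets '.' match newlines, so multi-line <think>...</think>
    # blocks are removed in one pass; .strip() trims leftover whitespace.
    return re.sub(r'<think>.*?</think>', '', output, flags=re.DOTALL).strip()

raw = "<think>\nDraft a plan first...\n</think>\nFinal answer."
print(strip_think_tags(raw))  # -> Final answer.
```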