JuanjoSG5 committed on
Commit 14c9c39 · 1 Parent(s): 100ea5d

doc: removed debugging logs

Files changed (2):
  1. agent_test.py +21 -141
  2. gradio_interface/app.py +3 -23
agent_test.py CHANGED
@@ -22,7 +22,7 @@ class MCPClientWrapper:
     def __init__(self):
         self.session = None
         self.exit_stack = None
-        self.mistral = ChatOpenAI(model_name="mistralai/mistral-small", temperature=0.7, openai_api_key=os.getenv("OPENROUTER_API_KEY"), openai_api_base=os.getenv("OPENROUTER_API_BASE_URL"))
+        self.mistral = ChatOpenAI(model_name="mistralai/mistral-small", temperature=0.7, openai_api_key=os.getenv("OPENROUTER_API_KEY"))
         self.tools = []

     def connect(self, server_path: str) -> str:
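Note: with `openai_api_base` dropped from the constructor, `ChatOpenAI` defaults to the official OpenAI endpoint unless a base URL reaches it some other way. A minimal sketch of one such way, assuming the `OPENAI_API_BASE` environment variable (which langchain-openai consults when no explicit base URL is passed) and OpenRouter's public endpoint — nothing in this commit sets this:

import os
from langchain_openai import ChatOpenAI

# Assumption: point langchain-openai at OpenRouter via the environment
# instead of the removed openai_api_base argument.
os.environ["OPENAI_API_BASE"] = "https://openrouter.ai/api/v1"

llm = ChatOpenAI(
    model_name="mistralai/mistral-small",
    temperature=0.7,
    openai_api_key=os.getenv("OPENROUTER_API_KEY"),
)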
@@ -191,165 +191,45 @@ class MCPClientWrapper:

         return result_messages

-    # New methods for image processing
-    def image_to_base64(self, image):
-        """Convert PIL image to base64 string"""
-        if image is None:
-            return None
-        buffered = BytesIO()
-        image.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode()
-        return img_str
-
-    async def process_image(self, image, operation, target_format=None, width=None, height=None):
-        """Process an image using MCP tools"""
-        if not self.session:
-            return None, "Please connect to an MCP server first."
-
-        if image is None:
-            return None, "No image provided."
-
-        try:
-            img_base64 = self.image_to_base64(image)
-
-            if operation == "Remove Background":
-                result = await self.session.call_tool("remove_background_from_url", {"url": img_base64})
-
-            elif operation == "Change Format":
-                if not target_format:
-                    return None, "Please select a target format."
-                result = await self.session.call_tool("change_format", {
-                    "image_base64": img_base64,
-                    "target_format": target_format.lower()
-                })
-
-            elif operation == "Resize Image":
-                if not width or not height:
-                    return None, "Please provide width and height."
-                result = await self.session.call_tool("resize_image", {
-                    "image_base64": img_base64,
-                    "width": int(width),
-                    "height": int(height)
-                })
-
-            elif operation == "Visualize Image":
-                result = await self.session.call_tool("visualize_base64_image", {"image_base64": img_base64})
-
-            else:
-                return None, "Unknown operation."
-
-            # Process the result
-            result_content = result.content
-            if isinstance(result_content, str):
-                try:
-                    result_data = json.loads(result_content)
-                    if "image_base64" in result_data:
-                        # Convert result base64 back to image
-                        img_data = base64.b64decode(result_data["image_base64"])
-                        result_img = Image.open(BytesIO(img_data))
-                        return result_img, "Image processed successfully."
-                    else:
-                        return None, f"Unexpected result format: {result_content}"
-                except json.JSONDecodeError:
-                    return None, f"Error decoding result: {result_content}"
-            else:
-                return None, f"Unexpected result type: {type(result_content)}"
-
-        except Exception as e:
-            return None, f"Error processing image: {str(e)}"
-
 client = MCPClientWrapper()

 def gradio_interface():
-    with gr.Blocks(title="MCP Assistant") as demo:
-        gr.Markdown("# MCP Assistant")
-        gr.Markdown("Connect to your MCP server to chat or process images")
+    with gr.Blocks(title="MCP Weather Client") as demo:
+        gr.Markdown("# MCP Weather Assistant")
+        gr.Markdown("Connect to your MCP weather server and chat with the assistant")

         with gr.Row(equal_height=True):
             with gr.Column(scale=4):
                 server_path = gr.Textbox(
                     label="Server Script Path",
-                    placeholder="Enter path to server script",
-                    value="mcp_server.py"
+                    placeholder="Enter path to server script (e.g., weather.py)",
+                    value="gradio_mcp_server.py"
                 )
             with gr.Column(scale=1):
                 connect_btn = gr.Button("Connect")

         status = gr.Textbox(label="Connection Status", interactive=False)

-        with gr.Tabs() as tabs:
-            with gr.TabItem("Chat Interface"):
-                chatbot = gr.Chatbot(
-                    value=[],
-                    height=500,
-                    type="messages",
-                    show_copy_button=True,
-                    avatar_images=("👤", "🤖")
-                )
-
-                with gr.Row(equal_height=True):
-                    msg = gr.Textbox(
-                        label="Your Question",
-                        placeholder="Ask about the available tools or how to process images",
-                        scale=4
-                    )
-                    clear_btn = gr.Button("Clear Chat", scale=1)
-
-            with gr.TabItem("Image Processing"):
-                with gr.Row():
-                    with gr.Column():
-                        input_image = gr.Image(label="Input Image", type="pil")
-                        operation = gr.Radio(
-                            ["Remove Background", "Change Format", "Resize Image", "Visualize Image"],
-                            label="Select Operation",
-                            value="Visualize Image"
-                        )
-
-                        with gr.Group() as format_options:
-                            target_format = gr.Dropdown(
-                                ["png", "jpeg", "webp"],
-                                label="Target Format",
-                                value="png",
-                                visible=False
-                            )
-
-                        with gr.Group() as resize_options:
-                            with gr.Row():
-                                width = gr.Number(label="Width", value=300, visible=False)
-                                height = gr.Number(label="Height", value=300, visible=False)
-
-                        process_btn = gr.Button("Process Image")
-
-                    with gr.Column():
-                        output_image = gr.Image(label="Processed Image")
-                        output_message = gr.Textbox(label="Status")
+        chatbot = gr.Chatbot(
+            value=[],
+            height=500,
+            type="messages",
+            show_copy_button=True,
+            avatar_images=("👤", "🤖")
+        )

-        # Connect to server
-        connect_btn.click(client.connect, inputs=server_path, outputs=status)
+        with gr.Row(equal_height=True):
+            msg = gr.Textbox(
+                label="Your Question",
+                placeholder="Ask about weather or alerts (e.g., What's the weather in New York?)",
+                scale=4
+            )
+            clear_btn = gr.Button("Clear Chat", scale=1)

-        # Chat functionality
+        connect_btn.click(client.connect, inputs=server_path, outputs=status)
         msg.submit(client.process_message, [msg, chatbot], [chatbot, msg])
         clear_btn.click(lambda: [], None, chatbot)

-        # Image processing functionality
-        def update_options(op):
-            return {
-                target_format: op == "Change Format",
-                width: op == "Resize Image",
-                height: op == "Resize Image"
-            }
-
-        operation.change(update_options, inputs=operation, outputs=[target_format, width, height])
-
-        def process_image_wrapper(image, operation, target_format, width, height):
-            return loop.run_until_complete(client.process_image(image, operation, target_format, width, height))
-
-        process_btn.click(
-            process_image_wrapper,
-            inputs=[input_image, operation, target_format, width, height],
-            outputs=[output_image, output_message]
-        )
-
     return demo

 if __name__ == "__main__":
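The removed `process_image_wrapper` bridged Gradio's synchronous callbacks into the async MCP session through a module-level event loop (`loop`, defined outside these hunks). For reference, a minimal sketch of the same sync-to-async bridge using only the standard library; the helper name is illustrative, not part of the file:

import asyncio

def run_tool_sync(coro):
    # Run one coroutine to completion from a synchronous Gradio callback.
    # asyncio.run creates a fresh event loop per call and closes it afterwards.
    return asyncio.run(coro)

# e.g.: result_img, status_msg = run_tool_sync(client.process_image(image, op, fmt, w, h))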
 
gradio_interface/app.py CHANGED
@@ -13,29 +13,17 @@ from langchain_openai import ChatOpenAI
 from langchain_core.messages import HumanMessage, AIMessage
 from langchain_core.callbacks import StreamingStdOutCallbackHandler

-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
 # Load environment
 dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
 load_dotenv(dotenv_path=dotenv_path)

-# Debug env
-logger.info(f"OPENROUTER_BASE_URL: {getenv('OPENROUTER_BASE_URL')}")
-logger.info(f"OPENROUTER_API_KEY: {'Found' if getenv('OPENROUTER_API_KEY') else 'Missing'}")
-
 # Connectivity test
 def test_connectivity(url="https://openrouter.helicone.ai/api/v1"):
     try:
         return requests.get(url, timeout=5).status_code == 200
-    except (requests.RequestException, socket.error) as e:
-        logger.error(f"Connectivity test failed: {e}")
+    except (requests.RequestException, socket.error):
         return False

-if not test_connectivity():
-    logger.warning("No network to OpenRouter; responses may fail.")
-
 # Helper to make direct API calls to OpenRouter when LangChain fails
 def direct_api_call(messages, api_key, base_url):
     headers = {
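With the module-level logger gone, `test_connectivity` now fails silently and the startup warning is dropped. A caller that still wants the old visibility can check the boolean itself; a minimal sketch (placement is illustrative, not part of the commit):

if not test_connectivity():
    print("Warning: no network to OpenRouter; responses may fail.")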
@@ -64,7 +52,6 @@ def direct_api_call(messages, api_key, base_url):
         response.raise_for_status()
         return response.json()["choices"][0]["message"]["content"]
     except Exception as e:
-        logger.error(f"Direct API call failed: {e}")
         return f"Error: {str(e)}"

 # Initialize LLM with streaming and retry logic
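For context, `direct_api_call` wraps a plain OpenAI-compatible chat-completions POST; only its tail is visible in this hunk. A minimal sketch of the presumed request shape — the model name, timeout, and payload details are assumptions, not taken from the file:

import requests

def direct_api_call_sketch(messages, api_key, base_url):
    # OpenRouter exposes an OpenAI-compatible REST API.
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    payload = {"model": "mistralai/mistral-small", "messages": messages}
    response = requests.post(f"{base_url}/chat/completions", headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]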
@@ -86,7 +73,6 @@ def init_llm():
 try:
     llm = init_llm()
 except Exception as e:
-    logger.error(f"Failed to initialize LLM: {e}")
     llm = None

 # Helpers
@@ -148,7 +134,6 @@ def generate_response(message, chat_history, image):
     # First try with LangChain
     if llm:
         try:
-            # Try streaming first
             try:
                 stream_iter = llm.stream(lc_messages)
                 partial = ""
@@ -164,7 +149,7 @@ def generate_response(message, chat_history, image):
                 # If we got this far, streaming worked
                 return
             except Exception as e:
-                logger.warning(f"Streaming failed: {e}. Falling back to non-streaming mode")
+                print(f"Streaming failed: {e}. Falling back to non-streaming mode")

             # Try non-streaming
             try:
@@ -172,13 +157,10 @@ def generate_response(message, chat_history, image):
                 yield response.content
                 return
             except Exception as e:
-                logger.warning(f"Non-streaming LangChain invoke failed: {e}")
                 raise e
         except Exception as e:
-            logger.warning(f"LangChain approach failed: {e}. Trying direct API call")
+            raise e

-        # Fallback to direct API call
-        logger.info("Using direct API call as fallback")
         response_text = direct_api_call(
             api_messages,
             getenv("OPENROUTER_API_KEY"),
@@ -189,8 +171,6 @@ def generate_response(message, chat_history, image):
     except Exception as e:
         import traceback
         error_trace = traceback.format_exc()
-        logger.exception(f"All approaches failed during response generation: {e}")
-        logger.error(f"Full traceback: {error_trace}")
         yield f"⚠️ Error al generar respuesta: {str(e)}. Intenta más tarde."

 # Gradio interface
 