ABDALLALSWAITI committed
Commit · b967600
Parent(s): ea99dd5

Add application file

Files changed:
- requirements.txt +2 -0
- server.py +33 -0
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+gradio[mcp]
+smolagents[mcp]
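The two extras matter here: gradio[mcp] installs Gradio with its built-in MCP server support, and smolagents[mcp] pulls in the MCP client integration that server.py uses through MCPClient. Locally, pip install -r requirements.txt brings in both.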
server.py
ADDED
@@ -0,0 +1,33 @@
+import gradio as gr
+import os
+
+from mcp import StdioServerParameters
+from smolagents import InferenceClientModel, CodeAgent, ToolCollection, MCPClient
+from smolagents import LiteLLMModel
+
+
+try:
+    mcp_client = MCPClient(
+        {"url": "http://localhost:7860/gradio_api/mcp/sse"}  # This is the MCP Server we created in the previous section
+    )
+    tools = mcp_client.get_tools()
+
+    model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
+    # model = LiteLLMModel(
+    #     model_id="gemini/gemini-2.5-flash-preview-05-20",  # Gemini model name for LiteLLM
+    #     temperature=0.2,
+    #     api_key=os.environ["GOOGLE_API_KEY"]  # Must have access to Gemini via Vertex AI
+    # )
+    agent = CodeAgent(tools=[*tools], model=model)
+
+    demo = gr.ChatInterface(
+        fn=lambda message, history: str(agent.run(message)),
+        type="messages",
+        examples=["Prime factorization of 68"],
+        title="Agent with MCP Tools",
+        description="This is a simple agent that uses MCP tools to answer questions.",
+    )
+
+    demo.launch()
+finally:
+    mcp_client.disconnect()
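One caveat in the code as committed: if the MCPClient(...) call raises before mcp_client is bound, the finally block itself fails with a NameError. A minimal hardening sketch, not part of the commit:

# Sketch: bind mcp_client before the try block so cleanup is safe
# even when connecting to the MCP server fails.
from smolagents import MCPClient

mcp_client = None
try:
    mcp_client = MCPClient({"url": "http://localhost:7860/gradio_api/mcp/sse"})
    # ... build the agent and launch the UI exactly as in server.py ...
finally:
    if mcp_client is not None:
        mcp_client.disconnect()

For context, the inline comment in server.py points at an MCP server built "in the previous section". A minimal sketch of what such a server could look like, assuming Gradio's built-in MCP support from the gradio[mcp] extra (the letter_counter tool is an illustration, not content of this commit):

import gradio as gr

def letter_counter(word: str, letter: str) -> int:
    """Count how many times `letter` appears in `word`."""
    return word.lower().count(letter.lower())

demo = gr.Interface(
    fn=letter_counter,
    inputs=["text", "text"],
    outputs="number",
    title="Letter Counter",
)

# mcp_server=True also exposes the function as an MCP tool,
# served over SSE at /gradio_api/mcp/sse.
demo.launch(mcp_server=True)

When both scripts run on the same machine, the server occupies port 7860 and Gradio typically launches the client UI on the next free port; the client also expects HUGGINGFACE_API_TOKEN to be set for InferenceClientModel.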