import gradio as gr
import os

from smolagents import CodeAgent, InferenceClientModel, MCPClient
from smolagents import LiteLLMModel  # optional alternative model backend

# Connect to the MCP Server we created in the previous section.
mcp_client = MCPClient(
    {"url": "https://abdallalswaiti-mcp-sentiment.hf.space/gradio_api/mcp/sse"}
)

try:
    # Discover the tools exposed by the MCP server.
    tools = mcp_client.get_tools()

    model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
    # Alternative: use a Gemini model through LiteLLM instead.
    # model = LiteLLMModel(
    #     model_id="gemini/gemini-2.5-flash-preview-05-20",  # Gemini model name for LiteLLM
    #     temperature=0.2,
    #     api_key=os.environ["GOOGLE_API_KEY"],  # Must have access to Gemini via Vertex AI
    # )

    # Build a code agent that can call the MCP tools.
    agent = CodeAgent(tools=[*tools], model=model)

    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        examples=["Prime factorization of 68"],
        title="Agent with MCP Tools",
        description="This is a simple agent that uses MCP tools to answer questions.",
    )

    demo.launch()
finally:
    # Always close the connection to the MCP server, even if the app crashes.
    mcp_client.disconnect()
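
# ---------------------------------------------------------------------------
# Usage note (an assumption, not part of the original file): the ChatInterface
# above forwards every user message to agent.run(), so the agent can also be
# exercised without the UI, e.g. inside the try block:
#
#     answer = agent.run("What is the sentiment of 'I love this product!'?")
#     print(answer)
#
# To deploy this as a Space, a requirements.txt along these lines is assumed
# (exact dependencies and pins are not given in the source):
#
#     gradio
#     smolagents[mcp]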