```python
import os

import gradio as gr
from smolagents import CodeAgent, InferenceClientModel, LiteLLMModel, MCPClient

mcp_client = None
try:
    # Connect to the MCP server we created in the previous section
    mcp_client = MCPClient(
        {"url": "https://abdallalswaiti-mcp-sentiment.hf.space/gradio_api/mcp/sse"}
    )
    tools = mcp_client.get_tools()

    model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
    # Alternatively, use a Gemini model through LiteLLM:
    # model = LiteLLMModel(
    #     model_id="gemini/gemini-2.5-flash-preview-05-20",  # Gemini model name for LiteLLM
    #     temperature=0.2,
    #     api_key=os.environ["GOOGLE_API_KEY"],  # Requires a Google API key with Gemini access
    # )

    agent = CodeAgent(tools=[*tools], model=model)

    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        examples=["Prime factorization of 68"],
        title="Agent with MCP Tools",
        description="This is a simple agent that uses MCP tools to answer questions.",
    )

    demo.launch()
finally:
    # Guard against MCPClient(...) failing before mcp_client is assigned
    if mcp_client is not None:
        mcp_client.disconnect()
```
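
If you prefer not to manage the connection manually, recent versions of smolagents also let `MCPClient` act as a context manager that yields the server's tools and disconnects on exit. A minimal sketch of that variant, assuming the same Space URL and `HUGGINGFACE_API_TOKEN` environment variable as above:

```python
import os

import gradio as gr
from smolagents import CodeAgent, InferenceClientModel, MCPClient

# Entering the context connects to the server and returns its tool list;
# exiting disconnects automatically, even if an exception is raised.
with MCPClient(
    {"url": "https://abdallalswaiti-mcp-sentiment.hf.space/gradio_api/mcp/sse"}
) as tools:
    agent = CodeAgent(
        tools=[*tools],
        model=InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN")),
    )
    gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        title="Agent with MCP Tools",
    ).launch()
```

This removes the need for the `try`/`finally` cleanup entirely, since disconnection is tied to the `with` block rather than to a variable that may not have been assigned yet.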