# Source imported from Hugging Face Space (commit ba3dfc3, "enhance README documentation").
import os
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from langgraph.graph import MessagesState, END, StateGraph
from langgraph.checkpoint.memory import MemorySaver
from pmcp.agents.executor import ExecutorAgent
from pmcp.agents.trello_agent import TrelloAgent
from pmcp.agents.github_agent import GithubAgent
from pmcp.agents.planner import PlannerAgent
from pmcp.nodes.human_interrupt_node import HumanInterruptNode
from pmcp.nodes.human_resume_node import HumanResumeNode
from pmcp.models.state import PlanningState
async def setup_graph(github_token: str, trello_api: str, trello_token: str):
    """Build and compile the multi-agent planning graph.

    Spawns the GitHub and Trello MCP servers as stdio subprocesses, collects
    their tools, wires the planner/executor/Trello/GitHub agents plus a
    human-in-the-loop interrupt node into a ``StateGraph``, and compiles it
    with an in-memory checkpointer.

    Args:
        github_token: API key forwarded to the GitHub MCP server process.
        trello_api: Trello API key forwarded to the Trello MCP server process.
        trello_token: Trello token forwarded to the Trello MCP server process.

    Returns:
        A tuple ``(app, human_resume_node)`` where ``app`` is the compiled
        graph and ``human_resume_node`` is the node used to resume after a
        human interrupt.
    """
    mcp_client_github = MultiServerMCPClient(
        {
            "github": {
                "command": "python",
                "args": [
                    "pmcp/mcp_server/github_server/mcp_github_main.py",
                    "--api-key",
                    github_token,
                ],
                "transport": "stdio",
            }
        }
    )
    mcp_client_trello = MultiServerMCPClient(
        {
            "trello": {
                "command": "python",
                "args": [
                    "pmcp/mcp_server/trello_server/mcp_trello_main.py",
                    "--api-key",
                    trello_api,
                    "--token",
                    trello_token,
                ],
                "transport": "stdio",
            }
        }
    )

    # Checkpointer lets the graph pause on a human interrupt and resume later.
    memory = MemorySaver()

    trello_tools = await mcp_client_trello.get_tools()
    github_tools = await mcp_client_github.get_tools()
    # One shared tool node executes tool calls from both agents.
    tool_node = ToolNode(github_tools + trello_tools)

    llm = ChatOpenAI(
        model="Qwen/Qwen2.5-32B-Instruct",
        temperature=0.0,  # deterministic output for planning / tool calling
        api_key=os.getenv("NEBIUS_API_KEY"),
        base_url="https://api.studio.nebius.com/v1/",
    )

    trello_agent = TrelloAgent(tools=trello_tools, llm=llm)
    github_agent = GithubAgent(llm=llm, tools=github_tools)
    planner_agent = PlannerAgent(llm=llm)
    executor_agent = ExecutorAgent(llm=llm)
    human_interrupt_node = HumanInterruptNode(llm=llm)
    human_resume_node = HumanResumeNode(llm=llm)

    graph = StateGraph(PlanningState)
    graph.add_node(planner_agent.agent.agent_name, planner_agent.acall_planner_agent)
    graph.add_node(trello_agent.agent.agent_name, trello_agent.acall_trello_agent)
    graph.add_node(github_agent.agent.agent_name, github_agent.acall_github_agent)
    graph.add_node(executor_agent.agent.agent_name, executor_agent.acall_executor_agent)
    graph.add_node("tool", tool_node)
    graph.add_node("human_interrupt", human_interrupt_node.call_human_interrupt_agent)
    graph.set_entry_point(planner_agent.agent.agent_name)

    def should_continue(state: PlanningState):
        """Route to human approval when the last message requests tool calls."""
        last_message = state.messages[-1]
        if last_message.tool_calls:
            return "human_interrupt"
        return executor_agent.agent.agent_name

    def execute_agent(state: PlanningState):
        """Dispatch to the agent that owns the current plan step, or finish."""
        if state.current_step:
            return state.current_step.agent
        return END

    graph.add_conditional_edges(trello_agent.agent.agent_name, should_continue)
    graph.add_conditional_edges(github_agent.agent.agent_name, should_continue)
    graph.add_conditional_edges(executor_agent.agent.agent_name, execute_agent)
    graph.add_edge("tool", trello_agent.agent.agent_name)
    graph.add_edge("tool", github_agent.agent.agent_name)
    graph.add_edge(planner_agent.agent.agent_name, executor_agent.agent.agent_name)

    app = graph.compile(checkpointer=memory)
    # NOTE(review): the original called app.get_graph(xray=True).draw_mermaid()
    # here and discarded the returned diagram string; removed as dead code.
    return app, human_resume_node