File size: 3,565 Bytes
3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e 9ad58d0 3b78b4e ba3dfc3 3b78b4e 9ad58d0 3b78b4e a380d94 3b78b4e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 |
import os
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from langgraph.graph import MessagesState, END, StateGraph
from langgraph.checkpoint.memory import MemorySaver
from pmcp.agents.executor import ExecutorAgent
from pmcp.agents.trello_agent import TrelloAgent
from pmcp.agents.github_agent import GithubAgent
from pmcp.agents.planner import PlannerAgent
from pmcp.nodes.human_interrupt_node import HumanInterruptNode
from pmcp.nodes.human_resume_node import HumanResumeNode
from pmcp.models.state import PlanningState
async def setup_graph(github_token: str, trello_api: str, trello_token: str):
    """Build and compile the LangGraph workflow wiring planner, Trello,
    GitHub and executor agents together with human-in-the-loop review.

    Each MCP client launches its server as a ``stdio`` subprocess and the
    tools discovered on both servers are merged into a single ``ToolNode``.

    Args:
        github_token: API key forwarded to the GitHub MCP server subprocess.
        trello_api: Trello API key forwarded to the Trello MCP server.
        trello_token: Trello token forwarded to the Trello MCP server.

    Returns:
        A tuple ``(app, human_resume_node)`` — the compiled graph (backed by
        an in-memory checkpointer) and the node used to resume execution
        after a human interrupt.
    """
    mcp_client_github = MultiServerMCPClient(
        {
            "github": {
                "command": "python",
                "args": [
                    "pmcp/mcp_server/github_server/mcp_github_main.py",
                    "--api-key",
                    github_token,
                ],
                "transport": "stdio",
            }
        }
    )
    mcp_client_trello = MultiServerMCPClient(
        {
            "trello": {
                "command": "python",
                "args": [
                    "pmcp/mcp_server/trello_server/mcp_trello_main.py",
                    "--api-key",
                    trello_api,
                    "--token",
                    trello_token,
                ],
                "transport": "stdio",
            }
        }
    )

    # In-memory checkpointer so interrupted runs can be resumed by thread id.
    memory = MemorySaver()

    trello_tools = await mcp_client_trello.get_tools()
    github_tools = await mcp_client_github.get_tools()
    # One ToolNode executes tools from either server.
    tool_node = ToolNode(github_tools + trello_tools)

    llm = ChatOpenAI(
        model="Qwen/Qwen2.5-32B-Instruct",
        temperature=0.0,
        api_key=os.getenv("NEBIUS_API_KEY"),
        base_url="https://api.studio.nebius.com/v1/",
    )

    trello_agent = TrelloAgent(tools=trello_tools, llm=llm)
    github_agent = GithubAgent(llm=llm, tools=github_tools)
    planner_agent = PlannerAgent(llm=llm)
    executor_agent = ExecutorAgent(llm=llm)
    human_interrupt_node = HumanInterruptNode(llm=llm)
    human_resume_node = HumanResumeNode(llm=llm)

    graph = StateGraph(PlanningState)
    graph.add_node(planner_agent.agent.agent_name, planner_agent.acall_planner_agent)
    graph.add_node(trello_agent.agent.agent_name, trello_agent.acall_trello_agent)
    graph.add_node(github_agent.agent.agent_name, github_agent.acall_github_agent)
    graph.add_node(executor_agent.agent.agent_name, executor_agent.acall_executor_agent)
    graph.add_node("tool", tool_node)
    graph.add_node("human_interrupt", human_interrupt_node.call_human_interrupt_agent)
    graph.set_entry_point(planner_agent.agent.agent_name)

    def should_continue(state: PlanningState):
        """Route a worker agent's turn: pending tool calls go to human
        review, otherwise control returns to the executor."""
        last_message = state.messages[-1]
        if last_message.tool_calls:
            return "human_interrupt"
        return executor_agent.agent.agent_name

    def execute_agent(state: PlanningState):
        """Dispatch to the agent named by the current plan step, or end the
        run when no steps remain."""
        if state.current_step:
            return state.current_step.agent
        return END

    graph.add_conditional_edges(trello_agent.agent.agent_name, should_continue)
    graph.add_conditional_edges(github_agent.agent.agent_name, should_continue)
    graph.add_conditional_edges(executor_agent.agent.agent_name, execute_agent)
    # NOTE(review): two unconditional edges out of "tool" fan tool results out
    # to BOTH agents in parallel — confirm this is intended rather than
    # routing back only to the agent that issued the tool call.
    graph.add_edge("tool", trello_agent.agent.agent_name)
    graph.add_edge("tool", github_agent.agent.agent_name)
    graph.add_edge(planner_agent.agent.agent_name, executor_agent.agent.agent_name)

    app = graph.compile(checkpointer=memory)
    # Dropped the original `app.get_graph(xray=True).draw_mermaid()` call:
    # its returned diagram string was discarded, so it was pure dead work.
    return app, human_resume_node