GerlandoRex committed
Commit cb09459 · 1 Parent(s): 2513120

add(agents): loggers + system prompt in base agent + filtering AI Messages in human resume
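Every caller in this commit stops prepending SystemMessage(content=self.agent.system_prompt) to state.messages, and the commit message attributes that to the system prompt now living in the base agent. AgentBlueprint itself is not part of this diff, so the following is only a minimal sketch of what that base-agent change is assumed to look like; the real class in pmcp/agents/agent_base.py may differ.

# Hypothetical sketch of AgentBlueprint after this commit; not the real class.
from langchain_core.messages import BaseMessage, SystemMessage


class AgentBlueprint:
    def __init__(self, llm, system_prompt: str, tools=None):
        # Assumption: tools are bound once and the prompt is stored on the agent.
        self.llm = llm.bind_tools(tools) if tools else llm
        self.system_prompt = system_prompt

    def call_agent(self, messages: list[BaseMessage]):
        # Callers now pass only state.messages; the system prompt is injected here.
        return self.llm.invoke([SystemMessage(content=self.system_prompt)] + list(messages))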
pmcp/agents/executor.py CHANGED
@@ -5,8 +5,8 @@ from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI
 from langchain_core.messages import HumanMessage
 
-from pmcp.models.plan import PlanStep
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 
 class ExecutorAgent:
@@ -18,16 +18,21 @@ class ExecutorAgent:
         )
 
     def call_executor_agent(self, state: PlanningState):
-        current_step = state.plan_step
-        plan_step: PlanStep = state.plan.steps[current_step]
-        return {
-            "plan_step": current_step + 1,
-            "messages": [
+        plan_step_index = state.plan_step
+        current_step = None
+        messages = []
+        if len(state.plan.steps) > plan_step_index:
+            current_step = state.plan.steps[plan_step_index]
+            messages = [
                 HumanMessage(
-                    content=f"The {plan_step.agent} agent should perform the following action:\n{plan_step.description}"
+                    content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
                 )
-            ],
-            "current_step": plan_step,
+            ]
+        logger.info(f"The Executor is executing step: {current_step}")
+        return {
+            "plan_step": plan_step_index + 1,
+            "messages": messages,
+            "current_step": current_step,
         }
 
     async def acall_executor_agent(self, state: PlanningState):
@@ -36,10 +41,12 @@ class ExecutorAgent:
         messages = []
         if len(state.plan.steps) > plan_step_index:
             current_step = state.plan.steps[plan_step_index]
-            messages = [HumanMessage(
-                content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
-            )]
-
+            messages = [
+                HumanMessage(
+                    content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
+                )
+            ]
+            logger.info(f"The Executor is executing step: {current_step}")
         return {
             "plan_step": plan_step_index + 1,
             "messages": messages,
pmcp/agents/github_agent.py CHANGED
@@ -2,10 +2,10 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 SYSTEM_PROMPT = """
 You are an assistant that can manage Trello boards and projects.
@@ -24,13 +24,11 @@ class GithubAgent:
         )
 
     def call_github_agent(self, state: PlanningState):
-        response = self.agent.call_agent(
-            [SystemMessage(content=self.agent.system_prompt)] + state.messages
-        )
+        logger.info("Calling Github agent...")
+        response = self.agent.call_agent(state.messages)
         return {"messages": [response]}
 
     async def acall_github_agent(self, state: PlanningState):
-        response = await self.agent.acall_agent(
-            [SystemMessage(content=self.agent.system_prompt)] + state.messages
-        )
+        logger.info("Calling Github agent...")
+        response = await self.agent.acall_agent(state.messages)
         return {"messages": [response]}
pmcp/agents/planner.py CHANGED
@@ -2,11 +2,11 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.plan import Plan
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 SYSTEM_PROMPT = """
 You are a Planner Agent responsible for breaking down high-level project goals into clear, actionable steps. You do not execute tasks yourself — instead, you delegate them to two specialized agents:
@@ -38,15 +38,19 @@ class PlannerAgent:
         )
 
     def call_planner_agent(self, state: PlanningState):
+        logger.info("Calling Planner agent...")
         response = self.agent.call_agent_structured(
-            messages=[SystemMessage(content=self.agent.system_prompt)] + state.messages,
+            messages=state.messages,
             clazz=Plan,
         )
+        logger.info(f"Building plan: {response}")
         return {"plan": response, "plan_step": 0, "current_step": None}
 
     async def acall_planner_agent(self, state: PlanningState):
+        logger.info("Calling Planner agent...")
         response = await self.agent.acall_agent_structured(
-            messages=[SystemMessage(content=self.agent.system_prompt)] + state.messages,
+            messages=state.messages,
             clazz=Plan,
         )
+        logger.info(f"Building plan: {response}")
         return {"plan": response, "plan_step": 0, "current_step": None}
pmcp/agents/trello_agent.py CHANGED
@@ -2,10 +2,11 @@ from typing import List
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.state import PlanningState
+from loguru import logger
+
 
 SYSTEM_PROMPT = """
 You are an assistant that can manage Trello boards and projects.
@@ -24,13 +25,11 @@ class TrelloAgent:
         )
 
     def call_trello_agent(self, state: PlanningState):
-        response = self.agent.call_agent(
-            [SystemMessage(content=self.agent.system_prompt)] + state.messages
-        )
+        logger.info("Calling Trello Agent...")
+        response = self.agent.call_agent(state.messages)
         return {"messages": [response]}
 
     async def acall_trello_agent(self, state: PlanningState):
-        response = await self.agent.acall_agent(
-            [SystemMessage(content=self.agent.system_prompt)] + state.messages
-        )
+        logger.info("Calling Trello Agent...")
+        response = await self.agent.acall_agent(state.messages)
         return {"messages": [response]}
pmcp/nodes/human_interrupt_node.py CHANGED
@@ -2,9 +2,10 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage, AIMessage
+from langchain_core.messages import AIMessage
 from langchain_openai import ChatOpenAI
 from langgraph.types import Command, interrupt
+from langchain_core.messages.utils import filter_messages
 
 from pmcp.models.state import PlanningState
 
@@ -27,28 +28,31 @@ class HumanInterruptNode:
         )
 
     def call_human_interrupt_agent(self, state: PlanningState):
-        last_message = state.messages[-1]
-
-        #TODO: ask Gerlax about the tool
+        last_message = filter_messages(state.messages, include_types=[AIMessage])[-1]
+
+        # TODO: ask Gerlax about the tool
         try:
             tool_call = last_message.tool_calls[-1]
         except Exception:
-            last_message = state.messages[-2]
+            last_message = filter_messages(state.messages, include_types=[AIMessage])[
+                -2
+            ]
             tool_call = last_message.tool_calls[-1]
 
-
-
         if tool_call.get("name", "").startswith("get_"):
-            return Command(goto="tool")
-
+            return Command(goto="tool")
+
         response = self.agent.call_agent(
-            messages=[SystemMessage(content=self.agent.system_prompt), AIMessage(content= f"Tool Calling details: {str(tool_call)}")] + state.messages,
+            messages=[
+                AIMessage(content=f"Tool Calling details: {str(tool_call)}"),
+            ]
+            + state.messages,
         )
         human_review = interrupt(response.content)
 
         confirm_action = human_review.confirm_action
         changes_description = human_review.changes_description
+
         if confirm_action:
             return Command(goto="tool")
 
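Switching from state.messages[-1] to filter_messages(..., include_types=[AIMessage]) matters because after a tool round-trip the most recent entry is typically a ToolMessage, which carries no useful tool_calls. A small self-contained example of the helper's behavior (message contents are made up):

# Demonstrates why the node filters to AIMessage before reading tool_calls.
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.messages.utils import filter_messages

messages = [
    HumanMessage(content="Close the 'Release' card"),
    AIMessage(
        content="",
        tool_calls=[{"name": "update_card", "args": {"card": "Release"}, "id": "call_1"}],
    ),
    ToolMessage(content="done", tool_call_id="call_1"),
]

last_ai = filter_messages(messages, include_types=[AIMessage])[-1]
print(last_ai.tool_calls[-1]["name"])  # -> update_card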
pmcp/nodes/human_resume_node.py CHANGED
@@ -2,11 +2,12 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage, HumanMessage
+from langchain_core.messages import HumanMessage
 from langchain_openai import ChatOpenAI
 from langgraph.types import Command
 
 from pmcp.models.resume_trigger import ResumeTrigger
+from loguru import logger
 
 
 SYSTEM_PROMPT = """
@@ -29,8 +30,11 @@ class HumanResumeNode:
         )
 
     def call_human_interrupt_agent(self, user_message: str):
+        logger.info("Human resumer agent...")
         response = self.agent.call_agent_structured(
-            [SystemMessage(content=self.agent.system_prompt), HumanMessage(content= user_message)],
+            [
+                HumanMessage(content=user_message),
+            ],
             clazz=ResumeTrigger,
         )
 
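The resume node turns a free-form user reply into a ResumeTrigger via structured output. The model's fields are not shown in this commit; the sketch below infers confirm_action and changes_description from how HumanInterruptNode reads the value returned by interrupt(), and may not match the real pmcp/models/resume_trigger.py.

# Assumed shape of ResumeTrigger, inferred from human_review.confirm_action and
# human_review.changes_description in HumanInterruptNode; verify against
# pmcp/models/resume_trigger.py.
from typing import Optional

from pydantic import BaseModel


class ResumeTrigger(BaseModel):
    confirm_action: bool                       # True -> route back to the tool node
    changes_description: Optional[str] = None  # requested edits when not confirmed


# e.g. "yes, go ahead" would be parsed into:
approved = ResumeTrigger(confirm_action=True)
# while "no, rename the card first" might become:
revised = ResumeTrigger(confirm_action=False, changes_description="rename the card first")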