from typing import List, Optional
from pmcp.agents.agent_base import AgentBlueprint
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from pmcp.models.state import PlanningState
from loguru import logger


class ExecutorAgent:
    """Walks the planner's plan one step at a time and hands each step to the target agent."""

    def __init__(self, llm: ChatOpenAI, tools: Optional[List[BaseTool]] = None):
        self.agent = AgentBlueprint(
            agent_name="EXECUTOR_AGENT",
            description="The agent that executes all the steps for the planner",
            llm=llm,
        )

    def call_executor_agent(self, state: PlanningState):
        # Pick the next plan step, if any remain, and wrap it in a message
        # addressed to the agent that should carry it out.
        plan_step_index = state.plan_step
        current_step = None
        messages = []
        if len(state.plan.steps) > plan_step_index:
            current_step = state.plan.steps[plan_step_index]
            messages = [
                HumanMessage(
                    content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
                )
            ]
            logger.info(f"The Executor is executing step: {current_step}")
        # Advance the plan pointer so the next call picks up the following step.
        return {
            "plan_step": plan_step_index + 1,
            "messages": messages,
            "current_step": current_step,
        }

    async def acall_executor_agent(self, state: PlanningState):
        # Async variant with the same behaviour as call_executor_agent,
        # for use in asynchronous graph runs.
        plan_step_index = state.plan_step
        current_step = None
        messages = []
        if len(state.plan.steps) > plan_step_index:
            current_step = state.plan.steps[plan_step_index]
            messages = [
                HumanMessage(
                    content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
                )
            ]
            logger.info(f"The Executor is executing step: {current_step}")
        # Advance the plan pointer so the next call picks up the following step.
        return {
            "plan_step": plan_step_index + 1,
            "messages": messages,
            "current_step": current_step,
        }