avfranco committed on
Commit 4a6af9d · 1 Parent(s): 83bc7c7

ea4all-mcp-lgs-sync-UAT-passed

ea4all/ea4all_mcp.py CHANGED
@@ -8,8 +8,6 @@
 ## APPLY latest version of LGS Agentic AI - tbs
 
 from langchain.callbacks.tracers import LangChainTracer
-from langchain.callbacks.tracers.langchain import wait_for_all_tracers
-from langchain_core.messages import HumanMessage
 from langchain_core.runnables import RunnableConfig
 
 from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
@@ -23,7 +21,6 @@ from ea4all.src.shared.utils import (
     get_vqa_examples,
     _join_paths,
     EA4ALL_ARCHITECTURE,
-    EA4ALL_PODCAST,
 )
 
 #from ea4all.src.pmo_crew.crew_runner import run_pmo_crew
@@ -78,6 +75,8 @@ async def run_qna_agentic_system(question: str) -> AsyncGenerator[list, None]:
     Returns:
         reponse: Response to user's architectural question.
     """
+    from ea4all.src.ea4all_vqa.state import InputState
+    from langchain_core.messages import HumanMessage
 
     format_response = ""
     chat_memory = []
@@ -86,7 +85,11 @@ async def run_qna_agentic_system(question: str) -> AsyncGenerator[list, None]:
         chat_memory.append(ChatMessage(role="assistant", content=format_response))
     else:
         index = await call_indexer_apm(config) #call indexer to update the index
-        response = await apm_graph.ainvoke({"question": question}, config=config)
+
+        input_data = InputState(
+            messages=[HumanMessage(content=question)]
+        )
+        response = await apm_graph.ainvoke(input=input_data, config=config)
         chat_memory.append(ChatMessage(role="assistant", content=response['generation']))
 
     yield chat_memory
@@ -102,6 +105,8 @@ async def run_vqa_agentic_system(question: str, diagram: str, request: gr.Reques
     Returns:
         response: Response to user's question.
     """
+    from ea4all.src.ea4all_vqa.state import InputState
+    from langchain_core.messages import HumanMessage
 
     #capture user ip
     #ea4all_user = e4u.get_user_identification(request)
@@ -122,7 +127,7 @@ async def run_vqa_agentic_system(question: str, diagram: str, request: gr.Reques
         chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
         yield chat_memory
     else:
-        diagram = message['files'][-1] ##chat_memory[-1]['content'][-1]
+        diagram = str(message['files'][-1]) ##chat_memory[-1]['content'][-1]
         msg = message['text'] ##chat_memory[-2]['content']
         print(f"---DIAGRAM: {diagram}---")
         try:
@@ -133,11 +138,15 @@ async def run_vqa_agentic_system(question: str, diagram: str, request: gr.Reques
         if diagram_.format not in allowed_file_types:
             #chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
             print(f"---DIAGRAM: {diagram.format} is not a valid file type. Allowed file types are JPEG and PNG.---")
-        #else:
-        #'vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
 
         vqa_image = diagram
-        response = await diagram_graph.ainvoke({"question":msg, "image": vqa_image}, config)
+        # Ensure input matches expected type for ainvoke
+        input_data = InputState(
+            messages=[HumanMessage(content=msg)],
+            image=vqa_image
+        )
+        response = await diagram_graph.ainvoke(input=input_data, config=config)
+
         chat_memory.append(ChatMessage(role="assistant", content=response['messages'][-1].content if len(response['messages']) else response['safety_status']['description']))
 
         yield chat_memory
@@ -157,10 +166,10 @@ async def run_reference_architecture_agentic_system(business_query: str) -> Asyn
     """
 
     if len(business_query) < 20:
-        agent_response = "Please provide a valid Business Requirement content to start!"
+        agent_response = "Please provide a valid Business Requirement to start!"
        yield([agent_response, None])
     else:
-        inputs = {"business_query": [{"role": "user", "content": business_query}]} #user response
+        inputs = {"messages": [{"role": "user", "content": business_query}]} #user response
         index = await call_indexer_apm(config) #call indexer to update the index
         response = await togaf_graph.ainvoke(
             input=inputs,
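Note: both entry points now wrap the user's text in a HumanMessage before invoking the graphs. A minimal sketch of how a caller might drive the updated QnA entry point (the question is illustrative, and the import path is assumed from the repo layout):

    import asyncio

    from ea4all.ea4all_mcp import run_qna_agentic_system  # assumed import path

    async def main():
        # The async generator yields the accumulated chat transcript after each turn.
        async for chat in run_qna_agentic_system("Which applications support invoice processing?"):
            for message in chat:
                print(f"{message.role}: {message.content}")

    asyncio.run(main())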
ea4all/src/ea4all_apm/graph.py CHANGED
@@ -13,6 +13,7 @@ about an IT Landscape or Websearch.
13
  import os
14
 
15
  from langgraph.graph import END, StateGraph
 
16
 
17
  #core libraries
18
  from langchain_core.runnables import RunnableConfig
@@ -30,7 +31,7 @@ from langchain.load import dumps, loads
30
  from langchain.hub import pull
31
 
32
  from operator import itemgetter
33
- from typing import AsyncGenerator, AsyncIterator
34
 
35
  #compute amount of tokens used
36
  import tiktoken
@@ -46,6 +47,7 @@ from ea4all.src.shared.utils import (
46
  extract_structured_output,
47
  extract_topic_from_business_input,
48
  _join_paths,
 
49
  )
50
  from ea4all.src.shared import vectorstore
51
  from ea4all.src.tools.tools import (
@@ -70,7 +72,7 @@ async def retrieve_documents(
70
  dict[str, list[Document]]: A dictionary with a 'documents' key containing the list of retrieved documents.
71
  """
72
  with vectorstore.make_retriever(config) as retriever:
73
- response = await retriever.ainvoke(state.question, config)
74
  return {"messages": response}
75
 
76
  async def apm_retriever(config: RunnableConfig):
@@ -82,7 +84,11 @@ async def apm_retriever(config: RunnableConfig):
82
  # Few Shot Examples
83
  few_shot_step_back_examples = [
84
  {
85
- "input": "Who can I talk to about innovation?",
 
 
 
 
86
  "output": '{"datasource": "vectorstore, "topic":"who can I talk to"}"}',
87
  },
88
  {
@@ -351,7 +357,7 @@ def user_query_rephrasing(
351
  state: OverallState, _prompt=None, *, config: RunnableConfig
352
  ) -> dict[str,str]:
353
 
354
- question = getattr(state,'question')
355
 
356
  configuration = AgentConfiguration.from_runnable_config(config)
357
  # 'model = load_chat_model(configuration.query_model)
@@ -373,14 +379,10 @@ def user_query_rephrasing(
373
  try:
374
  question = result['rephrased']
375
  except Exception:
376
- question = state.question
377
 
378
  return {"question": question}
379
 
380
- # Post-processing
381
- def format_docs(docs):
382
- return "\n".join(doc.page_content for doc in docs)
383
-
384
  def identify_task_category(
385
  question,chat_memory,config: RunnableConfig
386
  ):
@@ -466,9 +468,9 @@ async def grade_documents(state, config: RunnableConfig):
466
  configuration = AgentConfiguration.from_runnable_config(config)
467
 
468
  print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
469
- question = state.question
470
- documents = state.messages
471
- source = state.source
472
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
473
 
474
  # Score each doc
@@ -504,9 +506,9 @@ def decide_to_generate(state):
504
  """
505
 
506
  print("---ASSESS GRADED DOCUMENTS---")
507
- state.question
508
- source = state.source
509
- getattr(state,'documents')
510
 
511
  if source == "websearch":
512
  # All documents have been filtered check_relevance
@@ -534,12 +536,12 @@ def grade_generation_v_documents_and_question(
534
 
535
  configuration = AgentConfiguration.from_runnable_config(config)
536
 
537
- question = getattr(state,'question')
538
- documents = getattr(state,'messages')
539
- generation = getattr(state,'generation')
540
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
541
 
542
- if getattr(state,'source') == "websearch":
543
  #print("---CHECK HALLUCINATIONS---")
544
  hallucination_grader_instance = hallucination_grader(llm)
545
  #for output in hallucination_grader_instance.stream(
@@ -622,7 +624,7 @@ async def retrieve(
622
  configuration = AgentConfiguration.from_runnable_config(config)
623
 
624
  #print("---RETRIEVE---")
625
- question = getattr(state,'question')
626
 
627
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
628
 
@@ -646,7 +648,7 @@ async def retrieve(
646
  }
647
 
648
  # Retrieval
649
- rag_input = int(getattr(state,'rag'))
650
  retrieval_chain = await get_retrieval_chain(rag_input,"ea4all_agent",question,retriever, config=config)
651
 
652
  retrieved_documents = {
@@ -659,15 +661,15 @@ async def retrieve(
659
 
660
  documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
661
 
662
- return {"messages": format_docs(documents['cdocs']), "question": question, "rag":getattr(state,'rag')}
663
 
664
  ### Edges ###
665
  def route_to_node(state:OverallState):
666
 
667
- if state.source == "websearch":
668
  #print("---ROUTE QUESTION TO WEB SEARCH---")
669
  return "websearch"
670
- elif state.source == "vectorstore":
671
  #print("---ROUTE QUESTION TO RAG---")
672
  return "vectorstore"
673
 
@@ -701,10 +703,10 @@ async def stream_generation(
701
  source = None
702
  chat_memory = None
703
  async for s in state:
704
- documents = getattr(s,"messages")
705
- question = getattr(s,"question")
706
- source = getattr(s,"source")
707
- chat_memory = getattr(s,"chat_memory")
708
 
709
  # Prompt Web Search generation
710
  if source == "websearch":
@@ -743,14 +745,15 @@ async def generate(
743
  """
744
  #print("---GENERATE---")
745
 
746
- #documents = getattr(state,'messages')[-1].content #documents
747
- source = getattr(state,'source')
748
- #question = getattr(state,'question')
 
749
 
750
  ##Triggered by hallucination_grade? 2025-02-21 - NOT USER being edged to END atm
751
  #2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
752
- #if getattr(state,'generation') is None:
753
- # if getattr(state,'web_search') == "Yes":
754
  # await websearch(state, config)
755
  # else:
756
  # state.rag = "1"
@@ -772,8 +775,8 @@ async def apm_agentic_qna(
772
 
773
  configuration = AgentConfiguration.from_runnable_config(config)
774
 
775
- question = getattr(state,'question')
776
- chat_memory = getattr(state,'chat_memory')
777
 
778
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
779
 
@@ -821,16 +824,16 @@ async def final(state: OverallState):
821
  return {"safety_status": state}
822
 
823
  async def choose_next(state: OverallState):
824
- if state.safety_status is not None and len(state.safety_status) > 0 and state.safety_status[0] == 'no':
 
825
  return "exit"
826
  else:
827
  return "route"
828
 
829
  class SafetyCheck:
830
- def apm_safety_check(self,state: OverallState, config: RunnableConfig):
831
 
832
  configuration = AgentConfiguration.from_runnable_config(config)
833
- question = state.question
834
 
835
  safety_prompt = pull('learn-it-all-do-it-all/ea4all_apm_safety_check')
836
 
@@ -853,15 +856,17 @@ class SafetyCheck:
853
  self._safety_run = self.apm_safety_check
854
 
855
  def __call__(self, state: OverallState, config: RunnableConfig) -> dict[str, list]:
 
 
856
  try:
857
- response = getattr(self, '_safety_run')(state, config)
858
- return {"safety_status": [response['safety_status'][0], "", state.question]}
859
  except Exception as e:
860
- return {"safety_status": ['no', e, state.question]}
861
 
862
  ##BUILD APM Graph
863
  # Build graph
864
- workflow = StateGraph(OverallState, input=InputState, output=OutputState, config_schema=AgentConfiguration)
865
 
866
  # Define the nodes
867
  workflow.add_node("safety_check",SafetyCheck())
@@ -903,4 +908,4 @@ workflow.add_edge("final", END)
903
 
904
  # Compile
905
  apm_graph = workflow.compile()
906
- apm_graph.name = "APMGraph"
 
13
  import os
14
 
15
  from langgraph.graph import END, StateGraph
16
+ from langgraph.types import Command
17
 
18
  #core libraries
19
  from langchain_core.runnables import RunnableConfig
 
31
  from langchain.hub import pull
32
 
33
  from operator import itemgetter
34
+ from typing import AsyncGenerator
35
 
36
  #compute amount of tokens used
37
  import tiktoken
 
47
  extract_structured_output,
48
  extract_topic_from_business_input,
49
  _join_paths,
50
+ format_docs,
51
  )
52
  from ea4all.src.shared import vectorstore
53
  from ea4all.src.tools.tools import (
 
72
  dict[str, list[Document]]: A dictionary with a 'documents' key containing the list of retrieved documents.
73
  """
74
  with vectorstore.make_retriever(config) as retriever:
75
+ response = await retriever.ainvoke(state.get('question'), config)
76
  return {"messages": response}
77
 
78
  async def apm_retriever(config: RunnableConfig):
 
84
  # Few Shot Examples
85
  few_shot_step_back_examples = [
86
  {
87
+ "input": "Who can I talk about innovation?",
88
+ "output": '{"datasource": "vectorstore, "topic":"who can I talk to"}"}',
89
+ },
90
+ {
91
+ "input": "Who's the architect responsible for AI landscape?",
92
  "output": '{"datasource": "vectorstore, "topic":"who can I talk to"}"}',
93
  },
94
  {
 
357
  state: OverallState, _prompt=None, *, config: RunnableConfig
358
  ) -> dict[str,str]:
359
 
360
+ question = state.get('question')
361
 
362
  configuration = AgentConfiguration.from_runnable_config(config)
363
  # 'model = load_chat_model(configuration.query_model)
 
379
  try:
380
  question = result['rephrased']
381
  except Exception:
382
+ question = state.get('question')
383
 
384
  return {"question": question}
385
 
 
 
 
 
386
  def identify_task_category(
387
  question,chat_memory,config: RunnableConfig
388
  ):
 
468
  configuration = AgentConfiguration.from_runnable_config(config)
469
 
470
  print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
471
+ question = state.get('question')
472
+ documents = state.get('messages')
473
+ source = state.get('source')
474
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
475
 
476
  # Score each doc
 
506
  """
507
 
508
  print("---ASSESS GRADED DOCUMENTS---")
509
+ state.get('question')
510
+ source = state.get('source')
511
+ state.get('documents')
512
 
513
  if source == "websearch":
514
  # All documents have been filtered check_relevance
 
536
 
537
  configuration = AgentConfiguration.from_runnable_config(config)
538
 
539
+ question = state.get('question')
540
+ documents = state.get('messages')
541
+ generation = state.get('generation')
542
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
543
 
544
+ if state.get('source') == "websearch":
545
  #print("---CHECK HALLUCINATIONS---")
546
  hallucination_grader_instance = hallucination_grader(llm)
547
  #for output in hallucination_grader_instance.stream(
 
624
  configuration = AgentConfiguration.from_runnable_config(config)
625
 
626
  #print("---RETRIEVE---")
627
+ question = state.get('question')
628
 
629
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
630
 
 
648
  }
649
 
650
  # Retrieval
651
+ rag_input = state.get('rag',1)
652
  retrieval_chain = await get_retrieval_chain(rag_input,"ea4all_agent",question,retriever, config=config)
653
 
654
  retrieved_documents = {
 
661
 
662
  documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
663
 
664
+ return {"messages": format_docs(documents['cdocs']), "question": question, "rag":state.get('rag')}
665
 
666
  ### Edges ###
667
  def route_to_node(state:OverallState):
668
 
669
+ if state.get('source') == "websearch":
670
  #print("---ROUTE QUESTION TO WEB SEARCH---")
671
  return "websearch"
672
+ elif state.get('source') == "vectorstore":
673
  #print("---ROUTE QUESTION TO RAG---")
674
  return "vectorstore"
675
 
 
703
  source = None
704
  chat_memory = None
705
  async for s in state:
706
+ documents = s.get("messages")
707
+ question = s.get("question")
708
+ source = s.get("source")
709
+ chat_memory = s.get("chat_memory")
710
 
711
  # Prompt Web Search generation
712
  if source == "websearch":
 
745
  """
746
  #print("---GENERATE---")
747
 
748
+ #documents = state.get('messages')[-1].content #documents
749
+ #source = state.get('source')
750
+ #question = state.get('question')
751
+ source = state.get('source')
752
 
753
  ##Triggered by hallucination_grade? 2025-02-21 - NOT USER being edged to END atm
754
  #2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
755
+ #if state.get('generation') is None:
756
+ # if state.get('web_search') == "Yes":
757
  # await websearch(state, config)
758
  # else:
759
  # state.rag = "1"
 
775
 
776
  configuration = AgentConfiguration.from_runnable_config(config)
777
 
778
+ question = state.get('question')
779
+ chat_memory = state.get('chat_memory')
780
 
781
  llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
782
 
 
824
  return {"safety_status": state}
825
 
826
  async def choose_next(state: OverallState):
827
+ # Removed and len(state.get('safety_status')) > 0
828
+ if state.get('safety_status') is not None and state.get('safety_status'[0]) == 'no':
829
  return "exit"
830
  else:
831
  return "route"
832
 
833
  class SafetyCheck:
834
+ def apm_safety_check(self, question: str, config: RunnableConfig):
835
 
836
  configuration = AgentConfiguration.from_runnable_config(config)
 
837
 
838
  safety_prompt = pull('learn-it-all-do-it-all/ea4all_apm_safety_check')
839
 
 
856
  self._safety_run = self.apm_safety_check
857
 
858
  def __call__(self, state: OverallState, config: RunnableConfig) -> dict[str, list]:
859
+ question = state['messages'][-1].content
860
+
861
  try:
862
+ response = getattr(self, '_safety_run')(question, config)
863
+ return {"safety_status": [response['safety_status'][0], "", question], "question": [question]}
864
  except Exception as e:
865
+ return {"safety_status": ['no', e, question]}
866
 
867
  ##BUILD APM Graph
868
  # Build graph
869
+ workflow = StateGraph(state_schema=OverallState, input_schema=InputState, output_schema=OutputState, config_schema=AgentConfiguration)
870
 
871
  # Define the nodes
872
  workflow.add_node("safety_check",SafetyCheck())
 
908
 
909
  # Compile
910
  apm_graph = workflow.compile()
911
+ apm_graph.name = "ea4all_talk_to_your_landscape"
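Note: most of the churn in this file is one mechanical migration: the APM state moved from a dataclass (attribute access via getattr) to a TypedDict/MessagesState (key access via .get). A minimal sketch of why the call sites had to change (the state shape here is illustrative, not the real OverallState):

    from typing import Optional
    from typing_extensions import TypedDict

    class DemoState(TypedDict, total=False):
        question: str
        source: Optional[str]
        rag: Optional[int]

    state: DemoState = {"question": "Who owns the CRM platform?"}

    print(state.get("source"))   # None: missing keys need .get() or a default
    print(state.get("rag", 1))   # 1: mirrors rag_input = state.get('rag', 1) above
    # state.question would raise AttributeError: TypedDict instances are plain
    # dicts at runtime, so the old dataclass-style attribute access no longer works.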
ea4all/src/ea4all_apm/state.py CHANGED
@@ -4,9 +4,9 @@ This module defines the state structures used in the APM graph. It includes
4
  definitions for agent state, input state, and router classification schema.
5
  """
6
 
7
- from dataclasses import dataclass, field
8
  from typing import Optional, Literal, List, Tuple
9
  from typing_extensions import TypedDict
 
10
 
11
  class Router(TypedDict):
12
  """Classify a user query."""
@@ -16,8 +16,7 @@ class Router(TypedDict):
16
  # Optional, the InputState is a restricted version of the State that is used to
17
  # define a narrower interface to the outside world vs. what is maintained
18
  # internally.
19
- @dataclass(kw_only=True)
20
- class InputState:
21
  """Represents the input state for the agent.
22
 
23
  This class defines the structure of the input state, which includes
@@ -27,20 +26,17 @@ class InputState:
27
  """
28
 
29
  """Attributes:
30
- question: user question
31
  """
32
- question: str
33
 
34
- @dataclass(kw_only=True)
35
- class OutputState:
36
  """Represents the output schema for the APM agent."""
37
  question: str
38
- messages: Optional[List[str]] = None
39
- generation: Optional[str] = None
40
- source: Optional[str] = None
41
  """Answer to user's Architecture IT Landscape question about ."""
42
 
43
- @dataclass(kw_only=True)
44
  class OverallState(InputState, OutputState):
45
  """State of the APM graph / agent."""
46
 
@@ -52,8 +48,8 @@ class OverallState(InputState, OutputState):
52
  rag: last RAG approach used
53
  chat_memory: user chat memory
54
  """
55
- safety_status: Optional[Tuple[str, str, str]] = None
56
- router: Optional[Router] = None
57
- rag: Optional[str] = None
58
- chat_memory: Optional[str] = None
59
- retrieved: Optional[List[str]] = None
 
4
  definitions for agent state, input state, and router classification schema.
5
  """
6
 
 
7
  from typing import Optional, Literal, List, Tuple
8
  from typing_extensions import TypedDict
9
+ from langgraph.graph import MessagesState
10
 
11
  class Router(TypedDict):
12
  """Classify a user query."""
 
16
  # Optional, the InputState is a restricted version of the State that is used to
17
  # define a narrower interface to the outside world vs. what is maintained
18
  # internally.
19
+ class InputState(MessagesState):
 
20
  """Represents the input state for the agent.
21
 
22
  This class defines the structure of the input state, which includes
 
26
  """
27
 
28
  """Attributes:
29
+ message: user question
30
  """
31
+ pass
32
 
33
+ class OutputState(TypedDict):
 
34
  """Represents the output schema for the APM agent."""
35
  question: str
36
+ generation: Optional[str]
37
+ source: Optional[str]
 
38
  """Answer to user's Architecture IT Landscape question about ."""
39
 
 
40
  class OverallState(InputState, OutputState):
41
  """State of the APM graph / agent."""
42
 
 
48
  rag: last RAG approach used
49
  chat_memory: user chat memory
50
  """
51
+ safety_status: Optional[Tuple[str, str, str]]
52
+ router: Optional[Router]
53
+ rag: Optional[str]
54
+ chat_memory: Optional[str]
55
+ retrieved: Optional[List[str]]
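Note: with InputState now subclassing LangGraph's MessagesState, the graph's public input is a messages list rather than a bare question string. A hedged sketch of the resulting call shape (apm_graph as compiled above; the question text and import path are assumptions):

    import asyncio

    from langchain_core.messages import HumanMessage

    from ea4all.src.ea4all_apm.graph import apm_graph  # assumed import path

    async def main():
        # MessagesState provides a single 'messages' channel with an append reducer.
        result = await apm_graph.ainvoke(
            input={"messages": [HumanMessage(content="Which apps support billing?")]}
        )
        print(result.get("generation"))  # OutputState exposes the generated answer

    asyncio.run(main())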
ea4all/src/ea4all_gra/configuration.py CHANGED
@@ -25,7 +25,7 @@ class AgentConfiguration(BaseConfiguration):
25
  )
26
 
27
  recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "integer"}}] = field(
28
- default=10,
29
  metadata={
30
  "description": "The maximum number of times the agent can recursively call itself."
31
  },
@@ -39,7 +39,7 @@ class AgentConfiguration(BaseConfiguration):
39
  )
40
 
41
  ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
42
- default="Frontend",
43
  metadata={
44
  "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
45
  },
 
25
  )
26
 
27
  recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "integer"}}] = field(
28
+ default=5,
29
  metadata={
30
  "description": "The maximum number of times the agent can recursively call itself."
31
  },
 
39
  )
40
 
41
  ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
42
+ default="frontend", #"interrupt", #
43
  metadata={
44
  "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
45
  },
ea4all/src/ea4all_gra/graph.py CHANGED
@@ -32,20 +32,23 @@ for required libraries and modules.
32
  - Refactored State classes to OverallState, InputState, OutputState
33
  - Task-1, Task-2, Task-3 State classes changed to TypedDicts
34
  - Review what's best content to provide Retrieve with requirements or intent
 
 
 
 
 
 
35
  """
36
 
37
-
38
  #core libraries
39
  from langchain_core.runnables import RunnableConfig
40
  from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
41
  from langchain_core.prompts import ChatPromptTemplate
42
  from langchain_core.messages import (
43
  AIMessage,
44
- SystemMessage,
45
  HumanMessage,
46
  )
47
  from langchain_core.output_parsers import (
48
- PydanticOutputParser,
49
  JsonOutputKeyToolsParser
50
  )
51
  from langgraph.graph import (
@@ -53,7 +56,6 @@ from langgraph.graph import (
53
  StateGraph,
54
  )
55
  from langgraph.types import Command, interrupt
56
- from langgraph.checkpoint.memory import MemorySaver
57
 
58
  from langchain import hub
59
 
@@ -108,7 +110,7 @@ async def togaf_ask_human(state: OverallState, config: RunnableConfig):
108
 
109
  print(f"--- TOGAF AGENTIC team --- got an answer and processing user input: {response}")
110
 
111
- business_query = load_mock_content(response['user_feedback'])
112
  else:
113
  business_query = state.get('business_query')
114
 
@@ -251,7 +253,7 @@ def get_last_message(state: OverallState) -> dict:
251
 
252
  def join_graph(state: OverallState) -> dict:
253
  results = {}
254
- results['messages'] = [state.get('business_query')[-1]]
255
  results['next'] = state.get('next')
256
  if state.get('business_query'):
257
  results['business_query'] = state.get('business_query')
@@ -344,7 +346,9 @@ def return_2user(state:OverallState):
344
 
345
  async def enter_graph(state:dict, config: RunnableConfig) -> dict:
346
 
347
- print(f"--- Entered TOGAF AGENTIC team to --- {state.get('business_query')}") #state.get('business_query')[-1].content
 
 
348
  #if isinstance(state, dict):
349
  # user_feedback = state.get('user_feedback') if state.get('user_feedback') else state['messages'][-1].content
350
  #else:
@@ -352,7 +356,8 @@ async def enter_graph(state:dict, config: RunnableConfig) -> dict:
352
 
353
  #busines_query = load_mock_content(state.get('user_feedback')),
354
 
355
- business_query = state['business_query'][-1]['content']
 
356
 
357
  return {"business_query": business_query}
358
 
@@ -407,4 +412,4 @@ workflow.set_entry_point("enter_graph")
407
 
408
  #memory = MemorySaver()
409
  togaf_graph = workflow.compile() #checkpointer=memory)
410
- togaf_graph.name = "Togaf_reference_architecture_graph"
 
32
  - Refactored State classes to OverallState, InputState, OutputState
33
  - Task-1, Task-2, Task-3 State classes changed to TypedDicts
34
  - Review what's best content to provide Retrieve with requirements or intent
35
+ #20250614
36
+ - ask_human input changed to business_query from user_feedback key
37
+ - enter_graph: (line 358) business_query = state.get('business_query') needs to be reviewed when using w/ Gradio (build & MCP versions)
38
+ - assess_asis: 'Recursion limit of 25 reached without hitting a stop condition'
39
+ #20250615
40
+ - Recursion limit fixed w/ RemainingSteps, https://langchain-ai.github.io/langgraph/how-tos/graph-api/?h=remainingsteps
41
  """
42
 
 
43
  #core libraries
44
  from langchain_core.runnables import RunnableConfig
45
  from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
46
  from langchain_core.prompts import ChatPromptTemplate
47
  from langchain_core.messages import (
48
  AIMessage,
 
49
  HumanMessage,
50
  )
51
  from langchain_core.output_parsers import (
 
52
  JsonOutputKeyToolsParser
53
  )
54
  from langgraph.graph import (
 
56
  StateGraph,
57
  )
58
  from langgraph.types import Command, interrupt
 
59
 
60
  from langchain import hub
61
 
 
110
 
111
  print(f"--- TOGAF AGENTIC team --- got an answer and processing user input: {response}")
112
 
113
+ business_query = load_mock_content(response['business_query'])
114
  else:
115
  business_query = state.get('business_query')
116
 
 
253
 
254
  def join_graph(state: OverallState) -> dict:
255
  results = {}
256
+ #results['messages'] = [state.get('business_query')]
257
  results['next'] = state.get('next')
258
  if state.get('business_query'):
259
  results['business_query'] = state.get('business_query')
 
346
 
347
  async def enter_graph(state:dict, config: RunnableConfig) -> dict:
348
 
349
+ business_query = state['messages'][-1].content if not isinstance(state['messages'][-1].content, List) else state['messages'][-1].content[0]['text']
350
+
351
+ print(f"--- Entered TOGAF AGENTIC team to --- {business_query}") #state.get('business_query')[-1].content
352
  #if isinstance(state, dict):
353
  # user_feedback = state.get('user_feedback') if state.get('user_feedback') else state['messages'][-1].content
354
  #else:
 
356
 
357
  #busines_query = load_mock_content(state.get('user_feedback')),
358
 
359
+ #business_query = state.get('business_query')
360
+ #business_query = state['business_query'][-1]['content']
361
 
362
  return {"business_query": business_query}
363
 
 
412
 
413
  #memory = MemorySaver()
414
  togaf_graph = workflow.compile() #checkpointer=memory)
415
+ togaf_graph.name = "ea4all_architecture_blueprint"
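Note: the new enter_graph has to cope with HumanMessage.content being either a plain string or a list of content parts (the shape multimodal frontends such as Gradio produce). A small illustration of the branch it implements (the messages are invented):

    from langchain_core.messages import HumanMessage

    plain = HumanMessage(content="Modernise the payments platform")
    multimodal = HumanMessage(content=[{"type": "text", "text": "Modernise the payments platform"}])

    for msg in (plain, multimodal):
        # Mirrors the isinstance(..., List) branch in enter_graph above.
        text = msg.content if not isinstance(msg.content, list) else msg.content[0]["text"]
        print(text)  # both shapes yield the same business query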
ea4all/src/ea4all_gra/state.py CHANGED
@@ -1,16 +1,15 @@
1
- from pydantic import Field
2
  from typing_extensions import (
3
- Annotated,
4
  TypedDict,
5
  List
6
  )
7
- import operator
8
  from typing import (
9
  Optional,
10
  )
11
  from dataclasses import dataclass, field
12
  from typing import Optional
13
 
 
 
14
  from ea4all.src.ea4all_gra.data import (
15
  ListRequirement,
16
  ListObjective,
@@ -25,8 +24,7 @@ from ea4all.src.ea4all_gra.data import (
25
  # Optional, the InputState is a restricted version of the State that is used to
26
  # define a narrower interface to the outside world vs. what is maintained
27
  # internally.
28
- @dataclass(kw_only=True)
29
- class InputState(TypedDict):
30
  """Represents the input state for the agent.
31
 
32
  This class defines the structure of the input state, which includes
@@ -40,9 +38,9 @@ class InputState(TypedDict):
40
  """
41
  #business_query: Optional[Annotated[List[str], Field(
42
  # description="A business requirement is the starting point of the TOGAF process."), operator.add]]
43
- business_query: str
 
44
 
45
- @dataclass(kw_only=True)
46
  class OutputState(TypedDict):
47
  """Represents te output state for the agent."""
48
  vision_target: Optional[str]
@@ -72,6 +70,7 @@ class OverallState(InputState, OutputState):
72
  - next (Optional[str]): Represents the next step in the Togaf system.
73
  """
74
 
 
75
  query_status: Optional[bool]
76
  stakeholder: Optional[StakeholderList]
77
  principles: Optional[Principles]
 
 
1
  from typing_extensions import (
 
2
  TypedDict,
3
  List
4
  )
 
5
  from typing import (
6
  Optional,
7
  )
8
  from dataclasses import dataclass, field
9
  from typing import Optional
10
 
11
+ from langgraph.graph import MessagesState
12
+
13
  from ea4all.src.ea4all_gra.data import (
14
  ListRequirement,
15
  ListObjective,
 
24
  # Optional, the InputState is a restricted version of the State that is used to
25
  # define a narrower interface to the outside world vs. what is maintained
26
  # internally.
27
+ class InputState(MessagesState):
 
28
  """Represents the input state for the agent.
29
 
30
  This class defines the structure of the input state, which includes
 
38
  """
39
  #business_query: Optional[Annotated[List[str], Field(
40
  # description="A business requirement is the starting point of the TOGAF process."), operator.add]]
41
+ #business_query: str
42
+ pass
43
 
 
44
  class OutputState(TypedDict):
45
  """Represents te output state for the agent."""
46
  vision_target: Optional[str]
 
70
  - next (Optional[str]): Represents the next step in the Togaf system.
71
  """
72
 
73
+ business_query: str
74
  query_status: Optional[bool]
75
  stakeholder: Optional[StakeholderList]
76
  principles: Optional[Principles]
ea4all/src/ea4all_gra/togaf_task1/graph.py CHANGED
@@ -122,4 +122,4 @@ task1_builder.set_finish_point('AssessBusinessQuery')
122
 
123
  # Compile
124
  task1_graph = task1_builder.compile()
125
- task1_graph.name = "togaf_assess_business_query_graph"
 
122
 
123
  # Compile
124
  task1_graph = task1_builder.compile()
125
+ task1_graph.name = "togaf_identify_business_requirements"
ea4all/src/ea4all_gra/togaf_task1/state.py CHANGED
@@ -43,7 +43,8 @@ class Task1State(InputState):
43
  capability: list of business capabilities to deliver intent and requirements
44
  """
45
 
46
- messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
 
47
  team_members: Optional[List[str]]
48
  requirement: Optional[ListRequirement]
49
  intent: Optional[ListObjective]
 
43
  capability: list of business capabilities to deliver intent and requirements
44
  """
45
 
46
+ #messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
47
+ business_query: str
48
  team_members: Optional[List[str]]
49
  requirement: Optional[ListRequirement]
50
  intent: Optional[ListObjective]
ea4all/src/ea4all_gra/togaf_task2/graph.py CHANGED
@@ -1,7 +1,7 @@
1
  import ast
2
 
3
  #core libraries
4
- from langchain_core.runnables import RunnableConfig, RunnableSerializable
5
 
6
  from langchain_core.messages import (
7
  AIMessage,
@@ -42,9 +42,6 @@ from ea4all.src.shared import vectorstore
42
  from ea4all.src.ea4all_gra.togaf_task2.state import Task2State
43
 
44
  from ea4all.src.ea4all_apm.graph import get_retrieval_chain
45
- from ea4all.src.ea4all_apm import configuration as apm_config
46
-
47
- from ea4all.src.ea4all_gra.state import OverallState
48
 
49
  # Retrieval Grader score whether retrieved IT Landscape address business query
50
  def retrieval_grader(model):
@@ -145,7 +142,6 @@ async def retrieve(state:Task2State, config: RunnableConfig):
145
 
146
  # Retrieval
147
  rag_input = 5
148
- #faiss_index = set_faiss_index(config)
149
  with vectorstore.make_retriever(config) as _retriever:
150
  retriever = _retriever
151
 
@@ -155,13 +151,15 @@ async def retrieve(state:Task2State, config: RunnableConfig):
155
  {"standalone_question": business_query},
156
  config={"recursion_limit":configuration.ea4all_recursion_limit})
157
 
158
- name = state['next']
159
-
160
  ## return Document page_content
161
  content = ';'.join(asis.page_content.strip() for asis in landscape_asis)
 
 
 
 
162
  return {
163
  "messages": [AIMessage(content=content, name=name)],
164
- "landscape_asis": landscape_asis,
165
  "business_query": business_query
166
  }
167
 
@@ -324,6 +322,8 @@ def grade_generation_v_documents_and_question(state:Task2State, config: Runnable
324
  configuration = AgentConfiguration.from_runnable_config(config)
325
  model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
326
 
 
 
327
 
328
  print("---CHECK HALLUCINATIONS---")
329
  business_query = state['business_query']
@@ -393,10 +393,7 @@ def grade_landscape_asis_v_capability_gap(state:Task2State, config: RunnableConf
393
  content = "No applications identified"
394
 
395
  if state['biz_capability']:
396
- capability = ', '.join(ast.literal_eval(state['biz_capability'])).replace("'", ", ")
397
- #bcm = ast.literal_eval(str(state['biz_capability']))
398
- #capability = bcm[1:-1].replace("'","")
399
- #capability = state['biz_capability']
400
  else:
401
  capability = "No business capabilities identified"
402
 
@@ -447,6 +444,7 @@ task2_builder.add_conditional_edges(
447
  "not supported": "generate",
448
  "useful": "grade_landscape_gap",
449
  "not useful": "transform_query",
 
450
  },
451
  )
452
 
@@ -454,4 +452,4 @@ task2_builder.add_edge("grade_landscape_gap", END)
454
 
455
  # Compile
456
  task2_graph = task2_builder.compile()
457
- task2_graph.name = "Togaf_assess_asis_graph"
 
1
  import ast
2
 
3
  #core libraries
4
+ from langchain_core.runnables import RunnableConfig
5
 
6
  from langchain_core.messages import (
7
  AIMessage,
 
42
  from ea4all.src.ea4all_gra.togaf_task2.state import Task2State
43
 
44
  from ea4all.src.ea4all_apm.graph import get_retrieval_chain
 
 
 
45
 
46
  # Retrieval Grader score whether retrieved IT Landscape address business query
47
  def retrieval_grader(model):
 
142
 
143
  # Retrieval
144
  rag_input = 5
 
145
  with vectorstore.make_retriever(config) as _retriever:
146
  retriever = _retriever
147
 
 
151
  {"standalone_question": business_query},
152
  config={"recursion_limit":configuration.ea4all_recursion_limit})
153
 
 
 
154
  ## return Document page_content
155
  content = ';'.join(asis.page_content.strip() for asis in landscape_asis)
156
+
157
+ name = state['next']
158
+
159
+
160
  return {
161
  "messages": [AIMessage(content=content, name=name)],
162
+ "landscape_asis": landscape_asis,
163
  "business_query": business_query
164
  }
165
 
 
322
  configuration = AgentConfiguration.from_runnable_config(config)
323
  model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
324
 
325
+ if state.get('remaining_steps') <= 2:
326
+ return "no match"
327
 
328
  print("---CHECK HALLUCINATIONS---")
329
  business_query = state['business_query']
 
393
  content = "No applications identified"
394
 
395
  if state['biz_capability']:
396
+ capability = ', '.join(ast.literal_eval(str(state['biz_capability']))).replace("'", ", ")
 
 
 
397
  else:
398
  capability = "No business capabilities identified"
399
 
 
444
  "not supported": "generate",
445
  "useful": "grade_landscape_gap",
446
  "not useful": "transform_query",
447
+ "no match": "grade_landscape_gap"
448
  },
449
  )
450
 
 
452
 
453
  # Compile
454
  task2_graph = task2_builder.compile()
455
+ task2_graph.name = "togaf_assess_current_landscape"
ea4all/src/ea4all_gra/togaf_task2/state.py CHANGED
@@ -16,6 +16,8 @@ from langchain_core.messages import (
16
 
17
  from langchain_core.documents import Document
18
 
 
 
19
  from ea4all.src.ea4all_gra.data import (
20
  BusinessCapability,
21
  CapabilityGap,
@@ -41,10 +43,13 @@ class Task2State(InputState):
41
  landscape_gap: business capability support gap
42
  """
43
 
44
- messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
 
45
  team_members: Optional[List[str]]
46
  landscape_asis: Optional[List[Document]]
47
  identified_asis: Optional[LandscapeAsIs]
48
  biz_capability: Optional[BusinessCapability]
49
  landscape_gap: Optional[CapabilityGap]
50
  next: Optional[str]
 
 
 
16
 
17
  from langchain_core.documents import Document
18
 
19
+ from langgraph.managed.is_last_step import RemainingSteps
20
+
21
  from ea4all.src.ea4all_gra.data import (
22
  BusinessCapability,
23
  CapabilityGap,
 
43
  landscape_gap: business capability support gap
44
  """
45
 
46
+ #messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
47
+ business_query: str
48
  team_members: Optional[List[str]]
49
  landscape_asis: Optional[List[Document]]
50
  identified_asis: Optional[LandscapeAsIs]
51
  biz_capability: Optional[BusinessCapability]
52
  landscape_gap: Optional[CapabilityGap]
53
  next: Optional[str]
54
+ remaining_steps: RemainingSteps
55
+
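Note: remaining_steps is LangGraph's managed value for recursion budgeting; it counts down toward the graph's recursion limit, which is what lets grade_generation_v_documents_and_question above bail out with "no match" before the 'Recursion limit ... reached' error fires. A minimal, self-contained illustration (node and state names invented):

    from typing_extensions import TypedDict
    from langgraph.graph import StateGraph, START, END
    from langgraph.managed.is_last_step import RemainingSteps

    class LoopState(TypedDict):
        attempts: int
        remaining_steps: RemainingSteps  # populated by LangGraph, not by nodes

    def worker(state: LoopState):
        return {"attempts": state["attempts"] + 1}

    def should_continue(state: LoopState):
        # Stop early instead of raising 'Recursion limit ... reached'.
        if state["remaining_steps"] <= 2:
            return END
        return "worker"

    builder = StateGraph(LoopState)
    builder.add_node("worker", worker)
    builder.add_edge(START, "worker")
    builder.add_conditional_edges("worker", should_continue)
    graph = builder.compile()
    print(graph.invoke({"attempts": 0}))  # halts before exhausting the step budget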
ea4all/src/ea4all_gra/togaf_task3/graph.py CHANGED
@@ -244,4 +244,4 @@ workflow.set_entry_point("generate_reference_architecture")
244
 
245
  # Compile
246
  task3_graph = workflow.compile()
247
- task3_graph.name = "Togaf_generate_tobe_graph"
 
244
 
245
  # Compile
246
  task3_graph = workflow.compile()
247
+ task3_graph.name = "togaf_generate_blueprint"
ea4all/src/ea4all_gra/togaf_task3/state.py CHANGED
@@ -49,7 +49,8 @@ class Task3State(InputState):
49
  landscape_gap: list of capabilities not supported by as-is landscape
50
  """
51
 
52
- messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
 
53
  team_members: Optional[List[str]]
54
  landscape_asis: Optional[List[str]]
55
  identified_asis: Optional[LandscapeAsIs]
 
49
  landscape_gap: list of capabilities not supported by as-is landscape
50
  """
51
 
52
+ #messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
53
+ business_query: str
54
  team_members: Optional[List[str]]
55
  landscape_asis: Optional[List[str]]
56
  identified_asis: Optional[LandscapeAsIs]
ea4all/src/ea4all_indexer/graph.py CHANGED
@@ -54,4 +54,4 @@ builder.add_edge(START, "apm_indexer")
54
 
55
  # Compile into a graph object that you can invoke and deploy.
56
  indexer_graph = builder.compile()
57
- indexer_graph.name = "EA4ALL APM Indexer"
 
54
 
55
  # Compile into a graph object that you can invoke and deploy.
56
  indexer_graph = builder.compile()
57
+ indexer_graph.name = "ea4all_bring_your_onw_data"
ea4all/src/ea4all_mcp/configuration.py ADDED
@@ -0,0 +1,21 @@
+"""Define the configurable parameters for the graph."""
+
+from dataclasses import dataclass, field
+from typing import Annotated
+
+from ea4all.src.shared.configuration import BaseConfiguration
+
+@dataclass(kw_only=True)
+class AgentConfiguration(BaseConfiguration):
+    """Configuration class.
+    """
+    mcp_docs_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
+        default="gpt-4o-mini",
+        metadata={
+            "description": "The language model used by ea4all_aws_ms react agent."
+        },
+    )
+
+
+
+    pass
ea4all/src/ea4all_mcp/graph.py ADDED
@@ -0,0 +1,97 @@
+"""
+graph.py - MCP Agent Graph for AWS and Microsoft Documentation
+
+This module defines the LangGraph agent for the MCP (Multi-Cloud Platform) assistant,
+enabling intelligent routing and answering of queries related to AWS and Microsoft technologies.
+
+Key Features:
+- Integrates with AWS and Microsoft MCP servers for documentation and diagram generation.
+- Dynamically loads available tools from both AWS and Microsoft MCP endpoints.
+- Provides a unified agent that can answer questions about AWS services, Microsoft technologies, and generate AWS diagrams.
+- Uses OpenAI GPT-4o-mini as the LLM backend for reasoning and tool selection.
+- Returns expert-level answers or defers when information is insufficient.
+
+Main Functions:
+- load_tools(): Asynchronously loads tool definitions from AWS and Microsoft MCP servers.
+- build_graph(): Constructs and returns the LangGraph agent configured with the appropriate tools and system prompt.
+
+Intended Usage:
+Import and call `build_graph()` to obtain a ready-to-use agent for enterprise architecture, cloud, and documentation queries.
+
+References:
+- [AWS MCP Servers](https://github.com/awslabs/mcp/tree/main/src)
+- [MicrosoftDocs MCP Server](https://github.com/microsoftdocs/mcp)
+"""
+import asyncio
+
+from langchain_mcp_adapters.client import MultiServerMCPClient
+from langgraph.prebuilt import create_react_agent
+
+from ea4all.src.ea4all_mcp.configuration import AgentConfiguration
+from ea4all.src.ea4all_mcp.prompts import MCP_AWS_MS_SYSTEM_PROMPT
+
+from ea4all.src.shared.utils import (
+    get_llm_client,
+)
+
+async def load_tools():
+    # Set up the MCP client for AWS Documentation
+    mcp_client = MultiServerMCPClient(
+        {
+            "awslabs.aws-documentation-mcp-server": {
+                "transport": "stdio",
+                "command": "uvx",
+                "args": ["awslabs.aws-documentation-mcp-server@latest"],
+                "env": {
+                    "FASTMCP_LOG_LEVEL": "ERROR",
+                    "AWS_DOCUMENTATION_PARTITION": "aws"
+                },
+                "disabled": False,
+                "autoApprove": [],
+            },
+            "awslabs.aws-diagram-mcp-server": {
+                "transport": "stdio",
+                "command": "uvx",
+                "args": ["awslabs.aws-diagram-mcp-server"],
+                "env": {
+                    "FASTMCP_LOG_LEVEL": "ERROR"
+                },
+                "autoApprove": [],
+                "disabled": False
+            },
+            "microsoft_mcp_tools": {
+                "url": "https://learn.microsoft.com/api/mcp",
+                "transport": "streamable_http",
+            }
+        }
+    )
+
+    # Fetch the tools from the AWS MCP server
+    mcp_tools = await mcp_client.get_tools()
+
+    return mcp_tools
+
+async def build_graph():
+    """Build the LangGraph graph for the MCP agent."""
+    # Define the graph structure
+    system_prompt = """
+    """
+
+    mcp_tools = await load_tools()
+
+    llm = get_llm_client(model=AgentConfiguration.mcp_docs_model)
+
+    # Create the LangGraph agent for AWS documentation
+    ea4all_docs = create_react_agent(
+        model=llm,
+        prompt=MCP_AWS_MS_SYSTEM_PROMPT,
+        tools=mcp_tools,
+        debug=True,
+        #state_schema=OverallState,
+    )
+
+    ea4all_docs.name = "ea4all_mcp_aws_microsoft_docs_agent"
+
+    return ea4all_docs
+
+mcp_docs_graph = asyncio.run(build_graph())
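Note: a sketch of how the new mcp_docs_graph could be exercised once the MCP servers are reachable (the question is illustrative; uvx and network access are needed because the module starts the servers via asyncio.run(build_graph()) at import time):

    import asyncio

    from ea4all.src.ea4all_mcp.graph import mcp_docs_graph  # assumed import path

    async def main():
        result = await mcp_docs_graph.ainvoke(
            {"messages": [{"role": "user", "content": "How do I tag an S3 bucket?"}]}
        )
        # create_react_agent returns the full message history; the last entry is the answer.
        print(result["messages"][-1].content)

    asyncio.run(main())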
ea4all/src/ea4all_mcp/prompts.py ADDED
@@ -0,0 +1,15 @@
+################################
+##COLLECTION of MCP react_agents system prompt
+################################
+
+##AWS & Microsoft Documentation system prompt
+MCP_AWS_MS_SYSTEM_PROMPT = """
+## Querying Microsoft Documentation, AWS Documentation, and AWS Diagram Generation
+You have access to the following MCP server
+#1 `microsoft_mcp_tools` - this tool allows you to search through Microsoft's latest official documentation, and that information might be more detailed or newer than what's in your training data set.
+When handling questions around how to work with native Microsoft technologies, such as C#, F#, ASP.NET Core, Microsoft.Extensions, NuGet, Entity Framework, the `dotnet` runtime - please use this tool for research purposes when dealing with specific / narrowly defined questions that may occur.
+#2 `awslabs.aws-diagram-mcp-server` - this tool allows you to generate AWS diagrams, sequence diagrams, flow diagrams, and class diagrams.
+#3 `awslabs.aws-documentation-mcp-server` - this tool allows you to search through AWS's latest official documentation, which may be more up-to-date than your training data. For any questions about AWS products, services, APIs, or best practices, use this tool to provide accurate and current information.
+
+For any other questions, reply with "Hey, I am an expert assistant on AWS and Microsoft! I don't know or I don't have enough information to answer that question.
+"""
ea4all/src/ea4all_mcp/state.py ADDED
@@ -0,0 +1,29 @@
+"""State management for the MCP graph."""
+from typing_extensions import TypedDict
+
+from langgraph.prebuilt.chat_agent_executor import AgentState
+
+class InputState(TypedDict):
+    """Represents the input state for the graph.
+
+    This class is used to pass the user question to the graph.
+    It contains a single field, `question`
+    """
+
+    pass
+
+# The index state defines the simple IO for the single-node index graph
+class OutputState(TypedDict):
+    """Represents the output schema for the graph.
+    """
+
+    pass
+
+class OverallState(AgentState, InputState, OutputState):
+    """Represents the overall state of the graph.
+
+    This class combines the input and output states, allowing for
+    both input and output to be managed within
+    the same state.
+    """
+    pass
ea4all/src/ea4all_vqa/configuration.py CHANGED
@@ -40,3 +40,10 @@ class AgentConfiguration(BaseConfiguration):
             "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
         },
     )
+
+    vqa_images: Annotated[list[str], {"__template_metadata__": {"kind": "file"}}] = field(
+        default_factory=lambda: [".png"],
+        metadata={
+            "description": "List of image files to be used for visual question and answering. Should be in the form: [\"path/to/image2.png\"]."
+        },
+    )
ea4all/src/ea4all_vqa/graph.py CHANGED
@@ -60,8 +60,6 @@ from ea4all.src.shared.utils import (
60
  _join_paths,
61
  )
62
 
63
- import spaces
64
-
65
  ##Diagram Graph Tools
66
  #Data model Sageguarding
67
  @tool("diagram_safeguard")
@@ -72,8 +70,8 @@ class DiagramV2S(BaseModel):
72
  description: str = Field(description="One sentence describing the reason for being categorised as unsafe or not an architecture image.")
73
 
74
  @tool("vqa_diagram", response_format="content")
75
- @spaces.GPU
76
- async def vqa_diagram(next:str, state: Annotated[OverallState, InjectedState], config: RunnableConfig):
77
  """Diagram Vision Question Answering"""
78
 
79
  print(f"---AGENT VQA PROCESSING QUESTION & ANSWERING---")
@@ -86,8 +84,9 @@ async def vqa_diagram(next:str, state: Annotated[OverallState, InjectedState], c
86
  streaming=configuration.streaming,
87
  )
88
 
89
- question = getattr(state, "question")
90
- raw_image = get_raw_image(getattr(state,'image'))
 
91
 
92
  user_message = HumanMessage(
93
  content=[
@@ -168,8 +167,8 @@ def safeguard_check(state:OverallState, config:RunnableConfig) -> dict:
168
  llm = get_llm_client(configuration.supervisor_model)
169
 
170
  #'raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
171
- question = getattr(state, "question", "Describe the image")
172
- raw_image = get_raw_image(getattr(state,'image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))
173
 
174
  system_message = (
175
  "Act as a safeguarding agent to check whether the image provided is an architecture diagram or flowchart and safe to be processed. "
@@ -193,7 +192,8 @@ def safeguard_check(state:OverallState, config:RunnableConfig) -> dict:
193
  input = {"question": question, "raw_image": raw_image}
194
  result = safeguard_checker.invoke(input=input, config=config)
195
 
196
- return {"safety_status": result}
 
197
 
198
  def call_finish(state:OverallState, config:RunnableConfig) -> dict:
199
 
@@ -209,7 +209,7 @@ def make_supervisor_node(model: BaseChatModel, members: list[str]) -> RunnableLa
209
  "You are an enterprise architecture team supervisor tasked to manage a conversation between the following members: "
210
  "[diagram_description, diagram_object, diagram_improvement, diagram_risk]. "
211
  "Given the user request, use the function below to respond with team member to act next. "
212
- " If none of team member can be used, select 'FINISH'."
213
  )
214
 
215
  class Router(TypedDict):
@@ -221,13 +221,14 @@ def make_supervisor_node(model: BaseChatModel, members: list[str]) -> RunnableLa
221
  """An LLM-based router."""
222
  messages = [
223
  {"role": "system", "content": system_prompt},
224
- ] + getattr(state, 'messages')
225
 
226
  response = await model.with_structured_output(Router, include_raw=True).ainvoke(messages, config=config)
227
 
228
  if isinstance(response, dict):
229
- if response['parsed']['next'] == "FINISH":
230
- return AgentFinish(return_values={"output": response['raw']}, log=response['raw']['content'])
 
231
 
232
  # If the DiagramTagging function was selected, return to the user with the function inputs
233
  tool_call = {"name": "vqa_diagram", "args": {"next": response['parsed']['next']}, "id": "1", "type": "tool_call"}
@@ -294,7 +295,7 @@ def create_team_supervisor(state:OverallState, config:RunnableConfig) -> Runnabl
294
  prompt = ChatPromptTemplate.from_messages(messages).partial(
295
  user_prompt=user_prompt)
296
 
297
- llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, {"question":state.question}))
298
 
299
  supervisor_agent = (
300
  prompt |
@@ -311,41 +312,28 @@ def enter_graph(state:OverallState, config:RunnableConfig) -> Command[Literal['s
311
 
312
  configuration = AgentConfiguration.from_runnable_config(config)
313
 
314
- messages = [
315
- HumanMessage(content=state.question) #messages[-1]['content']),
316
- ]
317
-
318
- #if not configuration.ea4all_ask_human == "interrupt":
319
- # raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
320
- #else:
321
- # image = getattr(state,'image', "")
322
- # raw_image = image if image else _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
323
-
324
- image = getattr(state,'image', None)
325
- if image:
326
- raw_image = state.image #['image_url']['url'].split(',')[1]
327
- else:
328
- raw_image = _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
329
 
330
  return Command(
331
  update={
332
- "messages": messages,
333
- "question": state.question, #messages[-1].content,
334
  "image": raw_image
335
  },
336
  goto='safeguard_check',
337
  )
338
 
339
- return {
340
- "messages": state.messages,
341
- "question": messages[-1].content,
342
- "image": raw_image,
343
- }
344
-
345
  async def choose_next(state: OverallState):
346
  """Choose the next node based on the safety status."""
347
- isArcihitectureImage = getattr(state, 'safety_status', {}).get('isArchitectureImage', False)
348
- isSafe = getattr(state, 'safety_status', {}).get('isSafe', False)
 
 
 
 
 
 
349
 
350
  return "diagram_supervisor" if isArcihitectureImage and isSafe else "final"
351
 
@@ -353,7 +341,7 @@ def build_vqa_graph():
353
  model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
354
  teams_supervisor_node = make_supervisor_node(model, ['diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk'])
355
 
356
- workflow = StateGraph(OverallState, input=InputState, output=OutputState,config_schema=AgentConfiguration) #input=InputState
357
 
358
  #Setup Graph nodes
359
  #Node name CANNOT have blank space - pattern: \'^[a-zA-Z0-9_-]+$\'.", \'type\'
@@ -390,11 +378,11 @@ def build_vqa_graph():
390
  )
391
 
392
  workflow.add_edge("final", END)
393
- workflow.add_edge("tools", END)
394
 
395
  #memory = MemorySaver()
396
  diagram_graph = workflow.compile() #checkpointer=memory)
397
- diagram_graph.name = "DiagramGraph"
398
 
399
  return diagram_graph
400
 
 
     _join_paths,
 )

 ##Diagram Graph Tools
 #Data model Safeguarding
 @tool("diagram_safeguard")

     description: str = Field(description="One sentence describing the reason for being categorised as unsafe or not an architecture image.")

 @tool("vqa_diagram", response_format="content")
+#@spaces.GPU
+async def vqa_diagram(next:str, state: Annotated[InputState, InjectedState], config: RunnableConfig):
     """Diagram Vision Question Answering"""

     print(f"---AGENT VQA PROCESSING QUESTION & ANSWERING---")

         streaming=configuration.streaming,
     )

+    #question = state.get('question')
+    question = state.get('messages')[0].content
+    raw_image = get_raw_image(state.get('image'))

     user_message = HumanMessage(
         content=[

     llm = get_llm_client(configuration.supervisor_model)

     #'raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
+    question = state.get('question')
+    raw_image = get_raw_image(state.get('image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))

     system_message = (
         "Act as a safeguarding agent to check whether the image provided is an architecture diagram or flowchart and safe to be processed. "

     input = {"question": question, "raw_image": raw_image}
     result = safeguard_checker.invoke(input=input, config=config)

+    #return Command(update={"safety_status": result, "question": question})
+    return {'safety_status': result}

 def call_finish(state:OverallState, config:RunnableConfig) -> dict:

         "You are an enterprise architecture team supervisor tasked to manage a conversation between the following members: "
         "[diagram_description, diagram_object, diagram_improvement, diagram_risk]. "
         "Given the user request, use the function below to respond with team member to act next. "
+        " If no team member can be used or the task has been completed, select 'FINISH'."
     )

     class Router(TypedDict):

         """An LLM-based router."""
         messages = [
             {"role": "system", "content": system_prompt},
+        ] + (state.get('messages') or [])

         response = await model.with_structured_output(Router, include_raw=True).ainvoke(messages, config=config)

         if isinstance(response, dict):
+            if response['parsed']['next'] == "FINISH":
+                output = state['messages'][-1].content if state['messages'][-1].type == 'tool' and state['messages'][-1].status == 'success' else response['raw']
+                return AgentFinish(return_values={"output": response['raw'].content}, log=str(output))

             # If the DiagramTagging function was selected, return to the user with the function inputs
             tool_call = {"name": "vqa_diagram", "args": {"next": response['parsed']['next']}, "id": "1", "type": "tool_call"}

     prompt = ChatPromptTemplate.from_messages(messages).partial(
         user_prompt=user_prompt)

+    llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, {"question":state.get('question')}))

     supervisor_agent = (
         prompt |

     configuration = AgentConfiguration.from_runnable_config(config)

+    raw_image = state.get('image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png'))
+    question = state['messages'][-1].content if state['messages'] else "Describe the image"

     return Command(
         update={
+            "messages": state['messages'][-1],
+            "question": question,
             "image": raw_image
         },
         goto='safeguard_check',
     )

 async def choose_next(state: OverallState):
     """Choose the next node based on the safety status."""
+    isArchitectureImage = ''
+    isSafe = ""
+
+    safety_status = state.get('safety_status', {})
+
+    if safety_status:
+        isArchitectureImage = safety_status.get('isArchitectureImage', False)
+        isSafe = safety_status.get('isSafe', False)

     return "diagram_supervisor" if isArchitectureImage and isSafe else "final"

     model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
     teams_supervisor_node = make_supervisor_node(model, ['diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk'])

+    workflow = StateGraph(OverallState, input_schema=InputState, output_schema=OutputState,config_schema=AgentConfiguration) #input=InputState

     #Setup Graph nodes
     #Node name CANNOT have blank space - pattern: \'^[a-zA-Z0-9_-]+$\'.", \'type\'

     )

     workflow.add_edge("final", END)
+    workflow.add_edge("tools", "diagram_supervisor")

     #memory = MemorySaver()
     diagram_graph = workflow.compile() #checkpointer=memory)
+    diagram_graph.name = "ea4all_talk_to_your_diagram"

     return diagram_graph
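For reference, a minimal sketch of how the recompiled graph can be exercised end to end (assuming the ea4all package is importable; the question and image path are illustrative only):

import asyncio
from langchain_core.messages import HumanMessage
from ea4all.src.ea4all_vqa.graph import diagram_graph

async def main():
    # InputState is now MessagesState-based: a message list plus an optional image path.
    result = await diagram_graph.ainvoke({
        "messages": [HumanMessage(content="Describe the architecture diagram")],
        "image": "multi-app-architecture.png",  # hypothetical sample file
    })
    # Unsafe or non-architecture images route to 'final' and carry only
    # safety_status; otherwise the last message holds the VQA answer.
    if result.get("messages"):
        print(result["messages"][-1].content)
    else:
        print(result.get("safety_status"))

asyncio.run(main())
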
ea4all/src/ea4all_vqa/state.py CHANGED
@@ -3,25 +3,24 @@
 This module defines the state structures used in the VQA graph. It includes
 definitions for agent state, input state.
 """

-import operator
 from dataclasses import dataclass
 from typing import (
     Optional,
     Annotated,
-    Sequence,
 )

-from langchain_core.messages import (
-    BaseMessage,
-)
 from langgraph.graph import MessagesState

 # Optional, the InputState is a restricted version of the State that is used to
 # define a narrower interface to the outside world vs. what is maintained
 # internally.
-@dataclass(kw_only=True)
-class InputState:
     """Represents the input state for the agent.

     This class defines the structure of the input state, which includes
@@ -34,17 +33,16 @@ class InputState:
     question: user question
     image: architecture diagram
     """
-    question: str
-    image: str

 # The index state defines the simple IO for the single-node index graph
-@dataclass(kw_only=True)
-class OutputState:
     """Represents the output schema for the Diagram agent.
     """
-
-    messages: Optional[Annotated[Sequence[MessagesState], operator.add]] = None
-    safety_status: Optional[dict[str,str]] = None

     """Attributes:
     safety_status: safety status of the diagram provided by the user
@@ -60,5 +58,5 @@ class OverallState(InputState, OutputState):
     next: next tool to be called
     """

-    error: Optional[str] = None
-    next: Optional[str] = None
 This module defines the state structures used in the VQA graph. It includes
 definitions for agent state, input state.
 """
+from typing_extensions import TypedDict, NotRequired

 from dataclasses import dataclass
 from typing import (
     Optional,
     Annotated,
 )

+from langchain_core.messages import AnyMessage
+from langgraph.graph.message import add_messages
 from langgraph.graph import MessagesState

 # Optional, the InputState is a restricted version of the State that is used to
 # define a narrower interface to the outside world vs. what is maintained
 # internally.
+#@dataclass(kw_only=True)
+class InputState(MessagesState):
     """Represents the input state for the agent.

     This class defines the structure of the input state, which includes

     question: user question
     image: architecture diagram
     """
+    image: NotRequired[str]

 # The index state defines the simple IO for the single-node index graph
+#@dataclass(kw_only=True)
+class OutputState(TypedDict):
     """Represents the output schema for the Diagram agent.
     """
+    safety_status: dict[str,str]
+    question: str
+    messages: Annotated[list[AnyMessage], add_messages]

     """Attributes:
     safety_status: safety status of the diagram provided by the user

     next: next tool to be called
     """

+    error: NotRequired[str]
+    next: NotRequired[str]
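
A small runnable sketch of what the add_messages annotation on OutputState.messages buys (standard LangGraph behaviour, not code from this repo): node updates are appended to the running message list rather than replacing it.

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph.message import add_messages

# add_messages is the reducer behind MessagesState: merging two updates
# concatenates them (de-duplicating by message id when ids are set).
merged = add_messages([HumanMessage(content="hi")], [AIMessage(content="hello")])
assert [m.type for m in merged] == ["human", "ai"]
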
ea4all/src/graph.py CHANGED
@@ -22,15 +22,43 @@ Note: This module depends on other modules and packages such as langchain_core,
 - langchain_openai: 0.2.9 (0.3.6 issue with max_tokens for HF models)
 #2025-06-03
 - Refactored code to fix problems with linter and type checking (Standard mode)
 """

 from langgraph.types import Command
 from langchain_core.messages import (
-    HumanMessage,
     AIMessage
 )
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.runnables import RunnableConfig

 from langchain import hub

@@ -39,22 +67,22 @@ from langgraph.graph import (
     END,
     StateGraph,
 )
-from langgraph.checkpoint.memory import MemorySaver

 from typing_extensions import Literal, TypedDict
 import uuid

 from ea4all.src.shared.configuration import BaseConfiguration
-from ea4all.src.shared.utils import get_llm_client
-from ea4all.src.shared.state import State
 from ea4all.src.tools.tools import websearch

 from ea4all.src.ea4all_indexer.graph import indexer_graph
 from ea4all.src.ea4all_apm.graph import apm_graph
 from ea4all.src.ea4all_vqa.graph import diagram_graph
 from ea4all.src.ea4all_gra.graph import togaf_graph

-async def call_indexer_apm(state: State, config: RunnableConfig):
     response = await indexer_graph.ainvoke(input={"docs":[]}, config=config)

 def make_supervisor_node(model: BaseChatModel, members: list[str]):
@@ -64,36 +92,37 @@ def make_supervisor_node(model: BaseChatModel, members: list[str]):

     class Router(TypedDict):
         """Worker to route to next. If no workers needed, route to FINISH."""
-        next: Literal["FINISH", "portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]

-    async def supervisor_node(state: State, config: RunnableConfig) -> Command[Literal["portfolio_team", "diagram_team", "blueprint_team", "websearch_team", '__end__']]:

         """An LLM-based router."""
         messages = [
             {"role": "system", "content": system_prompt},
-        ] + [state["messages"][-1]]

-        response = await model.with_structured_output(Router).ainvoke(messages, config=config)

         _goto = "__end__"

         if isinstance(response, dict):
             _goto = response["next"]
             # Ensure _goto is one of the allowed Literal values
-            if _goto not in ["portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]:
                 _goto = "__end__"

-        print(f"---Supervisor got a request--- Question: {state['messages'][-1].content} ==> Routing to {_goto}\n")

-        return Command(
-            #update={"next": _goto},
-            goto=_goto
-        )

     return supervisor_node

-async def call_landscape_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: ##2025-02-21: NOT passing CHAT MEMORY to the APM_graph
-    response = await apm_graph.ainvoke({"question": state["messages"][-1].content}, config=config)

     return Command(
         update={
             "messages": [
@@ -105,33 +134,50 @@ async def call_landscape_agentic(state: State, config: RunnableConfig) -> Comman
         goto="__end__",
     )

-async def call_diagram_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: #NOT passing CHAT MEMORY to the Diagram_graph
-
-    inputs = {
-        "messages": [{"role": "user", "content": state.get('messages')[-1].content}],
-        "question": state['messages'][-1].content, "image":""
-    } #user response

-    response = await diagram_graph.ainvoke(
-        input=inputs,
-        config=config
-    )

     return Command(
         update={
             "messages": [
                 AIMessage(
-                    content=response['messages'][-1].content, name="landscape_agentic"
                 )
             ]
         },
         goto="__end__",
     )

-async def call_togaf_agentic(state: State, config: RunnableConfig) -> Command[Literal["__end__"]]: #NOT passing CHAT MEMORY to the Togaf_graph
-    print(f"---TOGAF ROUTE team node ready to --- CALL_TOGAF_AGENTIC Routing to {state['next']} with User Question: {state['messages'][-1].content}")

-    inputs = {"messages": [{"role": "user", "content": state.get('messages')[-1].content}]} #user response

     response = await togaf_graph.ainvoke(
         input=inputs,
@@ -142,7 +188,7 @@ async def call_togaf_agentic(state: State, config: RunnableConfig) -> Command[Li
         update={
             "messages": [
                 AIMessage(
-                    content=response["messages"][-1].content, name="togaf_route"
                 )
             ]
         },
@@ -150,18 +196,19 @@ async def call_togaf_agentic(state: State, config: RunnableConfig) -> Command[Li
     )

 # Wrap-up websearch answer to user's question
-async def call_generate_websearch(state:State, config: RunnableConfig) -> Command[Literal["__end__"]]:
     from ea4all.src.ea4all_apm.state import OverallState

     if config is not None:
         source = config.get('metadata', {}).get('langgraph_node', 'unknown')

     # Invoke GENERATOR node in the APMGraph
     state_dict = {
-        "documents": state['messages'][-1].content,
-        "web_search": "Yes",
-        "question": state['messages'][-2].content,
-        "source": source
     }

     apm_state = OverallState(**state_dict)
@@ -178,17 +225,54 @@ async def call_generate_websearch(state:State, config: RunnableConfig) -> Comman
         goto="__end__",
     )

-async def blueprint_team(state: State) -> Command[Literal["togaf_route"]]:
     print("---Blueprint team got a request--- Routing to TOGAF_ROUTE node")

     return Command(update={**state}, goto="togaf_route")

-async def diagram_team(state: State) -> Command[Literal["diagram_route"]]:
     print("---Diagram team got a request--- Routing to DIAGRAM_ROUTE node")

     return Command(update={**state}, goto="diagram_route")

-async def super_graph_entry_point(state: State):
     # Generate a unique thread ID
     thread_config = RunnableConfig({"configurable": {"thread_id": str(uuid.uuid4())}})

@@ -219,18 +303,18 @@ def build_super_graph():
     model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
     teams_supervisor_node = make_supervisor_node(model, ["portfolio_team", "diagram_team", "blueprint_team","websearch_team"])

-    super_builder = StateGraph(State, config_schema=BaseConfiguration)
-
     super_builder.add_node("apm_indexer", call_indexer_apm)
     super_builder.add_node("supervisor", teams_supervisor_node)
     super_builder.add_node("portfolio_team", call_landscape_agentic)
-    super_builder.add_node("websearch_team", websearch)
     super_builder.add_node("diagram_team", diagram_team)
     super_builder.add_node("blueprint_team", blueprint_team)
     super_builder.add_node("generate_websearch", call_generate_websearch)
     super_builder.add_node("diagram_route", call_diagram_agentic)
     super_builder.add_node("togaf_route", call_togaf_agentic)
-

     super_builder.add_edge(START, "apm_indexer")
     super_builder.add_edge("apm_indexer", "supervisor")
@@ -243,10 +327,10 @@ def build_super_graph():
     super_builder.add_edge("generate_websearch", END)
     super_builder.add_edge("togaf_route", END)
     super_builder.add_edge("diagram_route", END)

-    #memory = MemorySaver() #With LangGraph API, in-memory is handled directly by the platform
-    super_graph = super_builder.compile() #checkpointer=memory)
-    super_graph.name = "EA4ALL Agentic Workflow Graph"

     return super_graph
 - langchain_openai: 0.2.9 (0.3.6 issue with max_tokens for HF models)
 #2025-06-03
 - Refactored code to fix problems with linter and type checking (Standard mode)
+#2025-06-13
+- Deployed to ea4all-langgraph-lgs
+- Renamed graphs: after removing the .langgraph_api folder, the errors caused by old graph names still being exposed were FIXED.
+#20250624
+- Retrofitted StateGraph to use OverallState, InputState, and OutputState
+#20250626
+- Call_generate_websearch State dict updated to match APMGraph OverallState
+#20250627
+- Diagram graph invoke returning empty message - fixed 30-Jun
+#20250707
+- AgentState refactored to OverallState, InputState, and OutputState
+- Microsoft and AWS MCP tools added to the EA4ALL Agentic Workflow Graph
+#20250717
+- AgentState class is exclusively used for React Agent
+#20250718
+- Refactored super graph InputState as MessagesState schema and OutputState with question attribute
+- Refactored websearch & apm_graph to MessagesState
+- get_llm_client updated with new HF Inference API endpoint https://router.huggingface.co/v1
+- ISSUE: APM Graph not passing internally the Question attribute coming from the Supervisor Node - fixed
+#20250719
+- Refactored ea4all_vqa to use MessagesState
+- Added extract_and_validate_path function to utils.py for extracting and validating file paths from user input
+- ISSUE: VQA Diagram not returning response but safety_check status only
+#20250720
+- Refactored ea4all_mcp to use standard AgentState state schema as react_agent does not use InputState schema
+- ISSUE: VQA Diagram not returning response but safety_check status only - fixed
+-- Re-added messages attribute to OutputState schema
+-- Response added to AgentFinish log
 """

 from langgraph.types import Command
 from langchain_core.messages import (
     AIMessage
 )
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.runnables import RunnableConfig
+from langchain_core.messages import HumanMessage

 from langchain import hub

     END,
     StateGraph,
 )

 from typing_extensions import Literal, TypedDict
 import uuid

 from ea4all.src.shared.configuration import BaseConfiguration
+from ea4all.src.shared.utils import get_llm_client, extract_text_from_message_content, extract_and_validate_path
+from ea4all.src.shared.state import InputState, OutputState, OverallState
 from ea4all.src.tools.tools import websearch

 from ea4all.src.ea4all_indexer.graph import indexer_graph
 from ea4all.src.ea4all_apm.graph import apm_graph
 from ea4all.src.ea4all_vqa.graph import diagram_graph
 from ea4all.src.ea4all_gra.graph import togaf_graph
+from ea4all.src.ea4all_mcp.graph import mcp_docs_graph

+async def call_indexer_apm(state: OverallState, config: RunnableConfig):
     response = await indexer_graph.ainvoke(input={"docs":[]}, config=config)

 def make_supervisor_node(model: BaseChatModel, members: list[str]):

     class Router(TypedDict):
         """Worker to route to next. If no workers needed, route to FINISH."""
+        next: Literal["FINISH", "portfolio_team", "diagram_team", "blueprint_team", "websearch_team", "mcp_docs_team"]

+    async def supervisor_node(state: OverallState, config: RunnableConfig) -> Command[Literal["portfolio_team", "diagram_team", "blueprint_team", "websearch_team", "mcp_docs_team", '__end__']]:

         """An LLM-based router."""
         messages = [
             {"role": "system", "content": system_prompt},
+        ]

+        messages.append({"role": "user", "content": state['messages'][-1].content})
+
+        response = await model.with_structured_output(Router).ainvoke(input=messages, config=config)

         _goto = "__end__"

         if isinstance(response, dict):
             _goto = response["next"]
             # Ensure _goto is one of the allowed Literal values
+            if _goto not in ["portfolio_team", "diagram_team", "blueprint_team", "websearch_team", "mcp_docs_team"]:
                 _goto = "__end__"

+        question = extract_text_from_message_content(state['messages'][-1].content)
+        print(f"---Supervisor got a request--- Question: {question} ==> Routing to {_goto}\n")

+        return Command(update={'question':question}, goto=_goto)

     return supervisor_node

+async def call_landscape_agentic(state: OverallState, config: RunnableConfig) -> Command[Literal['__end__']]: ##2025-02-21: NOT passing CHAT MEMORY to the APM_graph
+
+    response = await apm_graph.ainvoke(state, config=config)
     return Command(
         update={
             "messages": [

         goto="__end__",
     )

+async def call_diagram_agentic(state: OverallState, config: RunnableConfig) -> Command[Literal['__end__']]: #NOT passing CHAT MEMORY to the Diagram_graph
+    from ea4all.src.ea4all_vqa.configuration import AgentConfiguration
+    from ea4all.src.ea4all_vqa.state import InputState

+    image_files = AgentConfiguration.from_runnable_config(config).vqa_images
+    user_input = extract_and_validate_path(text=state.get('question'), allowed_extensions=image_files)
+
+    if user_input['found'] and user_input['is_file']:
+        inputs = InputState(
+            #question=user_input['message'],
+            messages=[HumanMessage(content=user_input['message'])],
+            image=user_input['path']
+        )
+
+        response = await diagram_graph.ainvoke(
+            input=inputs,
+            config=config
+        )
+        msg = response['messages'][-1].content if response['messages'] and hasattr(response['messages'][-1], 'content') else "No valid response from Diagram agentic graph."
+    else:
+        msg = f"No valid image file provided. Please provide one of the accepted file types: {image_files}."

     return Command(
         update={
             "messages": [
                 AIMessage(
+                    content=msg, name="diagram_agentic"
                 )
             ]
         },
         goto="__end__",
     )

+async def call_togaf_agentic(state: OverallState, config: RunnableConfig) -> Command[Literal["__end__"]]: #NOT passing CHAT MEMORY to the Togaf_graph
+    print(f"---TOGAF ROUTE team node ready to --- CALL_TOGAF_AGENTIC Routing to {state.get('next')} with User Question: {state.get('question')}")

+    inputs = {
+        "messages": [
+            {
+                "role": "user",
+                "content": state.get('question')
+            }
+        ]
+    } #user response

     response = await togaf_graph.ainvoke(
         input=inputs,

         update={
             "messages": [
                 AIMessage(
+                    content=response["messages"][-1].content, name="togaf_agentic"
                 )
             ]
         },

     )

 # Wrap-up websearch answer to user's question
+async def call_generate_websearch(state:OverallState, config: RunnableConfig) -> Command[Literal["__end__"]]:
     from ea4all.src.ea4all_apm.state import OverallState

     if config is not None:
         source = config.get('metadata', {}).get('langgraph_node', 'unknown')

     # Invoke GENERATOR node in the APMGraph
+    messages = state.get('messages')
+    last_message_content = messages[-1].content if messages and hasattr(messages[-1], "content") else ""
     state_dict = {
+        "messages": last_message_content,
+        "source": "websearch",
+        "question": state.get('question'),
     }

     apm_state = OverallState(**state_dict)

     goto="__end__",
     )

+async def blueprint_team(state: OverallState) -> Command[Literal["togaf_route"]]:
     print("---Blueprint team got a request--- Routing to TOGAF_ROUTE node")

     return Command(update={**state}, goto="togaf_route")

+async def diagram_team(state: OverallState) -> Command[Literal["diagram_route"]]:
     print("---Diagram team got a request--- Routing to DIAGRAM_ROUTE node")

     return Command(update={**state}, goto="diagram_route")

+async def mcp_docs_team(state: OverallState) -> Command[Literal["__end__"]]:
+    print("---MCP Docs team got a request--- Routing to MCP_DOCS_AGENTIC node")
+
+    # Invoke the EA4ALL MCP Docs graph
+    mcp_response = await mcp_docs_graph.ainvoke(
+        {"messages": [{"role": "user", "content": state.get('question')}]}
+    )
+
+    return Command(
+        update={
+            "messages": [
+                AIMessage(
+                    content=mcp_response['messages'][-1].content, name="mcp_docs_agentic"
+                )
+            ]
+        },
+        goto="__end__",
+    )
+
+async def call_websearch(state: OverallState, config: RunnableConfig) -> Command[Literal["generate_websearch"]]:
+    """
+    Web search based on the re-phrased question.
+
+    Args:
+        state (OverallState): The current graph state
+        config (RunnableConfig): Configuration with the model used for query analysis.
+
+    Returns:
+        Command: Command to route to the generate_websearch node.
+    """
+    print(f"---Websearch team got a request--- Routing to GENERATE_WEBSEARCH node with User Question: {state.get('question')}")
+
+    _state = dict(state)
+    response = await websearch(_state)
+
+    return Command(update=response, goto="generate_websearch")
+
+async def super_graph_entry_point(state: OverallState):
     # Generate a unique thread ID
     thread_config = RunnableConfig({"configurable": {"thread_id": str(uuid.uuid4())}})

     model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
     teams_supervisor_node = make_supervisor_node(model, ["portfolio_team", "diagram_team", "blueprint_team","websearch_team"])

+    super_builder = StateGraph(state_schema=OverallState, input_schema=InputState, output_schema=OutputState, config_schema=BaseConfiguration)
+
     super_builder.add_node("apm_indexer", call_indexer_apm)
     super_builder.add_node("supervisor", teams_supervisor_node)
     super_builder.add_node("portfolio_team", call_landscape_agentic)
+    super_builder.add_node("websearch_team", call_websearch)
     super_builder.add_node("diagram_team", diagram_team)
     super_builder.add_node("blueprint_team", blueprint_team)
     super_builder.add_node("generate_websearch", call_generate_websearch)
     super_builder.add_node("diagram_route", call_diagram_agentic)
     super_builder.add_node("togaf_route", call_togaf_agentic)
+    super_builder.add_node("mcp_docs_team", mcp_docs_team)

     super_builder.add_edge(START, "apm_indexer")
     super_builder.add_edge("apm_indexer", "supervisor")

     super_builder.add_edge("generate_websearch", END)
     super_builder.add_edge("togaf_route", END)
     super_builder.add_edge("diagram_route", END)
+    super_builder.add_edge("mcp_docs_team", END)

+    super_graph = super_builder.compile()
+    super_graph.name = "ea4all_supervisor"

     return super_graph
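For orientation, a minimal sketch of driving the rebuilt supervisor graph (assuming build_super_graph() is exported as above; the question is illustrative):

import asyncio
from ea4all.src.graph import build_super_graph

async def main():
    graph = build_super_graph()
    # InputState is MessagesState, so a bare messages payload is enough; the
    # supervisor node extracts 'question' and routes to one of the team nodes.
    result = await graph.ainvoke({"messages": [
        {"role": "user", "content": "Which applications support the CRM capability?"}]})
    print(result.get("question"))
    print(result["messages"][-1].content)

asyncio.run(main())
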
ea4all/src/shared/configuration.py CHANGED
@@ -31,7 +31,7 @@ class BaseConfiguration:
     )

     api_base_url: Annotated[str, {"__template_metadata__": {"kind": "hosting"}}] = field(
-        default="https://api-inference.huggingface.co/models/",
+        default="https://router.huggingface.co/v1", #"https://api-inference.huggingface.co/models/",
         metadata={
             "description": "The base url for models hosted on Hugging Face's model hub."
         },
@@ -80,7 +80,7 @@ class BaseConfiguration:
     )

     ea4all_recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "graph"}}] = field(
-        default=25,
+        default=5,
         metadata={
             "description": "Maximum recursion allowed for EA4ALL graphs."
         },
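
Because BaseConfiguration is built from the RunnableConfig (the from_runnable_config pattern used throughout this commit), either default can still be overridden per invocation instead of being edited in code; a sketch with the field names defined above:

from langchain_core.runnables import RunnableConfig

# Per-run override of the new defaults, e.g. for a longer-running graph.
config = RunnableConfig(configurable={
    "api_base_url": "https://router.huggingface.co/v1",
    "ea4all_recursion_limit": 10,
})
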
ea4all/src/shared/prompts.py CHANGED
@@ -71,7 +71,7 @@ def ea4all_chat_prompt(template):

 ##select best prompt based on user inquiry's category
 @traceable(
-    tags={os.environ["EA4ALL_ENV"]}
+    tags=[os.getenv("EA4ALL_ENV", "build")]
 )
 def ea4ll_prompt_selector(category):
     QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
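
The distinction matters at decoration time: os.environ["EA4ALL_ENV"] raises KeyError whenever the variable is unset, and a list matches the documented type of the traceable tags parameter better than a set. A one-line sketch of the behaviour:

import os

os.environ.pop("EA4ALL_ENV", None)          # simulate the variable being unset
tags = [os.getenv("EA4ALL_ENV", "build")]   # -> ["build"], no KeyError
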
ea4all/src/shared/state.py CHANGED
@@ -2,13 +2,27 @@

 import hashlib
 import uuid
-from typing import Any, Literal, Optional, Union
+from typing import (
+    Any,
+    Literal,
+    Union,
+    Optional,
+    Annotated
+)
+from typing_extensions import TypedDict

+from langgraph.graph.message import add_messages
 from langgraph.graph import MessagesState
 from langchain_core.documents import Document

-class State(MessagesState):
+class InputState(MessagesState):
+    pass
+
+class OutputState(TypedDict):
+    question: str
     next: Optional[str]
+
+class OverallState(InputState, OutputState):
     user_feedback: Optional[str]

 def _generate_uuid(page_content: str) -> str:
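
A compact sketch of what the three schemas do at the graph boundary (mirroring how graph.py wires them): callers supply only InputState keys, the graph runs on OverallState, and the final result is filtered down to OutputState keys, so internal fields such as user_feedback never leak out.

from langgraph.graph import StateGraph, START, END
from ea4all.src.shared.state import InputState, OutputState, OverallState

def answer(state: OverallState):
    # Hypothetical single node: derive 'question' from the incoming messages.
    return {"question": state["messages"][-1].content, "next": None}

builder = StateGraph(OverallState, input_schema=InputState, output_schema=OutputState)
builder.add_node("answer", answer)
builder.add_edge(START, "answer")
builder.add_edge("answer", END)
demo = builder.compile()  # demo.invoke({"messages": [...]}) returns only question/next
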
ea4all/src/shared/utils.py CHANGED
@@ -14,7 +14,9 @@ from dotenv import load_dotenv, find_dotenv
 import markdown
 from markdownify import markdownify as md2text
 from io import BytesIO
-import pandas as pd
+import string
+from typing import List, Dict, Optional
+

 from pydantic import BaseModel, SecretStr

@@ -175,7 +177,7 @@ def get_llm_client(model, api_base_url=None,temperature=0, streaming=False, toke
     client = ChatOpenAI(
         model=model,
         api_key=SecretStr(os.environ['HUGGINGFACEHUB_API_TOKEN']),
-        base_url=_join_paths(api_base_url, model, "v1/"),
+        base_url=api_base_url, #_join_paths(api_base_url, model, "v1/"),
         temperature=temperature,
         streaming=streaming,
         max_completion_tokens=tokens,
@@ -485,3 +487,85 @@ def get_relevant_questions(source: str) -> list:
     for line in mock.splitlines(): relevant_questions += [line]

     return relevant_questions
+
+def extract_text_from_message_content(content):
+    # content is expected to be a list of dicts with 'type' and 'text' keys
+    if isinstance(content, list):
+        texts = [part['text'] for part in content if part.get('type') == 'text']
+        return " ".join(texts)
+    # fallback if content is just a string
+    elif isinstance(content, str):
+        return content
+    else:
+        return ""
+
+def extract_and_validate_path(
+    text: str,
+    allowed_extensions: Optional[List[str]] = None
+) -> Dict:
+    """
+    Extracts the first valid file path or filename from text based on a list
+    of allowed extensions, validates its existence, and returns detailed info.
+
+    Args:
+        text: The input string to search for a path.
+        allowed_extensions: A list of allowed extensions (e.g., ['.png', '.csv']).
+            If None, finds any path-like string.
+
+    Returns:
+        A dictionary containing the results.
+    """
+    path_str = None
+    msg_str = None
+    for word in text.split():
+        # Clean trailing punctuation (like a comma or question mark)
+        cleaned_word = word.rstrip(string.punctuation)
+
+        # A word is a candidate if it contains a path separator or a file extension dot.
+        is_candidate = '/' in cleaned_word or '.' in os.path.basename(cleaned_word)
+
+        if is_candidate:
+            # If a whitelist of extensions is provided, the candidate MUST match.
+            if allowed_extensions:
+                basename = os.path.basename(cleaned_word)
+                _, extension = os.path.splitext(basename)
+
+                # Check if the extracted extension is in the allowed list (case-insensitive)
+                if extension.lower() in [ext.lower() for ext in allowed_extensions]:
+                    path_str = cleaned_word
+                    break # Found a valid file, stop searching.
+            else:
+                # No whitelist, so the first candidate is our match.
+                path_str = cleaned_word
+                break
+        else:
+            # If the word is not a candidate, it might be part of a message.
+            if msg_str is None:
+                msg_str = word
+            else:
+                msg_str += ' ' + word
+
+    if not path_str:
+        return {'found': False}
+
+    # --- If a path was found, gather all details ---
+    basename = os.path.basename(path_str)
+    dirname = os.path.dirname(path_str)
+    _, extension = os.path.splitext(basename)
+
+    # Filesystem validation
+    path_exists = os.path.exists(path_str)
+    is_file = os.path.isfile(path_str)
+    is_dir = os.path.isdir(path_str)
+
+    return {
+        'found': True,
+        'path': path_str,
+        'dirname': dirname if dirname else ('/' if path_str.startswith('/') else '.'),
+        'basename': basename,
+        'extension': extension,
+        'exists': path_exists,
+        'is_file': is_file,
+        'is_dir': is_dir,
+        'message': msg_str if msg_str else None
+    }
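
Example behaviour of the two helpers above (the path is illustrative and need not exist; the filesystem flags will then be False):

content = [{"type": "text", "text": "map the CRM landscape"}]
assert extract_text_from_message_content(content) == "map the CRM landscape"

info = extract_and_validate_path(
    "Describe diagrams/multi-app-architecture.png focusing on risks",
    allowed_extensions=['.png', '.jpeg'],
)
# info['found'] is True, info['path'] == 'diagrams/multi-app-architecture.png',
# info['message'] == 'Describe' (only words scanned before the match are kept,
# since the search stops at the first valid path), and info['exists'] /
# info['is_file'] report the actual filesystem check.
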
ea4all/src/tools/tools.py CHANGED
@@ -1,5 +1,6 @@
-from typing import Literal, Annotated
-from typing_extensions import TypedDict
+from typing import Literal
+from typing_extensions import TypedDict, Literal as ExtLiteral
+
 import json
 import tempfile
 import os
@@ -8,20 +9,17 @@ from langchain_core.runnables import RunnableLambda, RunnableConfig

 from langgraph.graph import END
 from langgraph.types import Command
-from langgraph.prebuilt import InjectedState

 from langchain_community.utilities import BingSearchAPIWrapper
 from langchain_community.tools.bing_search.tool import BingSearchResults
 from langchain_community.document_loaders import JSONLoader

-from langchain.agents import tool
-
 from ea4all.src.shared.configuration import (
     BaseConfiguration
 )

 from ea4all.src.shared.state import (
-    State
+    OverallState
 )

 from ea4all.src.shared.utils import (
@@ -29,6 +27,9 @@ from ea4all.src.shared.utils import (
     format_docs,
 )

+def make_literal_type(options):
+    return ExtLiteral[tuple(options)]
+
 def make_supervisor_node(config: RunnableConfig, members: list[str]) -> RunnableLambda:
     options = ["FINISH"] + members
     system_prompt = (
@@ -45,16 +46,20 @@ def make_supervisor_node(config: RunnableConfig, members: list[str]) -> Runnable
         api_base_url="",
     )

-    class Router(TypedDict):
-        """Worker to route to next. If no workers needed, route to FINISH."""
-
-        next: Literal[*options]
+    NextLiteral = make_literal_type(options)
+
+    # Functional TypedDict form: it accepts the dynamically built Literal, which
+    # a class-body annotation cannot, and still yields a schema that
+    # with_structured_output can consume.
+    Router = TypedDict("Router", {"next": NextLiteral})
+    Router.__doc__ = "Worker to route to next. If no workers needed, route to FINISH."

-    def supervisor_node(state: State) -> Command[Literal[*members, "__end__"]]:
+    def supervisor_node(state: OverallState) -> Command:
         """An LLM-based router."""
         messages = [
             {"role": "system", "content": system_prompt},
-        ] + state["messages"]
+        ] + state.get("messages", [])
         response = model.with_structured_output(Router).invoke(messages)
         goto = response["next"]
         if goto == "FINISH":
@@ -64,7 +69,7 @@ def make_supervisor_node(config: RunnableConfig, members: list[str]) -> Runnable

     return RunnableLambda(supervisor_node)

-async def websearch(state: dict[str, dict | str]) -> dict[str,dict[str,str]]:
+async def websearch(state: dict[str, list | object]) -> dict[str, object]:
     """
     Web search based on the re-phrased question.

@@ -84,8 +89,9 @@ async def websearch(state: dict[str, dict | str]) -> dict[str,dict[str,str]]:
         bing_search_url=bing_search_url
     )

-    question = getattr(state,'messages')[-1].content if getattr(state,'messages', False) else getattr(state,'question')
-
+    #question = getattr(state,'messages')[-1].content if getattr(state,'messages', False) else getattr(state,'question')
+    question = state.get('question')
+
     ##Bing Search Results
     web_results = BingSearchResults(
         api_wrapper=search,
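
The functional pattern above exists because a computed Literal cannot appear in a class-body annotation; a standalone sketch:

from typing_extensions import Literal, TypedDict

options = ["FINISH", "portfolio_team", "diagram_team"]
NextLiteral = Literal[tuple(options)]  # what make_literal_type returns at runtime
DemoRouter = TypedDict("DemoRouter", {"next": NextLiteral})
# Static checkers cannot verify a runtime-built Literal, but schema-driven
# consumers read it via __annotations__.
assert "next" in DemoRouter.__annotations__
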
lgs_requirements.txt ADDED
@@ -0,0 +1,54 @@
+appnope==0.1.4
+asttokens==3.0.0
+blockbuster==1.5.25
+certifi==2025.4.26
+click==8.1.8
+comm==0.2.2
+decorator==5.2.1
+executing==2.2.0
+fsspec==2025.5.1
+greenlet==3.2.3
+hf_xet==1.1.2
+huggingface_hub==0.32.0
+ipykernel==6.29.5
+ipython==9.4.0
+ipython_pygments_lexers==1.1.1
+jedi==0.19.2
+jupyter_client==8.6.3
+jupyter_core==5.8.1
+langchain==0.3.26
+langchain_core==0.3.69
+langchain_huggingface==0.3.0
+langchain_openai==0.3.28
+langgraph==0.5.3
+langgraph_api==0.2.94
+langgraph_checkpoint==2.1.1
+langgraph_prebuilt==0.5.2
+langgraph_runtime_inmem==0.5.0
+langgraph_sdk==0.1.73
+langsmith==0.4.7
+matplotlib_inline==0.1.7
+mcp==1.10.1
+openai==1.97.0
+orjson==3.10.18
+packaging==24.2
+parso==0.8.4
+pexpect==4.9.0
+platformdirs==4.3.8
+prompt_toolkit==3.0.51
+ptyprocess==0.7.0
+pure_eval==0.2.3
+pydantic==2.11.5
+python_dotenv==1.1.0
+pyzmq==27.0.0
+requests==2.32.3
+sse_starlette==2.1.3
+stack_data==0.6.3
+starlette==0.46.2
+tokenizers==0.21.2
+tornado==6.5.1
+traitlets==5.14.3
+typing_extensions==4.13.2
+urllib3==2.4.0
+uvicorn==0.34.2
+wcwidth==0.2.13
requirements.txt CHANGED
@@ -1,42 +1,162 @@
 atlassian-python-api==4.0.4
 faiss-cpu==1.11.0
-gradio==5.32.1
-gradio_client==1.10.2
-graphviz
-huggingface-hub
-jq>=1.8.0
-langchain==0.3.25
 langchain-community==0.3.24
-langchain-core==0.3.61
 langchain-mcp-adapters==0.1.4
-langchain-openai==0.3.18
 langchain-text-splitters==0.3.8
-langgraph==0.4.7
-langgraph-api==0.2.34
-langgraph-checkpoint==2.0.26
 langgraph-cli==0.2.10
-langgraph-prebuilt==0.2.1
-langgraph-runtime-inmem==0.2.0
-langgraph-sdk==0.1.70
-langsmith==0.3.42
-markdown
-markdownify
-mcp==1.9.2
 numpy==2.2.6
-openai
-openpyxl
 packaging==24.2
-pandas
-pillow
-pydantic
-pydantic-core
-pydantic-settings
-pydub
-python-dateutil
-python-dotenv
-python-multipart
-regex
-spaces
-sqlalchemy
-tiktoken
 torch==2.2.2

+aiofiles==24.1.0
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.9
+aiosignal==1.3.2
+annotated-types==0.7.0
+anyio==4.9.0
+appnope==0.1.4
+asttokens==3.0.0
 atlassian-python-api==4.0.4
+attrs==25.3.0
+beautifulsoup4==4.13.4
+blockbuster==1.5.25
+certifi==2025.4.26
+cffi==1.17.1
+charset-normalizer==3.4.2
+click==8.1.8
+cloudpickle==3.1.1
+comm==0.2.2
+cryptography==44.0.3
+dataclasses-json==0.6.7
+debugpy==1.8.15
+decorator==5.2.1
+deprecated==1.2.18
+distro==1.9.0
+et-xmlfile==2.0.0
+executing==2.2.0
 faiss-cpu==1.11.0
+fastapi==0.115.12
+ffmpy==0.6.0
+filelock==3.18.0
+forbiddenfruit==0.1.4
+frozenlist==1.6.2
+fsspec==2025.5.1
+gradio==5.33.0
+gradio-client==1.10.2
+graphviz==0.20.3
+greenlet==3.2.3
+groovy==0.1.2
+h11==0.16.0
+hf-xet==1.1.2
+httpcore==1.0.9
+httpx==0.28.1
+httpx-sse==0.4.0
+huggingface-hub==0.32.0
+idna==3.10
+ipykernel==6.29.5
+ipython==9.4.0
+ipython-pygments-lexers==1.1.1
+jedi==0.19.2
+jinja2==3.1.6
+jiter==0.10.0
+jmespath==1.0.1
+jq==1.8.0
+jsonpatch==1.33
+jsonpointer==3.0.0
+jsonschema==4.25.0
+jsonschema-rs==0.29.1
+jsonschema-specifications==2025.4.1
+jupyter-client==8.6.3
+jupyter-core==5.8.1
+langchain==0.3.26
 langchain-community==0.3.24
+langchain-core==0.3.69
+langchain-huggingface==0.3.0
 langchain-mcp-adapters==0.1.4
+langchain-openai==0.3.28
 langchain-text-splitters==0.3.8
+langgraph==0.5.3
+langgraph-api==0.2.94
+langgraph-checkpoint==2.1.1
 langgraph-cli==0.2.10
+langgraph-prebuilt==0.5.2
+langgraph-runtime-inmem==0.5.0
+langgraph-sdk==0.1.73
+langsmith==0.4.7
+markdown==3.8
+markdown-it-py==3.0.0
+markdownify==1.1.0
+markupsafe==3.0.2
+marshmallow==3.26.1
+matplotlib-inline==0.1.7
+mcp==1.10.1
+mdurl==0.1.2
+mpmath==1.3.0
+multidict==6.4.4
+mypy-extensions==1.1.0
+nest-asyncio==1.6.0
+networkx==3.5
 numpy==2.2.6
+oauthlib==3.2.2
+openai==1.97.0
+openpyxl==3.1.5
+orjson==3.10.18
+ormsgpack==1.10.0
 packaging==24.2
+pandas==2.2.3
+parso==0.8.4
+pexpect==4.9.0
+pillow==11.2.1
+platformdirs==4.3.8
+prompt-toolkit==3.0.51
+propcache==0.3.1
+psutil==5.9.8
+ptyprocess==0.7.0
+pure-eval==0.2.3
+pycparser==2.22
+pydantic==2.11.5
+pydantic-core==2.33.2
+pydantic-settings==2.9.1
+pydub==0.25.1
+pygments==2.19.1
+pyjwt==2.10.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.0
+python-multipart==0.0.20
+pytz==2025.2
+pyyaml==6.0.2
+pyzmq==27.0.0
+referencing==0.36.2
+regex==2024.11.6
+requests==2.32.3
+requests-oauthlib==2.0.0
+requests-toolbelt==1.0.0
+rich==14.0.0
+rpds-py==0.26.0
+ruff==0.11.12
+safehttpx==0.1.6
+semantic-version==2.10.0
+shellingham==1.5.4
+six==1.17.0
+sniffio==1.3.1
+soupsieve==2.7
+spaces==0.36.0
+sqlalchemy==2.0.41
+sse-starlette==2.1.3
+stack-data==0.6.3
+starlette==0.46.2
+structlog==25.4.0
+sympy==1.14.0
+tenacity==9.1.2
+tiktoken==0.9.0
+tokenizers==0.21.2
+tomlkit==0.13.2
 torch==2.2.2
+tornado==6.5.1
+tqdm==4.67.1
+traitlets==5.14.3
+truststore==0.10.1
+typer==0.16.0
+typing-extensions==4.13.2
+typing-inspect==0.9.0
+typing-inspection==0.4.1
+tzdata==2025.2
+urllib3==2.4.0
+uvicorn==0.34.2
+watchfiles==1.0.5
+wcwidth==0.2.13
+websockets==15.0.1
+wrapt==1.17.2
+xxhash==3.5.0
+yarl==1.20.0
+zstandard==0.23.0