ea4all-gradio-agents-mcp-hackathon-tools-refactoring-apm-websearch-state
Browse files- ea4all/ea4all_mcp.py +1 -1
- ea4all/src/ea4all_apm/graph.py +27 -20
- ea4all/src/ea4all_apm/state.py +2 -3
- ea4all/src/ea4all_vqa/graph.py +12 -12
- ea4all/src/tools/tools.py +2 -2
- ea4all/utils/utils.py +2 -2
ea4all/ea4all_mcp.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
#2025-06-04
|
2 |
## Gradio Agents MCP Hackathon: retrofit to expose EA4ALL Agentic System Agents only
|
3 |
## Greetings message not working
|
4 |
## UI exposing too much tools, need to be refactored
|
|
|
1 |
+
#CHANGELOG: 2025-06-04
|
2 |
## Gradio Agents MCP Hackathon: retrofit to expose EA4ALL Agentic System Agents only
|
3 |
## Greetings message not working
|
4 |
## UI exposing too much tools, need to be refactored
|
ea4all/src/ea4all_apm/graph.py
CHANGED
@@ -6,6 +6,10 @@ and key functions for processing & routing user queries, generating answer to
|
|
6 |
Enterprise Architecture related user questions
|
7 |
about an IT Landscape or Websearch.
|
8 |
"""
|
|
|
|
|
|
|
|
|
9 |
import os
|
10 |
|
11 |
from langgraph.graph import END, StateGraph
|
@@ -67,7 +71,7 @@ async def retrieve_documents(
|
|
67 |
"""
|
68 |
with vectorstore.make_retriever(config) as retriever:
|
69 |
response = await retriever.ainvoke(state.question, config)
|
70 |
-
return {"documents": response}
|
71 |
|
72 |
async def apm_retriever(config: RunnableConfig):
|
73 |
with vectorstore.make_retriever(config) as retriever:
|
@@ -463,12 +467,12 @@ async def grade_documents(state, config: RunnableConfig):
|
|
463 |
|
464 |
print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
|
465 |
question = state.question
|
466 |
-
documents = state.documents
|
|
|
467 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
468 |
|
469 |
# Score each doc
|
470 |
filtered_docs = []
|
471 |
-
web_search = "No"
|
472 |
for d in documents:
|
473 |
score = retrieval_grader(llm).ainvoke(
|
474 |
{"user_question": question, "document": d.page_content}
|
@@ -483,9 +487,10 @@ async def grade_documents(state, config: RunnableConfig):
|
|
483 |
print("---GRADE: DOCUMENT NOT RELEVANT---")
|
484 |
# We do not include the document in filtered_docs
|
485 |
# We set a flag to indicate that we want to run web search
|
486 |
-
web_search = "Yes"
|
|
|
487 |
|
488 |
-
return {"documents": filtered_docs, "question": question, "web_search": web_search}
|
489 |
|
490 |
def decide_to_generate(state):
|
491 |
"""
|
@@ -500,10 +505,10 @@ def decide_to_generate(state):
|
|
500 |
|
501 |
print("---ASSESS GRADED DOCUMENTS---")
|
502 |
state.question
|
503 |
-
|
504 |
getattr(state,'documents')
|
505 |
|
506 |
-
if web_search == "Yes":
|
507 |
# All documents have been filtered check_relevance
|
508 |
# We will re-generate a new query
|
509 |
print(
|
@@ -530,11 +535,11 @@ def grade_generation_v_documents_and_question(
|
|
530 |
configuration = AgentConfiguration.from_runnable_config(config)
|
531 |
|
532 |
question = getattr(state,'question')
|
533 |
-
documents = getattr(state,'documents')
|
534 |
generation = getattr(state,'generation')
|
535 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
536 |
|
537 |
-
if getattr(state,'web_search') == "Yes":
|
538 |
#print("---CHECK HALLUCINATIONS---")
|
539 |
hallucination_grader_instance = hallucination_grader(llm)
|
540 |
#for output in hallucination_grader_instance.stream(
|
@@ -654,7 +659,7 @@ async def retrieve(
|
|
654 |
|
655 |
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
|
656 |
|
657 |
-
return {"documents": format_docs(documents['cdocs']), "question": question, "rag":getattr(state,'rag')}
|
658 |
|
659 |
### Edges ###
|
660 |
def route_to_node(state:OverallState):
|
@@ -692,17 +697,17 @@ async def stream_generation(
|
|
692 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url,streaming=configuration.streaming)
|
693 |
|
694 |
documents = None
|
695 |
-
web_search = None
|
696 |
question = None
|
|
|
697 |
chat_memory = None
|
698 |
async for s in state:
|
699 |
-
documents = getattr(s,"documents")
|
700 |
-
web_search = getattr(s,"web_search")
|
701 |
question = getattr(s,"question")
|
|
|
702 |
chat_memory = getattr(s,"chat_memory")
|
703 |
|
704 |
# Prompt Web Search generation
|
705 |
-
if web_search == "Yes":
|
706 |
prompt = PromptTemplate(
|
707 |
template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an enterprise architect assistant for question-answering tasks.
|
708 |
Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know.
|
@@ -738,9 +743,9 @@ async def generate(
|
|
738 |
"""
|
739 |
#print("---GENERATE---")
|
740 |
|
741 |
-
documents = getattr(state,'documents')
|
742 |
-
|
743 |
-
question = getattr(state,'question')
|
744 |
|
745 |
##Triggered by hallucination_grade? 2025-02-21 - NOT USER being edged to END atm
|
746 |
#2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
|
@@ -752,13 +757,14 @@ async def generate(
|
|
752 |
# await retrieve(state, config)
|
753 |
|
754 |
# Generate answer
|
755 |
-
tags = ["websearch_stream"] if web_search == "Yes" else ["apm_stream"]
|
756 |
gen = RunnableGenerator(stream_generation).with_config(tags=tags)
|
757 |
generation=""
|
758 |
async for message in gen.astream(state):
|
759 |
generation = ''.join([generation,message])
|
760 |
|
761 |
-
return {"
|
|
|
762 |
|
763 |
#ea4all-qna-agent-conversational-with-memory
|
764 |
async def apm_agentic_qna(
|
@@ -808,7 +814,8 @@ async def apm_agentic_qna(
|
|
808 |
|
809 |
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":chat_memory})
|
810 |
|
811 |
-
return {"documents": format_docs(documents['cdocs']), "question": question, "rag":5, "generation": None}
|
|
|
812 |
|
813 |
async def final(state: OverallState):
|
814 |
return {"safety_status": state}
|
|
|
6 |
Enterprise Architecture related user questions
|
7 |
about an IT Landscape or Websearch.
|
8 |
"""
|
9 |
+
|
10 |
+
#CHANGELOG: 2025-06-08
|
11 |
+
# Refactored to use tools.websearch (changes State, removed web_search)
|
12 |
+
|
13 |
import os
|
14 |
|
15 |
from langgraph.graph import END, StateGraph
|
|
|
71 |
"""
|
72 |
with vectorstore.make_retriever(config) as retriever:
|
73 |
response = await retriever.ainvoke(state.question, config)
|
74 |
+
return {"messages": response}
|
75 |
|
76 |
async def apm_retriever(config: RunnableConfig):
|
77 |
with vectorstore.make_retriever(config) as retriever:
|
|
|
467 |
|
468 |
print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
|
469 |
question = state.question
|
470 |
+
documents = state.messages
|
471 |
+
source = state.source
|
472 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
473 |
|
474 |
# Score each doc
|
475 |
filtered_docs = []
|
|
|
476 |
for d in documents:
|
477 |
score = retrieval_grader(llm).ainvoke(
|
478 |
{"user_question": question, "document": d.page_content}
|
|
|
487 |
print("---GRADE: DOCUMENT NOT RELEVANT---")
|
488 |
# We do not include the document in filtered_docs
|
489 |
# We set a flag to indicate that we want to run web search
|
490 |
+
#web_search = "Yes"
|
491 |
+
source = "websearch"
|
492 |
|
493 |
+
return {"documents": filtered_docs, "question": question, "source": source}
|
494 |
|
495 |
def decide_to_generate(state):
|
496 |
"""
|
|
|
505 |
|
506 |
print("---ASSESS GRADED DOCUMENTS---")
|
507 |
state.question
|
508 |
+
source = state.source
|
509 |
getattr(state,'documents')
|
510 |
|
511 |
+
if source == "websearch":
|
512 |
# All documents have been filtered check_relevance
|
513 |
# We will re-generate a new query
|
514 |
print(
|
|
|
535 |
configuration = AgentConfiguration.from_runnable_config(config)
|
536 |
|
537 |
question = getattr(state,'question')
|
538 |
+
documents = getattr(state,'messages')
|
539 |
generation = getattr(state,'generation')
|
540 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
|
541 |
|
542 |
+
if getattr(state,'source') == "websearch":
|
543 |
#print("---CHECK HALLUCINATIONS---")
|
544 |
hallucination_grader_instance = hallucination_grader(llm)
|
545 |
#for output in hallucination_grader_instance.stream(
|
|
|
659 |
|
660 |
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
|
661 |
|
662 |
+
return {"messages": format_docs(documents['cdocs']), "question": question, "rag":getattr(state,'rag')}
|
663 |
|
664 |
### Edges ###
|
665 |
def route_to_node(state:OverallState):
|
|
|
697 |
llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url,streaming=configuration.streaming)
|
698 |
|
699 |
documents = None
|
|
|
700 |
question = None
|
701 |
+
source = None
|
702 |
chat_memory = None
|
703 |
async for s in state:
|
704 |
+
documents = getattr(s,"messages")
|
|
|
705 |
question = getattr(s,"question")
|
706 |
+
source = getattr(s,"source")
|
707 |
chat_memory = getattr(s,"chat_memory")
|
708 |
|
709 |
# Prompt Web Search generation
|
710 |
+
if source == "websearch":
|
711 |
prompt = PromptTemplate(
|
712 |
template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an enterprise architect assistant for question-answering tasks.
|
713 |
Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know.
|
|
|
743 |
"""
|
744 |
#print("---GENERATE---")
|
745 |
|
746 |
+
#documents = getattr(state,'messages')[-1].content #documents
|
747 |
+
source = getattr(state,'source')
|
748 |
+
#question = getattr(state,'question')
|
749 |
|
750 |
##Triggered by hallucination_grade? 2025-02-21 - NOT USER being edged to END atm
|
751 |
#2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
|
|
|
757 |
# await retrieve(state, config)
|
758 |
|
759 |
# Generate answer
|
760 |
+
tags = ["websearch_stream"] if source == "websearch" else ["apm_stream"]
|
761 |
gen = RunnableGenerator(stream_generation).with_config(tags=tags)
|
762 |
generation=""
|
763 |
async for message in gen.astream(state):
|
764 |
generation = ''.join([generation,message])
|
765 |
|
766 |
+
#return {"messages": documents.content, "question": question, "generation": generation, "web_search": web_search}
|
767 |
+
return {"generation": generation}
|
768 |
|
769 |
#ea4all-qna-agent-conversational-with-memory
|
770 |
async def apm_agentic_qna(
|
|
|
814 |
|
815 |
documents = await final_chain.ainvoke({"user_question": question, "chat_memory":chat_memory})
|
816 |
|
817 |
+
#return {"documents": format_docs(documents['cdocs']), "question": question, "rag":5, "generation": None}
|
818 |
+
return {"messages": format_docs(documents['cdocs']), "rag":5}
|
819 |
|
820 |
async def final(state: OverallState):
|
821 |
return {"safety_status": state}
|
ea4all/src/ea4all_apm/state.py
CHANGED
@@ -35,9 +35,9 @@ class InputState:
|
|
35 |
class OutputState:
|
36 |
"""Represents the output schema for the APM agent."""
|
37 |
question: str
|
38 |
-
|
39 |
generation: Optional[str] = None
|
40 |
-
|
41 |
"""Answer to user's Architecture IT Landscape question about ."""
|
42 |
|
43 |
@dataclass(kw_only=True)
|
@@ -54,7 +54,6 @@ class OverallState(InputState, OutputState):
|
|
54 |
"""
|
55 |
safety_status: Optional[Tuple[str, str, str]] = None
|
56 |
router: Optional[Router] = None
|
57 |
-
source: Optional[str] = None
|
58 |
rag: Optional[str] = None
|
59 |
chat_memory: Optional[str] = None
|
60 |
retrieved: Optional[List[str]] = None
|
|
|
35 |
class OutputState:
|
36 |
"""Represents the output schema for the APM agent."""
|
37 |
question: str
|
38 |
+
messages: Optional[List[str]] = None
|
39 |
generation: Optional[str] = None
|
40 |
+
source: Optional[str] = None
|
41 |
"""Answer to user's Architecture IT Landscape question about ."""
|
42 |
|
43 |
@dataclass(kw_only=True)
|
|
|
54 |
"""
|
55 |
safety_status: Optional[Tuple[str, str, str]] = None
|
56 |
router: Optional[Router] = None
|
|
|
57 |
rag: Optional[str] = None
|
58 |
chat_memory: Optional[str] = None
|
59 |
retrieved: Optional[List[str]] = None
|
ea4all/src/ea4all_vqa/graph.py
CHANGED
@@ -21,7 +21,6 @@ from langchain_core.messages import (
|
|
21 |
AIMessage,
|
22 |
HumanMessage,
|
23 |
ToolMessage,
|
24 |
-
BaseMessage
|
25 |
)
|
26 |
|
27 |
#pydantic
|
@@ -44,7 +43,6 @@ from langgraph.graph import (
|
|
44 |
)
|
45 |
from langgraph.prebuilt import ToolNode, tools_condition, InjectedState
|
46 |
from langgraph.types import Command
|
47 |
-
from langgraph.checkpoint.memory import MemorySaver
|
48 |
|
49 |
#import APMGraph packages
|
50 |
from ea4all.src.ea4all_vqa.configuration import AgentConfiguration
|
@@ -52,7 +50,6 @@ from ea4all.src.ea4all_vqa.state import InputState, OutputState, OverallState
|
|
52 |
|
53 |
#import shared packages
|
54 |
from ea4all.src.shared.configuration import BaseConfiguration
|
55 |
-
from ea4all.src.shared.state import State
|
56 |
from ea4all.src.shared.utils import (
|
57 |
get_llm_client,
|
58 |
_get_formatted_date,
|
@@ -72,7 +69,7 @@ class DiagramV2S(BaseModel):
|
|
72 |
"""Check whether the image provided is an architecture diagram or flowchart and safe to be processed."""
|
73 |
isArchitectureImage: bool = Field(...,description="Should be True if an image is an architecture diagram or flowchart, otherwise False.")
|
74 |
isSafe: bool = Field(...,description="Should be True if image or question are safe to be processed, False otherwise")
|
75 |
-
description: str = Field(description="
|
76 |
|
77 |
@tool("vqa_diagram", response_format="content")
|
78 |
@spaces.GPU
|
@@ -174,9 +171,17 @@ def safeguard_check(state:OverallState, config:RunnableConfig) -> dict:
|
|
174 |
question = getattr(state, "question", "Describe the image")
|
175 |
raw_image = get_raw_image(getattr(state,'image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))
|
176 |
|
177 |
-
system_message =
|
178 |
-
|
179 |
-
""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
180 |
|
181 |
safeguard_checker = create_safeguarding_agent(
|
182 |
llm,
|
@@ -188,11 +193,6 @@ Given the conversation above, is the image safe to be processed? Does the image
|
|
188 |
input = {"question": question, "raw_image": raw_image}
|
189 |
result = safeguard_checker.invoke(input=input, config=config)
|
190 |
|
191 |
-
# Parse out the function call
|
192 |
-
architecture_image = result['isArchitectureImage']
|
193 |
-
safe_request = result['isSafe']
|
194 |
-
description = result['description']
|
195 |
-
|
196 |
return {"safety_status": result}
|
197 |
|
198 |
def call_finish(state:OverallState, config:RunnableConfig) -> dict:
|
|
|
21 |
AIMessage,
|
22 |
HumanMessage,
|
23 |
ToolMessage,
|
|
|
24 |
)
|
25 |
|
26 |
#pydantic
|
|
|
43 |
)
|
44 |
from langgraph.prebuilt import ToolNode, tools_condition, InjectedState
|
45 |
from langgraph.types import Command
|
|
|
46 |
|
47 |
#import APMGraph packages
|
48 |
from ea4all.src.ea4all_vqa.configuration import AgentConfiguration
|
|
|
50 |
|
51 |
#import shared packages
|
52 |
from ea4all.src.shared.configuration import BaseConfiguration
|
|
|
53 |
from ea4all.src.shared.utils import (
|
54 |
get_llm_client,
|
55 |
_get_formatted_date,
|
|
|
69 |
"""Check whether the image provided is an architecture diagram or flowchart and safe to be processed."""
|
70 |
isArchitectureImage: bool = Field(...,description="Should be True if an image is an architecture diagram or flowchart, otherwise False.")
|
71 |
isSafe: bool = Field(...,description="Should be True if image or question are safe to be processed, False otherwise")
|
72 |
+
description: str = Field(description="One sentence describing the reason for being categorised as unsafe or not an architecture image.")
|
73 |
|
74 |
@tool("vqa_diagram", response_format="content")
|
75 |
@spaces.GPU
|
|
|
171 |
question = getattr(state, "question", "Describe the image")
|
172 |
raw_image = get_raw_image(getattr(state,'image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))
|
173 |
|
174 |
+
system_message = (
|
175 |
+
"Act as a safeguarding agent to check whether the image provided is an architecture diagram or flowchart and safe to be processed. "
|
176 |
+
"You will be provided with a question and an image. "
|
177 |
+
"You should return a JSON object with the following fields: "
|
178 |
+
"'isArchitectureImage':bool, 'isSafe': bool, 'description': str. "
|
179 |
+
"The 'isArchitectureImage' field should be True if the image is an architecture diagram or flowchart, otherwise False. "
|
180 |
+
"The 'isSafe' field should be True if the image or question are safe to be processed, False otherwise. "
|
181 |
+
"The 'description' field should contain a one sentence description of the reason for being categorised as unsafe or not an architecture image. "
|
182 |
+
"If the image is not an architecture diagram or flowchart, you should say it is not an architecture image as 'description' field. "
|
183 |
+
"If the image is not safe to be processed, you should say it is unsafe as 'description' field. "
|
184 |
+
)
|
185 |
|
186 |
safeguard_checker = create_safeguarding_agent(
|
187 |
llm,
|
|
|
193 |
input = {"question": question, "raw_image": raw_image}
|
194 |
result = safeguard_checker.invoke(input=input, config=config)
|
195 |
|
|
|
|
|
|
|
|
|
|
|
196 |
return {"safety_status": result}
|
197 |
|
198 |
def call_finish(state:OverallState, config:RunnableConfig) -> dict:
|
ea4all/src/tools/tools.py
CHANGED
@@ -64,7 +64,7 @@ def make_supervisor_node(config: RunnableConfig, members: list[str]) -> Runnable
|
|
64 |
|
65 |
return RunnableLambda(supervisor_node)
|
66 |
|
67 |
-
async def websearch(state: State) -> dict[str,dict[str,str]]:
|
68 |
"""
|
69 |
Web search based on the re-phrased question.
|
70 |
|
@@ -84,7 +84,7 @@ async def websearch(state: State) -> dict[str,dict[str,str]]:
|
|
84 |
bing_search_url=bing_search_url
|
85 |
)
|
86 |
|
87 |
-
question = state
|
88 |
|
89 |
##Bing Search Results
|
90 |
web_results = BingSearchResults(
|
|
|
64 |
|
65 |
return RunnableLambda(supervisor_node)
|
66 |
|
67 |
+
async def websearch(state: dict[str, dict | str]) -> dict[str,dict[str,str]]:
|
68 |
"""
|
69 |
Web search based on the re-phrased question.
|
70 |
|
|
|
84 |
bing_search_url=bing_search_url
|
85 |
)
|
86 |
|
87 |
+
question = getattr(state,'messages')[-1].content if getattr(state,'messages', False) else getattr(state,'question')
|
88 |
|
89 |
##Bing Search Results
|
90 |
web_results = BingSearchResults(
|
ea4all/utils/utils.py
CHANGED
@@ -115,11 +115,11 @@ async def ea4all_agent_init(request:gr.Request):
|
|
115 |
|
116 |
agentic_qna_desc="""Hi,
|
117 |
improve effieciency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
|
118 |
-
As an Enterprise Architect Agentic
|
119 |
|
120 |
#capture user IP address
|
121 |
#ea4all_user = e4u.get_user_identification(request)
|
122 |
-
gr.Info("Thank you for
|
123 |
|
124 |
# Set initial landscape vectorstore
|
125 |
|
|
|
115 |
|
116 |
agentic_qna_desc="""Hi,
|
117 |
improve effieciency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
|
118 |
+
As an Enterprise Architect Agentic Companion I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """
|
119 |
|
120 |
#capture user IP address
|
121 |
#ea4all_user = e4u.get_user_identification(request)
|
122 |
+
gr.Info("Thank you for using the EA4ALL Agentic MCP Server!")
|
123 |
|
124 |
# Set initial landscape vectorstore
|
125 |
|