ea4all-gradio-agents-mcp-hackathon-ui-retrofit

Changed files:
- .python-version (+1 -0)
- ea4all/ea4all_mcp.py (+62 -38)
- ea4all/src/shared/utils.py (+11 -3)
- pyproject.toml (+7 -0)
.python-version (ADDED)

@@ -0,0 +1 @@
+3.12
ea4all/ea4all_mcp.py (CHANGED)
@@ -67,6 +67,7 @@ from langchain.callbacks.tracers import LangChainTracer
 from langchain.callbacks.tracers.langchain import wait_for_all_tracers
 from langchain_community.document_loaders import ConfluenceLoader
 from langchain_core.messages import HumanMessage
+from langchain_core.runnables import RunnableConfig
 
 from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
 from ea4all.src.shared import vectorstore
@@ -82,25 +83,35 @@ from ea4all.src.graph import super_graph
 
 import gradio as gr
 from gradio import ChatMessage
+import os
+import uuid
+import time
+from PIL import Image
 
 from utils.utils import (
     get_user_feedback, add_message, unload_dbr, on_dbrtext, load_dbr, off_dbrtext,
-    ea4all_agent_init, get_image, filter_page, ea4all_about, init_dbr
+    ea4all_agent_init, get_image, filter_page, ea4all_about, init_dbr
 )
 
+TITLE = """
+# Title
+
+**Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` to positively impact individuals and organisations.\n
+
+## Overview
+"""
+
 #Set LangSmith project
 tracer = LangChainTracer(project_name=os.getenv('LANGCHAIN_PROJECT'))
 
-config = {
-    "stream_mode": "messages"
-}
+config = RunnableConfig(
+    run_name = os.getenv('LANGCHAIN_RUNNAME', "ea4all-mcp"),
+    tags = [os.getenv('EA4ALL_ENV', "MCP")],
+    callbacks = [tracer],
+    recursion_limit = 25,
+    configurable = {"thread_id": uuid.uuid4()},
+    #stream_mode = "messages"
+)
 
 #Blocks w/ ChatInterface, BYOD, About
 with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
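The switch from a plain dict to `RunnableConfig` gives the tracing metadata a typed carrier. For orientation, a minimal sketch of how a config like this is consumed, assuming the compiled `super_graph` imported above takes a `messages`-style state (the question text is an illustrative assumption, not from this commit):

```python
# Minimal sketch, not part of this commit: driving a compiled LangGraph
# graph with a RunnableConfig like the one added above. The input schema
# and question are illustrative assumptions.
import os
import uuid

from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig

config = RunnableConfig(
    run_name=os.getenv("LANGCHAIN_RUNNAME", "ea4all-mcp"),
    tags=[os.getenv("EA4ALL_ENV", "MCP")],
    recursion_limit=25,
    configurable={"thread_id": uuid.uuid4()},  # groups runs into one thread
)

async def stream_run(graph, question: str) -> None:
    # astream_events emits the on_chain_start / on_chain_stream /
    # on_chain_end events that the UI handlers in this file filter on.
    async for event in graph.astream_events(
        {"messages": [HumanMessage(content=question)]},
        config=config,
        version="v2",
    ):
        print(event["event"], event.get("name", ""))
```

Leaving `stream_mode` commented out matches `astream_events` usage, where event granularity is fixed by the API rather than by a stream mode.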
@@ -173,12 +184,18 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 # chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`"))
 # yield chat_memory
 if kind == "on_chain_stream":
-    if
-    chat_memory.append(ChatMessage(role="assistant", content=f"
+    chunk = event['data'].get('chunk')
+    if chunk and 'safety_status' in chunk and len(chunk['safety_status']) > 0:
+        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {chunk['safety_status'][0]}"))
+        if chunk['safety_status'][0] == 'no' and len(chunk['safety_status']) > 1:
+            chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {chunk['safety_status'][1]}"))
     yield chat_memory
 if kind == "on_chain_end" and name == "route_question":
+    output = event['data'].get('output')
+    if output and 'source' in output:
+        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {output['source']}"))
+    else:
+        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` (no source available)"))
     yield chat_memory
 if kind == "on_chain_start" and name == "retrieve":
     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
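The retrofit here is one recurring pattern: replace direct indexing into `event['data']` with `.get()` plus emptiness and shape checks, so a missing `chunk` or `output` no longer raises a `KeyError` mid-stream. A self-contained sketch of the guard with illustrative payloads:

```python
# Self-contained sketch of the defensive-access pattern used above;
# the event payloads are illustrative, not captured from a real run.
def describe_route(event: dict) -> str:
    output = event.get("data", {}).get("output")
    if output and "source" in output:
        return f"route_question: {output['source']}"
    return "route_question: (no source available)"

assert describe_route({"data": {"output": {"source": "vectorstore"}}}) == \
    "route_question: vectorstore"
assert describe_route({"data": {}}) == "route_question: (no source available)"
```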
@@ -189,7 +206,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 if kind == "on_chain_stream" and name == "stream_generation":
     data = event["data"]
     # Accumulate the chunk of data
-    partial_message += data
+    partial_message += data.get('chunk', '')
     chat_memory[-1].content = partial_message
     time.sleep(0.05)
     yield chat_memory
@@ -198,7 +215,12 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
 yield chat_memory
 if kind == "on_chain_end":
+    input_data = event['data'].get('input')
+    if input_data and hasattr(input_data, 'source'):
+        output_value = event['data'].get('output', '')
+        chat_memory.append(ChatMessage(role="assistant", content=f"`{input_data.source}:` {output_value}"))
+    else:
+        chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data'].get('output', '')}`"))
     yield chat_memory
 if "stream_hallucination" in tags and kind == "on_chain_start":
     chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
@@ -211,10 +233,14 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
 yield chat_memory
 if kind == "on_chain_stream":
+    chunk = event['data'].get('chunk')
+    if chunk is not None:
+        chat_memory.append(ChatMessage(role="assistant", content=f"{chunk}"))
+        yield chat_memory
 
+# Set environment variable only when 'event' is defined
+os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+
 wait_for_all_tracers()
 
 #Trigger Solution Architecture Diagram QnA
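Both chat handlers now stash the LangSmith run id in a per-user environment variable before `wait_for_all_tracers()`, so a later feedback callback can find the run to annotate. A hedged sketch of both sides of that handshake (`recall_run` is illustrative; the project's actual consumer is `get_user_feedback`, not shown in this diff):

```python
# Hedged sketch of the run-id handshake. remember_run mirrors the diff;
# recall_run is an illustrative consumer, not code from this commit.
import os
import uuid

def remember_run(ea4all_user: str, run_id: uuid.UUID) -> None:
    # Same keying scheme as the diff: dots in the user name become underscores.
    os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(run_id)

def recall_run(ea4all_user: str) -> str | None:
    return os.environ.get("EA4ALL_" + ea4all_user.replace(".", "_"))

remember_run("jane.doe", uuid.uuid4())
print(recall_run("jane.doe"))  # run id a feedback API could attach to
```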
@@ -260,17 +286,20 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 ):
     if (
         event["event"] == "on_chat_model_stream"
-        and "vqa_stream" in event
+        and "vqa_stream" in event.get('tags', [])
         #and event["metadata"].get("langgraph_node") == "tools"
     ):
+        chunk = event["data"].get("chunk")
+        if chunk is not None and hasattr(chunk, "content"):
+            partial_message += chunk.content
+            chat_memory[-1].content = partial_message
         time.sleep(e4u.CFG.STREAM_SLEEP)
         yield chat_memory #, message to update prompt
     elif not partial_message:
         yield chat_memory #, message
 
+os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+
 wait_for_all_tracers()
 
 except Exception as e:
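For `on_chat_model_stream` events, `data['chunk']` is normally an `AIMessageChunk`, and the new `hasattr(chunk, "content")` guard keeps the accumulator safe when an event arrives without one. A minimal sketch of that accumulation over fabricated chunks:

```python
# Minimal sketch of the token-accumulation guard, run over fabricated chunks.
from langchain_core.messages import AIMessageChunk

partial_message = ""
fake_events = [
    {"data": {"chunk": AIMessageChunk(content="Hello ")}},
    {"data": {"chunk": None}},  # e.g. an event with no payload
    {"data": {"chunk": AIMessageChunk(content="world")}},
]
for event in fake_events:
    chunk = event["data"].get("chunk")
    if chunk is not None and hasattr(chunk, "content"):
        partial_message += chunk.content

print(partial_message)  # -> "Hello world"
```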
@@ -309,11 +338,14 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 if kind == "on_chain_stream":
     data = s["data"]
     # Accumulate the chunk of data
+    chunk = data.get('chunk')
+    if chunk is not None and hasattr(chunk, "content"):
+        vision_message += chunk.content
     time.sleep(e4u.CFG.STREAM_SLEEP)
     yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
 elif name == "save_diagram" and kind == 'on_chain_end': #MOVED INTO Togaf_Task3
+    output = s['data'].get('output', {})
+    final_diagram = output.get('architecture_runway', "")
 elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start': ##'on_chat_model_stream':
     agent_response += f"\n\n`{tags[-1]}:{name}`"
@@ -376,7 +408,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
 pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
 with gr.Accordion("Open for prompt examples", open=False):
-    pmo_examples = gr.Dropdown(e4u.
+    pmo_examples = gr.Dropdown(e4u.get_relevant_questions(PMO_MOCK_QNA), value=None,label="Questions", interactive=True)
 gr.ClearButton([pmo_chatbot,pmo_prompt], value="Clear", size="sm", visible=False)
 with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
     pmo_df = gr.Dataframe()
@@ -387,7 +419,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
 qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
 with gr.Accordion("Open for prompt examples", open=False):
-    qna_examples = gr.Dropdown(e4u.
+    qna_examples = gr.Dropdown(e4u.get_relevant_questions(APM_MOCK_QNA), value=None,label="Questions", interactive=True)
 gr.ClearButton([ea4all_chatbot,qna_prompt], value="Clear", size="sm", visible=False)
 with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
     apm_df = gr.Dataframe()
@@ -396,7 +428,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
 vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
 with gr.Accordion("Open for prompt examples", open=False):
-    vqa_examples = gr.Dropdown(e4u.
+    vqa_examples = gr.Dropdown(e4u.get_vqa_examples(), value=None,label="Diagram and Questions", interactive=True)
 gr.ClearButton([ea4all_vqa,vqa_prompt,vqa_examples], value="Clear", size="sm", visible=True)
 with gr.Tab(label="Reference Architecture", id="id_refarch"):
     with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
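All three example dropdowns are now fed by helpers (`get_relevant_questions` over the mock Q&A files, `get_vqa_examples` for the multimodal tab), keeping the example lists in data rather than in UI code. A sketch of the typical wiring from such a dropdown to its prompt box, assuming a select-handler that this hunk does not show:

```python
# Sketch of wiring a question dropdown to a prompt textbox; the select
# handler is an assumption, since this hunk only shows the components.
import gradio as gr

def pick_example(evt: gr.SelectData) -> str:
    return evt.value  # the chosen question becomes the prompt text

with gr.Blocks() as demo:
    prompt = gr.Textbox(show_label=False)
    examples = gr.Dropdown(
        ["What applications support the CRM capability?",
         "Which applications overlap in functionality?"],
        value=None, label="Questions", interactive=True,
    )
    examples.select(pick_example, None, prompt)

# demo.launch()
```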
@@ -437,15 +469,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
 diagram_header=gr.Markdown(visible=True)
 architecture_runway=gr.Image(label="Target Architecture Runway",interactive=False,visible=True, scale=10)
 with gr.Tab(label="Overview", id="how_to"):
-    gr.Markdown(
-        """
-        # Title
-
-        **Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` to positively impact individuals and organisations.\n
-
-        ## Overview
-        """
-    ),
+    gr.Markdown(value=TITLE)
     gr.Image(
         get_image(e4u.CFG.EA4ALL_ARCHITECTURE),
         show_download_button=False,
ea4all/src/shared/utils.py (CHANGED)
@@ -233,7 +233,7 @@ def get_history_gradio(history, chat_history=[]):
     return history
 
 #retrieve relevant questions based on user interaction
-def
+def get_vqa_examples() -> list:
     examples=[
         {"text": "Describe this image.", "files": ["ea4all/images/multi-app-architecture.png"]},
         {"text": "Assess any risk and vulnerabilities in the current solution.", "files": ["ea4all/images/ea4all_architecture.png"]},
@@ -314,13 +314,13 @@ def get_raw_image(image_path):
 
     return raw_image
 
-def load_mock_content(file_path):
+def load_mock_content(file_path) -> str:
     try:
         with open(_join_paths(ea4all_config.ea4all_store,file_path), "r") as file:
             content = file.read()
             return content
     except ValueError as e:
-        return e
+        return str(e)
 
 def print_json_to_md(data, indent=0, column=None):
     try:
@@ -476,3 +476,11 @@ def extract_detailed_business_requirements(llm, topic: type[BaseModel], name:str
 # Post-processing
 def format_docs(docs):
     return "\n".join(doc.page_content for doc in docs)
+
+#load mock data
+def get_relevant_questions(source: str) -> list:
+    relevant_questions = []
+    mock = load_mock_content(source)
+    for line in mock.splitlines(): relevant_questions += [line]
+
+    return relevant_questions
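`get_relevant_questions` turns a mock Q&A file into one dropdown entry per line; because `load_mock_content` now returns the error text as a string on failure, callers always receive a list. A small usage sketch (the file name is illustrative):

```python
# Usage sketch; "apm_qna_mock.txt" is an illustrative file name.
questions = get_relevant_questions("apm_qna_mock.txt")
for q in questions[:3]:
    print("-", q)
```

The loop body is equivalent to `list(mock.splitlines())`; blank lines in the mock file will surface as empty dropdown entries unless filtered.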
pyproject.toml (ADDED)

@@ -0,0 +1,7 @@
+[project]
+name = "ea4all-gradio-agent-mcp-hackathon"
+version = "0.1.0"
+description = "EA4ALL Agentic System MCP Server"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = []
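`.python-version` pins the interpreter for tooling such as uv or pyenv, while `requires-python` enforces the same floor at install time. A sketch of the equivalent runtime guard, for illustration only:

```python
# Sketch: runtime equivalent of requires-python = ">=3.12"; not in the commit.
import sys

if sys.version_info < (3, 12):
    raise RuntimeError("ea4all-gradio-agent-mcp-hackathon requires Python 3.12+")
```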