ea4all-gradio-agents-mcp-hackathon-kickoff

Files changed:
- ea4all/__main__.py           +2    -2
- ea4all/app_ea4all_agent.py   +0    -655
- ea4all/ea4all_mcp.py         +650  -23
ea4all/__main__.py CHANGED
@@ -1,10 +1,10 @@
-from ea4all import
+from ea4all import ea4all_mcp as e4m
 import os

 def main() -> None:
     #Launch UI
     try:
-        …
+        e4m.ea4all_mcp.launch(
             server_name=os.getenv("GRADIO_SERVER_NAME","0.0.0.0"),
             server_port=os.getenv("GRADIO_SERVER_PORT",None),
             debug=os.getenv("GRADIO_DEBUG",True),
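A note on the new launch call: os.getenv always returns a string when the variable is set, while launch() expects an integer server_port and a boolean debug, so GRADIO_SERVER_PORT=7860 arrives as "7860" and GRADIO_DEBUG=false is still truthy. A minimal hardened sketch; the env_int/env_bool helpers are hypothetical and not part of this commit:

import os

def env_int(name, default=None):
    # os.getenv returns str (or None); convert so launch() receives an int port
    raw = os.getenv(name)
    return int(raw) if raw is not None else default

def env_bool(name, default=False):
    # any non-empty string - even "false" - is truthy, so parse explicitly
    raw = os.getenv(name)
    return raw.strip().lower() in ("1", "true", "yes") if raw is not None else default

# e4m.ea4all_mcp.launch(
#     server_name=os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
#     server_port=env_int("GRADIO_SERVER_PORT"),
#     debug=env_bool("GRADIO_DEBUG", True),
# )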
ea4all/app_ea4all_agent.py DELETED
@@ -1,655 +0,0 @@
(all 655 lines removed; the file content follows)
#Added agentic-workflow-collaboration-agents
#Multimodal ChatInterface - not working
#Added new QA Tab
#Added new agent Well-Architected
#Added Supervisor Agent workflow
#ISSUE with VQA fixed
#LLMChain refactored
#Updated with changes as result of ea4all_agent Gradio Space deployment issues
#VQA Safeguards - JPEG, PNG images only
#Deployed version to Live
#Library import refactoring, ea4all-architecture, empty message
#Bring your own IT Landscape data: discontinued
#Added upload your Business Requirement
#Load user's APM - disabled 2024-06-22
#TEST E2E Togaf Agentic system 2024-06-24
#MIGRATION TO HF Open Source using TGI and Meta-Llama-3-8B-Instruct 2024-06-25
#ADDED GENERATE_ARCHITECTURE_RUNWAY diagram: graphviz 2024-07-03
#REFERENCE ARCHITECTURE DYNAMIC TABS 2024-07-05
#ADDED Business Query grader 2024-07-07
#RCA Togaf Supervisor: increase reliability 2024-07-08 - ISSUE FIXED BY NOW
#EA4ALL-agentic-system-container updated 2024-07-10
###APM Agentic system: 2024-07-25 - Safety check added
##Sub-graph node stream 2024-07-26
# Stream arbitrary nested content: https://langchain-ai.github.io/langgraph/how-tos/streaming-content/
## Prompt refinement task_router, user_question_routing, prompt_category 2024-07-27
## WebSearch Hallucination issue - recursion looping - solution: routing to route_question 2024-07-28
## Safety_check greetings, msgs, APM Sample Dataset 2024-07-29
# VQA issue - image not recognised 2024-07-30
# Constants IMAGES (Architecture, Overview) 2024-07-31
# About, QnA Examples moved to mock files 2024-08-01 - deployed to build
## 2024-08-03: VQA Streaming, Diagrams' EDGE nodes changed to END - one task at a time
## VQA Llama-3.2-11B-Vision-Instruct 2024-10-25
#RELEASE 2024-11-15
## CHANGES 2024-11-22
# MIGRATION to Gradio 5
# Chatbot UI migrated to gr.Chatbot
# run_qna_agentic_system, run_vqa_agentic_system updated: ChatMessage, chat_memory, UI events
# chat_memory VQA missing image - fixed - needs improvement
## RELEASE 2024-11-23
#pip freeze > requirements.txt to keep libraries synced local and HF Spaces
#gr.Image issue: caused by __main__ root_path=str(Path.cwd())
## RELEASE 2024-12-09
#Confluence Integration
#Llama-3.2-11B-Vision-Instruct max_token issue <=4096 still
#Safety-check refinement
#TOGAF Vision streaming
## Release update 2024-12-11
#EA4ALL Podcast
#2025-02-03 RELEASE V1
##RETROFIT & INTEGRATION w/ EA4ALL-dev-studio-structure
#2025-02-09
##UAT EA4ALL-LGS-RETRIEVER-REFACTORED
#2025-03-10
##AI-Assistant-UI-Message-Stream refactor
#2025-04-12
## Add EA4ALL-PMO-Demand-Management CrewAI Agents
#2025-05-06
## Add MCP Server
#2025-05-17
## Added PMO_MOCK_QNA examples,
## get_relevant_questions() - moved to utils, constants moved to configuration
#2025-05-19
## EA4ALL Diagram refactored, vqa_max_tokens updated
from langchain.callbacks.tracers import LangChainTracer
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain_community.document_loaders import ConfluenceLoader
from langchain_core.messages import HumanMessage
from langsmith import Client

from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
from ea4all.src.shared import vectorstore
from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
import ea4all.src.ea4all_apm.graph as e4a
import ea4all.src.ea4all_vqa.graph as e4v
import ea4all.src.ea4all_gra.graph as e4t
import ea4all.src.shared.utils as e4u
from ea4all.src.ea4all_indexer.graph import indexer_graph
from ea4all.src.graph import super_graph
#from ea4all.src.pmo_crew.crew_runner import run_pmo_crew

import uuid
import os
import pandas as pd

import gradio as gr
from gradio import ChatMessage
import time
from PIL import Image

#Set LangSmith project
tracer = LangChainTracer(project_name=os.getenv('LANGCHAIN_PROJECT'))

config = {
    "run_name": os.getenv('LANGCHAIN_RUNNAME'),
    "tags": [os.getenv('EA4ALL_ENV')],
    "callbacks": [tracer],
    "recursion_limit": 25,
    "configurable": {
        "thread_id": uuid.uuid4(),
    },
    "stream_mode": "messages"
}
#Blocks w/ ChatInterface, BYOD, About
with gr.Blocks(title="Your ArchitectGPT", fill_height=True, fill_width=True) as ea4all_mcp:

    agentic_pmo_desc="""
    Hi,
    I provide project resource estimation for architecture work based on business requirements, skillset,
    architect allocation, and any other relevant information to enable successful project solution delivery."""

    agentic_qna_desc="""
    Hi,
    improve efficiency and knowledge sharing, and get valuable insights from your IT landscape using natural language.
    As an Enterprise Architect Agentic System I can answer questions related to Enterprise Architecture and Technology, plus the following IT Landscape sample dataset: """

    agentic_vqa_desc="""
    Hi, talk to your Architecture Diagram using natural language. Gain rapid knowledge and insights by translating an image into a meaningful description.
    **Disclaimer**:
    - This feature should NOT BE USED to process inappropriate content, but ONLY FOR Architecture Diagrams
    """

    agentic_togaf_desc="""
    Hi,
    at the click of a button, create a reference architecture that serves as a blueprint for designing and implementing IT solutions.
    Standardise architecture solution development and increase its efficiency and productivity.
    Generate context-specific reference and minimal viable architectures to support business and IT strategy and digital transformation.
    Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
    """

    #ea4all-about
    def ea4all_about():
        readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
        return readme

    #Load demo business requirements
    def init_dbr():
        # Open the file in read mode ('r')
        with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
            # Read the contents of the file
            contents = file.read()
        return contents

    def init_df():
        return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))

    #load core-architecture image
    #fix the issue with gr.Image(path) inside a docker container
    def get_image(_image):
        #from PIL import Image
        # Load an image
        image = e4u._join_paths(BaseConfiguration.ea4all_images, _image)
        print(f"Full path: {image}")

        return image
    #ea4all-qna-agent-conversational-with-memory
    async def run_qna_agentic_system(prompt, chat_memory, request: gr.Request):

        format_response = ""

        if not prompt:
            format_response = "Hi, how are you today? To start our conversation, please chat your message!"
            chat_memory.append(ChatMessage(role="assistant", content=format_response))
            yield chat_memory

        if not chat_memory:
            chat_memory.append(ChatMessage(role="user", content=prompt))
            yield chat_memory

        if prompt:
            #capture user ip
            ea4all_user = e4u.get_user_identification(request)

            ##Initialise APM Graph
            #apm_graph = e4a.apm_graph
            #inputs = {"question": prompt, "chat_memory": chat_memory}
            inputs = {"messages": [{"role": "user", "content": prompt}]}

            #add prompt to memory
            chat_memory.append(ChatMessage(role="user", content=prompt))

            partial_message = ""
            async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
                #async for event in super_graph.astream(input=inputs, config=config, subgraphs=True):
                #    chat_memory.append(ChatMessage(role="assistant", content=str(event)))
                #    yield chat_memory

                kind = event["event"]
                tags = event.get("tags", [])
                name = event['name']

                #chat_memory.append(ChatMessage(role="assistant", content=f"Running: {name}"))
                #yield chat_memory

                if name == "safety_check":
                    #if kind == "on_chain_start":
                    #    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`"))
                    #    yield chat_memory
                    if kind == "on_chain_stream":
                        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {event['data']['chunk']['safety_status'][0]}"))
                        if event['data']['chunk']['safety_status'][0] == 'no':
                            chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {event['data']['chunk']['safety_status'][1]}"))
                        yield chat_memory
                if kind == "on_chain_end" and name == "route_question":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {event['data']['output']['source']}"))
                    yield chat_memory
                if kind == "on_chain_start" and name == "retrieve":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
                    yield chat_memory
                if kind == "on_chain_start" and name in ("generate_web_search", "websearch", "stream_generation"):
                    chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`\n\n"))
                    yield chat_memory
                if kind == "on_chain_stream" and name == "stream_generation":
                    data = event["data"]
                    # Accumulate the chunk of data
                    partial_message += data['chunk']
                    chat_memory[-1].content = partial_message
                    time.sleep(0.05)
                    yield chat_memory
                if name == "grade_generation_v_documents_and_question":
                    if kind == "on_chain_start":
                        chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
                        yield chat_memory
                    if kind == "on_chain_end":
                        chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data']['input'].source}:` {event['data']['output']}"))
                        yield chat_memory
                if "stream_hallucination" in tags and kind == "on_chain_start":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
                    yield chat_memory
                if "stream_grade_answer" in tags and kind == "on_chain_start":
                    chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
                    yield chat_memory
                if name == "supervisor":
                    if kind == "on_chain_start":
                        chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
                        yield chat_memory
                    if kind == "on_chain_stream":
                        chat_memory.append(ChatMessage(role="assistant", content=f"{event['data']['chunk']}"))
                        yield chat_memory

            os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
            wait_for_all_tracers()
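    # Note (not in the committed file): time.sleep(0.05) inside this async
    # generator blocks the event loop between chunks; an asyncio-friendly
    # pacing would be `await asyncio.sleep(0.05)` (plus `import asyncio`
    # at module level). The same applies to the handlers below.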
    #Trigger Solution Architecture Diagram QnA
    async def run_vqa_agentic_system(message, chat_memory, request: gr.Request):
        #capture user ip
        ea4all_user = e4u.get_user_identification(request)

        """Handle file uploads and validate their types."""
        allowed_file_types = ('JPEG', 'PNG')

        print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
        print(f"Prompt: {message}")

        if message['files'] == []:
            chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
            yield chat_memory
        else:
            diagram = message['files'][-1]  ##chat_memory[-1]['content'][-1]
            msg = message['text']  ##chat_memory[-2]['content']
            print(f"---DIAGRAM: {diagram}---")
            try:
                if msg == "":
                    msg = "Please describe this diagram."

                with Image.open(diagram) as diagram_:
                    if diagram_.format not in allowed_file_types:
                        chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
                        yield chat_memory
                    else:
                        #vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
                        vqa_image = diagram

                        #Setup Quality Assurance Agentic System
                        #graph = e4v.ea4all_graph(config['configurable']['vqa_model'])

                        #Setup enter graph
                        diagram_graph = e4v.diagram_graph

                        partial_message = ""
                        chat_memory.append(ChatMessage(role="assistant", content="Hi, I am working on your question..."))
                        async for event in diagram_graph.astream_events(
                            {"question": msg, "image": vqa_image}, config, version="v2"
                        ):
                            if (
                                event["event"] == "on_chat_model_stream"
                                and "vqa_stream" in event['tags']
                                #and event["metadata"].get("langgraph_node") == "tools"
                            ):
                                partial_message += event["data"]["chunk"].content
                                chat_memory[-1].content = partial_message
                                time.sleep(e4u.CFG.STREAM_SLEEP)
                                yield chat_memory  #, message to update prompt
                            elif not partial_message:
                                yield chat_memory  #, message

                        os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
                        wait_for_all_tracers()

            except Exception as e:
                yield (e.args[-1])
    #Run Togaf Agentic System
    async def run_reference_architecture_agentic_system(business_query, request: gr.Request):

        if len(business_query) < 50:
            agent_response = "Please provide a valid Business Requirement content to start!"
            yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
        else:
            plain_text = e4u.markdown_to_plain_text(business_query)
            agent_response = "Generating Architecture Blueprint ---TOGAF VISION TARGET--- \n\nI am working on your request..."
            togaf_chain = e4t.togaf_graph
            final_diagram = ""
            vision_message = ""
            try:
                async for s in togaf_chain.astream_events(
                    {
                        "messages": [
                            HumanMessage(
                                content=plain_text
                            )
                        ],
                        "business_query": business_query,
                    },
                    config=config,
                    version="v2"
                ):
                    kind = s["event"]
                    tags = s.get("tags", [])
                    name = s['name']

                    if "gra_stream" in tags and name == "stream_vision_target":
                        if kind == "on_chain_stream":
                            data = s["data"]
                            # Accumulate the chunk of data
                            vision_message += data['chunk'].content
                            time.sleep(e4u.CFG.STREAM_SLEEP)
                            yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
                    elif name == "save_diagram" and kind == 'on_chain_end':  #MOVED INTO Togaf_Task3
                        final_diagram = s['data']['output']['architecture_runway']
                    elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start':  ##'on_chat_model_stream':
                        agent_response += f"\n\n`{tags[-1]}:{name}`"

                        yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])

                if vision_message == "":
                    agent_response = "I cannot generate the Architecture Vision. Please provide a valid Business Requirement content to start!"
                    yield([agent_response, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, None, gr.Tabs(visible=False)])
                elif "Error" not in final_diagram:
                    yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), final_diagram, None, gr.Tabs(visible=True)])
                else:
                    yield([vision_message, gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"), None, final_diagram, gr.Tabs(visible=True)])

            except Exception as e:
                yield(
                    [
                        e.args[-1],
                        gr.Tabs(visible=True),
                        gr.Tabs(selected="id_togaf"),
                        None,
                        None,
                        gr.Tabs(visible=False)
                    ]
                )
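    # Note (not in the committed file): the six-element lists yielded above map
    # positionally to the event outputs wired later in this file:
    # [togaf_vision, tabs_togaf, tabs_reference_architecture,
    #  architecture_runway, diagram_header, tab_diagram].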
    async def run_pmo_agentic_system(prompt, chat_memory):
        """
        Answer a question about Project Portfolio Management and Architect Demand Management.

        Args:
            prompt (str): The project portfolio user question
            chat_memory (list): The tool message history

        Returns:
            str: A summary answering the user question
        """
        format_response = ""

        if not prompt:
            format_response = "Hi, how are you today? To start our conversation, please chat your message!"
            chat_memory.append(ChatMessage(role="assistant", content=format_response))
            yield chat_memory

        if not chat_memory:
            chat_memory.append(ChatMessage(role="user", content=prompt))
            yield chat_memory

        inputs = {
            "question": prompt,
            "verbose": True,  # optional flags
        }

        #yield run_pmo_crew(inputs)
    def ea4all_confluence():

        #Confluence API Key
        confluence_api_key = os.environ['CONFLUENCE_API_KEY']

        loader = ConfluenceLoader(
            url="https://learnitall.atlassian.net/wiki", username="learn-it-all@outlook.com", api_key=confluence_api_key,
            space_key="~71202000cd55f36336455f8c07afa1860ba810",
            include_attachments=False, limit=10,
            keep_markdown_format=True
        )

        documents = loader.load()

        data = {
            "title": [doc.metadata["title"] for doc in documents],
            "source": [doc.metadata["source"] for doc in documents],
            "page_content": [doc.page_content for doc in documents],
        }

        df = pd.DataFrame(data)

        return df

    def filter_page(page_list, title):
        x = page_list[page_list["title"] == title]
        return x.iloc[0]['page_content']
    #EA4ALL-Agentic system menu
    with gr.Tabs(selected="how_to") as tabs:
        with gr.Tab(label="Architect Demand Management"):
            with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
                ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
                pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                with gr.Accordion("Open for prompt examples", open=False):
                    pmo_examples = gr.Dropdown(e4u.load_mock_content(PMO_MOCK_QNA), value=None, label="Questions", interactive=True)
                gr.ClearButton([pmo_chatbot, pmo_prompt], value="Clear", size="sm", visible=False)
            with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
                pmo_df = gr.Dataframe()
        with gr.Tab(label="Application Landscape QnA"):
            with gr.Tabs() as tabs_apm_qna:
                with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
                    ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
                    ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                    qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                    with gr.Accordion("Open for prompt examples", open=False):
                        qna_examples = gr.Dropdown(e4u.load_mock_content(APM_MOCK_QNA), value=None, label="Questions", interactive=True)
                    gr.ClearButton([ea4all_chatbot, qna_prompt], value="Clear", size="sm", visible=False)
                with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
                    apm_df = gr.Dataframe()
        with gr.Tab(label="Diagram Question and Answering"):
            gr.Markdown(value=agentic_vqa_desc)
            ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
            vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
            with gr.Accordion("Open for prompt examples", open=False):
                vqa_examples = gr.Dropdown(e4u.get_vaq_examples(), value=None, label="Diagram and Questions", interactive=True)
            gr.ClearButton([ea4all_vqa, vqa_prompt, vqa_examples], value="Clear", size="sm", visible=True)
        with gr.Tab(label="Reference Architecture", id="id_refarch"):
            with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
                with gr.Tab(label='Business Requirement', id="id_dbr"):
                    gr.Markdown(value=agentic_togaf_desc)
                    dbr_text = gr.TextArea(value=init_dbr, lines=14, interactive=True)
                    with gr.Row():
                        dbr_file = gr.File(
                            value=e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock),
                            label="Business Requirement",
                            height=35,
                            show_label=False,
                            file_count="single",
                            file_types=['text'],
                            interactive=True,
                            type='binary'
                        )
                        dbr_run = gr.Button(scale=None, value="Run Reference Architecture")
                        dbr_cls = gr.ClearButton([dbr_file, dbr_text])
                with gr.Tab(label='Confluence Integration', id="id_confluence"):
                    confluence_list = gr.Dropdown(value=None, label="Confluence Pages", interactive=True)
                    confluence_df = gr.DataFrame(visible=False, headers=["title", "source", "page_content"])
                    @gr.render(inputs=[confluence_list, confluence_df])
                    def show_page(page, df):
                        if page:
                            with gr.Row():
                                with gr.Column():
                                    dbr_confluence = gr.Button(scale=None, value="Run Reference Architecture")
                                with gr.Column():
                                    btn = gr.Button("Clear")
                            with gr.Row(variant='default', show_progress=True):
                                page_content = gr.Markdown(filter_page(df, page), line_breaks=True)
                            btn.click(lambda: gr.Dropdown(value=None), None, confluence_list)
                            dbr_confluence.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[page_content], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])
                with gr.Tab(label='Reference Architecture', visible=False, id="id_togaf") as tabs_togaf:
                    togaf_vision = gr.Markdown(value='### Reference Architecture: Vision and Target')
                with gr.Tab(label="Target Architecture Runway", visible=False, id="id_runway") as tab_diagram:
                    diagram_header = gr.Markdown(visible=True)
                    architecture_runway = gr.Image(label="Target Architecture Runway", interactive=False, visible=True, scale=10)
        with gr.Tab(label="Overview", id="how_to"):
            gr.Markdown(
                """
                # Title

                **Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` to positively impact individuals and organisations.

                ## Overview
                """
            )
            gr.Image(
                get_image(e4u.CFG.EA4ALL_ARCHITECTURE),
                show_download_button=False,
                container=False,
                show_share_button=False,
            )
            gr.Markdown(
                """
                ## Journey

                Audio overview summarising the key learnings, challenges, so-whats, and stats from day one to the last sprint. (**Powered by Google NotebookLM**)
                """
            )
            podcast = gr.Audio(
                type="filepath",
                value=os.path.join(BaseConfiguration.ea4all_store, e4u.CFG.EA4ALL_PODCAST),
                label="EA4ALL Journey Podcast",
                show_download_button=False,
                autoplay=False,
                container=True,
                interactive=False,
            )
            gr.Markdown(ea4all_about)
    #get LLM response user's feedback
    def get_user_feedback(evt: gr.SelectData, request: gr.Request):
        ##{evt.index} {evt.value} {evt._data['liked']}
        try:
            uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".", "_")]
            gr.Info("Thanks for your feedback - run_id: " + uuid_str)
            run_id = uuid.UUID(uuid_str)
            client = Client()
            client.create_feedback(
                run_id,
                key="feedback-key",
                score=1.0 if evt._data['liked'] == True else 0,
                comment=str(evt.value)
            )
        except Exception as e:
            gr.Warning(f"Couldn't capture a feedback: {e}")

    #Set initial state of apm, llm and capture user-ip
    async def ea4all_agent_init(request: gr.Request):

        #capture user IP address
        #ea4all_user = e4u.get_user_identification(request)
        gr.Info("Thank you for connecting! I'd love to hear your feedback! Thumbs up or Thumbs down. LinkedIn comment.")

        # Set initial landscape vectorstore
        await indexer_graph.ainvoke(input={"docs": []}, config=config)

        #set chatbot description w/ user apm columns
        df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
        columns_string = ', '.join(df.columns)
        apm_columns = agentic_qna_desc + columns_string

        prompt = ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')

        page_list = ea4all_confluence()

        #Load gradio.dataframe with Portfolio sample dataset
        pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")

        return (
            apm_columns,
            [prompt],
            [prompt],
            [prompt],
            gr.Dropdown(choices=page_list['title'].values.tolist()),
            gr.DataFrame(value=page_list),
            gr.DataFrame(value=df),
            gr.DataFrame(value=pmo_df),
        )
    #authentication
    def ea4all_login(username, password):
        return (username == password)

    #TABS & Reference Architecture look-and-feel control
    def off_dbrtext():
        return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

    def on_dbrtext(file):
        if file:
            return gr.TextArea(visible=True)
        return gr.TextArea(visible=False)

    #Upload & clear business requirement
    def load_dbr(file):
        return file.decode()

    def unload_dbr():
        return gr.TextArea(visible=False)

    #Podcast upload progress
    podcast.change(show_progress='full')

    #Togaf upload file
    dbr_file.clear(unload_dbr, outputs=dbr_text)
    dbr_file.change(on_dbrtext, inputs=dbr_file, outputs=dbr_text)
    dbr_file.upload(load_dbr, inputs=dbr_file, outputs=dbr_text)
    dbr_cls.click(off_dbrtext, outputs=[dbr_text, tabs_togaf, tab_diagram])

    #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
    qna_prompt.submit(run_qna_agentic_system, [qna_prompt, ea4all_chatbot], ea4all_chatbot)
    qna_prompt.submit(lambda: "", None, [qna_prompt])
    ea4all_chatbot.like(fn=get_user_feedback)
    qna_examples.input(lambda value: value, qna_examples, qna_prompt)

    #Execute Reference Architecture
    dbr_run.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[dbr_text], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])

    #vqa_chatbot (ChatInterface -> Chatbot)
    def add_message(message, history):
        if message["text"] is not None:
            history.append({"role": "user", "content": message["text"]})

        if len(message['files']) > 0:
            history.append({"role": "user", "content": {"path": message['files'][-1]}})

        return (
            gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
            history
        )

    chat_msg = vqa_prompt.submit(add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa])
    bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="bot_response")

    ea4all_vqa.like(fn=get_user_feedback)
    vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)

    #Invoke CrewAI PMO Agentic System
    pmo_prompt.submit(run_pmo_agentic_system, [pmo_prompt, pmo_chatbot], pmo_chatbot)
    pmo_prompt.submit(lambda: "", None, [pmo_prompt])
    pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)

    #Set initial state of apm and llm
    ea4all_mcp.load(ea4all_agent_init, outputs=[ea4all_agent_metadata, ea4all_chatbot, ea4all_vqa, pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df])
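The three streaming handlers in the file above (QnA, VQA, and TOGAF) share one dispatch pattern: consume LangChain's astream_events(version="v2") stream and branch on the event kind and the node name. A minimal, self-contained sketch of that pattern, with a toy LangGraph standing in for super_graph / diagram_graph / togaf_graph (the toy state and nodes are illustrative, not from this repo):

import asyncio
from typing import TypedDict

from langgraph.graph import StateGraph, START, END

class State(TypedDict):
    question: str
    answer: str

def retrieve(state: State) -> dict:
    # stand-in for the real RAG node
    return {"answer": f"context for: {state['question']}"}

def generate(state: State) -> dict:
    # stand-in for the real generation node
    return {"answer": state["answer"].upper()}

builder = StateGraph(State)
builder.add_node("retrieve", retrieve)
builder.add_node("generate", generate)
builder.add_edge(START, "retrieve")
builder.add_edge("retrieve", "generate")
builder.add_edge("generate", END)
graph = builder.compile()

async def main() -> None:
    async for event in graph.astream_events({"question": "hi", "answer": ""}, version="v2"):
        kind, name = event["event"], event["name"]  # the same fields run_qna_agentic_system reads
        if kind == "on_chain_start" and name == "retrieve":
            print(f"- `{name}` started")
        if kind == "on_chain_end" and name == "generate":
            print(f"- `{name}`: {event['data']['output']}")

asyncio.run(main())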
ea4all/ea4all_mcp.py
CHANGED
@@ -1,28 +1,655 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
2 |
|
3 |
-
|
4 |
-
|
5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
letter (str): The letter to search for
|
10 |
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
"""
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#Added agentic-workflow-collaboration-agents
|
2 |
+
#Multimodal ChatInterface - not working
|
3 |
+
#Added new QA Tab
|
4 |
+
#Added new agent Well-Architected
|
5 |
+
#Added Supervisor Agent workflow
|
6 |
+
#ISSUE with VQA fixed
|
7 |
+
#LLMChain refactored
|
8 |
+
#Updated with changes as result of ea4all_agent Gradio Space deployment issues
|
9 |
+
#VQA Safeguardings - JPEG, PNG images only
|
10 |
+
#Deployed version to Live
|
11 |
+
#Library import refactoring, ea4all-architecture, empty message
|
12 |
+
#Bring your own IT Landscape data: discontinued
|
13 |
+
#Added upload your Business Requirement
|
14 |
+
#Load user's APM - disabled 2024-06-22
|
15 |
+
#TEST E2E Togaf Agentic system 2024-06-24
|
16 |
+
#MIGRATION TO HF Open Source using TGI and Meta-Llama-3-8B-Instruct 2024-06-25
|
17 |
+
#ADDED GENERATE_ARCHITECTURE_RUNWAY diagram: graphviz 2024-07-03
|
18 |
+
#REFERENCE ARCHITECTURE DYNAMIC TABS 2024-07-05
|
19 |
+
#ADDED Business Query grader 2024-07-07
|
20 |
+
#RCA Togaf Supervisor: increase reliability 2024-07-08 - ISSUE FIXED BY NOW
|
21 |
+
#EA4ALL-agentic-system-container updated 2024-07-10
|
22 |
+
###APM Agentic system: 2024-07-25 - Safety check added
|
23 |
+
##Sub-graph node stream 204-07-26
|
24 |
+
# Stream arbitrary nested content: https://langchain-ai.github.io/langgraph/how-tos/streaming-content/
|
25 |
+
## Prompt refinement task_router, user_question_routing, prompt_category 2024-07-27
|
26 |
+
## WebSearch Hallucination issue - recursion looping - solution: routing to route_question 2024-07-28
|
27 |
+
## Safety_check greetings, msgs, APM Sample Dataset 2024-09-29
|
28 |
+
# VQA issue - image not recognised 2024-07-30
|
29 |
+
# Constants IMAGES (Architecture, Overview) 2024-07-31
|
30 |
+
# About, QnA Examples moved to mock files 2024-08-01 - deployed to build
|
31 |
+
## 2024-08-03: VQA Streaming, Diagrams' EDGE nodes changed to END - one task at a time: 2024-08-03
|
32 |
+
## VQA Llama-3.2-11B-Vision-Instruct 2024-10-25
|
33 |
+
#RELEASE 2024-11-15
|
34 |
+
## CHANGES 2024-11-22
|
35 |
+
# MIGRATION to Gradio 5
|
36 |
+
# Chatbot UI migrated to gr.Chatbot
|
37 |
+
# run_qna_agentic_system, run_vqa_agentic_system updated: ChatMessage, chat_memory, UI events
|
38 |
+
# chat_memory VQA missing image - fixed - needs improvement
|
39 |
+
## RELEASE 2024-11-23
|
40 |
+
#pip freeze > requirements.txt to keep libraries synched local and HF Spaces
|
41 |
+
#gr.Image issue: caused by __main__ root_path=str(Path.cwd())
|
42 |
+
## RELEASE 2024-12-09
|
43 |
+
#Confluence Integration
|
44 |
+
#Llama-3.2-11B-Vision-Instruct max_token issue <=4096 stills
|
45 |
+
#Safety-check refinement
|
46 |
+
#TOGAF Vision streaming
|
47 |
+
## Release update 2024-12-11
|
48 |
+
#EA4ALL Podcast
|
49 |
+
#2025-02-03 RELEASE V1
|
50 |
+
##RETROFIT & INTEGRATION w/ EA4ALL-dev-studio-structure
|
51 |
+
#2025-02-09
|
52 |
+
##UAT EA4ALL-LGS-RETRIEVER-REFACTORED
|
53 |
+
#2025-03-10
|
54 |
+
##AI-Assistant-UI-Message-Stream refactor
|
55 |
+
#2025-12-04
|
56 |
+
## Add EA4ALL-PMO-Demand-Management CrewAI Agents
|
57 |
+
#2025-05-06
|
58 |
+
## Add MCP Server
|
59 |
+
#2025-05-17
|
60 |
+
## Added PMO_MOCK_QNA examples,
|
61 |
+
## get_relevant_questions() - moved to utils, constants moved to configuration
|
62 |
+
#2025-05-19
|
63 |
+
## EA4ALL Diagram refactored, vqa_max_tokens updated
|
64 |
+
from langchain.callbacks.tracers import LangChainTracer
|
65 |
+
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
|
66 |
+
from langchain_community.document_loaders import ConfluenceLoader
|
67 |
+
from langchain_core.messages import HumanMessage
|
68 |
+
from langsmith import Client
|
69 |
+
|
70 |
+
from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
|
71 |
+
from ea4all.src.shared import vectorstore
|
72 |
+
from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
|
73 |
+
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
|
74 |
+
import ea4all.src.ea4all_apm.graph as e4a
|
75 |
+
import ea4all.src.ea4all_vqa.graph as e4v
|
76 |
+
import ea4all.src.ea4all_gra.graph as e4t
|
77 |
+
import ea4all.src.shared.utils as e4u
|
78 |
+
from ea4all.src.ea4all_indexer.graph import indexer_graph
|
79 |
+
from ea4all.src.graph import super_graph
|
80 |
+
#from ea4all.src.pmo_crew.crew_runner import run_pmo_crew
|
81 |
+
|
82 |
+
import uuid
|
83 |
+
import os
|
84 |
+
import pandas as pd
|
85 |
+
|
86 |
import gradio as gr
|
87 |
+
from gradio import ChatMessage
|
88 |
+
import time
|
89 |
+
from PIL import Image
|
90 |
|
91 |
+
#Set LangSmith project
|
92 |
+
tracer = LangChainTracer(project_name=os.getenv('LANGCHAIN_PROJECT'))
|
93 |
+
|
94 |
+
config = {
|
95 |
+
"run_name": os.getenv('LANGCHAIN_RUNNAME'),
|
96 |
+
"tags": [os.getenv('EA4ALL_ENV')],
|
97 |
+
"callbacks":[tracer],
|
98 |
+
"recursion_limit": 25,
|
99 |
+
"configurable": {
|
100 |
+
"thread_id": uuid.uuid4(),
|
101 |
+
},
|
102 |
+
"stream_mode": "messages"
|
103 |
+
}
|
104 |
|
105 |
+
#Blocks w/ ChatInterface, BYOD, About
|
106 |
+
with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
|
|
|
107 |
|
108 |
+
agentic_pmo_desc="""
|
109 |
+
Hi,
|
110 |
+
Provide project resource estimation for architecture work based on business requirements, skillset,
|
111 |
+
architects allocation, and any other relevant information to enable successful project solution delivery."""
|
112 |
+
|
113 |
+
agentic_qna_desc="""
|
114 |
+
Hi,
|
115 |
+
improve effieciency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
|
116 |
+
As an Enterprise Architect Agentic System I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """
|
117 |
+
|
118 |
+
agentic_vqa_desc="""
|
119 |
+
Hi, talk to your Architecture Diagram using natural language. Gain rapid knowledge and insights translating image to meaningful description.
|
120 |
+
**Disclaimer**:
|
121 |
+
- This feature should NOT BE USED to process inappropriate content, but ONLY FOR Architecture Diagrams
|
122 |
+
"""
|
123 |
+
|
124 |
+
agentic_togaf_desc="""
|
125 |
+
Hi,
|
126 |
+
in a click of button create a reference architecture that serves as a blueprint for designing and implementing IT solutions.
|
127 |
+
Standardise, increase efficiency and productivity to architecture solution development.
|
128 |
+
Generate context-specific reference and minimal viable architectures to support business and IT strategy and digital transformation.
|
129 |
+
Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
|
130 |
"""
|
131 |
+
|
132 |
+
#ea4all-about
|
133 |
+
|
134 |
+
def ea4all_about():
|
135 |
+
readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
|
136 |
+
return readme
|
137 |
+
|
138 |
+
#Load demo business requirements
|
139 |
+
def init_dbr():
|
140 |
+
# Open the file in read mode ('r')
|
141 |
+
with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
|
142 |
+
# Read the contents of the file
|
143 |
+
contents = file.read()
|
144 |
+
return contents
|
145 |
+
|
146 |
+
def init_df():
|
147 |
+
return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
|
148 |
+
|
149 |
+
#load core-architecture image
|
150 |
+
#fix the issue with gr.Image(path) inside a docker containder
|
151 |
+
def get_image(_image):
|
152 |
+
#from PIL import Image
|
153 |
+
# Load an image
|
154 |
+
image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)
|
155 |
+
print(f"Full path: {image}")
|
156 |
+
|
157 |
+
return image
|
158 |
+
|
159 |
+
#ea4all-qna-agent-conversational-with-memory
|
160 |
+
async def run_qna_agentic_system(prompt, chat_memory, request:gr.Request):
|
161 |
+
|
162 |
+
format_response = ""
|
163 |
+
|
164 |
+
if not prompt:
|
165 |
+
format_response = "Hi, how are you today? To start our conversation, please chat your message!"
|
166 |
+
chat_memory.append(ChatMessage(role="assistant", content=format_response))
|
167 |
+
yield chat_memory
|
168 |
+
|
169 |
+
if not chat_memory:
|
170 |
+
chat_memory.append(ChatMessage(role="user", content=prompt))
|
171 |
+
yield chat_memory
|
172 |
+
|
173 |
+
if prompt:
|
174 |
+
#capture user ip
|
175 |
+
ea4all_user = e4u.get_user_identification(request)
|
176 |
+
|
177 |
+
##Initialise APM Graph
|
178 |
+
#apm_graph = e4a.apm_graph
|
179 |
+
#inputs = {"question": prompt, "chat_memory":chat_memory}
|
180 |
+
inputs = {"messages": [{"role": "user", "content": prompt}]}
|
181 |
+
|
182 |
+
#add prompt to memory
|
183 |
+
chat_memory.append(ChatMessage(role="user", content=prompt))
|
184 |
+
|
185 |
+
partial_message = ""
|
186 |
+
async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
|
187 |
+
#async for event in super_graph.astream(input=inputs, config=config, subgraphs=True):
|
188 |
+
# chat_memory.append(ChatMessage(role="assistant", content=str(event)))
|
189 |
+
# yield chat_memory
|
190 |
+
|
191 |
+
kind = event["event"]
|
192 |
+
tags = event.get("tags", [])
|
193 |
+
name = event['name']
|
194 |
+
|
195 |
+
#chat_memory.append(ChatMessage(role="assistant", content=f"Running: {name}"))
|
196 |
+
#yield chat_memory
|
197 |
+
|
198 |
+
if name == "safety_check":
|
199 |
+
#if kind == "on_chain_start":
|
200 |
+
# chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`"))
|
201 |
+
# yield chat_memory
|
202 |
+
if kind == "on_chain_stream":
|
203 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {event['data']['chunk']['safety_status'][0]}"))
|
204 |
+
if event['data']['chunk']['safety_status'][0] == 'no':
|
205 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {event['data']['chunk']['safety_status'][1]}"))
|
206 |
+
yield chat_memory
|
207 |
+
if kind == "on_chain_end" and name == "route_question":
|
208 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {event['data']['output']['source']}"))
|
209 |
+
yield chat_memory
|
210 |
+
if kind == "on_chain_start" and name == "retrieve":
|
211 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
|
212 |
+
yield chat_memory
|
213 |
+
if kind == "on_chain_start" and name in ("generate_web_search", "websearch", "stream_generation"):
|
214 |
+
chat_memory.append(ChatMessage(role="assistant", content= f"\n\n- `{name}`\n\n"))
|
215 |
+
yield chat_memory
|
216 |
+
if kind == "on_chain_stream" and name == "stream_generation":
|
217 |
+
data = event["data"]
|
218 |
+
# Accumulate the chunk of data
|
219 |
+
partial_message += data['chunk']
|
220 |
+
chat_memory[-1].content = partial_message
|
221 |
+
time.sleep(0.05)
|
222 |
+
yield chat_memory
|
223 |
+
if name == "grade_generation_v_documents_and_question":
|
224 |
+
if kind == "on_chain_start":
|
225 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
|
226 |
+
yield chat_memory
|
227 |
+
if kind == "on_chain_end":
|
228 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data']['input'].source}:` {event['data']['output']}"))
|
229 |
+
yield chat_memory
|
230 |
+
if "stream_hallucination" in tags and kind == "on_chain_start":
|
231 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
|
232 |
+
yield chat_memory
|
233 |
+
if "stream_grade_answer" in tags and kind == "on_chain_start":
|
234 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
|
235 |
+
yield chat_memory
|
236 |
+
if name == "supervisor":
|
237 |
+
if kind == "on_chain_start":
|
238 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
|
239 |
+
yield chat_memory
|
240 |
+
if kind == "on_chain_stream":
|
241 |
+
chat_memory.append(ChatMessage(role="assistant", content=f"{event['data']['chunk']}"))
|
242 |
+
yield chat_memory
|
243 |
+
|
244 |
+
os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
|
245 |
+
wait_for_all_tracers()
|
246 |
+
|
247 |
+
#Trigger Solution Architecture Diagram QnA
|
248 |
+
async def run_vqa_agentic_system(message, chat_memory, request:gr.Request):
|
249 |
+
#capture user ip
|
250 |
+
ea4all_user = e4u.get_user_identification(request)
|
251 |
+
|
252 |
+
"""Handle file uploads and validate their types."""
|
253 |
+
allowed_file_types = ('JPEG', 'PNG')
|
254 |
+
|
255 |
+
print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
|
256 |
+
print(f"Prompt: {message}")
|
257 |
+
|
258 |
+
if message['files'] == []:
|
259 |
+
chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
|
260 |
+
yield chat_memory
|
261 |
+
else:
|
262 |
+
diagram = message['files'][-1] ##chat_memory[-1]['content'][-1]
|
263 |
+
msg = message['text'] ##chat_memory[-2]['content']
|
264 |
+
print(f"---DIAGRAM: {diagram}---")
|
265 |
+
try:
|
266 |
+
if msg == "":
|
267 |
+
msg = "Please describe this diagram."
|
268 |
+
|
269 |
+
with Image.open(diagram) as diagram_:
|
270 |
+
if diagram_.format not in allowed_file_types:
|
271 |
+
chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
|
272 |
+
yield chat_memory
|
273 |
+
else:
|
274 |
+
#'vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
|
275 |
+
vqa_image = diagram
|
276 |
+
|
277 |
+
#Setup Quality Assurance Agentic System
|
278 |
+
#graph = e4v.ea4all_graph(config['configurable']['vqa_model'])
|
279 |
+
|
280 |
+
#Setup enter graph
|
281 |
+
diagram_graph = e4v.diagram_graph
|
282 |
+
|
283 |
+
partial_message = ""
|
284 |
+
chat_memory.append(ChatMessage(role="assistant", content="Hi, I am working on your question..."))
|
285 |
+
async for event in diagram_graph.astream_events(
|
286 |
+
{"question":msg, "image": vqa_image}, config, version="v2"
|
287 |
+
):
|
288 |
+
if (
|
289 |
+
event["event"] == "on_chat_model_stream"
|
290 |
+
and "vqa_stream" in event['tags']
|
291 |
+
#and event["metadata"].get("langgraph_node") == "tools"
|
292 |
+
):
|
293 |
+
partial_message += event["data"]["chunk"].content
|
294 |
+
chat_memory[-1].content = partial_message
|
295 |
+
time.sleep(e4u.CFG.STREAM_SLEEP)
|
296 |
+
yield chat_memory #, message to update prompt
|
297 |
+
elif not partial_message:
|
298 |
+
yield chat_memory #, message
|
299 |
+
|
300 |
+
os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
|
301 |
+
wait_for_all_tracers()
|
302 |
+
|
303 |
+
except Exception as e:
|
304 |
+
yield (e.args[-1])
|
305 |
+
|
306 |
+
#Run Togaf Agentic System
|
307 |
+
async def run_reference_architecture_agentic_system(business_query, request:gr.Request):
|
308 |
+
|
309 |
+
if len(business_query) < 50:
|
310 |
+
agent_response = "Please provide a valid Business Requirement content to start!"
|
311 |
+
yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
|
312 |
+
else:
|
313 |
+
plain_text = e4u.markdown_to_plain_text(business_query)
|
314 |
+
agent_response = "Generating Architecture Blueprint ---TOGAF VISION TARGET--- \n\nI am working on your request..."
|
315 |
+
togaf_chain = e4t.togaf_graph
|
316 |
+
final_diagram = ""
|
317 |
+
vision_message = ""
|
318 |
+
try:
|
319 |
+
async for s in togaf_chain.astream_events(
|
320 |
+
{
|
321 |
+
"messages": [
|
322 |
+
HumanMessage(
|
323 |
+
content=plain_text
|
324 |
+
)
|
325 |
+
],
|
326 |
+
"business_query": business_query,
|
327 |
+
},
|
328 |
+
config=config,
|
329 |
+
version="v2"
|
330 |
+
):
|
331 |
+
kind = s["event"]
|
332 |
+
tags = s.get("tags", [])
|
333 |
+
name = s['name']
|
334 |
+
|
335 |
+
if "gra_stream" in tags and name == "stream_vision_target":
|
336 |
+
if kind == "on_chain_stream":
|
337 |
+
data = s["data"]
|
338 |
+
# Accumulate the chunk of data
|
339 |
+
vision_message += data['chunk'].content
|
340 |
+
time.sleep(e4u.CFG.STREAM_SLEEP)
|
341 |
+
yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
|
342 |
+
elif name == "save_diagram" and kind == 'on_chain_end': #MOVED INTO Togaf_Task3
|
343 |
+
final_diagram = s['data']['output']['architecture_runway']
|
344 |
+
elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start': ##'on_chat_model_stream':
|
345 |
+
agent_response += f"\n\n`{tags[-1]}:{name}`"
|
346 |
+
|
347 |
+
yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
|
348 |
+
|
349 |
+
if vision_message=="":
|
350 |
+
agent_response = "I cannot generate the Architecture Vision. Please provide a valid Business Requirement content to start!"
|
351 |
+
yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
|
352 |
+
elif "Error" not in final_diagram:
|
353 |
+
yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),final_diagram, None, gr.Tabs(visible=True)])
|
354 |
+
else:
|
355 |
+
yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, final_diagram, gr.Tabs(visible=True)])
|
356 |
+
|
357 |
+
except Exception as e:
|
358 |
+
yield(
|
359 |
+
[
|
360 |
+
e.args[-1],
|
361 |
+
gr.Tabs(visible=True),
|
362 |
+
gr.Tabs(selected="id_togaf"),
|
363 |
+
None,
|
364 |
+
None,
|
365 |
+
gr.Tabs(visible=False)
|
366 |
+
]
|
367 |
+
)
|
368 |
+
|
369 |
+
async def run_pmo_agentic_system(prompt, chat_memory):
|
370 |
+
"""
|
371 |
+
Answer a question about Project Portfolio Management and Architect Demand Management.
|
372 |
+
|
373 |
+
Args:
|
374 |
+
prompt (str): The propject portfolio user question
|
375 |
+
chat_memory (list): The tool message history
|
376 |
+
|
377 |
+
Returns:
|
378 |
+
str: A summary answering the user question
|
379 |
+
"""
|
380 |
+
format_response = ""
|
381 |
+
|
382 |
+
if not prompt:
|
383 |
+
format_response = "Hi, how are you today? To start our conversation, please chat your message!"
|
384 |
+
chat_memory.append(ChatMessage(role="assistant", content=format_response))
|
385 |
+
yield chat_memory
|
386 |
+
|
387 |
+
if not chat_memory:
|
388 |
+
chat_memory.append(ChatMessage(role="user", content=prompt))
|
389 |
+
yield chat_memory
|
390 |
+
|
391 |
+
inputs = {
|
392 |
+
"question": prompt,
|
393 |
+
"verbose": True, # optional flags
|
394 |
+
}
|
395 |
+
|
396 |
+
#yield run_pmo_crew(inputs)
|
397 |
+
|
398 |
+

    def ea4all_confluence():

        #Confluence API Key
        confluence_api_key = os.environ['CONFLUENCE_API_KEY']

        loader = ConfluenceLoader(
            url="https://learnitall.atlassian.net/wiki", username="learn-it-all@outlook.com", api_key=confluence_api_key,
            space_key="~71202000cd55f36336455f8c07afa1860ba810",
            include_attachments=False, limit=10,
            keep_markdown_format=True
        )

        documents = loader.load()

        data = {
            "title": [doc.metadata["title"] for doc in documents],
            "source": [doc.metadata["source"] for doc in documents],
            "page_content": [doc.page_content for doc in documents],
        }

        df = pd.DataFrame(data)

        return df

    def filter_page(page_list, title):
        x = page_list[page_list["title"] == title]
        return x.iloc[0]['page_content']
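
    # Usage sketch (assumes CONFLUENCE_API_KEY is set and the space above is reachable):
    #   pages = ea4all_confluence()                        # DataFrame: title, source, page_content
    #   body = filter_page(pages, pages["title"].iloc[0])  # markdown body of the first page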

    #EA4ALL-Agentic system menu
    with gr.Tabs(selected="how_to") as tabs:
        with gr.Tab(label="Architect Demand Management"):
            with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
                ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
                pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                with gr.Accordion("Open for prompt examples", open=False):
                    pmo_examples = gr.Dropdown(e4u.load_mock_content(PMO_MOCK_QNA), value=None, label="Questions", interactive=True)
                gr.ClearButton([pmo_chatbot, pmo_prompt], value="Clear", size="sm", visible=False)
            with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
                pmo_df = gr.Dataframe()
        with gr.Tab(label="Application Landscape QnA"):
            with gr.Tabs() as tabs_apm_qna:
                with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
                    ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
                    ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
                    qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
                    with gr.Accordion("Open for prompt examples", open=False):
                        qna_examples = gr.Dropdown(e4u.load_mock_content(APM_MOCK_QNA), value=None, label="Questions", interactive=True)
                    gr.ClearButton([ea4all_chatbot, qna_prompt], value="Clear", size="sm", visible=False)
                with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
                    apm_df = gr.Dataframe()
        with gr.Tab(label="Diagram Question and Answering"):
            gr.Markdown(value=agentic_vqa_desc)
            ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
            vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
            with gr.Accordion("Open for prompt examples", open=False):
                vqa_examples = gr.Dropdown(e4u.get_vaq_examples(), value=None, label="Diagram and Questions", interactive=True)
            gr.ClearButton([ea4all_vqa, vqa_prompt, vqa_examples], value="Clear", size="sm", visible=True)
        with gr.Tab(label="Reference Architecture", id="id_refarch"):
            with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
                with gr.Tab(label='Business Requirement', id="id_dbr"):
                    gr.Markdown(value=agentic_togaf_desc)
                    dbr_text = gr.TextArea(value=init_dbr, lines=14, interactive=True)
                    with gr.Row():
                        dbr_file = gr.File(
                            value=e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock),
                            label="Business Requirement",
                            height=35,
                            show_label=False,
                            file_count="single",
                            file_types=['text'],
                            interactive=True,
                            type='binary'
                        )
                        dbr_run = gr.Button(scale=None, value="Run Reference Architecture")
                        dbr_cls = gr.ClearButton([dbr_file, dbr_text])
                with gr.Tab(label='Confluence Integration', id="id_confluence"):
                    confluence_list = gr.Dropdown(value=None, label="Confluence Pages", interactive=True)
                    confluence_df = gr.DataFrame(visible=False, headers=["title", "source", "page_content"])

                    @gr.render(inputs=[confluence_list, confluence_df])
                    def show_page(page, df):
                        if page:
                            with gr.Row():
                                with gr.Column():
                                    dbr_confluence = gr.Button(scale=None, value="Run Reference Architecture")
                                with gr.Column():
                                    btn = gr.Button("Clear")
                            with gr.Row(variant='default', show_progress=True):
                                page_content = gr.Markdown(filter_page(df, page), line_breaks=True)
                            btn.click(lambda: gr.Dropdown(value=None), None, confluence_list)
                            dbr_confluence.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[page_content], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])
                with gr.Tab(label='Reference Architecture', visible=False, id="id_togaf") as tabs_togaf:
                    togaf_vision = gr.Markdown(value='### Reference Architecture: Vision and Target')
                with gr.Tab(label="Target Architecture Runway", visible=False, id="id_runway") as tab_diagram:
                    diagram_header = gr.Markdown(visible=True)
                    architecture_runway = gr.Image(label="Target Architecture Runway", interactive=False, visible=True, scale=10)
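
            # The two tabs above ("id_togaf", "id_runway") start hidden; the yields of
            # run_reference_architecture_agentic_system reveal them and switch
            # tabs_reference_architecture to "id_togaf" once a vision is generated.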
with gr.Tab(label="Overview", id="how_to"):
|
495 |
+
gr.Markdown(
|
496 |
+
"""
|
497 |
+
# Title
|
498 |
+
|
499 |
+
**Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` to positively impact individuals and organisations.\n
|
500 |
+
|
501 |
+
## Overview
|
502 |
+
"""
|
503 |
+
),
|
504 |
+
gr.Image(
|
505 |
+
get_image(e4u.CFG.EA4ALL_ARCHITECTURE),
|
506 |
+
show_download_button=False,
|
507 |
+
container=False,
|
508 |
+
show_share_button=False,
|
509 |
+
)
|
510 |
+
gr.Markdown(
|
511 |
+
"""
|
512 |
+
## Journey
|
513 |
+
|
514 |
+
Audio overview summarising the key learnings, challenges, so what, stats from day-1 to last sprint. (**Powered by Google NoteBookLM**)
|
515 |
+
|
516 |
+
"""
|
517 |
+
)
|
518 |
+
podcast = gr.Audio(
|
519 |
+
type="filepath",
|
520 |
+
value=os.path.join(BaseConfiguration.ea4all_store,e4u.CFG.EA4ALL_PODCAST),
|
521 |
+
label="EA4ALL Journey Podcast",
|
522 |
+
show_download_button=False,
|
523 |
+
autoplay=False,
|
524 |
+
container=True,
|
525 |
+
interactive=False,
|
526 |
+
)
|
527 |
+
gr.Markdown(ea4all_about)
|
528 |
+
|
529 |
+

    #Capture user feedback (thumbs up/down) on LLM responses
    def get_user_feedback(evt: gr.SelectData, request: gr.Request):
        ##{evt.index} {evt.value} {evt._data['liked']}
        try:
            uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".", "_")]
            gr.Info("Thanks for your feedback - run_id: " + uuid_str)
            run_id = uuid.UUID(uuid_str)
            client = Client()
            client.create_feedback(
                run_id,
                key="feedback-key",
                score=1.0 if evt._data['liked'] else 0.0,
                comment=str(evt.value)
            )
        except Exception as e:
            gr.Warning(f"Couldn't capture feedback: {e}")
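
    # Assumes Client is the LangSmith client imported earlier and that the agent
    # workflows stored the user's last traced run_id in an environment variable;
    # create_feedback then attaches the like/dislike score and comment to that run.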

    #Set initial state of apm, llm and capture user-ip
    async def ea4all_agent_init(request: gr.Request):

        #capture user IP address
        #ea4all_user = e4u.get_user_identification(request)
        gr.Info("Thank you for connecting! I'd love to hear your feedback! Thumbs up or Thumbs down. LinkedIn comment.")

        # Set initial landscape vectorstore
        await indexer_graph.ainvoke(input={"docs": []}, config=config)

        #set chatbot description w/ user apm columns
        df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
        columns_string = ', '.join(df.columns)
        apm_columns = agentic_qna_desc + columns_string

        prompt = ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')

        page_list = ea4all_confluence()

        #Load gradio.dataframe with Portfolio sample dataset
        pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")

        return (
            apm_columns,
            [prompt],
            [prompt],
            [prompt],
            gr.Dropdown(choices=page_list['title'].values.tolist()),
            gr.DataFrame(value=page_list),
            gr.DataFrame(value=df),
            gr.DataFrame(value=pmo_df),
        )
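
    # The 8-tuple above maps positionally to the outputs of ea4all_mcp.load at the
    # bottom of this file: [ea4all_agent_metadata, ea4all_chatbot, ea4all_vqa,
    # pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df].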

    #authentication (demo-only check: accepts any username whose password matches it)
    def ea4all_login(username, password):
        return (username == password)
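
    # Sketch: Gradio accepts a callable like this via Blocks.launch(auth=ea4all_login)
    # to gate the UI behind a login page; whether launch() passes it is configured elsewhere.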

    #TABS & Reference Architecture look-and-feel control
    def off_dbrtext():
        return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

    def on_dbrtext(file):
        if file:
            return gr.TextArea(visible=True)
        return gr.TextArea(visible=False)

    #Upload & clear business requirement
    def load_dbr(file):
        return file.decode()

    def unload_dbr():
        return gr.TextArea(visible=False)

    #Podcast upload progress (note: no callback attached, only show_progress is set)
    podcast.change(show_progress='full')

    #Togaf upload file
    dbr_file.clear(unload_dbr, outputs=dbr_text)
    dbr_file.change(on_dbrtext, inputs=dbr_file, outputs=dbr_text)
    dbr_file.upload(load_dbr, inputs=dbr_file, outputs=dbr_text)
    dbr_cls.click(off_dbrtext, outputs=[dbr_text, tabs_togaf, tab_diagram])
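
    # Event semantics: .upload fires after a file is added (its decoded text fills
    # dbr_text via load_dbr), .clear/.change handle removal, and dbr_cls hides the
    # TextArea plus the two result tabs again.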

    #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
    qna_prompt.submit(run_qna_agentic_system, [qna_prompt, ea4all_chatbot], ea4all_chatbot)
    qna_prompt.submit(lambda: "", None, [qna_prompt])
    ea4all_chatbot.like(fn=get_user_feedback)
    qna_examples.input(lambda value: value, qna_examples, qna_prompt)

    #Execute Reference Architecture
    dbr_run.click(run_reference_architecture_agentic_system, show_progress='full', inputs=[dbr_text], outputs=[togaf_vision, tabs_togaf, tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])
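
    # Two .submit listeners on the same textbox run independently: the first streams
    # the agent's answer into the chatbot, the second clears the input box; the
    # examples dropdown simply copies the selected question into the prompt.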

    #vqa_chatbot (ChatInterface -> Chatbot)
    def add_message(message, history):
        if message["text"] is not None:
            history.append({"role": "user", "content": message["text"]})

        if len(message['files']) > 0:
            history.append({"role": "user", "content": {"path": message['files'][-1]}})

        return (
            gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
            history
        )

    chat_msg = vqa_prompt.submit(add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa])
    bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="bot_response")

    ea4all_vqa.like(fn=get_user_feedback)
    vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)
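
    # .then() chains the handlers: add_message first echoes the user's text and any
    # uploaded diagram into the chat history, then run_vqa_agentic_system streams the
    # answer; api_name="bot_response" also exposes this step as a named API endpoint.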

    #Invoke CrewAI PMO Agentic System
    pmo_prompt.submit(run_pmo_agentic_system, [pmo_prompt, pmo_chatbot], pmo_chatbot)
    pmo_prompt.submit(lambda: "", None, [pmo_prompt])
    pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)

    #Set initial state of apm and llm
    ea4all_mcp.load(ea4all_agent_init, outputs=[ea4all_agent_metadata, ea4all_chatbot, ea4all_vqa, pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df])
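
    # Blocks.load runs ea4all_agent_init on every page load; because the init awaits
    # the landscape indexer and fetches Confluence pages, the first render may take
    # a few seconds.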