avfranco committed
Commit e1c6f35 · 1 Parent(s): a38c9c4

ea4all-gradio-agents-mcp-hackathon-ui-retrofit

Files changed (2):
  1. ea4all/ea4all_mcp.py +312 -277
  2. ea4all/utils/utils.py +36 -34
ea4all/ea4all_mcp.py CHANGED
@@ -15,6 +15,7 @@ import ea4all.src.shared.utils as e4u
from ea4all.src.graph import super_graph
#from ea4all.src.pmo_crew.crew_runner import run_pmo_crew

import gradio as gr
from gradio import ChatMessage
import os
@@ -23,8 +24,8 @@ import time
from PIL import Image

from ea4all.utils.utils import (
-     get_user_feedback, add_message, unload_dbr, on_dbrtext, load_dbr, off_dbrtext,
-     ea4all_agent_init, get_image, filter_page, ea4all_about, init_dbr
)

TITLE = """
@@ -47,6 +48,300 @@ config = RunnableConfig(
#stream_mode = "messages"
)
#Blocks w/ ChatInterface, BYOD, About
with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:

@@ -74,266 +369,6 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
"""

- #ea4all-qna-agent-conversational-with-memory
- async def run_qna_agentic_system(prompt, chat_memory, request:gr.Request):
-
-     format_response = ""
-
-     if not prompt:
-         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
-         chat_memory.append(ChatMessage(role="assistant", content=format_response))
-         yield chat_memory
-
-     if not chat_memory:
-         chat_memory.append(ChatMessage(role="user", content=prompt))
-         yield chat_memory
-
-     if prompt:
-         #capture user ip
-         ea4all_user = e4u.get_user_identification(request)
-
-         ##Initialise APM Graph
-         #apm_graph = e4a.apm_graph
-         #inputs = {"question": prompt, "chat_memory":chat_memory}
-         inputs = {"messages": [{"role": "user", "content": prompt}]}
-
-         #add prompt to memory
-         chat_memory.append(ChatMessage(role="user", content=prompt))
-
-         partial_message = ""
-         async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
-             #async for event in super_graph.astream(input=inputs, config=config, subgraphs=True):
-             #    chat_memory.append(ChatMessage(role="assistant", content=str(event)))
-             #    yield chat_memory
-
-             kind = event["event"]
-             tags = event.get("tags", [])
-             name = event['name']
-
-             #chat_memory.append(ChatMessage(role="assistant", content=f"Running: {name}"))
-             #yield chat_memory
-
-             if name == "safety_check":
-                 #if kind == "on_chain_start":
-                 #    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`"))
-                 #    yield chat_memory
-                 if kind == "on_chain_stream":
-                     chunk = event['data'].get('chunk')
-                     if chunk and 'safety_status' in chunk and len(chunk['safety_status']) > 0:
-                         chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {chunk['safety_status'][0]}"))
-                         if chunk['safety_status'][0] == 'no' and len(chunk['safety_status']) > 1:
-                             chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {chunk['safety_status'][1]}"))
-                     yield chat_memory
-             if kind == "on_chain_end" and name == "route_question":
-                 output = event['data'].get('output')
-                 if output and 'source' in output:
-                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {output['source']}"))
-                 else:
-                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` (no source available)"))
-                 yield chat_memory
-             if kind == "on_chain_start" and name == "retrieve":
-                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
-                 yield chat_memory
-             if kind == "on_chain_start" and name in ("generate_web_search", "websearch", "stream_generation"):
-                 chat_memory.append(ChatMessage(role="assistant", content= f"\n\n- `{name}`\n\n"))
-                 yield chat_memory
-             if kind == "on_chain_stream" and name == "stream_generation":
-                 data = event["data"]
-                 # Accumulate the chunk of data
-                 partial_message += data.get('chunk', '')
-                 chat_memory[-1].content = partial_message
-                 time.sleep(0.05)
-                 yield chat_memory
-             if name == "grade_generation_v_documents_and_question":
-                 if kind == "on_chain_start":
-                     chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
-                     yield chat_memory
-                 if kind == "on_chain_end":
-                     input_data = event['data'].get('input')
-                     if input_data and hasattr(input_data, 'source'):
-                         output_value = event['data'].get('output', '')
-                         chat_memory.append(ChatMessage(role="assistant", content=f"`{input_data.source}:` {output_value}"))
-                     else:
-                         chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data'].get('output', '')}`"))
-                     yield chat_memory
-             if "stream_hallucination" in tags and kind == "on_chain_start":
-                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
-                 yield chat_memory
-             if "stream_grade_answer" in tags and kind == "on_chain_start":
-                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
-                 yield chat_memory
-             if name == "supervisor":
-                 if kind == "on_chain_start":
-                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
-                     yield chat_memory
-                 if kind == "on_chain_stream":
-                     chunk = event['data'].get('chunk')
-                     if chunk is not None:
-                         chat_memory.append(ChatMessage(role="assistant", content=f"{chunk}"))
-                         yield chat_memory
-
-         # Set environment variable only when 'event' is defined
-         os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
-
-         wait_for_all_tracers()
-
- #Trigger Solution Architecture Diagram QnA
- async def run_vqa_agentic_system(message, chat_memory, request:gr.Request):
-     #capture user ip
-     ea4all_user = e4u.get_user_identification(request)
-
-     """Handle file uploads and validate their types."""
-     allowed_file_types = ('JPEG', 'PNG')
-
-     print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
-     print(f"Prompt: {message}")
-
-     if message['files'] == []:
-         chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
-         yield chat_memory
-     else:
-         diagram = message['files'][-1] ##chat_memory[-1]['content'][-1]
-         msg = message['text'] ##chat_memory[-2]['content']
-         print(f"---DIAGRAM: {diagram}---")
-         try:
-             if msg == "":
-                 msg = "Please describe this diagram."
-
-             with Image.open(diagram) as diagram_:
-                 if diagram_.format not in allowed_file_types:
-                     chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
-                     yield chat_memory
-                 else:
-                     #'vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
-                     vqa_image = diagram
-
-                     #Setup Quality Assurance Agentic System
-                     #graph = e4v.ea4all_graph(config['configurable']['vqa_model'])
-
-                     #Setup enter graph
-                     diagram_graph = e4v.diagram_graph
-
-                     partial_message = ""
-                     chat_memory.append(ChatMessage(role="assistant", content="Hi, I am working on your question..."))
-                     async for event in diagram_graph.astream_events(
-                         {"question":msg, "image": vqa_image}, config, version="v2"
-                     ):
-                         if (
-                             event["event"] == "on_chat_model_stream"
-                             and "vqa_stream" in event.get('tags', [])
-                             #and event["metadata"].get("langgraph_node") == "tools"
-                         ):
-                             chunk = event["data"].get("chunk")
-                             if chunk is not None and hasattr(chunk, "content"):
-                                 partial_message += chunk.content
-                                 chat_memory[-1].content = partial_message
-                                 time.sleep(e4u.CFG.STREAM_SLEEP)
-                                 yield chat_memory #, message to update prompt
-                         elif not partial_message:
-                             yield chat_memory #, message
-
-                     os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
-
-                     wait_for_all_tracers()
-
-         except Exception as e:
-             yield (e.args[-1])
-
- #Run Togaf Agentic System
- async def run_reference_architecture_agentic_system(business_query, request:gr.Request):
-
-     if len(business_query) < 50:
-         agent_response = "Please provide a valid Business Requirement content to start!"
-         yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
-     else:
-         plain_text = e4u.markdown_to_plain_text(business_query)
-         agent_response = "Generating Architecture Blueprint ---TOGAF VISION TARGET--- \n\nI am working on your request..."
-         togaf_chain = e4t.togaf_graph
-         final_diagram = ""
-         vision_message = ""
-         try:
-             async for s in togaf_chain.astream_events(
-                 {
-                     "messages": [
-                         HumanMessage(
-                             content=plain_text
-                         )
-                     ],
-                     "business_query": business_query,
-                 },
-                 config=config,
-                 version="v2"
-             ):
-                 kind = s["event"]
-                 tags = s.get("tags", [])
-                 name = s['name']
-
-                 if "gra_stream" in tags and name == "stream_vision_target":
-                     if kind == "on_chain_stream":
-                         data = s["data"]
-                         # Accumulate the chunk of data
-                         chunk = data.get('chunk')
-                         if chunk is not None and hasattr(chunk, "content"):
-                             vision_message += chunk.content
-                             time.sleep(e4u.CFG.STREAM_SLEEP)
-                             yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
-                 elif name == "save_diagram" and kind == 'on_chain_end': #MOVED INTO Togaf_Task3
-                     output = s['data'].get('output', {})
-                     final_diagram = output.get('architecture_runway', "")
-                 elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start': ##'on_chat_model_stream':
-                     agent_response += f"\n\n`{tags[-1]}:{name}`"
-
-                     yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
-
-             if vision_message=="":
-                 agent_response = "I cannot generate the Architecture Vision. Please provide a valid Business Requirement content to start!"
-                 yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
-             elif "Error" not in final_diagram:
-                 yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),final_diagram, None, gr.Tabs(visible=True)])
-             else:
-                 yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, final_diagram, gr.Tabs(visible=True)])
-
-         except Exception as e:
-             yield(
-                 [
-                     e.args[-1],
-                     gr.Tabs(visible=True),
-                     gr.Tabs(selected="id_togaf"),
-                     None,
-                     None,
-                     gr.Tabs(visible=False)
-                 ]
-             )
-
- async def run_pmo_agentic_system(prompt, chat_memory):
-     """
-     Answer a question about Project Portfolio Management and Architect Demand Management.
-
-     Args:
-         prompt (str): The propject portfolio user question
-         chat_memory (list): The tool message history
-
-     Returns:
-         str: A summary answering the user question
-     """
-     format_response = ""
-
-     if not prompt:
-         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
-         chat_memory.append(ChatMessage(role="assistant", content=format_response))
-         yield chat_memory
-
-     if not chat_memory:
-         chat_memory.append(ChatMessage(role="user", content=prompt))
-         yield chat_memory
-
-     inputs = {
-         "question": prompt,
-         "verbose": True, # optional flags
-     }
-
-     #yield run_pmo_crew(inputs)
-
#EA4ALL-Agentic system menu
with gr.Tabs(selected="how_to") as tabs:
with gr.Tab(label="Architect Demand Management"):
@@ -427,36 +462,36 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
    container=True,
    interactive=False,
)
- gr.Markdown(ea4all_about)

#Podcast upload progress
- podcast.change(show_progress='full')

#Togaf upload file
- dbr_file.clear(unload_dbr,outputs=dbr_text)
- dbr_file.change(on_dbrtext,inputs=dbr_file,outputs=dbr_text)
- dbr_file.upload(load_dbr,inputs=dbr_file, outputs=dbr_text)
- dbr_cls.click(off_dbrtext,outputs=[dbr_text, tabs_togaf, tab_diagram])

#Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
qna_prompt.submit(run_qna_agentic_system,[qna_prompt,ea4all_chatbot],ea4all_chatbot)
- qna_prompt.submit(lambda: "", None, [qna_prompt])
- ea4all_chatbot.like(fn=get_user_feedback)
- qna_examples.input(lambda value: value, qna_examples, qna_prompt)

#Execute Reference Architecture
dbr_run.click(run_reference_architecture_agentic_system,show_progress='full',inputs=[dbr_text],outputs=[togaf_vision,tabs_togaf,tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])

- chat_msg = vqa_prompt.submit(add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa])
bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="bot_response")

- ea4all_vqa.like(fn=get_user_feedback)
- vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)

#Invoke CrewAI PMO Agentic System
pmo_prompt.submit(run_pmo_agentic_system,[pmo_prompt,pmo_chatbot],pmo_chatbot)
- pmo_prompt.submit(lambda: "", None, [pmo_prompt])
- pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)

#Set initial state of apm and llm
- ea4all_mcp.load(ea4all_agent_init, outputs=[ea4all_agent_metadata,ea4all_chatbot, ea4all_vqa, pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df])
from ea4all.src.graph import super_graph
#from ea4all.src.pmo_crew.crew_runner import run_pmo_crew

+ from typing import AsyncGenerator
import gradio as gr
from gradio import ChatMessage
import os

from PIL import Image

from ea4all.utils.utils import (
+     UIUtils,
+     ea4all_agent_init, get_image, filter_page, init_dbr
)

TITLE = """

#stream_mode = "messages"
)

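The retrofit imports AsyncGenerator and types every streaming handler as an async generator that yields the updated chat history. A minimal, self-contained sketch of that pattern follows; the token source is invented for illustration and is not part of this repo:

from typing import AsyncGenerator

async def fake_token_source(prompt: str):
    # Stand-in for a real LLM stream (illustrative only)
    for token in ["Hello", ",", " ", "world"]:
        yield token

async def stream_handler(prompt: str, history: list) -> AsyncGenerator[list, None]:
    history.append({"role": "assistant", "content": ""})
    async for token in fake_token_source(prompt):
        history[-1]["content"] += token
        yield history  # Gradio re-renders the Chatbot on every yield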
+ #ea4all-qna-agent-conversational-with-memory
+ async def run_qna_agentic_system(prompt: str, chat_memory: list, request:gr.Request) -> AsyncGenerator[list, None]:
+     """
+     Handles conversational Q&A for the Application Landscape using an agentic system.
+
+     Args:
+         prompt (str): The user's question or message.
+         chat_memory (list): The conversation history as a list of ChatMessage objects.
+         request (gr.Request): The Gradio request object for user identification.
+
+     Yields:
+         list: Response to user's question.
+     """
+
+     format_response = ""
+
+     if not prompt:
+         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
+         chat_memory.append(ChatMessage(role="assistant", content=format_response))
+         yield chat_memory
+
+     if not chat_memory:
+         chat_memory.append(ChatMessage(role="user", content=prompt))
+         yield chat_memory
+
+     if prompt:
+         #capture user ip
+         ea4all_user = e4u.get_user_identification(request)
+
+         ##Initialise APM Graph
+         #apm_graph = e4a.apm_graph
+         #inputs = {"question": prompt, "chat_memory":chat_memory}
+         inputs = {"messages": [{"role": "user", "content": prompt}]}
+
+         #add prompt to memory
+         chat_memory.append(ChatMessage(role="user", content=prompt))
+
+         partial_message = ""
+         async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
+             #async for event in super_graph.astream(input=inputs, config=config, subgraphs=True):
+             #    chat_memory.append(ChatMessage(role="assistant", content=str(event)))
+             #    yield chat_memory
+
+             kind = event["event"]
+             tags = event.get("tags", [])
+             name = event['name']
+
+             #chat_memory.append(ChatMessage(role="assistant", content=f"Running: {name}"))
+             #yield chat_memory
+
+             if name == "safety_check":
+                 #if kind == "on_chain_start":
+                 #    chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`"))
+                 #    yield chat_memory
+                 if kind == "on_chain_stream":
+                     chunk = event['data'].get('chunk')
+                     if chunk and 'safety_status' in chunk and len(chunk['safety_status']) > 0:
+                         chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}`: {chunk['safety_status'][0]}"))
+                         if chunk['safety_status'][0] == 'no' and len(chunk['safety_status']) > 1:
+                             chat_memory.append(ChatMessage(role="assistant", content=f"Safety-status: {chunk['safety_status'][1]}"))
+                     yield chat_memory
+             if kind == "on_chain_end" and name == "route_question":
+                 output = event['data'].get('output')
+                 if output and 'source' in output:
+                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` {output['source']}"))
+                 else:
+                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}:` (no source available)"))
+                 yield chat_memory
+             if kind == "on_chain_start" and name == "retrieve":
+                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` RAG\n\n"))
+                 yield chat_memory
+             if kind == "on_chain_start" and name in ("generate_web_search", "websearch", "stream_generation"):
+                 chat_memory.append(ChatMessage(role="assistant", content= f"\n\n- `{name}`\n\n"))
+                 yield chat_memory
+             if kind == "on_chain_stream" and name == "stream_generation":
+                 data = event["data"]
+                 # Accumulate the chunk of data
+                 partial_message += data.get('chunk', '')
+                 chat_memory[-1].content = partial_message
+                 time.sleep(0.05)
+                 yield chat_memory
+             if name == "grade_generation_v_documents_and_question":
+                 if kind == "on_chain_start":
+                     chat_memory.append(ChatMessage(role="assistant", content=f"\n\n- `{name}`: "))
+                     yield chat_memory
+                 if kind == "on_chain_end":
+                     input_data = event['data'].get('input')
+                     if input_data and hasattr(input_data, 'source'):
+                         output_value = event['data'].get('output', '')
+                         chat_memory.append(ChatMessage(role="assistant", content=f"`{input_data.source}:` {output_value}"))
+                     else:
+                         chat_memory.append(ChatMessage(role="assistant", content=f"`{event['data'].get('output', '')}`"))
+                     yield chat_memory
+             if "stream_hallucination" in tags and kind == "on_chain_start":
+                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
+                 yield chat_memory
+             if "stream_grade_answer" in tags and kind == "on_chain_start":
+                 chat_memory.append(ChatMessage(role="assistant", content=f"- `{tags[-1]}`"))
+                 yield chat_memory
+             if name == "supervisor":
+                 if kind == "on_chain_start":
+                     chat_memory.append(ChatMessage(role="assistant", content=f"- `{name}` "))
+                     yield chat_memory
+                 if kind == "on_chain_stream":
+                     chunk = event['data'].get('chunk')
+                     if chunk is not None:
+                         chat_memory.append(ChatMessage(role="assistant", content=f"{chunk}"))
+                         yield chat_memory
+
+         # Set environment variable only when 'event' is defined
+         os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+
+         wait_for_all_tracers()
+
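All three streaming handlers dispatch on the same astream_events v2 event shape: the event kind, the emitting node's name, and any tags attached to a runnable. A stripped-down consumer, assuming only a compiled LangGraph graph, looks like this:

# Minimal astream_events consumer mirroring the dispatch above;
# `graph` stands for any compiled LangGraph graph or LCEL runnable.
async def trace_events(graph, inputs: dict) -> None:
    async for event in graph.astream_events(input=inputs, version="v2"):
        kind = event["event"]         # e.g. on_chain_start / on_chain_stream / on_chain_end
        name = event["name"]          # node or runnable name, e.g. "safety_check"
        tags = event.get("tags", [])  # tags set via .with_config(tags=[...])
        if kind == "on_chain_stream":
            print(name, tags, event["data"].get("chunk"))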
+ #Trigger Solution Architecture Diagram QnA
+ async def run_vqa_agentic_system(message: dict, chat_memory: list, request:gr.Request) -> AsyncGenerator[list, None]:
+     """
+     Handles Visual Question Answering (VQA) for uploaded architecture diagrams.
+
+     Args:
+         message (dict): Contains 'files' (list of file paths) and 'text' (user's question).
+         chat_memory (list): The conversation history as a list of ChatMessage objects.
+         request (gr.Request): The Gradio request object for user identification.
+
+     Yields:
+         list: Response to user's question.
+     """
+
+     #capture user ip
+     ea4all_user = e4u.get_user_identification(request)
+
+     """Handle file uploads and validate their types."""
+     allowed_file_types = ('JPEG', 'PNG')
+
+     print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
+     print(f"Prompt: {message}")
+
+     if message['files'] == []:
+         chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
+         yield chat_memory
+     else:
+         diagram = message['files'][-1] ##chat_memory[-1]['content'][-1]
+         msg = message['text'] ##chat_memory[-2]['content']
+         print(f"---DIAGRAM: {diagram}---")
+         try:
+             if msg == "":
+                 msg = "Please describe this diagram."
+
+             with Image.open(diagram) as diagram_:
+                 if diagram_.format not in allowed_file_types:
+                     chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
+                     yield chat_memory
+                 else:
+                     #'vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
+                     vqa_image = diagram
+
+                     #Setup Quality Assurance Agentic System
+                     #graph = e4v.ea4all_graph(config['configurable']['vqa_model'])
+
+                     #Setup enter graph
+                     diagram_graph = e4v.diagram_graph
+
+                     partial_message = ""
+                     chat_memory.append(ChatMessage(role="assistant", content="Hi, I am working on your question..."))
+                     async for event in diagram_graph.astream_events(
+                         {"question":msg, "image": vqa_image}, config, version="v2"
+                     ):
+                         if (
+                             event["event"] == "on_chat_model_stream"
+                             and "vqa_stream" in event.get('tags', [])
+                             #and event["metadata"].get("langgraph_node") == "tools"
+                         ):
+                             chunk = event["data"].get("chunk")
+                             if chunk is not None and hasattr(chunk, "content"):
+                                 partial_message += chunk.content
+                                 chat_memory[-1].content = partial_message
+                                 time.sleep(e4u.CFG.STREAM_SLEEP)
+                                 yield chat_memory #, message to update prompt
+                         elif not partial_message:
+                             yield chat_memory #, message
+
+                     os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+
+                     wait_for_all_tracers()
+
+         except Exception as e:
+             yield (e.args[-1])
+
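The message argument here is the payload a gr.MultimodalTextbox submit delivers: a dict with a 'text' string and a 'files' list of paths, validated above by decoding with PIL. A small sketch of that shape and check (the file path is illustrative):

from PIL import Image

message = {"text": "Describe this diagram.", "files": ["/tmp/diagram.png"]}

with Image.open(message["files"][-1]) as img:
    # Image.format reports the decoded format, e.g. 'PNG' or 'JPEG'
    is_allowed = img.format in ("JPEG", "PNG")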
+ #Run Togaf Agentic System
+ async def run_reference_architecture_agentic_system(business_query: str, request:gr.Request) -> AsyncGenerator[list, None]:
+     """
+     Generates a reference architecture blueprint based on a business requirement using the TOGAF agentic system.
+
+     Args:
+         business_query (str): The business requirement or query provided by the user.
+         request (gr.Request): The Gradio request object for user identification.
+
+     Yields:
+         list: Response to user's question.
+     """
+
+     if len(business_query) < 50:
+         agent_response = "Please provide a valid Business Requirement content to start!"
+         yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
+     else:
+         plain_text = e4u.markdown_to_plain_text(business_query)
+         agent_response = "Generating Architecture Blueprint ---TOGAF VISION TARGET--- \n\nI am working on your request..."
+         togaf_chain = e4t.togaf_graph
+         final_diagram = ""
+         vision_message = ""
+         try:
+             async for s in togaf_chain.astream_events(
+                 {
+                     "messages": [
+                         HumanMessage(
+                             content=plain_text
+                         )
+                     ],
+                     "business_query": business_query,
+                 },
+                 config=config,
+                 version="v2"
+             ):
+                 kind = s["event"]
+                 tags = s.get("tags", [])
+                 name = s['name']
+
+                 if "gra_stream" in tags and name == "stream_vision_target":
+                     if kind == "on_chain_stream":
+                         data = s["data"]
+                         # Accumulate the chunk of data
+                         chunk = data.get('chunk')
+                         if chunk is not None and hasattr(chunk, "content"):
+                             vision_message += chunk.content
+                             time.sleep(e4u.CFG.STREAM_SLEEP)
+                             yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
+                 elif name == "save_diagram" and kind == 'on_chain_end': #MOVED INTO Togaf_Task3
+                     output = s['data'].get('output', {})
+                     final_diagram = output.get('architecture_runway', "")
+                 elif ("assess_business_query" in tags or "assess_landscape" in tags) and kind == 'on_chain_start': ##'on_chat_model_stream':
+                     agent_response += f"\n\n`{tags[-1]}:{name}`"
+
+                     yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
+
+             if vision_message=="":
+                 agent_response = "I cannot generate the Architecture Vision. Please provide a valid Business Requirement content to start!"
+                 yield([agent_response,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, None, gr.Tabs(visible=False)])
+             elif "Error" not in final_diagram:
+                 yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),final_diagram, None, gr.Tabs(visible=True)])
+             else:
+                 yield([vision_message,gr.Tabs(visible=True), gr.Tabs(selected="id_togaf"),None, final_diagram, gr.Tabs(visible=True)])
+
+         except Exception as e:
+             yield(
+                 [
+                     e.args[-1],
+                     gr.Tabs(visible=True),
+                     gr.Tabs(selected="id_togaf"),
+                     None,
+                     None,
+                     gr.Tabs(visible=False)
+                 ]
+             )
+
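Each yield above is a six-element list that Gradio maps positionally onto the outputs= list of the dbr_run.click event (vision text, two tab groups, runway diagram, error header, diagram tab). The same multi-output generator pattern, reduced to two outputs as a sketch:

import gradio as gr

def report_progress(query: str):
    # First yield: show interim text, keep the result tab hidden
    yield "Working on your request...", gr.Tabs(visible=False)
    # Final yield: deliver the result and reveal the tab
    yield f"Blueprint for: {query}", gr.Tabs(visible=True)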
+ async def run_pmo_agentic_system(prompt, chat_memory):
+     """
+     Answers questions about Project Portfolio Management and Architect Demand Management.
+
+     Args:
+         prompt (str): The user's question about project portfolio or resource management.
+         chat_memory (list): The conversation history as a list of ChatMessage objects.
+
+     Yields:
+         list: Updated chat memory after each event or response.
+     """
+
+     format_response = ""
+
+     if not prompt:
+         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
+         chat_memory.append(ChatMessage(role="assistant", content=format_response))
+         yield chat_memory
+
+     if not chat_memory:
+         chat_memory.append(ChatMessage(role="user", content=prompt))
+         yield chat_memory
+
+     inputs = {
+         "question": prompt,
+         "verbose": True, # optional flags
+     }
+
+     #yield run_pmo_crew(inputs)
+
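With the crew_runner import commented out, this handler currently only yields the greeting paths and builds inputs. A hedged sketch of how the commented-out call might be wired back in; run_pmo_crew's exact signature and return type are assumptions, not confirmed by this diff:

async def run_pmo_agentic_system_wired(prompt: str, chat_memory: list):
    inputs = {"question": prompt, "verbose": True}
    result = run_pmo_crew(inputs)  # assumed synchronous helper returning text
    chat_memory.append(ChatMessage(role="assistant", content=str(result)))
    yield chat_memory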
#Blocks w/ ChatInterface, BYOD, About
with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:

Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
"""
#EA4ALL-Agentic system menu
with gr.Tabs(selected="how_to") as tabs:
with gr.Tab(label="Architect Demand Management"):

    container=True,
    interactive=False,
)
+ gr.Markdown(UIUtils.ea4all_about)

#Podcast upload progress
+ podcast.change(show_progress='full', show_api=False,)

#Togaf upload file
+ #dbr_file.clear(unload_dbr,outputs=dbr_text)
+ #dbr_file.change(on_dbrtext,inputs=dbr_file,outputs=dbr_text)
+ dbr_file.upload(UIUtils.load_dbr,inputs=dbr_file, outputs=dbr_text, show_api=False)
+ #dbr_cls.click(off_dbrtext,outputs=[dbr_text, tabs_togaf, tab_diagram])

#Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
qna_prompt.submit(run_qna_agentic_system,[qna_prompt,ea4all_chatbot],ea4all_chatbot)
+ #qna_prompt.submit(lambda: "", None, [qna_prompt])
+ #ea4all_chatbot.like(fn=get_user_feedback)
+ #qna_examples.input(lambda value: value, qna_examples, qna_prompt)

#Execute Reference Architecture
dbr_run.click(run_reference_architecture_agentic_system,show_progress='full',inputs=[dbr_text],outputs=[togaf_vision,tabs_togaf,tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram])

+ chat_msg = vqa_prompt.submit(UIUtils.add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa], show_api=False)
bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="bot_response")

+ #ea4all_vqa.like(fn=get_user_feedback)
+ #vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)

#Invoke CrewAI PMO Agentic System
pmo_prompt.submit(run_pmo_agentic_system,[pmo_prompt,pmo_chatbot],pmo_chatbot)
+ pmo_prompt.submit(lambda: "", None, [pmo_prompt], show_api=False)
+ #pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)

#Set initial state of apm and llm
+ ea4all_mcp.load(ea4all_agent_init, outputs=[ea4all_agent_metadata,ea4all_chatbot, ea4all_vqa, pmo_chatbot, confluence_list, confluence_df, apm_df, pmo_df], show_api=False)
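The wiring pattern above hides UI plumbing from the auto-generated API with show_api=False while leaving the agent entry points exposed under stable names (api_name="bot_response"), which is what keeps the published tool surface limited to the agent functions. A minimal sketch of the same split; in recent Gradio releases the named endpoint is also what gets published as an MCP tool when the app is launched with mcp_server=True:

import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox()
    chat = gr.Chatbot(type="messages")
    box.submit(lambda m, h: h + [{"role": "user", "content": m}],
               [box, chat], chat, api_name="ask_agent")  # exposed in the API
    box.submit(lambda: "", None, box, show_api=False)    # hidden UI helper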
ea4all/utils/utils.py CHANGED
@@ -18,27 +18,47 @@ import os
import time
import pandas as pd
import gradio as gr
- from PIL import Image

- #ea4all-about
- def ea4all_about():
-     readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
-     return readme

#Load demo business requirements
- def init_dbr():
    # Open the file in read mode ('r')
    with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
        # Read the contents of the file
        contents = file.read()
    return contents

- def init_df():
    return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))

#load core-architecture image
#fix the issue with gr.Image(path) inside a docker container
- def get_image(_image):
    #from PIL import Image
    # Load an image
    image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)
@@ -46,7 +66,7 @@ def get_image(_image):

    return image

- def ea4all_confluence():

    #Confluence API Key
    confluence_api_key = os.environ['CONFLUENCE_API_KEY']
@@ -70,12 +90,12 @@ def ea4all_confluence():

    return df

- def filter_page(page_list, title):
    x = page_list[page_list["title"] == title]
    return x.iloc[0]['page_content']

#get LLM response user's feedback
- def get_user_feedback(evt: gr.SelectData, request:gr.Request):
    ##{evt.index} {evt.value} {evt._data['liked']}
    try:
        uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".","_")]
@@ -92,7 +112,7 @@ def get_user_feedback(evt: gr.SelectData, request:gr.Request):
        gr.Warning(f"Couldn't capture a feedback: {e}")

#Set initial state of apm, llm and capture user-ip
- async def ea4all_agent_init(request:gr.Request):

    agentic_qna_desc="""Hi,
    improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
@@ -130,35 +150,17 @@ async def ea4all_agent_init(request:gr.Request):
    )

#authentication
- def ea4all_login(username, password):
    return (username==password)

#TABS & Reference Architecture look-and-feel control
- def off_dbrtext():
    return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

- def on_dbrtext(file):
    if file:
        return gr.TextArea(visible=True)
    return gr.TextArea(visible=False)

- #Upload & clear business requirement
- def load_dbr(file):
-     return file.decode()
-
- def unload_dbr():
    return gr.TextArea(visible=False)
-
- #vqa_chatbot (ChatInterface -> Chatbot)
- def add_message(message, history):
-     if message["text"] is not None:
-         history.append({"role": "user", "content": message["text"]})
-
-     if len(message['files']) > 0:
-         history.append({"role": "user", "content": {"path": message['files'][-1]}})
-
-     return (
-         gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
-         history
-     )
-
import time
import pandas as pd
import gradio as gr

+ class UIUtils:
+     #ea4all-about
+     @staticmethod
+     def ea4all_about(show_api=False):
+         readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
+         return readme
+
+     #vqa_chatbot (ChatInterface -> Chatbot)
+     @staticmethod
+     def add_message(message, history, show_api=False):
+         if message["text"] is not None:
+             history.append({"role": "user", "content": message["text"]})
+
+         if len(message['files']) > 0:
+             history.append({"role": "user", "content": {"path": message['files'][-1]}})
+
+         return (
+             gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
+             history
+         )
+
+     #Upload & clear business requirement
+     @staticmethod
+     def load_dbr(file, show_api=False):
+         return file.decode()

#Load demo business requirements
+ def init_dbr(show_api=False):
    # Open the file in read mode ('r')
    with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
        # Read the contents of the file
        contents = file.read()
    return contents

+ def init_df(show_api=False):
    return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))

#load core-architecture image
#fix the issue with gr.Image(path) inside a docker container
+ def get_image(_image, show_api=False):
    #from PIL import Image
    # Load an image
    image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)

    return image

+ def ea4all_confluence(show_api=False):

    #Confluence API Key
    confluence_api_key = os.environ['CONFLUENCE_API_KEY']

    return df

+ def filter_page(page_list, title, show_api=False):
    x = page_list[page_list["title"] == title]
    return x.iloc[0]['page_content']

#get LLM response user's feedback
+ def get_user_feedback(evt: gr.SelectData, request:gr.Request, show_api=False):
    ##{evt.index} {evt.value} {evt._data['liked']}
    try:
        uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".","_")]

        gr.Warning(f"Couldn't capture a feedback: {e}")

#Set initial state of apm, llm and capture user-ip
+ async def ea4all_agent_init(request:gr.Request, show_api=False):

    agentic_qna_desc="""Hi,
    improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.

    )

#authentication
+ def ea4all_login(username, password, show_api=False):
    return (username==password)

#TABS & Reference Architecture look-and-feel control
+ def off_dbrtext(show_api=False):
    return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

+ def on_dbrtext(file,show_api=False):
    if file:
        return gr.TextArea(visible=True)
    return gr.TextArea(visible=False)

+ def unload_dbr(show_api=False):
    return gr.TextArea(visible=False)
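Note that show_api is normally an argument of Gradio event listeners rather than of the callback itself; the show_api=False parameter threaded through every helper signature above is unused inside the function bodies. A quick usage sketch of the new UIUtils static helpers (input values are illustrative):

about_md = UIUtils.ea4all_about()  # returns the About page content as text

box, history = UIUtils.add_message(
    {"text": "Review this diagram", "files": ["diagram.png"]},
    [],  # empty chat history
)
# history now holds the user text plus a file entry;
# box is a re-enabled gr.MultimodalTextbox update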