avfranco committed
Commit c7bd440 · 1 Parent(s): f8ef08b

ea4all-gradio-agents-mcp-hackathon-tools-refactoring

Files changed (1):
  1. ea4all/ea4all_mcp.py +40 -38
ea4all/ea4all_mcp.py CHANGED
@@ -49,40 +49,39 @@ config = RunnableConfig(
 )
 
 #ea4all-qna-agent-conversational-with-memory
-async def run_qna_agentic_system(prompt: str, chat_memory: list, request:gr.Request) -> AsyncGenerator[list, None]:
+async def run_qna_agentic_system(question: str, chat_memory: list) -> AsyncGenerator[list, None]:
     """
     description:
         Handles conversational Q&A for the Application Landscape using an agentic system.
     Args:
-        prompt (str): The user's question or message.
-        chat_memory (list): The conversation history as a list of ChatMessage objects.
+        question (str): The user's question or message.
+        chat_memory (list): The conversation history.
         request (gr.Request): The Gradio request object for user identification.
     Returns:
-        list: Response to user's question.
+        reponse: Response to user's architectural question.
     """
 
     format_response = ""
-
-    if not prompt:
+    if not question:
         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
         chat_memory.append(ChatMessage(role="assistant", content=format_response))
         yield chat_memory
 
     if not chat_memory:
-        chat_memory.append(ChatMessage(role="user", content=prompt))
+        chat_memory.append(ChatMessage(role="user", content=question))
         yield chat_memory
 
-    if prompt:
+    if question:
         #capture user ip
-        ea4all_user = e4u.get_user_identification(request)
+        #ea4all_user = e4u.get_user_identification(request)
 
         ##Initialise APM Graph
         #apm_graph = e4a.apm_graph
-        #inputs = {"question": prompt, "chat_memory":chat_memory}
-        inputs = {"messages": [{"role": "user", "content": prompt}]}
+        #inputs = {"question": question, "chat_memory":chat_memory}
+        inputs = {"messages": [{"role": "user", "content": question}]}
 
-        #add prompt to memory
-        chat_memory.append(ChatMessage(role="user", content=prompt))
+        #add question to memory
+        chat_memory.append(ChatMessage(role="user", content=question))
 
         partial_message = ""
         async for event in super_graph.astream_events(input=inputs, config=config, version="v2"):
@@ -157,29 +156,34 @@ async def run_qna_agentic_system(prompt: str, chat_memory: list, request:gr.Requ
             yield chat_memory
 
     # Set environment variable only when 'event' is defined
-    os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+    #os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
 
     wait_for_all_tracers()
 
 #Trigger Solution Architecture Diagram QnA
-async def run_vqa_agentic_system(message: dict, chat_memory: list, request:gr.Request) -> AsyncGenerator[list, None]:
+async def run_vqa_agentic_system(question: str, diagram: str, chat_memory: list, request: gr.Request) -> AsyncGenerator[list, None]:
     """
     description:
         Handles Visual Question Answering (VQA) for uploaded architecture diagrams.
     Args:
-        message (dict): Contains 'files' (list of file paths) and 'text' (user's question).
-        chat_memory (list): The conversation history as a list of ChatMessage objects.
-        request (gr.Request): The Gradio request object for user identification.
+        question (str): User's question about the Architecture Diagram.
+        diagram (str): Path to the diagram file.
+        chat_memory: The conversation history.
     Returns:
-        list: Response to user's question.
+        response: Response to user's question.
     """
 
     #capture user ip
-    ea4all_user = e4u.get_user_identification(request)
+    #ea4all_user = e4u.get_user_identification(request)
 
     """Handle file uploads and validate their types."""
     allowed_file_types = ('JPEG', 'PNG')
 
+    message = {
+        'text': question,
+        'files': [diagram] if isinstance(diagram, str) else diagram
+    }
+
     print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
     print(f"Prompt: {message}")
 
@@ -223,11 +227,11 @@ async def run_vqa_agentic_system(message: dict, chat_memory: list, request:gr.Re
                     partial_message += chunk.content
                     chat_memory[-1].content = partial_message
                     time.sleep(e4u.CFG.STREAM_SLEEP)
-                    yield chat_memory #, message to update prompt
+                    yield chat_memory #, message to update question
                 elif not partial_message:
                     yield chat_memory #, message
 
-        os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
+        #os.environ["EA4ALL_" + ea4all_user.replace(".", "_")] = str(event['run_id'])
 
         wait_for_all_tracers()
 
@@ -235,15 +239,14 @@ async def run_vqa_agentic_system(message: dict, chat_memory: list, request:gr.Re
         yield (e.args[-1])
 
 #Run Togaf Agentic System
-async def run_reference_architecture_agentic_system(business_query: str, request:gr.Request) -> AsyncGenerator[list, None]:
+async def run_reference_architecture_agentic_system(business_query: str) -> AsyncGenerator[list, None]:
     """
     description:
         Generates a reference architecture blueprint based on a business requirement using the TOGAF agentic system.
     Args:
-        business_query (str): The business requirement or query provided by the user.
-        request (gr.Request): The Gradio request object for user identification.
+        business_query (str): Description of a business problem / requirement.
     Returns:
-        list: Response to user's question.
+        response: High-level architecture blueprint and target diagram.
     """
 
     if len(business_query) < 50:
@@ -309,30 +312,29 @@ async def run_reference_architecture_agentic_system(business_query: str, request
         ]
     )
 
-async def run_pmo_agentic_system(prompt, chat_memory):
+async def run_pmo_agentic_system(question:str, chat_memory: list) -> AsyncGenerator[list, None]:
     """
     description:
         Answers questions about Project Portfolio Management and Architect Demand Management.
     Args:
-        prompt (str): The user's question about project portfolio or resource management.
-        chat_memory (list): The conversation history as a list of ChatMessage objects.
+        question (str): The user's question about project portfolio or resource management.
+        chat_memory: The conversation history.
     Returns:
-        list: Updated chat memory after each event or response.
+        response: Architect Demand Allocation Report
     """
 
     format_response = ""
-
-    if not prompt:
+    if not question:
         format_response = "Hi, how are you today? To start our conversation, please chat your message!"
         chat_memory.append(ChatMessage(role="assistant", content=format_response))
         yield chat_memory
 
     if not chat_memory:
-        chat_memory.append(ChatMessage(role="user", content=prompt))
+        chat_memory.append(ChatMessage(role="user", content=question))
         yield chat_memory
 
     inputs = {
-        "question": prompt,
+        "question": question,
         "verbose": True, # optional flags
     }
 
@@ -376,7 +378,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
             ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
             pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
             pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
-            with gr.Accordion("Open for prompt examples", open=False):
+            with gr.Accordion("Open for question examples", open=False):
                pmo_examples = gr.Dropdown(e4u.get_relevant_questions(PMO_MOCK_QNA), value=None,label="Questions", interactive=True)
             gr.ClearButton([pmo_chatbot,pmo_prompt], value="Clear", size="sm", visible=False)
         with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
@@ -387,7 +389,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
             ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
             ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
             qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
-            with gr.Accordion("Open for prompt examples", open=False):
+            with gr.Accordion("Open for question examples", open=False):
                qna_examples = gr.Dropdown(e4u.get_relevant_questions(APM_MOCK_QNA), value=None,label="Questions", interactive=True)
             gr.ClearButton([ea4all_chatbot,qna_prompt], value="Clear", size="sm", visible=False)
         with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
@@ -396,14 +398,14 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
             gr.Markdown(value=agentic_vqa_desc)
             ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
             vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
-            with gr.Accordion("Open for prompt examples", open=False):
+            with gr.Accordion("Open for question examples", open=False):
                vqa_examples = gr.Dropdown(e4u.get_vqa_examples(), value=None,label="Diagram and Questions", interactive=True)
             gr.ClearButton([ea4all_vqa,vqa_prompt,vqa_examples], value="Clear", size="sm", visible=True)
         with gr.Tab(label="Reference Architecture", id="id_refarch"):
             with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
                 with gr.Tab(label='Business Requirement', id="id_dbr"):
                     gr.Markdown(value=agentic_togaf_desc)
-                    dbr_text=gr.TextArea(value="Provide a Business Requirement Specification or Select the exemple provided.", lines=14, interactive=True)
+                    dbr_text=gr.TextArea(value="Provide a Business Problem / Requirement Specification or select an example provided.", lines=14, interactive=True)
                     with gr.Row():
                         dbr_file=gr.File(
                             value=e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock),
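
For reference, a minimal sketch (not part of this commit) of how the refactored Q&A tool could be driven outside the Gradio UI now that it no longer takes a gr.Request. The import path, the availability of the ea4all package, the sample question, and the driver itself are assumptions for illustration only.

# Hypothetical driver -- assumes the ea4all package from this repo is installed
# and importable; it is not part of commit c7bd440.
import asyncio

from gradio import ChatMessage  # chat history items used by the tools

from ea4all.ea4all_mcp import run_qna_agentic_system  # assumed import path


async def main() -> None:
    chat_memory: list[ChatMessage] = []

    # The tool is an async generator: after the refactoring it only needs the
    # question text and the chat history, and it yields the updated history
    # after each streamed event.
    async for history in run_qna_agentic_system(
        "Which applications support the payments capability?", chat_memory
    ):
        chat_memory = history

    if chat_memory:
        print(chat_memory[-1].content)  # last assistant message


if __name__ == "__main__":
    asyncio.run(main())

Dropping the gr.Request parameter and commenting out the user-identification lookup appears to be what allows this kind of standalone, MCP-style invocation without a live Gradio session.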