avfranco commited on
Commit
af36e79
·
1 Parent(s): c7bd440

ea4all-gradio-agents-mcp-hackathon-tools-refactoring-vqa

Browse files
Files changed (3) hide show
  1. ea4all/__main__.py +2 -0
  2. ea4all/ea4all_mcp.py +39 -15
  3. ea4all/utils/utils.py +11 -11
ea4all/__main__.py CHANGED
@@ -11,6 +11,8 @@ def main() -> None:
11
  ssr_mode=False,
12
  mcp_server=True,
13
  inbrowser=os.getenv("GRADIO_INBROWSER", "True").lower() in ("true", "1", "yes"),
 
 
14
  )
15
  except Exception as e:
16
  print(f"Error loading: {e}")
 
11
  ssr_mode=False,
12
  mcp_server=True,
13
  inbrowser=os.getenv("GRADIO_INBROWSER", "True").lower() in ("true", "1", "yes"),
14
+ auth=("ea4all", "ea4a@@"),
15
+ auth_message="Please login with your credentials. Under development, will be public soon.",
16
  )
17
  except Exception as e:
18
  print(f"Error loading: {e}")
ea4all/ea4all_mcp.py CHANGED
@@ -49,19 +49,19 @@ config = RunnableConfig(
49
  )
50
 
51
  #ea4all-qna-agent-conversational-with-memory
52
- async def run_qna_agentic_system(question: str, chat_memory: list) -> AsyncGenerator[list, None]:
53
  """
54
  description:
55
  Handles conversational Q&A for the Application Landscape using an agentic system.
56
  Args:
57
  question (str): The user's question or message.
58
- chat_memory (list): The conversation history.
59
  request (gr.Request): The Gradio request object for user identification.
60
  Returns:
61
  response: Response to user's architectural question.
62
  """
63
 
64
  format_response = ""
 
65
  if not question:
66
  format_response = "Hi, how are you today? To start our conversation, please chat your message!"
67
  chat_memory.append(ChatMessage(role="assistant", content=format_response))
@@ -161,14 +161,13 @@ async def run_qna_agentic_system(question: str, chat_memory: list) -> AsyncGener
161
  wait_for_all_tracers()
162
 
163
  #Trigger Solution Architecture Diagram QnA
164
- async def run_vqa_agentic_system(question: str, diagram: str, chat_memory: list, request: gr.Request) -> AsyncGenerator[list, None]:
165
  """
166
  description:
167
  Handles Visual Question Answering (VQA) for uploaded architecture diagrams.
168
  Args:
169
  question (str): User's question about the Architecture Diagram.
170
  diagram (str): Path to the diagram file.
171
- chat_memory: The conversation history.
172
  Returns:
173
  response: Response to user's question.
174
  """
@@ -187,6 +186,7 @@ async def run_vqa_agentic_system(question: str, diagram: str, chat_memory: list,
187
  print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
188
  print(f"Prompt: {message}")
189
 
 
190
  if message['files'] == []:
191
  chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
192
  yield chat_memory
@@ -312,7 +312,7 @@ async def run_reference_architecture_agentic_system(business_query: str) -> Asyn
312
  ]
313
  )
314
 
315
- async def run_pmo_agentic_system(question:str, chat_memory: list) -> AsyncGenerator[list, None]:
316
  """
317
  description:
318
  Answers questions about Project Portfolio Management and Architect Demand Management.
@@ -324,6 +324,7 @@ async def run_pmo_agentic_system(question:str, chat_memory: list) -> AsyncGenera
324
  """
325
 
326
  format_response = ""
 
327
  if not question:
328
  format_response = "Hi, how are you today? To start our conversation, please chat your message!"
329
  chat_memory.append(ChatMessage(role="assistant", content=format_response))
@@ -376,7 +377,11 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
376
  with gr.Tab(label="Architect Demand Management"):
377
  with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
378
  ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
379
- pmo_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
 
 
 
 
380
  pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
381
  with gr.Accordion("Open for question examples", open=False):
382
  pmo_examples = gr.Dropdown(e4u.get_relevant_questions(PMO_MOCK_QNA), value=None,label="Questions", interactive=True)
@@ -387,7 +392,11 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
387
  with gr.Tabs() as tabs_apm_qna:
388
  with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
389
  ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
390
- ea4all_chatbot = gr.Chatbot(label="EA4ALL your AI Architect Companion", type="messages")
 
 
 
 
391
  qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
392
  with gr.Accordion("Open for question examples", open=False):
393
  qna_examples = gr.Dropdown(e4u.get_relevant_questions(APM_MOCK_QNA), value=None,label="Questions", interactive=True)
@@ -396,11 +405,25 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
396
  apm_df = gr.Dataframe()
397
  with gr.Tab(label="Diagram Question and Answering"):
398
  gr.Markdown(value=agentic_vqa_desc)
399
- ea4all_vqa = gr.Chatbot(label="EA4ALL your AI Multimodal Architect Companion", type="messages")
400
- vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  with gr.Accordion("Open for question examples", open=False):
402
  vqa_examples = gr.Dropdown(e4u.get_vqa_examples(), value=None,label="Diagram and Questions", interactive=True)
403
- gr.ClearButton([ea4all_vqa,vqa_prompt,vqa_examples], value="Clear", size="sm", visible=True)
404
  with gr.Tab(label="Reference Architecture", id="id_refarch"):
405
  with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
406
  with gr.Tab(label='Business Requirement', id="id_dbr"):
@@ -479,7 +502,7 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
479
  #dbr_cls.click(off_dbrtext,outputs=[dbr_text, tabs_togaf, tab_diagram])
480
 
481
  #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
482
- qna_prompt.submit(run_qna_agentic_system,[qna_prompt,ea4all_chatbot],ea4all_chatbot, api_name="landscape_answering_agent")
483
  #qna_prompt.submit(lambda: "", None, [qna_prompt])
484
  #ea4all_chatbot.like(fn=get_user_feedback)
485
  #qna_examples.input(lambda value: value, qna_examples, qna_prompt)
@@ -487,14 +510,15 @@ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as e
487
  #Execute Reference Architecture
488
  dbr_run.click(run_reference_architecture_agentic_system,show_progress='full',inputs=[dbr_text],outputs=[togaf_vision,tabs_togaf,tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram], api_name="togaf_blueprint_generation")
489
 
490
- chat_msg = vqa_prompt.submit(UIUtils.add_message, [vqa_prompt, ea4all_vqa], [vqa_prompt, ea4all_vqa], show_api=False)
491
- bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, ea4all_vqa], ea4all_vqa, api_name="diagram_answering_agent")
 
492
 
493
  #ea4all_vqa.like(fn=get_user_feedback)
494
- #vqa_examples.input(lambda value: value, vqa_examples, vqa_prompt)
495
 
496
  #Invoke CrewAI PMO Agentic System
497
- pmo_prompt.submit(run_pmo_agentic_system,[pmo_prompt,pmo_chatbot],pmo_chatbot, api_name="architect_demand_agent")
498
  pmo_prompt.submit(lambda: "", None, [pmo_prompt], show_api=False)
499
  #pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)
500
 
 
49
  )
50
 
51
  #ea4all-qna-agent-conversational-with-memory
52
+ async def run_qna_agentic_system(question: str) -> AsyncGenerator[list, None]:
53
  """
54
  description:
55
  Handles conversational Q&A for the Application Landscape using an agentic system.
56
  Args:
57
  question (str): The user's question or message.
 
58
  request (gr.Request): The Gradio request object for user identification.
59
  Returns:
60
  response: Response to user's architectural question.
61
  """
62
 
63
  format_response = ""
64
+ chat_memory = []
65
  if not question:
66
  format_response = "Hi, how are you today? To start our conversation, please chat your message!"
67
  chat_memory.append(ChatMessage(role="assistant", content=format_response))
 
161
  wait_for_all_tracers()
162
 
163
  #Trigger Solution Architecture Diagram QnA
164
+ async def run_vqa_agentic_system(question: str, diagram: str, request: gr.Request) -> AsyncGenerator[list, None]:
165
  """
166
  description:
167
  Handles Visual Question Answering (VQA) for uploaded architecture diagrams.
168
  Args:
169
  question (str): User's question about the Architecture Diagram.
170
  diagram (str): Path to the diagram file.
 
171
  Returns:
172
  response: Response to user's question.
173
  """
 
186
  print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
187
  print(f"Prompt: {message}")
188
 
189
+ chat_memory = []
190
  if message['files'] == []:
191
  chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
192
  yield chat_memory
 
312
  ]
313
  )
314
 
315
+ async def run_pmo_agentic_system(question:str) -> AsyncGenerator[list, None]:
316
  """
317
  description:
318
  Answers questions about Project Portfolio Management and Architect Demand Management.
 
324
  """
325
 
326
  format_response = ""
327
+ chat_memory = []
328
  if not question:
329
  format_response = "Hi, how are you today? To start our conversation, please chat your message!"
330
  chat_memory.append(ChatMessage(role="assistant", content=format_response))
 
377
  with gr.Tab(label="Architect Demand Management"):
378
  with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
379
  ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
380
+ pmo_chatbot = gr.Chatbot(
381
+ label="EA4ALL your AI Demand Management Architect Companion", type="messages",
382
+ max_height=160,
383
+ layout="bubble",
384
+ )
385
  pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
386
  with gr.Accordion("Open for question examples", open=False):
387
  pmo_examples = gr.Dropdown(e4u.get_relevant_questions(PMO_MOCK_QNA), value=None,label="Questions", interactive=True)
 
392
  with gr.Tabs() as tabs_apm_qna:
393
  with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
394
  ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
395
+ ea4all_chatbot = gr.Chatbot(
396
+ label="EA4ALL your AI Landscape Architect Companion", type="messages",
397
+ max_height=160,
398
+ layout="bubble",
399
+ )
400
  qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
401
  with gr.Accordion("Open for question examples", open=False):
402
  qna_examples = gr.Dropdown(e4u.get_relevant_questions(APM_MOCK_QNA), value=None,label="Questions", interactive=True)
 
405
  apm_df = gr.Dataframe()
406
  with gr.Tab(label="Diagram Question and Answering"):
407
  gr.Markdown(value=agentic_vqa_desc)
408
+ ea4all_vqa = gr.Chatbot(
409
+ label="EA4ALL your AI Multimodal Architect Companion", type="messages",
410
+ max_height=160,
411
+ layout="bubble",
412
+ )
413
+ vqa_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here and upload your diagram...")
414
+ vqa_image = gr.Image(
415
+ label="Architecture Diagram",
416
+ type="filepath",
417
+ format="jpeg, png",
418
+ interactive=True,
419
+ show_download_button=False,
420
+ show_share_button=False,
421
+ visible=True,
422
+ )
423
+ #vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
424
  with gr.Accordion("Open for question examples", open=False):
425
  vqa_examples = gr.Dropdown(e4u.get_vqa_examples(), value=None,label="Diagram and Questions", interactive=True)
426
+ gr.ClearButton([ea4all_vqa,vqa_prompt,vqa_image, vqa_examples], value="Clear", size="sm", visible=True)
427
  with gr.Tab(label="Reference Architecture", id="id_refarch"):
428
  with gr.Tabs(selected="id_dbr") as tabs_reference_architecture:
429
  with gr.Tab(label='Business Requirement', id="id_dbr"):
 
502
  #dbr_cls.click(off_dbrtext,outputs=[dbr_text, tabs_togaf, tab_diagram])
503
 
504
  #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
505
+ qna_prompt.submit(run_qna_agentic_system,[qna_prompt],ea4all_chatbot, api_name="landscape_answering_agent")
506
  #qna_prompt.submit(lambda: "", None, [qna_prompt])
507
  #ea4all_chatbot.like(fn=get_user_feedback)
508
  #qna_examples.input(lambda value: value, qna_examples, qna_prompt)
 
510
  #Execute Reference Architecture
511
  dbr_run.click(run_reference_architecture_agentic_system,show_progress='full',inputs=[dbr_text],outputs=[togaf_vision,tabs_togaf,tabs_reference_architecture, architecture_runway, diagram_header, tab_diagram], api_name="togaf_blueprint_generation")
512
 
513
+ #chat_msg = vqa_prompt.submit(UIUtils.add_message, [vqa_prompt, vqa_image], [vqa_prompt, ea4all_vqa], show_api=False)
514
+ #bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, vqa_image], ea4all_vqa, api_name="diagram_answering_agent")
515
+ vqa_prompt.submit(run_vqa_agentic_system,[vqa_prompt, vqa_image], ea4all_vqa, api_name="diagram_answering_agent")
516
 
517
  #ea4all_vqa.like(fn=get_user_feedback)
518
+ vqa_examples.input(lambda value: [value['text'], value['files'][-1]], vqa_examples, outputs=[vqa_prompt, vqa_image])
519
 
520
  #Invoke CrewAI PMO Agentic System
521
+ pmo_prompt.submit(run_pmo_agentic_system,[pmo_prompt],pmo_chatbot, api_name="architect_demand_agent")
522
  pmo_prompt.submit(lambda: "", None, [pmo_prompt], show_api=False)
523
  #pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)
524
 
ea4all/utils/utils.py CHANGED
@@ -28,7 +28,7 @@ class UIUtils:
28
 
29
  #vqa_chatbot (ChatInterface -> Chatbot)
30
  @staticmethod
31
- def add_message(message, history, show_api=False):
32
  if message["text"] is not None:
33
  history.append({"role": "user", "content": message["text"]})
34
 
@@ -42,11 +42,11 @@ class UIUtils:
42
 
43
  #Upload & clear business requirement
44
  @staticmethod
45
- def load_dbr(file, show_api=False):
46
  return file.decode()
47
 
48
  #Load demo business requirements
49
- def init_dbr(show_api=False):
50
  # Open the file in read mode ('r')
51
  with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
52
  # Read the contents of the file
@@ -58,7 +58,7 @@ def init_df(show_api=False):
58
 
59
  #load core-architecture image
60
  #fix the issue with gr.Image(path) inside a docker container
61
- def get_image(_image, show_api=False):
62
  #from PIL import Image
63
  # Load an image
64
  image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)
@@ -90,12 +90,12 @@ def ea4all_confluence(show_api=False):
90
 
91
  return df
92
 
93
- def filter_page(page_list, title, show_api=False):
94
  x = page_list[page_list["title"] == title]
95
  return x.iloc[0]['page_content']
96
 
97
  #get LLM response user's feedback
98
- def get_user_feedback(evt: gr.SelectData, request:gr.Request, show_api=False):
99
  ##{evt.index} {evt.value} {evt._data['liked']}
100
  try:
101
  uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".","_")]
@@ -112,7 +112,7 @@ def get_user_feedback(evt: gr.SelectData, request:gr.Request, show_api=False):
112
  gr.Warning(f"Couldn't capture a feedback: {e}")
113
 
114
  #Set initial state of apm, llm and capture user-ip
115
- async def ea4all_agent_init(request:gr.Request, show_api=False):
116
 
117
  agentic_qna_desc="""Hi,
118
  improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
@@ -150,17 +150,17 @@ async def ea4all_agent_init(request:gr.Request, show_api=False):
150
  )
151
 
152
  #authentication
153
- def ea4all_login(username, password, show_api=False):
154
  return (username==password)
155
 
156
  #TABS & Reference Architecture look-and-feel control
157
- def off_dbrtext(show_api=False):
158
  return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)
159
 
160
- def on_dbrtext(file,show_api=False):
161
  if file:
162
  return gr.TextArea(visible=True)
163
  return gr.TextArea(visible=False)
164
 
165
- def unload_dbr(show_api=False):
166
  return gr.TextArea(visible=False)
 
28
 
29
  #vqa_chatbot (ChatInterface -> Chatbot)
30
  @staticmethod
31
+ def add_message(message, history):
32
  if message["text"] is not None:
33
  history.append({"role": "user", "content": message["text"]})
34
 
 
42
 
43
  #Upload & clear business requirement
44
  @staticmethod
45
+ def load_dbr(file):
46
  return file.decode()
47
 
48
  #Load demo business requirements
49
+ def init_dbr():
50
  # Open the file in read mode ('r')
51
  with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
52
  # Read the contents of the file
 
58
 
59
  #load core-architecture image
60
  #fix the issue with gr.Image(path) inside a docker container
61
+ def get_image(_image):
62
  #from PIL import Image
63
  # Load an image
64
  image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)
 
90
 
91
  return df
92
 
93
+ def filter_page(page_list, title):
94
  x = page_list[page_list["title"] == title]
95
  return x.iloc[0]['page_content']
96
 
97
  #get LLM response user's feedback
98
+ def get_user_feedback(evt: gr.SelectData, request:gr.Request):
99
  ##{evt.index} {evt.value} {evt._data['liked']}
100
  try:
101
  uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".","_")]
 
112
  gr.Warning(f"Couldn't capture a feedback: {e}")
113
 
114
  #Set initial state of apm, llm and capture user-ip
115
+ async def ea4all_agent_init(request:gr.Request):
116
 
117
  agentic_qna_desc="""Hi,
118
  improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
 
150
  )
151
 
152
  #authentication
153
+ def ea4all_login(username, password):
154
  return (username==password)
155
 
156
  #TABS & Reference Architecture look-and-feel control
157
+ def off_dbrtext():
158
  return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)
159
 
160
+ def on_dbrtext(file):
161
  if file:
162
  return gr.TextArea(visible=True)
163
  return gr.TextArea(visible=False)
164
 
165
+ def unload_dbr():
166
  return gr.TextArea(visible=False)