# NOTE(review): the three lines below were residue from the hosting page
# (author avatar caption, branch name, commit hash) and broke the module at
# import time; kept here as comments so no content is lost:
# avfranco's picture
# ea4all-gradio-agents-mcp-hackathon-tools-deploy-v0
# d9d2cfb
import ea4all.src.ea4all_apm.graph as e4a
import ea4all.src.ea4all_vqa.graph as e4v
import ea4all.src.ea4all_gra.graph as e4t
import ea4all.src.shared.utils as e4u
from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
from ea4all.src.shared import vectorstore
from ea4all.src.shared.configuration import BaseConfiguration
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
from ea4all.src.ea4all_indexer.graph import indexer_graph
from langchain_community.document_loaders import ConfluenceLoader
from langchain_core.messages import ChatMessage
from langsmith import Client
import uuid
import os
import time
import pandas as pd
import gradio as gr
class UIUtils:
    """Static UI helper callbacks shared by the EA4ALL Gradio interface."""

    #ea4all-about
    @staticmethod
    def ea4all_about(show_api=False):
        """Return the About page content loaded from the mock content store."""
        readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
        return readme

    #vqa_chatbot (ChatInterface -> Chatbot)
    @staticmethod
    def add_message(message, history):
        """Append the user's text and most recent upload to the chat history.

        Args:
            message: Multimodal payload dict with "text" and "files" keys.
            history: Chat history (list of role/content dicts), mutated in place.

        Returns:
            Tuple of (refreshed MultimodalTextbox, updated history).
        """
        if message["text"] is not None:
            history.append({"role": "user", "content": message["text"]})
        # Only the last uploaded file is forwarded to the chatbot.
        if message["files"]:
            history.append({"role": "user", "content": {"path": message["files"][-1]}})
        return (
            gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
            history
        )

    #Upload & clear business requirement
    @staticmethod
    def load_dbr(file):
        """Decode an uploaded business-requirement file (bytes) to str."""
        return file.decode()
#Load demo business requirements
def init_dbr():
    """Read and return the demo business-requirement document as text."""
    dbr_path = e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock)
    with open(dbr_path, 'r') as dbr_file:
        return dbr_file.read()
def init_df(show_api=False):
    """Load the sample APM catalogue into a pandas DataFrame."""
    catalogue_path = e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue)
    return vectorstore.apm_dataframe_loader(catalogue_path)
#load core-architecture image
#workaround for gr.Image(path) not resolving inside a docker container
def get_image(_image):
    """Resolve an image filename to its full path under the images store."""
    full_path = e4u._join_paths(BaseConfiguration.ea4all_images, _image)
    print(f"Full path: {full_path}")
    return full_path
def ea4all_confluence(show_api=False):
    """Fetch wiki pages from Confluence and tabulate them.

    Returns a DataFrame with one row per page and the columns
    title, source and page_content.
    """
    #Confluence API Key comes from the environment (raises KeyError if unset)
    confluence_api_key = os.environ['CONFLUENCE_API_KEY']
    loader = ConfluenceLoader(
        url="https://learnitall.atlassian.net/wiki", username="learn-it-all@outlook.com", api_key=confluence_api_key,
        space_key="~71202000cd55f36336455f8c07afa1860ba810",
        include_attachments=False, limit=10,
        keep_markdown_format=True
    )
    pages = loader.load()
    # dict-of-lists keeps the three columns present even when no pages load
    columns = {
        "title": [page.metadata["title"] for page in pages],
        "source": [page.metadata["source"] for page in pages],
        "page_content": [page.page_content for page in pages],
    }
    return pd.DataFrame(columns)
def filter_page(page_list, title):
    """Return the page_content of the first row of *page_list* whose title matches *title*."""
    matching_rows = page_list.loc[page_list["title"] == title]
    return matching_rows["page_content"].iloc[0]
#get LLM response user's feedback
def get_user_feedback(evt: gr.SelectData, request: gr.Request):
    """Record a like/dislike on an LLM response as LangSmith feedback.

    Looks up the run_id stored in an env var keyed on the user's identity,
    then posts a 1.0 (liked) / 0 score to LangSmith. Any failure is surfaced
    as a UI warning rather than raised, so feedback capture never breaks the UI.
    """
    ##{evt.index} {evt.value} {evt._data['liked']}
    try:
        uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".","_")]
        gr.Info("Thanks for your feedback - run_id: " + uuid_str)
        run_id = uuid.UUID(uuid_str)
        client = Client()
        client.create_feedback(
            run_id,
            key="feedback-key",
            # NOTE(review): evt._data is a private Gradio attribute — confirm
            # the 'liked' key is stable across gradio versions.
            score=1.0 if evt._data['liked'] else 0,
            comment=str(evt.value)
        )
    except Exception as e:
        gr.Warning(f"Couldn't capture a feedback: {e}")
#Set initial state of apm, llm and capture user-ip
async def ea4all_agent_init(request:gr.Request):
    """Initialise per-session UI state.

    Builds the chatbot description from the APM catalogue columns, seeds the
    three chat histories, loads the landscape and portfolio dataframes and the
    demo business-requirement text.

    Returns:
        Tuple wired to the Gradio outputs: (description text, three seeded
        chat histories, landscape DataFrame, portfolio DataFrame, demo DBR text).
    """
    agentic_qna_desc="""Hi,
improve effieciency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
As an Enterprise Architect Agentic Companion I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """
    #capture user IP address
    #ea4all_user = e4u.get_user_identification(request)
    gr.Info("Thank you for using the EA4ALL Agentic MCP Server!")
    # Set initial landscape vectorstore
    #await indexer_graph.ainvoke(input={"docs":[]}, config=config)
    #set chatbot description w/ user apm columns
    df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
    columns_string = ', '.join(df.columns)
    apm_columns = agentic_qna_desc + columns_string
    # prompt is built but not returned below — presumably a leftover; verify
    prompt=ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')
    # NOTE(review): page_list is never used in the return value, yet this call
    # still performs the Confluence network fetch — confirm the preload is intentional
    page_list = ea4all_confluence()
    #Load gradio.dataframe with Portfolio sample dataset
    pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")
    dbr_text = init_dbr()
    return (
        apm_columns,
        [{"role": "system", "content": "You are a helpful assistant."}],
        [{"role": "system", "content": "You are a helpful assistant."}],
        [{"role": "system", "content": "You are a helpful assistant."}],
        gr.DataFrame(value=df),
        gr.DataFrame(value=pmo_df),
        dbr_text
    )
#authentication
def ea4all_login(username, password):
    """Gradio auth hook: accept the login when username equals password.

    NOTE(review): demo-grade check only — not real authentication.
    """
    return username == password
#TABS & Reference Architecture look-and-feel control
def off_dbrtext():
    """Hide the DBR text area and its two companion tabs."""
    hidden_area = gr.TextArea(visible=False)
    hidden_tab_a = gr.Tab(visible=False)
    hidden_tab_b = gr.Tab(visible=False)
    return hidden_area, hidden_tab_a, hidden_tab_b
def on_dbrtext(file):
    """Show the DBR text area only when an uploaded file is present."""
    return gr.TextArea(visible=bool(file))
def unload_dbr():
    """Hide the DBR text area after the uploaded file is cleared."""
    return gr.TextArea(visible=False)
def get_question_diagram_from_example(value) -> list:
    """
    Extract the question text and optional diagram path from a selected example.

    Args:
        value: Example payload — a dict with a "text" key and, optionally,
            a "files" list of uploaded paths. May be falsy (no selection).

    Returns:
        [text, last_file] when a file is present, [text, None] otherwise,
        and ["", None] when value is falsy.
    """
    if not value:
        return ["", None]
    # Guard against a present-but-empty "files" list, which previously
    # raised IndexError on value['files'][-1].
    files = value.get('files')
    return [value['text'], files[-1] if files else None]
def on_image_update(image: gr.Image) -> gr.Image:
    """
    Callback to handle image updates: the component is visible only
    when an image is set.
    """
    # `True if x else False` collapsed to the boolean expression itself.
    return gr.Image(visible=image is not None)