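"""UI helper utilities and Gradio callbacks for the EA4ALL agentic application."""
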
import ea4all.src.ea4all_apm.graph as e4a
import ea4all.src.ea4all_vqa.graph as e4v
import ea4all.src.ea4all_gra.graph as e4t
import ea4all.src.shared.utils as e4u
from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
from ea4all.src.shared import vectorstore
from ea4all.src.shared.configuration import BaseConfiguration
from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
from ea4all.src.ea4all_indexer.graph import indexer_graph

from langchain_community.document_loaders import ConfluenceLoader
from langchain_core.messages import ChatMessage
from langsmith import Client

import uuid
import os
import time
import pandas as pd
import gradio as gr

class UIUtils:
    #ea4all-about
    @staticmethod
    def ea4all_about(show_api=False):
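        """Return the EA4ALL 'About' content loaded via the shared mock-content loader."""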
        readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
        return readme

    #vqa_chatbot (ChatInterface -> Chatbot)
    @staticmethod
    def add_message(message, history):
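        """Append the user's text and/or uploaded file to the chat history and return the updated textbox and history."""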
        if message["text"] is not None:
            history.append({"role": "user", "content": message["text"]})
        
        if len(message['files']) > 0:
            history.append({"role": "user", "content": {"path": message['files'][-1]}})
        
        return (
            gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
            history
        )

    #Upload & clear business requirement
    @staticmethod
    def load_dbr(file):
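        """Decode the uploaded business requirement file bytes into text."""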
        return file.decode()

#Load demo business requirements
def init_dbr():
    # Open the file in read mode ('r')
    with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
        # Read the contents of the file
        contents = file.read()
    return contents

def init_df(show_api=False):
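    """Load the APM catalogue sample dataset into a pandas DataFrame."""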
    return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))

#load core-architecture image
#workaround for an issue with gr.Image(path) inside a Docker container
def get_image(_image):
    # Resolve the full path to the image within the configured images folder
    image = e4u._join_paths(BaseConfiguration.ea4all_images, _image)
    print(f"Full path: {image}")

    return image

def ea4all_confluence(show_api=False):
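    """Load pages from the configured Confluence space and return a DataFrame with title, source and page_content columns."""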

    #Confluence API Key
    confluence_api_key = os.environ['CONFLUENCE_API_KEY']

    loader = ConfluenceLoader(
        url="https://learnitall.atlassian.net/wiki", username="learn-it-all@outlook.com", api_key=confluence_api_key,
        space_key="~71202000cd55f36336455f8c07afa1860ba810", 
        include_attachments=False, limit=10,
        keep_markdown_format=True
    )

    documents = loader.load()

    data = {
        "title": [doc.metadata["title"] for doc in documents],
        "source": [doc.metadata["source"] for doc in documents],
        "page_content": [doc.page_content for doc in documents],
    }

    df = pd.DataFrame(data)

    return df

def filter_page(page_list, title):
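    """Return the page_content of the first page whose title matches the given title."""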
    x = page_list[page_list["title"] == title]
    return x.iloc[0]['page_content']

#capture user's feedback on the LLM response and log it to LangSmith
def get_user_feedback(evt: gr.SelectData, request: gr.Request):
    ##{evt.index} {evt.value} {evt._data['liked']}
    try:
        # Retrieve the run_id stored for this user (environment variable keyed by user identification)
        uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".", "_")]
        gr.Info("Thanks for your feedback - run_id: " + uuid_str)
        run_id = uuid.UUID(uuid_str)
        client = Client()
        client.create_feedback(
            run_id,
            key="feedback-key",
            score=1.0 if evt._data['liked'] else 0.0,
            comment=str(evt.value)
        )
    except Exception as e:
        gr.Warning(f"Couldn't capture feedback: {e}")

#Set initial state of apm, llm and capture user-ip
async def ea4all_agent_init(request:gr.Request):
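    """Set the initial app state: chatbot description built from the APM dataset columns, system prompts, sample dataframes and the demo business requirement text."""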

    agentic_qna_desc="""Hi,
        improve efficiency, knowledge sharing, and get valuable insights from your IT landscape using natural language.
        As an Enterprise Architect Agentic Companion I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """

    #capture user IP address
    #ea4all_user = e4u.get_user_identification(request)
    gr.Info("Thank you for using the EA4ALL Agentic MCP Server!")

    # Set initial landscape vectorstore

    #await indexer_graph.ainvoke(input={"docs":[]}, config=config)

    #set chatbot description w/ user apm columns
    df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
    columns_string = ', '.join(df.columns)
    apm_columns = agentic_qna_desc + columns_string

    prompt=ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')

    page_list = ea4all_confluence()

    #Load gradio.dataframe with Portfolio sample dataset
    pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")

    dbr_text = init_dbr()

    return (
        apm_columns, 
        [{"role": "system", "content": "You are a helpful assistant."}],
        [{"role": "system", "content": "You are a helpful assistant."}],
        [{"role": "system", "content": "You are a helpful assistant."}],
        gr.DataFrame(value=df),
        gr.DataFrame(value=pmo_df),
        dbr_text
    )

#authentication
def ea4all_login(username, password):
    return username == password

#TABS & Reference Architecture look-and-feel control
def off_dbrtext():
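    """Hide the business requirement text area and the two associated tabs."""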
    return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)

def on_dbrtext(file):
    if file:
        return gr.TextArea(visible=True)
    return gr.TextArea(visible=False)

def unload_dbr():
    return gr.TextArea(visible=False)

def get_question_diagram_from_example(value) -> list:
    """
    Extracts the question and diagram from the selected example.
    """
    if value:
        return [value['text'], value['files'][-1]] if 'files' in value else [value['text'], None]
    return ["", None]

def on_image_update(image: gr.Image) -> gr.Image:
    """
    Callback to handle image updates.
    """
    visible = image is not None

    return gr.Image(visible=visible)